Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!

diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
new file mode 100644
index 0000000..d0713c7d
--- /dev/null
+++ b/arch/m68k/Kconfig
@@ -0,0 +1,670 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+config M68K
+	bool
+	default y
+
+config MMU
+	bool
+	default y
+
+config UID16
+	bool
+	default y
+
+config RWSEM_GENERIC_SPINLOCK
+	bool
+	default y
+
+config RWSEM_XCHGADD_ALGORITHM
+	bool
+
+config GENERIC_CALIBRATE_DELAY
+	bool
+	default y
+
+mainmenu "Linux/68k Kernel Configuration"
+
+source "init/Kconfig"
+
+menu "Platform dependent setup"
+
+config EISA
+	bool
+	---help---
+	  The Extended Industry Standard Architecture (EISA) bus was
+	  developed as an open alternative to the IBM MicroChannel bus.
+
+	  The EISA bus provided some of the features of the IBM MicroChannel
+	  bus while maintaining backward compatibility with cards made for
+	  the older ISA bus.  The EISA bus saw limited use between 1988 and
+	  1995 when it was made obsolete by the PCI bus.
+
+	  Say Y here if you are building a kernel for an EISA-based machine.
+
+	  Otherwise, say N.
+
+config MCA
+	bool
+	help
+	  MicroChannel Architecture is found in some IBM PS/2 machines and
+	  laptops.  It is a bus system similar to PCI or ISA. See
+	  <file:Documentation/mca.txt> (and especially the web page given
+	  there) before attempting to build an MCA bus kernel.
+
+config PCMCIA
+	tristate
+	---help---
+	  Say Y here if you want to attach PCMCIA- or PC-cards to your Linux
+	  computer.  These are credit-card size devices such as network cards,
+	  modems or hard drives often used with laptop computers.  There are
+	  actually two varieties of these cards: the older 16 bit PCMCIA cards
+	  and the newer 32 bit CardBus cards.  If you want to use CardBus
+	  cards, you need to say Y here and also to "CardBus support" below.
+
+	  To use your PC-cards, you will need supporting software from David
+	  Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
+	  for location).  Please also read the PCMCIA-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as modules, choose M here: the
+	  modules will be called pcmcia_core and ds.
+
+config SUN3
+	bool "Sun3 support"
+	select M68020
+	select MMU_SUN3 if MMU
+	help
+	  This option enables support for the Sun 3 series of workstations
+	  (3/50, 3/60, 3/1xx, 3/2xx systems). Enabling this option requires
+	  that all other hardware types be disabled, as Sun 3 kernels
+	  are incompatible with all other m68k targets (including Sun 3x!).
+
+	  If you don't want to compile a kernel exclusively for a Sun 3, say N.
+
+config AMIGA
+	bool "Amiga support"
+	depends on !MMU_SUN3
+	help
+	  This option enables support for the Amiga series of computers. If
+	  you plan to use this kernel on an Amiga, say Y here and browse the
+	  material available in <file:Documentation/m68k>; otherwise say N.
+
+config ATARI
+	bool "Atari support"
+	depends on !MMU_SUN3
+	help
+	  This option enables support for the 68000-based Atari series of
+	  computers (including the TT, Falcon and Medusa). If you plan to use
+	  this kernel on an Atari, say Y here and browse the material
+	  available in <file:Documentation/m68k>; otherwise say N.
+
+config HADES
+	bool "Hades support"
+	depends on ATARI && BROKEN
+	help
+	  This option enables support for the Hades Atari clone. If you plan
+	  to use this kernel on a Hades, say Y here; otherwise say N.
+
+config PCI
+	bool
+	depends on HADES
+	default y
+	help
+	  Find out whether you have a PCI motherboard. PCI is the name of a
+	  bus system, i.e. the way the CPU talks to the other stuff inside
+	  your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
+	  VESA. If you have PCI, say Y, otherwise N.
+
+	  The PCI-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>, contains valuable
+	  information about which PCI hardware does work under Linux and which
+	  doesn't.
+
+config MAC
+	bool "Macintosh support"
+	depends on !MMU_SUN3
+	help
+	  This option enables support for the Apple Macintosh series of
+	  computers (yes, there is experimental support now, at least for part
+	  of the series).
+
+	  Say N unless you're willing to code the remaining necessary support.
+	  ;)
+
+config NUBUS
+	bool
+	depends on MAC
+	default y
+
+config M68K_L2_CACHE
+	bool
+	depends on MAC
+	default y
+
+config APOLLO
+	bool "Apollo support"
+	depends on !MMU_SUN3
+	help
+	  Say Y here if you want to run Linux on an MC680x0-based Apollo
+	  Domain workstation such as the DN3500.
+
+config VME
+	bool "VME (Motorola and BVM) support"
+	depends on !MMU_SUN3
+	help
+	  Say Y here if you want to build a kernel for a 680x0 based VME
+	  board.  Boards currently supported include Motorola boards MVME147,
+	  MVME162, MVME166, MVME167, MVME172, and MVME177.  BVME4000 and
+	  BVME6000 boards from BVM Ltd are also supported.
+
+config MVME147
+	bool "MVME147 support"
+	depends on VME
+	help
+	  Say Y to include support for early Motorola VME boards.  This will
+	  build a kernel which can run on MVME147 single-board computers.  If
+	  you select this option you will have to select the appropriate
+	  drivers for SCSI, Ethernet and serial ports later on.
+
+config MVME16x
+	bool "MVME162, 166 and 167 support"
+	depends on VME
+	help
+	  Say Y to include support for Motorola VME boards.  This will build a
+	  kernel which can run on MVME162, MVME166, MVME167, MVME172, and
+	  MVME177 boards.  If you select this option you will have to select
+	  the appropriate drivers for SCSI, Ethernet and serial ports later
+	  on.
+
+config BVME6000
+	bool "BVME4000 and BVME6000 support"
+	depends on VME
+	help
+	  Say Y to include support for VME boards from BVM Ltd.  This will
+	  build a kernel which can run on BVME4000 and BVME6000 boards.  If
+	  you select this option you will have to select the appropriate
+	  drivers for SCSI, Ethernet and serial ports later on.
+
+config HP300
+	bool "HP9000/300 and HP9000/400 support"
+	depends on !MMU_SUN3
+	help
+	  This option enables support for the HP9000/300 and HP9000/400 series
+	  of workstations. Support for these machines is still somewhat
+	  experimental. If you plan to try to use the kernel on such a machine
+	  say Y here.
+	  Everybody else says N.
+
+config DIO
+	bool "DIO bus support"
+	depends on HP300
+	default y
+	help
+	  Say Y here to enable support for the "DIO" expansion bus used in
+	  HP300 machines. If you are using such a system you almost certainly
+	  want this.
+
+config SUN3X
+	bool "Sun3x support"
+	depends on !MMU_SUN3
+	select M68030
+	help
+	  This option enables support for the Sun 3x series of workstations.
+	  Be warned that this support is very experimental.
+	  Note that Sun 3x kernels are not compatible with Sun 3 hardware.
+	  General Linux information on the Sun 3x series (now discontinued)
+	  is at <http://www.angelfire.com/ca2/tech68k/sun3.html>.
+
+	  If you don't want to compile a kernel for a Sun 3x, say N.
+
+config Q40
+	bool "Q40/Q60 support"
+	depends on !MMU_SUN3
+	help
+	  The Q40 is a Motorola 68040-based successor to the Sinclair QL
+	  manufactured in Germany.  There is an official Q40 home page at
+	  <http://www.q40.de/>.  This option enables support for the Q40 and
+	  Q60. Select your CPU below.  For 68LC060 don't forget to enable FPU
+	  emulation.
+
+comment "Processor type"
+
+config M68020
+	bool "68020 support"
+	help
+	  If you anticipate running this kernel on a computer with a MC68020
+	  processor, say Y. Otherwise, say N. Note that the 68020 requires a
+	  68851 MMU (Memory Management Unit) to run Linux/m68k, except on the
+	  Sun 3, which provides its own version.
+
+config M68030
+	bool "68030 support"
+	depends on !MMU_SUN3
+	help
+	  If you anticipate running this kernel on a computer with a MC68030
+	  processor, say Y. Otherwise, say N. Note that a MC68EC030 will not
+	  work, as it does not include an MMU (Memory Management Unit).
+
+config M68040
+	bool "68040 support"
+	depends on !MMU_SUN3
+	help
+	  If you anticipate running this kernel on a computer with a MC68LC040
+	  or MC68040 processor, say Y. Otherwise, say N. Note that an
+	  MC68EC040 will not work, as it does not include an MMU (Memory
+	  Management Unit).
+
+config M68060
+	bool "68060 support"
+	depends on !MMU_SUN3
+	help
+	  If you anticipate running this kernel on a computer with a MC68060
+	  processor, say Y. Otherwise, say N.
+
+config MMU_MOTOROLA
+	bool
+	depends on MMU && !MMU_SUN3
+	default y
+
+config MMU_SUN3
+	bool
+
+config M68KFPU_EMU
+	bool "Math emulation support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  At some point in the future, this will cause floating-point math
+	  instructions to be emulated by the kernel on machines that lack a
+	  floating-point math coprocessor.  Thrill-seekers and chronically
+	  sleep-deprived psychotic hacker types can say Y now, everyone else
+	  should probably wait a while.
+
+config M68KFPU_EMU_EXTRAPREC
+	bool "Math emulation extra precision"
+	depends on M68KFPU_EMU
+	help
+	  The FPU normally uses a few extra bits during calculations for
+	  correct rounding; the emulator can (often) do the same, but this
+	  extra calculation can cost quite some time, so you can disable
+	  it here. The emulator will then "only" calculate with a 64-bit
+	  mantissa and round slightly incorrectly, which is more than
+	  enough for normal usage.
+
+config M68KFPU_EMU_ONLY
+	bool "Math emulation only kernel"
+	depends on M68KFPU_EMU
+	help
+	  This option prevents any floating-point instructions from being
+	  compiled into the kernel. The kernel then no longer saves any
+	  floating-point context during task switches, so such a kernel
+	  will only be usable on machines without a floating-point math
+	  coprocessor. This makes the kernel a bit faster, as no tests need
+	  to be executed to decide whether a floating-point instruction in
+	  the kernel should be executed or not.
+
+config ADVANCED
+	bool "Advanced configuration options"
+	---help---
+	  This gives you access to some advanced options for the CPU. The
+	  defaults should be fine for most users, but these options may make
+	  it possible for you to improve performance somewhat if you know what
+	  you are doing.
+
+	  Note that the answer to this question won't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about these options.
+
+	  Most users should say N to this question.
+
+config RMW_INSNS
+	bool "Use read-modify-write instructions"
+	depends on ADVANCED
+	---help---
+	  This allows the use of certain instructions that work with indivisible
+	  read-modify-write bus cycles. While this is faster than the
+	  workaround of disabling interrupts, it can conflict with DMA
+	  ( = direct memory access) on many Amiga systems, and it is also said
+	  to destabilize other machines. It is very likely that this will
+	  cause serious problems on any Amiga or Atari Medusa if set. The only
+	  configuration where it should work is 68030-based Ataris, where it
+	  apparently improves performance. But you've been warned! Unless you
+	  really know what you are doing, say N. Try Y only if you're quite
+	  adventurous.
+
+config SINGLE_MEMORY_CHUNK
+	bool "Use one physical chunk of memory only"
+	depends on ADVANCED && !SUN3
+	help
+	  Ignore all but the first contiguous chunk of physical memory for VM
+	  purposes.  This will save a few bytes of kernel size and may speed up
+	  some operations.  Say N if not sure.
+
+config 060_WRITETHROUGH
+	bool "Use write-through caching for 68060 supervisor accesses"
+	depends on ADVANCED && M68060
+	---help---
+	  The 68060 generally uses copyback caching of recently accessed data.
+	  Copyback caching means that memory writes will be held in an on-chip
+	  cache and only written back to memory some time later.  Saying Y
+	  here will force supervisor (kernel) accesses to use writethrough
+	  caching.  Writethrough caching means that data is written to memory
+	  straight away, so that cache and memory data always agree.
+	  Writethrough caching is less efficient, but is needed for some
+	  drivers on 68060 based systems where the 68060 bus snooping signal
+	  is hardwired on.  The 53c710 SCSI driver is known to suffer from
+	  this problem.
+
+endmenu
+
+menu "General setup"
+
+source "fs/Kconfig.binfmt"
+
+config ZORRO
+	bool "Amiga Zorro (AutoConfig) bus support"
+	depends on AMIGA
+	help
+	  This enables support for the Zorro bus in the Amiga. If you have
+	  expansion cards in your Amiga that conform to the Amiga
+	  AutoConfig(tm) specification, say Y, otherwise N. Note that even
+	  expansion cards that do not fit in the Zorro slots but fit in e.g.
+	  the CPU slot may fall in this category, so you have to say Y to let
+	  Linux use these.
+
+config AMIGA_PCMCIA
+	bool "Amiga 1200/600 PCMCIA support (EXPERIMENTAL)"
+	depends on AMIGA && EXPERIMENTAL
+	help
+	  Include support in the kernel for PCMCIA on the Amiga 1200 and Amiga
+	  600. If you intend to use PCMCIA cards, say Y; otherwise say N.
+
+config STRAM_SWAP
+	bool "Support for ST-RAM as swap space"
+	depends on ATARI && BROKEN
+	---help---
+	  Some Atari 68k machines (including the 520STF and 1020STE) divide
+	  their addressable memory into ST and TT sections.  The TT section
+	  (up to 512MB) is the main memory; the ST section (up to 4MB) is
+	  accessible to the built-in graphics board, runs slower, and is
+	  present mainly for backward compatibility with older machines.
+
+	  This enables support for using (parts of) ST-RAM as swap space,
+	  instead of as normal system memory. This can enhance system
+	  performance if you have lots of alternate RAM (compared to the size
+	  of ST-RAM), because executable code will always reside in the faster
+	  memory. ST-RAM will remain as ultra-fast swap space. On the other
+	  hand, it allows much improved dynamic allocation of ST-RAM buffers
+	  for device driver modules (e.g. floppy, ACSI, SLM printer, DMA
+	  sound). The probability that such allocations fail at module load
+	  time is drastically reduced.
+
+config STRAM_PROC
+	bool "ST-RAM statistics in /proc"
+	depends on ATARI
+	help
+	  Say Y here to report ST-RAM usage statistics in /proc/stram.  See
+	  the help for CONFIG_STRAM_SWAP for discussion of ST-RAM and its
+	  uses.
+
+config HEARTBEAT
+	bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || MAC || Q40
+	default y if !AMIGA && !APOLLO && !ATARI && !MAC && !Q40 && HP300
+	help
+	  Use the power-on LED on your machine as a load meter.  The exact
+	  behavior is platform-dependent, but normally the flash frequency is
+	  a hyperbolic function of the 5-minute load average.
+
+# We have a dedicated heartbeat LED. :-)
+config PROC_HARDWARE
+	bool "/proc/hardware support"
+	help
+	  Say Y here to support the /proc/hardware file, which gives you
+	  access to information about the machine you're running on,
+	  including the model, CPU, MMU, clock speed, BogoMIPS rating,
+	  and memory size.
+
+config ISA
+	bool
+	depends on Q40 || AMIGA_PCMCIA || GG2
+	default y
+	help
+	  Find out whether you have ISA slots on your motherboard.  ISA is the
+	  name of a bus system, i.e. the way the CPU talks to the other stuff
+	  inside your box.  Other bus systems are PCI, EISA, MicroChannel
+	  (MCA) or VESA.  ISA is an older system, now being displaced by PCI;
+	  newer boards don't support it.  If you have ISA, say Y, otherwise N.
+
+config GENERIC_ISA_DMA
+	bool
+	depends on Q40 || AMIGA_PCMCIA || GG2
+	default y
+
+source "drivers/pci/Kconfig"
+
+source "drivers/zorro/Kconfig"
+
+endmenu
+
+source "drivers/Kconfig"
+
+menu "Character devices"
+
+config ATARI_MFPSER
+	tristate "Atari MFP serial support"
+	depends on ATARI
+	---help---
+	  If you want to use the MFP serial ports ("Modem1", "Serial1") under
+	  Linux, say Y. The driver supports all kinds of MFP serial ports
+	  equally and automatically detects whether Serial1 is available.
+
+	  To compile this driver as a module, choose M here.
+
+	  Note for Falcon users: You also have an MFP port, it's just not
+	  wired to the outside... But you could use the port under Linux.
+
+config ATARI_SCC
+	tristate "Atari SCC serial support"
+	depends on ATARI
+	---help---
+	  If you have serial ports based on a Zilog SCC chip (Modem2, Serial2,
+	  LAN) and would like to use them under Linux, say Y. All built-in SCCs
+	  are supported (TT, MegaSTE, Falcon), and also the ST-ESCC. If you have
+	  two connectors for channel A (Serial2 and LAN), they are visible as
+	  two separate devices.
+
+	  To compile this driver as a module, choose M here.
+
+config ATARI_SCC_DMA
+	bool "Atari SCC serial DMA support"
+	depends on ATARI_SCC
+	help
+	  This enables DMA support for receiving data on channel A of the SCC.
+	  If you have a TT you may say Y here and read
+	  drivers/char/atari_SCC.README. All other users should say N here,
+	  because only the TT has SCC-DMA, even if your machine keeps claiming
+	  so at boot time.
+
+config ATARI_MIDI
+	tristate "Atari MIDI serial support"
+	depends on ATARI
+	help
+	  If you want to use your Atari's MIDI port in Linux, say Y.
+
+	  To compile this driver as a module, choose M here.
+
+config ATARI_DSP56K
+	tristate "Atari DSP56k support (EXPERIMENTAL)"
+	depends on ATARI && EXPERIMENTAL
+	help
+	  If you want to be able to use the DSP56001 in Falcons, say Y. This
+	  driver is still experimental, and if you don't know what it is, or
+	  if you don't have this processor, just say N.
+
+	  To compile this driver as a module, choose M here.
+
+config AMIGA_BUILTIN_SERIAL
+	tristate "Amiga builtin serial support"
+	depends on AMIGA
+	help
+	  If you want to use your Amiga's built-in serial port in Linux,
+	  answer Y.
+
+	  To compile this driver as a module, choose M here.
+
+config WHIPPET_SERIAL
+	tristate "Hisoft Whippet PCMCIA serial support"
+	depends on AMIGA_PCMCIA
+	help
+	  HiSoft has a web page at <http://www.hisoft.co.uk/>, but there
+	  is no listing for the Whippet in their Amiga section.
+
+config MULTIFACE_III_TTY
+	tristate "Multiface Card III serial support"
+	depends on AMIGA
+	help
+	  If you want to use a Multiface III card's serial port in Linux,
+	  answer Y.
+
+	  To compile this driver as a module, choose M here.
+
+config GVPIOEXT
+	tristate "GVP IO-Extender support"
+	depends on PARPORT=n && ZORRO
+	help
+	  If you want to use a GVP IO-Extender serial card in Linux, say Y.
+	  Otherwise, say N.
+
+config GVPIOEXT_LP
+	tristate "GVP IO-Extender parallel printer support"
+	depends on GVPIOEXT
+	help
+	  Say Y to enable driving a printer from the parallel port on your
+	  GVP IO-Extender card, N otherwise.
+
+config GVPIOEXT_PLIP
+	tristate "GVP IO-Extender PLIP support"
+	depends on GVPIOEXT
+	help
+	  Say Y to enable doing IP over the parallel port on your GVP
+	  IO-Extender card, N otherwise.
+
+config MAC_SCC
+	tristate "Macintosh serial support"
+	depends on MAC
+
+config MAC_HID
+	bool
+	depends on INPUT_ADBHID
+	default y
+
+config MAC_ADBKEYCODES
+	bool "Support for ADB raw keycodes"
+	depends on INPUT_ADBHID
+	help
+	  This provides support for sending raw ADB keycodes to console
+	  devices.  This is the default up to 2.4.0, but in the future this may be
+	  phased out in favor of generic Linux keycodes.  If you say Y here,
+	  you can dynamically switch via the
+	  /proc/sys/dev/mac_hid/keyboard_sends_linux_keycodes
+	  sysctl and with the "keyboard_sends_linux_keycodes=" kernel
+	  argument.
+
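+	  For example (illustrative only; the exact interface may differ), a
+	  command along the lines of
+	    echo 1 > /proc/sys/dev/mac_hid/keyboard_sends_linux_keycodes
+	  would switch to Linux keycodes at run time.
+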
+	  If unsure, say Y here.
+
+config ADB_KEYBOARD
+	bool "Support for ADB keyboard (old driver)"
+	depends on MAC && !INPUT_ADBHID
+	help
+	  This option allows you to use an ADB keyboard attached to your
+	  machine. Note that this disables any other (i.e. PS/2) keyboard
+	  support, even if your machine is physically capable of using both at
+	  the same time.
+
+	  If you use an ADB keyboard (4 pin connector), say Y here.
+	  If you use a PS/2 keyboard (6 pin connector), say N here.
+
+config HPDCA
+	tristate "HP DCA serial support"
+	depends on DIO && SERIAL_8250
+	help
+	  If you want to use the internal "DCA" serial ports on an HP300
+	  machine, say Y here.
+
+config HPAPCI
+	tristate "HP APCI serial support"
+	depends on HP300 && SERIAL_8250 && EXPERIMENTAL
+	help
+	  If you want to use the internal "APCI" serial ports on an HP400
+	  machine, say Y here.
+
+config MVME147_SCC
+	bool "SCC support for MVME147 serial ports"
+	depends on MVME147
+	help
+	  This is the driver for the serial ports on the Motorola MVME147
+	  boards.  Everyone using one of these boards should say Y here.
+
+config SERIAL167
+	bool "CD2401 support for MVME166/7 serial ports"
+	depends on MVME16x && BROKEN
+	help
+	  This is the driver for the serial ports on the Motorola MVME166,
+	  167, and 172 boards.  Everyone using one of these boards should say
+	  Y here.
+
+config MVME162_SCC
+	bool "SCC support for MVME162 serial ports"
+	depends on MVME16x
+	help
+	  This is the driver for the serial ports on the Motorola MVME162 and
+	  172 boards.  Everyone using one of these boards should say Y here.
+
+config BVME6000_SCC
+	bool "SCC support for BVME6000 serial ports"
+	depends on BVME6000
+	help
+	  This is the driver for the serial ports on the BVME4000 and BVME6000
+	  boards from BVM Ltd.  Everyone using one of these boards should say
+	  Y here.
+
+config DN_SERIAL
+	bool "Support for DN serial port (dummy)"
+	depends on APOLLO
+
+config SERIAL_CONSOLE
+	bool "Support for serial port console"
+	depends on (AMIGA || ATARI || MAC || SUN3 || SUN3X || VME || APOLLO) && (ATARI_MFPSER=y || ATARI_SCC=y || ATARI_MIDI=y || MAC_SCC=y || AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y || SERIAL=y || MVME147_SCC || SERIAL167 || MVME162_SCC || BVME6000_SCC || DN_SERIAL)
+	---help---
+	  If you say Y here, it will be possible to use a serial port as the
+	  system console (the system console is the device which receives all
+	  kernel messages and warnings and which allows logins in single user
+	  mode). This could be useful if some terminal or printer is connected
+	  to that serial port.
+
+	  Even if you say Y here, the currently visible virtual console
+	  (/dev/tty0) will still be used as the system console by default, but
+	  you can alter that using a kernel command line option such as
+	  "console=ttyS1". (Try "man bootparam" or see the documentation of
+	  your boot loader (lilo or loadlin) about how to pass options to the
+	  kernel at boot time.)
+
+	  If you don't have a VGA card installed and you say Y here, the
+	  kernel will automatically use the first serial line, /dev/ttyS0, as
+	  system console.
+
+	  If unsure, say N.
+
+endmenu
+
+source "fs/Kconfig"
+
+source "arch/m68k/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/arch/m68k/Kconfig.debug b/arch/m68k/Kconfig.debug
new file mode 100644
index 0000000..f53b6d5
--- /dev/null
+++ b/arch/m68k/Kconfig.debug
@@ -0,0 +1,5 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+endmenu
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
new file mode 100644
index 0000000..466e740
--- /dev/null
+++ b/arch/m68k/Makefile
@@ -0,0 +1,126 @@
+#
+# m68k/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to add actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Hamish Macdonald
+#
+
+# test for cross compiling
+COMPILE_ARCH = $(shell uname -m)
+
+# override top level makefile
+AS += -m68020
+LDFLAGS := -m m68kelf
+ifneq ($(COMPILE_ARCH),$(ARCH))
+	# prefix for cross-compiling binaries
+	CROSS_COMPILE = m68k-linux-
+endif
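+
+# Illustrative example only (not part of the build rules): with the cross
+# prefix above, a cross build on a non-m68k host would typically be invoked
+# as something like
+#   make ARCH=m68k CROSS_COMPILE=m68k-linux- vmlinux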
+
+ifdef CONFIG_SUN3
+LDFLAGS_vmlinux = -N
+endif
+
+CHECKFLAGS += -D__mc68000__
+
+# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
+CFLAGS += -pipe -fno-strength-reduce -ffixed-a2
+
+# enable processor switch if compiled only for a single cpu
+ifndef CONFIG_M68020
+ifndef CONFIG_M68030
+
+ifndef CONFIG_M68060
+CFLAGS := $(CFLAGS) -m68040
+endif
+
+ifndef CONFIG_M68040
+CFLAGS := $(CFLAGS) -m68060
+endif
+
+endif
+endif
+
+ifdef CONFIG_KGDB
+# If configured for kgdb support, include debugging infos and keep the
+# frame pointer
+CFLAGS := $(subst -fomit-frame-pointer,,$(CFLAGS)) -g
+endif
+
+ifndef CONFIG_SUN3
+head-y := arch/m68k/kernel/head.o
+else
+head-y := arch/m68k/kernel/sun3-head.o
+endif
+
+core-y				+= arch/m68k/kernel/	arch/m68k/mm/
+libs-y				+= arch/m68k/lib/
+
+core-$(CONFIG_Q40)		+= arch/m68k/q40/
+core-$(CONFIG_AMIGA)		+= arch/m68k/amiga/
+core-$(CONFIG_ATARI)		+= arch/m68k/atari/
+core-$(CONFIG_MAC)		+= arch/m68k/mac/
+core-$(CONFIG_HP300)		+= arch/m68k/hp300/
+core-$(CONFIG_APOLLO)		+= arch/m68k/apollo/
+core-$(CONFIG_MVME147)		+= arch/m68k/mvme147/
+core-$(CONFIG_MVME16x)		+= arch/m68k/mvme16x/
+core-$(CONFIG_BVME6000)		+= arch/m68k/bvme6000/
+core-$(CONFIG_SUN3X)		+= arch/m68k/sun3x/	arch/m68k/sun3/
+core-$(CONFIG_SUN3)		+= arch/m68k/sun3/	arch/m68k/sun3/prom/
+core-$(CONFIG_M68040)		+= arch/m68k/fpsp040/
+core-$(CONFIG_M68060)		+= arch/m68k/ifpsp060/
+core-$(CONFIG_M68KFPU_EMU)	+= arch/m68k/math-emu/
+
+all:	zImage
+
+lilo:	vmlinux
+	if [ -f $(INSTALL_PATH)/vmlinux ]; then mv -f $(INSTALL_PATH)/vmlinux $(INSTALL_PATH)/vmlinux.old; fi
+	if [ -f $(INSTALL_PATH)/System.map ]; then mv -f $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
+	cat vmlinux > $(INSTALL_PATH)/vmlinux
+	cp System.map $(INSTALL_PATH)/System.map
+	if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
+
+zImage compressed: vmlinux.gz
+
+vmlinux.gz: vmlinux
+
+ifndef CONFIG_KGDB
+	cp vmlinux vmlinux.tmp
+	$(STRIP) vmlinux.tmp
+	gzip -9c vmlinux.tmp >vmlinux.gz
+	rm vmlinux.tmp
+else
+	gzip -9c vmlinux >vmlinux.gz
+endif
+
+bzImage: vmlinux.bz2
+
+vmlinux.bz2: vmlinux
+
+ifndef CONFIG_KGDB
+	cp vmlinux vmlinux.tmp
+	$(STRIP) vmlinux.tmp
+	bzip2 -1c vmlinux.tmp >vmlinux.bz2
+	rm vmlinux.tmp
+else
+	bzip2 -1c vmlinux >vmlinux.bz2
+endif
+
+prepare: include/asm-$(ARCH)/offsets.h
+CLEAN_FILES += include/asm-$(ARCH)/offsets.h
+
+arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
+				   include/config/MARKER
+
+include/asm-$(ARCH)/offsets.h: arch/$(ARCH)/kernel/asm-offsets.s
+	$(call filechk,gen-asm-offsets)
+
+archclean:
+	rm -f vmlinux.gz vmlinux.bz2
diff --git a/arch/m68k/amiga/Makefile b/arch/m68k/amiga/Makefile
new file mode 100644
index 0000000..8b41565
--- /dev/null
+++ b/arch/m68k/amiga/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Linux arch/m68k/amiga source directory
+#
+
+obj-y		:= config.o amiints.o cia.o chipram.o amisound.o amiga_ksyms.o
+
+obj-$(CONFIG_AMIGA_PCMCIA)	+= pcmcia.o
diff --git a/arch/m68k/amiga/amiga_ksyms.c b/arch/m68k/amiga/amiga_ksyms.c
new file mode 100644
index 0000000..b7bd84c
--- /dev/null
+++ b/arch/m68k/amiga/amiga_ksyms.c
@@ -0,0 +1,36 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/ptrace.h>
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+#include <asm/amipcmcia.h>
+
+extern volatile u_short amiga_audio_min_period;
+extern u_short amiga_audio_period;
+
+/*
+ * Add things here when you find the need for it.
+ */
+EXPORT_SYMBOL(amiga_model);
+EXPORT_SYMBOL(amiga_chipset);
+EXPORT_SYMBOL(amiga_hw_present);
+EXPORT_SYMBOL(amiga_eclock);
+EXPORT_SYMBOL(amiga_colorclock);
+EXPORT_SYMBOL(amiga_chip_alloc);
+EXPORT_SYMBOL(amiga_chip_free);
+EXPORT_SYMBOL(amiga_chip_avail);
+EXPORT_SYMBOL(amiga_chip_size);
+EXPORT_SYMBOL(amiga_audio_period);
+EXPORT_SYMBOL(amiga_audio_min_period);
+EXPORT_SYMBOL(amiga_do_irq);
+EXPORT_SYMBOL(amiga_do_irq_list);
+
+#ifdef CONFIG_AMIGA_PCMCIA
+  EXPORT_SYMBOL(pcmcia_reset);
+  EXPORT_SYMBOL(pcmcia_copy_tuple);
+  EXPORT_SYMBOL(pcmcia_program_voltage);
+  EXPORT_SYMBOL(pcmcia_access_speed);
+  EXPORT_SYMBOL(pcmcia_write_enable);
+  EXPORT_SYMBOL(pcmcia_write_disable);
+#endif
diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c
new file mode 100644
index 0000000..d9edf2d
--- /dev/null
+++ b/arch/m68k/amiga/amiints.c
@@ -0,0 +1,520 @@
+/*
+ * linux/arch/m68k/amiga/amiints.c -- Amiga Linux interrupt handling code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * 11/07/96: rewritten interrupt handling; irq lists now exist only for
+ *           those sources where it makes sense (VERTB/PORTS/EXTER) and you
+ *           must be careful that dev_id for these sources is unique, since
+ *           it is the only way to distinguish between different handlers in
+ *           free_irq. irq lists also have different irq flags:
+ *           - IRQ_FLG_FAST: handler is inserted at top of list (after other
+ *                           fast handlers)
+ *           - IRQ_FLG_SLOW: handler is inserted at bottom of list and, before
+ *                           it is executed, the irq level is set to the
+ *                           previous one; handlers don't need to be reentrant,
+ *                           and if reentrance occurs, slow handlers are simply
+ *                           called again.
+ *           The whole interrupt handling for CIAs is moved to cia.c
+ *           /Roman Zippel
+ *
+ * 07/08/99: revamp of the interrupt handling - we now have two types of
+ *           interrupts, normal and fast handlers, fast handlers being
+ *           marked with SA_INTERRUPT and running with all other interrupts
+ *           disabled. Normal interrupts disable their own source but
+ *           run with all other interrupt sources enabled.
+ *           PORTS and EXTER interrupts are always shared even if the
+ *           drivers do not explicitly mark this when calling
+ *           request_irq which they really should do.
+ *           This is similar to the way interrupts are handled on all
+ *           other architectures and makes a ton of sense besides
+ *           having the advantage of making it easier to share
+ *           drivers.
+ *           /Jes
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/seq_file.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+#include <asm/amipcmcia.h>
+
+extern int cia_request_irq(struct ciabase *base,int irq,
+                           irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                           unsigned long flags, const char *devname, void *dev_id);
+extern void cia_free_irq(struct ciabase *base, unsigned int irq, void *dev_id);
+extern void cia_init_IRQ(struct ciabase *base);
+extern int cia_get_irq_list(struct ciabase *base, struct seq_file *p);
+
+/* irq node variables for amiga interrupt sources */
+static irq_node_t *ami_irq_list[AMI_STD_IRQS];
+
+static unsigned short amiga_intena_vals[AMI_STD_IRQS] = {
+	[IRQ_AMIGA_VERTB]	= IF_VERTB,
+	[IRQ_AMIGA_COPPER]	= IF_COPER,
+	[IRQ_AMIGA_AUD0]	= IF_AUD0,
+	[IRQ_AMIGA_AUD1]	= IF_AUD1,
+	[IRQ_AMIGA_AUD2]	= IF_AUD2,
+	[IRQ_AMIGA_AUD3]	= IF_AUD3,
+	[IRQ_AMIGA_BLIT]	= IF_BLIT,
+	[IRQ_AMIGA_DSKSYN]	= IF_DSKSYN,
+	[IRQ_AMIGA_DSKBLK]	= IF_DSKBLK,
+	[IRQ_AMIGA_RBF]		= IF_RBF,
+	[IRQ_AMIGA_TBE]		= IF_TBE,
+	[IRQ_AMIGA_SOFT]	= IF_SOFT,
+	[IRQ_AMIGA_PORTS]	= IF_PORTS,
+	[IRQ_AMIGA_EXTER]	= IF_EXTER
+};
+static const unsigned char ami_servers[AMI_STD_IRQS] = {
+	[IRQ_AMIGA_VERTB]	= 1,
+	[IRQ_AMIGA_PORTS]	= 1,
+	[IRQ_AMIGA_EXTER]	= 1
+};
+
+static short ami_ablecount[AMI_IRQS];
+
+static irqreturn_t ami_badint(int irq, void *dev_id, struct pt_regs *fp)
+{
+	num_spurious += 1;
+	return IRQ_NONE;
+}
+
+/*
+ * void amiga_init_IRQ(void)
+ *
+ * Parameters:	None
+ *
+ * Returns:	Nothing
+ *
+ * This function should be called during kernel startup to initialize
+ * the amiga IRQ handling routines.
+ */
+
+void __init amiga_init_IRQ(void)
+{
+	int i;
+
+	/* initialize handlers */
+	for (i = 0; i < AMI_STD_IRQS; i++) {
+		if (ami_servers[i]) {
+			ami_irq_list[i] = NULL;
+		} else {
+			ami_irq_list[i] = new_irq_node();
+			ami_irq_list[i]->handler = ami_badint;
+			ami_irq_list[i]->flags   = 0;
+			ami_irq_list[i]->dev_id  = NULL;
+			ami_irq_list[i]->devname = NULL;
+			ami_irq_list[i]->next    = NULL;
+		}
+	}
+	for (i = 0; i < AMI_IRQS; i++)
+		ami_ablecount[i] = 0;
+
+	/* turn off PCMCIA interrupts */
+	if (AMIGAHW_PRESENT(PCMCIA))
+		gayle.inten = GAYLE_IRQ_IDE;
+
+	/* turn off all interrupts and enable the master interrupt bit */
+	custom.intena = 0x7fff;
+	custom.intreq = 0x7fff;
+	custom.intena = IF_SETCLR | IF_INTEN;
+
+	cia_init_IRQ(&ciaa_base);
+	cia_init_IRQ(&ciab_base);
+}
+
+static inline int amiga_insert_irq(irq_node_t **list, irq_node_t *node)
+{
+	unsigned long flags;
+	irq_node_t *cur;
+
+	if (!node->dev_id)
+		printk("%s: Warning: dev_id of %s is zero\n",
+		       __FUNCTION__, node->devname);
+
+	local_irq_save(flags);
+
+	cur = *list;
+
+	if (node->flags & SA_INTERRUPT) {
+		if (node->flags & SA_SHIRQ)
+			return -EBUSY;
+		/*
+		 * There should never be more than one
+		 */
+		while (cur && cur->flags & SA_INTERRUPT) {
+			list = &cur->next;
+			cur = cur->next;
+		}
+	} else {
+		while (cur) {
+			list = &cur->next;
+			cur = cur->next;
+		}
+	}
+
+	node->next = cur;
+	*list = node;
+
+	local_irq_restore(flags);
+	return 0;
+}
+
+static inline void amiga_delete_irq(irq_node_t **list, void *dev_id)
+{
+	unsigned long flags;
+	irq_node_t *node;
+
+	local_irq_save(flags);
+
+	for (node = *list; node; list = &node->next, node = *list) {
+		if (node->dev_id == dev_id) {
+			*list = node->next;
+			/* Mark it as free. */
+			node->handler = NULL;
+			local_irq_restore(flags);
+			return;
+		}
+	}
+	local_irq_restore(flags);
+	printk ("%s: tried to remove invalid irq\n", __FUNCTION__);
+}
+
+/*
+ * amiga_request_irq : add an interrupt service routine for a particular
+ *                     machine specific interrupt source.
+ *                     If the addition was successful, it returns 0.
+ */
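+
+/*
+ * Illustrative sketch of the intended use from a (hypothetical) driver;
+ * the names my_vbl_handler and my_dev are examples only:
+ *
+ *	error = amiga_request_irq(IRQ_AMIGA_VERTB, my_vbl_handler, 0,
+ *	                          "mydriver", my_dev);
+ *	if (error)
+ *		return error;
+ *	...
+ *	amiga_free_irq(IRQ_AMIGA_VERTB, my_dev);
+ */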
+
+int amiga_request_irq(unsigned int irq,
+		      irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                      unsigned long flags, const char *devname, void *dev_id)
+{
+	irq_node_t *node;
+	int error = 0;
+
+	if (irq >= AMI_IRQS) {
+		printk ("%s: Unknown IRQ %d from %s\n", __FUNCTION__,
+			irq, devname);
+		return -ENXIO;
+	}
+
+	if (irq >= IRQ_AMIGA_AUTO)
+		return cpu_request_irq(irq - IRQ_AMIGA_AUTO, handler,
+		                       flags, devname, dev_id);
+
+	if (irq >= IRQ_AMIGA_CIAB)
+		return cia_request_irq(&ciab_base, irq - IRQ_AMIGA_CIAB,
+		                       handler, flags, devname, dev_id);
+
+	if (irq >= IRQ_AMIGA_CIAA)
+		return cia_request_irq(&ciaa_base, irq - IRQ_AMIGA_CIAA,
+		                       handler, flags, devname, dev_id);
+
+	/*
+	 * IRQ_AMIGA_PORTS & IRQ_AMIGA_EXTER default to shared;
+	 * we could add a check here for the SA_SHIRQ flag, but all drivers
+	 * should be aware of sharing anyway.
+	 */
+	if (ami_servers[irq]) {
+		if (!(node = new_irq_node()))
+			return -ENOMEM;
+		node->handler = handler;
+		node->flags   = flags;
+		node->dev_id  = dev_id;
+		node->devname = devname;
+		node->next    = NULL;
+		error = amiga_insert_irq(&ami_irq_list[irq], node);
+	} else {
+		ami_irq_list[irq]->handler = handler;
+		ami_irq_list[irq]->flags   = flags;
+		ami_irq_list[irq]->dev_id  = dev_id;
+		ami_irq_list[irq]->devname = devname;
+	}
+
+	/* enable the interrupt */
+	if (irq < IRQ_AMIGA_PORTS && !ami_ablecount[irq])
+		custom.intena = IF_SETCLR | amiga_intena_vals[irq];
+
+	return error;
+}
+
+void amiga_free_irq(unsigned int irq, void *dev_id)
+{
+	if (irq >= AMI_IRQS) {
+		printk ("%s: Unknown IRQ %d\n", __FUNCTION__, irq);
+		return;
+	}
+
+	if (irq >= IRQ_AMIGA_AUTO)
+		cpu_free_irq(irq - IRQ_AMIGA_AUTO, dev_id);
+
+	if (irq >= IRQ_AMIGA_CIAB) {
+		cia_free_irq(&ciab_base, irq - IRQ_AMIGA_CIAB, dev_id);
+		return;
+	}
+
+	if (irq >= IRQ_AMIGA_CIAA) {
+		cia_free_irq(&ciaa_base, irq - IRQ_AMIGA_CIAA, dev_id);
+		return;
+	}
+
+	if (ami_servers[irq]) {
+		amiga_delete_irq(&ami_irq_list[irq], dev_id);
+		/* if server list empty, disable the interrupt */
+		if (!ami_irq_list[irq] && irq < IRQ_AMIGA_PORTS)
+			custom.intena = amiga_intena_vals[irq];
+	} else {
+		if (ami_irq_list[irq]->dev_id != dev_id)
+			printk("%s: removing probably wrong IRQ %d from %s\n",
+			       __FUNCTION__, irq, ami_irq_list[irq]->devname);
+		ami_irq_list[irq]->handler = ami_badint;
+		ami_irq_list[irq]->flags   = 0;
+		ami_irq_list[irq]->dev_id  = NULL;
+		ami_irq_list[irq]->devname = NULL;
+		custom.intena = amiga_intena_vals[irq];
+	}
+}
+
+/*
+ * Enable/disable a particular machine specific interrupt source.
+ * Note that this may affect other interrupts in case of a shared interrupt.
+ * These functions should only be used for a _very_ short time to change some
+ * internal data that must not be changed by the interrupt at the same time.
+ * ami_(enable|disable)_irq calls may also be nested.
+ */
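+
+/*
+ * Sketch of the nesting behaviour (hypothetical caller, for illustration):
+ *
+ *	amiga_disable_irq(IRQ_AMIGA_RBF);	count 0 -> 1, source masked
+ *	amiga_disable_irq(IRQ_AMIGA_RBF);	count 1 -> 2, stays masked
+ *	amiga_enable_irq(IRQ_AMIGA_RBF);	count 2 -> 1, still masked
+ *	amiga_enable_irq(IRQ_AMIGA_RBF);	count 1 -> 0, source unmasked
+ */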
+
+void amiga_enable_irq(unsigned int irq)
+{
+	if (irq >= AMI_IRQS) {
+		printk("%s: Unknown IRQ %d\n", __FUNCTION__, irq);
+		return;
+	}
+
+	if (--ami_ablecount[irq])
+		return;
+
+	/* No action for auto-vector interrupts */
+	if (irq >= IRQ_AMIGA_AUTO){
+		printk("%s: Trying to enable auto-vector IRQ %i\n",
+		       __FUNCTION__, irq - IRQ_AMIGA_AUTO);
+		return;
+	}
+
+	if (irq >= IRQ_AMIGA_CIAB) {
+		cia_set_irq(&ciab_base, (1 << (irq - IRQ_AMIGA_CIAB)));
+		cia_able_irq(&ciab_base, CIA_ICR_SETCLR |
+		             (1 << (irq - IRQ_AMIGA_CIAB)));
+		return;
+	}
+
+	if (irq >= IRQ_AMIGA_CIAA) {
+		cia_set_irq(&ciaa_base, (1 << (irq - IRQ_AMIGA_CIAA)));
+		cia_able_irq(&ciaa_base, CIA_ICR_SETCLR |
+		             (1 << (irq - IRQ_AMIGA_CIAA)));
+		return;
+	}
+
+	/* enable the interrupt */
+	custom.intena = IF_SETCLR | amiga_intena_vals[irq];
+}
+
+void amiga_disable_irq(unsigned int irq)
+{
+	if (irq >= AMI_IRQS) {
+		printk("%s: Unknown IRQ %d\n", __FUNCTION__, irq);
+		return;
+	}
+
+	if (ami_ablecount[irq]++)
+		return;
+
+	/* No action for auto-vector interrupts */
+	if (irq >= IRQ_AMIGA_AUTO) {
+		printk("%s: Trying to disable auto-vector IRQ %i\n",
+		       __FUNCTION__, irq - IRQ_AMIGA_AUTO);
+		return;
+	}
+
+	if (irq >= IRQ_AMIGA_CIAB) {
+		cia_able_irq(&ciab_base, 1 << (irq - IRQ_AMIGA_CIAB));
+		return;
+	}
+
+	if (irq >= IRQ_AMIGA_CIAA) {
+		cia_able_irq(&ciaa_base, 1 << (irq - IRQ_AMIGA_CIAA));
+		return;
+	}
+
+	/* disable the interrupt */
+	custom.intena = amiga_intena_vals[irq];
+}
+
+inline void amiga_do_irq(int irq, struct pt_regs *fp)
+{
+	kstat_cpu(0).irqs[SYS_IRQS + irq]++;
+	ami_irq_list[irq]->handler(irq, ami_irq_list[irq]->dev_id, fp);
+}
+
+void amiga_do_irq_list(int irq, struct pt_regs *fp)
+{
+	irq_node_t *node;
+
+	kstat_cpu(0).irqs[SYS_IRQS + irq]++;
+
+	custom.intreq = amiga_intena_vals[irq];
+
+	for (node = ami_irq_list[irq]; node; node = node->next)
+		node->handler(irq, node->dev_id, fp);
+}
+
+/*
+ * The builtin Amiga hardware interrupt handlers.
+ */
+
+static irqreturn_t ami_int1(int irq, void *dev_id, struct pt_regs *fp)
+{
+	unsigned short ints = custom.intreqr & custom.intenar;
+
+	/* if serial transmit buffer empty, interrupt */
+	if (ints & IF_TBE) {
+		custom.intreq = IF_TBE;
+		amiga_do_irq(IRQ_AMIGA_TBE, fp);
+	}
+
+	/* if floppy disk transfer complete, interrupt */
+	if (ints & IF_DSKBLK) {
+		custom.intreq = IF_DSKBLK;
+		amiga_do_irq(IRQ_AMIGA_DSKBLK, fp);
+	}
+
+	/* if software interrupt set, interrupt */
+	if (ints & IF_SOFT) {
+		custom.intreq = IF_SOFT;
+		amiga_do_irq(IRQ_AMIGA_SOFT, fp);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ami_int3(int irq, void *dev_id, struct pt_regs *fp)
+{
+	unsigned short ints = custom.intreqr & custom.intenar;
+
+	/* if a blitter interrupt */
+	if (ints & IF_BLIT) {
+		custom.intreq = IF_BLIT;
+		amiga_do_irq(IRQ_AMIGA_BLIT, fp);
+	}
+
+	/* if a copper interrupt */
+	if (ints & IF_COPER) {
+		custom.intreq = IF_COPER;
+		amiga_do_irq(IRQ_AMIGA_COPPER, fp);
+	}
+
+	/* if a vertical blank interrupt */
+	if (ints & IF_VERTB)
+		amiga_do_irq_list(IRQ_AMIGA_VERTB, fp);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ami_int4(int irq, void *dev_id, struct pt_regs *fp)
+{
+	unsigned short ints = custom.intreqr & custom.intenar;
+
+	/* if audio 0 interrupt */
+	if (ints & IF_AUD0) {
+		custom.intreq = IF_AUD0;
+		amiga_do_irq(IRQ_AMIGA_AUD0, fp);
+	}
+
+	/* if audio 1 interrupt */
+	if (ints & IF_AUD1) {
+		custom.intreq = IF_AUD1;
+		amiga_do_irq(IRQ_AMIGA_AUD1, fp);
+	}
+
+	/* if audio 2 interrupt */
+	if (ints & IF_AUD2) {
+		custom.intreq = IF_AUD2;
+		amiga_do_irq(IRQ_AMIGA_AUD2, fp);
+	}
+
+	/* if audio 3 interrupt */
+	if (ints & IF_AUD3) {
+		custom.intreq = IF_AUD3;
+		amiga_do_irq(IRQ_AMIGA_AUD3, fp);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ami_int5(int irq, void *dev_id, struct pt_regs *fp)
+{
+	unsigned short ints = custom.intreqr & custom.intenar;
+
+	/* if serial receive buffer full interrupt */
+	if (ints & IF_RBF) {
+		/* acknowledge of IF_RBF must be done by the serial interrupt */
+		amiga_do_irq(IRQ_AMIGA_RBF, fp);
+	}
+
+	/* if a disk sync interrupt */
+	if (ints & IF_DSKSYN) {
+		custom.intreq = IF_DSKSYN;
+		amiga_do_irq(IRQ_AMIGA_DSKSYN, fp);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ami_int7(int irq, void *dev_id, struct pt_regs *fp)
+{
+	panic ("level 7 interrupt received\n");
+}
+
+irqreturn_t (*amiga_default_handler[SYS_IRQS])(int, void *, struct pt_regs *) = {
+	[0] = ami_badint,
+	[1] = ami_int1,
+	[2] = ami_badint,
+	[3] = ami_int3,
+	[4] = ami_int4,
+	[5] = ami_int5,
+	[6] = ami_badint,
+	[7] = ami_int7
+};
+
+int show_amiga_interrupts(struct seq_file *p, void *v)
+{
+	int i;
+	irq_node_t *node;
+
+	for (i = 0; i < AMI_STD_IRQS; i++) {
+		if (!(node = ami_irq_list[i]))
+			continue;
+		seq_printf(p, "ami  %2d: %10u ", i,
+		               kstat_cpu(0).irqs[SYS_IRQS + i]);
+		do {
+			if (node->flags & SA_INTERRUPT)
+				seq_puts(p, "F ");
+			else
+				seq_puts(p, "  ");
+			seq_printf(p, "%s\n", node->devname);
+			if ((node = node->next))
+				seq_puts(p, "                    ");
+		} while (node);
+	}
+
+	cia_get_irq_list(&ciaa_base, p);
+	cia_get_irq_list(&ciab_base, p);
+	return 0;
+}
diff --git a/arch/m68k/amiga/amisound.c b/arch/m68k/amiga/amisound.c
new file mode 100644
index 0000000..cb5d9363
--- /dev/null
+++ b/arch/m68k/amiga/amisound.c
@@ -0,0 +1,113 @@
+/*
+ * linux/arch/m68k/amiga/amisound.c
+ *
+ * amiga sound driver for Linux/m68k
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/config.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <asm/system.h>
+#include <asm/amigahw.h>
+
+static unsigned short *snd_data;
+static const signed char sine_data[] = {
+	0,  39,  75,  103,  121,  127,  121,  103,  75,  39,
+	0, -39, -75, -103, -121, -127, -121, -103, -75, -39
+};
+#define DATA_SIZE	(sizeof(sine_data)/sizeof(sine_data[0]))
+
+    /*
+     * The minimum period for audio may be modified by the frame buffer
+     * device since it depends on htotal (for OCS/ECS/AGA)
+     */
+
+volatile unsigned short amiga_audio_min_period = 124; /* Default for pre-OCS */
+
+#define MAX_PERIOD	(65535)
+
+
+    /*
+     *	Current period (set by dmasound.c)
+     */
+
+unsigned short amiga_audio_period = MAX_PERIOD;
+
+static unsigned long clock_constant;
+
+void __init amiga_init_sound(void)
+{
+	static struct resource beep_res = { .name = "Beep" };
+
+	snd_data = amiga_chip_alloc_res(sizeof(sine_data), &beep_res);
+	if (!snd_data) {
+		printk (KERN_CRIT "amiga init_sound: failed to allocate chipmem\n");
+		return;
+	}
+	memcpy (snd_data, sine_data, sizeof(sine_data));
+
+	/* setup divisor */
+	clock_constant = (amiga_colorclock+DATA_SIZE/2)/DATA_SIZE;
+
+	/* without amifb, turn video off and enable high quality sound */
+#ifndef CONFIG_FB_AMIGA
+	amifb_video_off();
+#endif
+}
+
+static void nosound( unsigned long ignored );
+static struct timer_list sound_timer = TIMER_INITIALIZER(nosound, 0, 0);
+
+void amiga_mksound( unsigned int hz, unsigned int ticks )
+{
+	unsigned long flags;
+
+	if (!snd_data)
+		return;
+
+	local_irq_save(flags);
+	del_timer( &sound_timer );
+
+	if (hz > 20 && hz < 32767) {
+		unsigned long period = (clock_constant / hz);
+
+		if (period < amiga_audio_min_period)
+			period = amiga_audio_min_period;
+		if (period > MAX_PERIOD)
+			period = MAX_PERIOD;
+
+		/* setup pointer to data, period, length and volume */
+		custom.aud[2].audlc = snd_data;
+		custom.aud[2].audlen = sizeof(sine_data)/2;
+		custom.aud[2].audper = (unsigned short)period;
+		custom.aud[2].audvol = 32; /* 50% of maxvol */
+
+		if (ticks) {
+			sound_timer.expires = jiffies + ticks;
+			add_timer( &sound_timer );
+		}
+
+		/* turn on DMA for audio channel 2 */
+		custom.dmacon = DMAF_SETCLR | DMAF_AUD2;
+
+	} else
+		nosound( 0 );
+
+	local_irq_restore(flags);
+}
+
+
+static void nosound( unsigned long ignored )
+{
+	/* turn off DMA for audio channel 2 */
+	custom.dmacon = DMAF_AUD2;
+	/* restore period to previous value after beeping */
+	custom.aud[2].audper = amiga_audio_period;
+}
diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c
new file mode 100644
index 0000000..558d5fd
--- /dev/null
+++ b/arch/m68k/amiga/chipram.c
@@ -0,0 +1,133 @@
+/*
+**  linux/amiga/chipram.c
+**
+**      Modified 03-May-94 by Geert Uytterhoeven <geert@linux-m68k.org>
+**          - 64-bit aligned allocations for full AGA compatibility
+**
+**	Rewritten 15/9/2000 by Geert to use resource management
+*/
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include <asm/amigahw.h>
+
+unsigned long amiga_chip_size;
+
+static struct resource chipram_res = {
+    .name = "Chip RAM", .start = CHIP_PHYSADDR
+};
+static unsigned long chipavail;
+
+
+void __init amiga_chip_init(void)
+{
+    if (!AMIGAHW_PRESENT(CHIP_RAM))
+	return;
+
+#ifndef CONFIG_APUS_FAST_EXCEPT
+    /*
+     *  Remove the first 4 pages where PPC exception handlers will be located
+     */
+    amiga_chip_size -= 0x4000;
+#endif
+    chipram_res.end = amiga_chip_size-1;
+    request_resource(&iomem_resource, &chipram_res);
+
+    chipavail = amiga_chip_size;
+}
+
+
+void *amiga_chip_alloc(unsigned long size, const char *name)
+{
+    struct resource *res;
+
+    /* round up */
+    size = PAGE_ALIGN(size);
+
+#ifdef DEBUG
+    printk("amiga_chip_alloc: allocate %ld bytes\n", size);
+#endif
+    res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+    if (!res)
+	return NULL;
+    memset(res, 0, sizeof(struct resource));
+    res->name = name;
+
+    if (allocate_resource(&chipram_res, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0) {
+	kfree(res);
+	return NULL;
+    }
+    chipavail -= size;
+#ifdef DEBUG
+    printk("amiga_chip_alloc: returning %lx\n", res->start);
+#endif
+    return (void *)ZTWO_VADDR(res->start);
+}
+
+
+    /*
+     *  Warning:
+     *  amiga_chip_alloc_res is meant only for drivers that need to allocate
+     *  Chip RAM before kmalloc() is functional. As a consequence, those
+     *  drivers must not free that Chip RAM afterwards.
+     */
+
+void * __init amiga_chip_alloc_res(unsigned long size, struct resource *res)
+{
+    unsigned long start;
+
+    /* round up */
+    size = PAGE_ALIGN(size);
+    /* dmesg into chipmem prefers memory at the safe end */
+    start = CHIP_PHYSADDR + chipavail - size;
+
+#ifdef DEBUG
+    printk("amiga_chip_alloc_res: allocate %ld bytes\n", size);
+#endif
+    if (allocate_resource(&chipram_res, res, size, start, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0) {
+	printk("amiga_chip_alloc_res: first alloc failed!\n");
+	if (allocate_resource(&chipram_res, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0)
+	    return NULL;
+    }
+    chipavail -= size;
+#ifdef DEBUG
+    printk("amiga_chip_alloc_res: returning %lx\n", res->start);
+#endif
+    return (void *)ZTWO_VADDR(res->start);
+}
+
+void amiga_chip_free(void *ptr)
+{
+    unsigned long start = ZTWO_PADDR(ptr);
+    struct resource **p, *res;
+    unsigned long size;
+
+    for (p = &chipram_res.child; (res = *p); p = &res->sibling) {
+	if (res->start != start)
+	    continue;
+	*p = res->sibling;
+	size = res->end-start+1;	/* resource limits are inclusive */
+#ifdef DEBUG
+	printk("amiga_chip_free: free %ld bytes at %p\n", size, ptr);
+#endif
+	chipavail += size;
+	kfree(res);
+	return;
+    }
+    printk("amiga_chip_free: trying to free nonexistent region at %p\n", ptr);
+}
+
+
+unsigned long amiga_chip_avail(void)
+{
+#ifdef DEBUG
+	printk("amiga_chip_avail : %ld bytes\n", chipavail);
+#endif
+	return chipavail;
+}
diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
new file mode 100644
index 0000000..7d55682
--- /dev/null
+++ b/arch/m68k/amiga/cia.c
@@ -0,0 +1,180 @@
+/*
+ *  linux/arch/m68k/amiga/cia.c - CIA support
+ *
+ *  Copyright (C) 1996 Roman Zippel
+ *
+ *  The concept of some functions is based on the original Amiga OS functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+
+#include <asm/irq.h>
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+
+struct ciabase {
+	volatile struct CIA *cia;
+	unsigned char icr_mask, icr_data;
+	unsigned short int_mask;
+	int handler_irq, cia_irq, server_irq;
+	char *name;
+	irq_handler_t irq_list[CIA_IRQS];
+} ciaa_base = {
+	.cia		= &ciaa,
+	.int_mask	= IF_PORTS,
+	.handler_irq	= IRQ_AMIGA_AUTO_2,
+	.cia_irq	= IRQ_AMIGA_CIAA,
+	.server_irq	= IRQ_AMIGA_PORTS,
+	.name		= "CIAA handler"
+}, ciab_base = {
+	.cia		= &ciab,
+	.int_mask	= IF_EXTER,
+	.handler_irq	= IRQ_AMIGA_AUTO_6,
+	.cia_irq	= IRQ_AMIGA_CIAB,
+	.server_irq	= IRQ_AMIGA_EXTER,
+	.name		= "CIAB handler"
+};
+
+/*
+ *  Cause or clear CIA interrupts, return old interrupt status.
+ */
+
+unsigned char cia_set_irq(struct ciabase *base, unsigned char mask)
+{
+	unsigned char old;
+
+	old = (base->icr_data |= base->cia->icr);
+	if (mask & CIA_ICR_SETCLR)
+		base->icr_data |= mask;
+	else
+		base->icr_data &= ~mask;
+	if (base->icr_data & base->icr_mask)
+		custom.intreq = IF_SETCLR | base->int_mask;
+	return old & base->icr_mask;
+}
+
+/*
+ *  Enable or disable CIA interrupts, return old interrupt mask;
+ *  interrupts will only be enabled if a handler exists.
+ */
+
+unsigned char cia_able_irq(struct ciabase *base, unsigned char mask)
+{
+	unsigned char old, tmp;
+	int i;
+
+	old = base->icr_mask;
+	base->icr_data |= base->cia->icr;
+	base->cia->icr = mask;
+	if (mask & CIA_ICR_SETCLR)
+		base->icr_mask |= mask;
+	else
+		base->icr_mask &= ~mask;
+	base->icr_mask &= CIA_ICR_ALL;
+	for (i = 0, tmp = 1; i < CIA_IRQS; i++, tmp <<= 1) {
+		if ((tmp & base->icr_mask) && !base->irq_list[i].handler) {
+			base->icr_mask &= ~tmp;
+			base->cia->icr = tmp;
+		}
+	}
+	if (base->icr_data & base->icr_mask)
+		custom.intreq = IF_SETCLR | base->int_mask;
+	return old;
+}
+
+int cia_request_irq(struct ciabase *base, unsigned int irq,
+                    irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                    unsigned long flags, const char *devname, void *dev_id)
+{
+	unsigned char mask;
+
+	base->irq_list[irq].handler = handler;
+	base->irq_list[irq].flags   = flags;
+	base->irq_list[irq].dev_id  = dev_id;
+	base->irq_list[irq].devname = devname;
+
+	/* enable the interrupt */
+	mask = 1 << irq;
+	cia_set_irq(base, mask);
+	cia_able_irq(base, CIA_ICR_SETCLR | mask);
+	return 0;
+}
+
+void cia_free_irq(struct ciabase *base, unsigned int irq, void *dev_id)
+{
+	if (base->irq_list[irq].dev_id != dev_id)
+		printk("%s: removing probably wrong IRQ %i from %s\n",
+		       __FUNCTION__, base->cia_irq + irq,
+		       base->irq_list[irq].devname);
+
+	base->irq_list[irq].handler = NULL;
+	base->irq_list[irq].flags   = 0;
+
+	cia_able_irq(base, 1 << irq);
+}
+
+static irqreturn_t cia_handler(int irq, void *dev_id, struct pt_regs *fp)
+{
+	struct ciabase *base = (struct ciabase *)dev_id;
+	int mach_irq, i;
+	unsigned char ints;
+
+	mach_irq = base->cia_irq;
+	irq = SYS_IRQS + mach_irq;
+	ints = cia_set_irq(base, CIA_ICR_ALL);
+	custom.intreq = base->int_mask;
+	for (i = 0; i < CIA_IRQS; i++, irq++, mach_irq++) {
+		if (ints & 1) {
+			kstat_cpu(0).irqs[irq]++;
+			base->irq_list[i].handler(mach_irq, base->irq_list[i].dev_id, fp);
+		}
+		ints >>= 1;
+	}
+	amiga_do_irq_list(base->server_irq, fp);
+	return IRQ_HANDLED;
+}
+
+void __init cia_init_IRQ(struct ciabase *base)
+{
+	int i;
+
+	/* init isr handlers */
+	for (i = 0; i < CIA_IRQS; i++) {
+		base->irq_list[i].handler = NULL;
+		base->irq_list[i].flags   = 0;
+	}
+
+	/* clear any pending interrupt and turn off all interrupts */
+	cia_set_irq(base, CIA_ICR_ALL);
+	cia_able_irq(base, CIA_ICR_ALL);
+
+	/* install CIA handler */
+	request_irq(base->handler_irq, cia_handler, 0, base->name, base);
+
+	custom.intena = IF_SETCLR | base->int_mask;
+}
+
+int cia_get_irq_list(struct ciabase *base, struct seq_file *p)
+{
+	int i, j;
+
+	j = base->cia_irq;
+	for (i = 0; i < CIA_IRQS; i++) {
+		seq_printf(p, "cia  %2d: %10d ", j + i,
+			       kstat_cpu(0).irqs[SYS_IRQS + j + i]);
+		seq_puts(p, "  ");
+		seq_printf(p, "%s\n", base->irq_list[i].devname);
+	}
+	return 0;
+}
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
new file mode 100644
index 0000000..4775e18
--- /dev/null
+++ b/arch/m68k/amiga/config.c
@@ -0,0 +1,1007 @@
+/*
+ *  linux/arch/m68k/amiga/config.c
+ *
+ *  Copyright (C) 1993 Hamish Macdonald
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Miscellaneous Amiga stuff
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/rtc.h>
+#include <linux/init.h>
+#include <linux/vt_kern.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#ifdef CONFIG_ZORRO
+#include <linux/zorro.h>
+#endif
+
+#include <asm/bootinfo.h>
+#include <asm/setup.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+#include <asm/irq.h>
+#include <asm/rtc.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+
+unsigned long amiga_model;
+unsigned long amiga_eclock;
+unsigned long amiga_masterclock;
+unsigned long amiga_colorclock;
+unsigned long amiga_chipset;
+unsigned char amiga_vblank;
+unsigned char amiga_psfreq;
+struct amiga_hw_present amiga_hw_present;
+
+static char s_a500[] __initdata = "A500";
+static char s_a500p[] __initdata = "A500+";
+static char s_a600[] __initdata = "A600";
+static char s_a1000[] __initdata = "A1000";
+static char s_a1200[] __initdata = "A1200";
+static char s_a2000[] __initdata = "A2000";
+static char s_a2500[] __initdata = "A2500";
+static char s_a3000[] __initdata = "A3000";
+static char s_a3000t[] __initdata = "A3000T";
+static char s_a3000p[] __initdata = "A3000+";
+static char s_a4000[] __initdata = "A4000";
+static char s_a4000t[] __initdata = "A4000T";
+static char s_cdtv[] __initdata = "CDTV";
+static char s_cd32[] __initdata = "CD32";
+static char s_draco[] __initdata = "Draco";
+static char *amiga_models[] __initdata = {
+    [AMI_500-AMI_500]		= s_a500,
+    [AMI_500PLUS-AMI_500]	= s_a500p,
+    [AMI_600-AMI_500]		= s_a600,
+    [AMI_1000-AMI_500]		= s_a1000,
+    [AMI_1200-AMI_500]		= s_a1200,
+    [AMI_2000-AMI_500]		= s_a2000,
+    [AMI_2500-AMI_500]		= s_a2500,
+    [AMI_3000-AMI_500]		= s_a3000,
+    [AMI_3000T-AMI_500]		= s_a3000t,
+    [AMI_3000PLUS-AMI_500]	= s_a3000p,
+    [AMI_4000-AMI_500]		= s_a4000,
+    [AMI_4000T-AMI_500]		= s_a4000t,
+    [AMI_CDTV-AMI_500]		= s_cdtv,
+    [AMI_CD32-AMI_500]		= s_cd32,
+    [AMI_DRACO-AMI_500]		= s_draco,
+};
+
+static char amiga_model_name[13] = "Amiga ";
+
+extern char m68k_debug_device[];
+
+static void amiga_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
+/* amiga specific irq functions */
+extern void amiga_init_IRQ (void);
+extern irqreturn_t (*amiga_default_handler[]) (int, void *, struct pt_regs *);
+extern int amiga_request_irq (unsigned int irq,
+			      irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                              unsigned long flags, const char *devname,
+			      void *dev_id);
+extern void amiga_free_irq (unsigned int irq, void *dev_id);
+extern void amiga_enable_irq (unsigned int);
+extern void amiga_disable_irq (unsigned int);
+static void amiga_get_model(char *model);
+static int amiga_get_hardware_list(char *buffer);
+extern int show_amiga_interrupts (struct seq_file *, void *);
+/* amiga specific timer functions */
+static unsigned long amiga_gettimeoffset (void);
+static int a3000_hwclk (int, struct rtc_time *);
+static int a2000_hwclk (int, struct rtc_time *);
+static int amiga_set_clock_mmss (unsigned long);
+static unsigned int amiga_get_ss (void);
+extern void amiga_mksound( unsigned int count, unsigned int ticks );
+#ifdef CONFIG_AMIGA_FLOPPY
+extern void amiga_floppy_setup(char *, int *);
+#endif
+static void amiga_reset (void);
+extern void amiga_init_sound(void);
+static void amiga_savekmsg_init(void);
+static void amiga_mem_console_write(struct console *co, const char *b,
+				    unsigned int count);
+void amiga_serial_console_write(struct console *co, const char *s,
+				unsigned int count);
+static void amiga_debug_init(void);
+#ifdef CONFIG_HEARTBEAT
+static void amiga_heartbeat(int on);
+#endif
+
+static struct console amiga_console_driver = {
+	.name =		"debug",
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+
+    /*
+     *  Motherboard Resources present in all Amiga models
+     */
+
+static struct {
+    struct resource _ciab, _ciaa, _custom, _kickstart;
+} mb_resources = {
+    ._ciab = {
+	.name = "CIA B", .start = 0x00bfd000, .end = 0x00bfdfff
+    },
+    ._ciaa = {
+	.name = "CIA A", .start = 0x00bfe000, .end = 0x00bfefff
+    },
+    ._custom = {
+	.name = "Custom I/O", .start = 0x00dff000, .end = 0x00dfffff
+    },
+    ._kickstart = {
+	.name = "Kickstart ROM", .start = 0x00f80000, .end = 0x00ffffff
+    }
+};
+
+static struct resource rtc_resource = {
+    .start = 0x00dc0000, .end = 0x00dcffff
+};
+
+static struct resource ram_resource[NUM_MEMINFO];
+
+
+    /*
+     *  Parse an Amiga-specific record in the bootinfo
+     */
+
+int amiga_parse_bootinfo(const struct bi_record *record)
+{
+    int unknown = 0;
+    const unsigned long *data = record->data;
+
+    switch (record->tag) {
+	case BI_AMIGA_MODEL:
+	    amiga_model = *data;
+	    break;
+
+	case BI_AMIGA_ECLOCK:
+	    amiga_eclock = *data;
+	    break;
+
+	case BI_AMIGA_CHIPSET:
+	    amiga_chipset = *data;
+	    break;
+
+	case BI_AMIGA_CHIP_SIZE:
+	    amiga_chip_size = *(const int *)data;
+	    break;
+
+	case BI_AMIGA_VBLANK:
+	    amiga_vblank = *(const unsigned char *)data;
+	    break;
+
+	case BI_AMIGA_PSFREQ:
+	    amiga_psfreq = *(const unsigned char *)data;
+	    break;
+
+	case BI_AMIGA_AUTOCON:
+#ifdef CONFIG_ZORRO
+	    if (zorro_num_autocon < ZORRO_NUM_AUTO) {
+		const struct ConfigDev *cd = (struct ConfigDev *)data;
+		struct zorro_dev *dev = &zorro_autocon[zorro_num_autocon++];
+		dev->rom = cd->cd_Rom;
+		dev->slotaddr = cd->cd_SlotAddr;
+		dev->slotsize = cd->cd_SlotSize;
+		dev->resource.start = (unsigned long)cd->cd_BoardAddr;
+		dev->resource.end = dev->resource.start+cd->cd_BoardSize-1;
+	    } else
+		printk("amiga_parse_bootinfo: too many AutoConfig devices\n");
+#endif /* CONFIG_ZORRO */
+	    break;
+
+	case BI_AMIGA_SERPER:
+	    /* serial port period: ignored here */
+	    break;
+
+	default:
+	    unknown = 1;
+    }
+    return(unknown);
+}
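Editor's note: amiga_parse_bootinfo() is handed one record at a time by the machine-independent setup code. As a hedged illustration of how such a walk looks, assuming the usual m68k bootinfo layout (16-bit tag, 16-bit size of the whole record, tag-specific data, terminated by a BI_LAST tag); this sketch is not part of the patch:

/* Hedged sketch, not kernel code: walking a bootinfo area. */
struct bi_record {
	unsigned short tag;	/* record type, e.g. BI_AMIGA_MODEL */
	unsigned short size;	/* size of the whole record, header included */
	unsigned long data[0];	/* tag-specific payload */
};

#define BI_LAST 0		/* assumed end-of-list tag */

static void walk_bootinfo(const char *bi,
			  int (*parse)(const struct bi_record *))
{
	const struct bi_record *record = (const struct bi_record *)bi;

	while (record->tag != BI_LAST) {
		parse(record);	/* e.g. amiga_parse_bootinfo() */
		record = (const struct bi_record *)((const char *)record +
						    record->size);
	}
}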
+
+    /*
+     *  Identify builtin hardware
+     */
+
+static void __init amiga_identify(void)
+{
+  /* Fill in some default values, if necessary */
+  if (amiga_eclock == 0)
+    amiga_eclock = 709379;
+
+  memset(&amiga_hw_present, 0, sizeof(amiga_hw_present));
+
+  printk("Amiga hardware found: ");
+  if (amiga_model >= AMI_500 && amiga_model <= AMI_DRACO) {
+    printk("[%s] ", amiga_models[amiga_model-AMI_500]);
+    strcat(amiga_model_name, amiga_models[amiga_model-AMI_500]);
+  }
+
+  switch(amiga_model) {
+  case AMI_UNKNOWN:
+    goto Generic;
+
+  case AMI_600:
+  case AMI_1200:
+    AMIGAHW_SET(A1200_IDE);
+    AMIGAHW_SET(PCMCIA);
+    /* fall through */
+  case AMI_500:
+  case AMI_500PLUS:
+  case AMI_1000:
+  case AMI_2000:
+  case AMI_2500:
+    AMIGAHW_SET(A2000_CLK);	/* Is this correct for all models? */
+    goto Generic;
+
+  case AMI_3000:
+  case AMI_3000T:
+    AMIGAHW_SET(AMBER_FF);
+    AMIGAHW_SET(MAGIC_REKICK);
+    /* fall through */
+  case AMI_3000PLUS:
+    AMIGAHW_SET(A3000_SCSI);
+    AMIGAHW_SET(A3000_CLK);
+    AMIGAHW_SET(ZORRO3);
+    goto Generic;
+
+  case AMI_4000T:
+    AMIGAHW_SET(A4000_SCSI);
+    /* fall through */
+  case AMI_4000:
+    AMIGAHW_SET(A4000_IDE);
+    AMIGAHW_SET(A3000_CLK);
+    AMIGAHW_SET(ZORRO3);
+    goto Generic;
+
+  case AMI_CDTV:
+  case AMI_CD32:
+    AMIGAHW_SET(CD_ROM);
+    AMIGAHW_SET(A2000_CLK);             /* Is this correct? */
+    goto Generic;
+
+  Generic:
+    AMIGAHW_SET(AMI_VIDEO);
+    AMIGAHW_SET(AMI_BLITTER);
+    AMIGAHW_SET(AMI_AUDIO);
+    AMIGAHW_SET(AMI_FLOPPY);
+    AMIGAHW_SET(AMI_KEYBOARD);
+    AMIGAHW_SET(AMI_MOUSE);
+    AMIGAHW_SET(AMI_SERIAL);
+    AMIGAHW_SET(AMI_PARALLEL);
+    AMIGAHW_SET(CHIP_RAM);
+    AMIGAHW_SET(PAULA);
+
+    switch(amiga_chipset) {
+    case CS_OCS:
+    case CS_ECS:
+    case CS_AGA:
+      switch (custom.deniseid & 0xf) {
+      case 0x0c:
+	AMIGAHW_SET(DENISE_HR);
+	break;
+      case 0x08:
+	AMIGAHW_SET(LISA);
+	break;
+      }
+      break;
+    default:
+      AMIGAHW_SET(DENISE);
+      break;
+    }
+    switch ((custom.vposr>>8) & 0x7f) {
+    case 0x00:
+      AMIGAHW_SET(AGNUS_PAL);
+      break;
+    case 0x10:
+      AMIGAHW_SET(AGNUS_NTSC);
+      break;
+    case 0x20:
+    case 0x21:
+      AMIGAHW_SET(AGNUS_HR_PAL);
+      break;
+    case 0x30:
+    case 0x31:
+      AMIGAHW_SET(AGNUS_HR_NTSC);
+      break;
+    case 0x22:
+    case 0x23:
+      AMIGAHW_SET(ALICE_PAL);
+      break;
+    case 0x32:
+    case 0x33:
+      AMIGAHW_SET(ALICE_NTSC);
+      break;
+    }
+    AMIGAHW_SET(ZORRO);
+    break;
+
+  case AMI_DRACO:
+    panic("No support for Draco yet");
+
+  default:
+    panic("Unknown Amiga Model");
+  }
+
+#define AMIGAHW_ANNOUNCE(name, str)			\
+  if (AMIGAHW_PRESENT(name))				\
+    printk(str)
+
+  AMIGAHW_ANNOUNCE(AMI_VIDEO, "VIDEO ");
+  AMIGAHW_ANNOUNCE(AMI_BLITTER, "BLITTER ");
+  AMIGAHW_ANNOUNCE(AMBER_FF, "AMBER_FF ");
+  AMIGAHW_ANNOUNCE(AMI_AUDIO, "AUDIO ");
+  AMIGAHW_ANNOUNCE(AMI_FLOPPY, "FLOPPY ");
+  AMIGAHW_ANNOUNCE(A3000_SCSI, "A3000_SCSI ");
+  AMIGAHW_ANNOUNCE(A4000_SCSI, "A4000_SCSI ");
+  AMIGAHW_ANNOUNCE(A1200_IDE, "A1200_IDE ");
+  AMIGAHW_ANNOUNCE(A4000_IDE, "A4000_IDE ");
+  AMIGAHW_ANNOUNCE(CD_ROM, "CD_ROM ");
+  AMIGAHW_ANNOUNCE(AMI_KEYBOARD, "KEYBOARD ");
+  AMIGAHW_ANNOUNCE(AMI_MOUSE, "MOUSE ");
+  AMIGAHW_ANNOUNCE(AMI_SERIAL, "SERIAL ");
+  AMIGAHW_ANNOUNCE(AMI_PARALLEL, "PARALLEL ");
+  AMIGAHW_ANNOUNCE(A2000_CLK, "A2000_CLK ");
+  AMIGAHW_ANNOUNCE(A3000_CLK, "A3000_CLK ");
+  AMIGAHW_ANNOUNCE(CHIP_RAM, "CHIP_RAM ");
+  AMIGAHW_ANNOUNCE(PAULA, "PAULA ");
+  AMIGAHW_ANNOUNCE(DENISE, "DENISE ");
+  AMIGAHW_ANNOUNCE(DENISE_HR, "DENISE_HR ");
+  AMIGAHW_ANNOUNCE(LISA, "LISA ");
+  AMIGAHW_ANNOUNCE(AGNUS_PAL, "AGNUS_PAL ");
+  AMIGAHW_ANNOUNCE(AGNUS_NTSC, "AGNUS_NTSC ");
+  AMIGAHW_ANNOUNCE(AGNUS_HR_PAL, "AGNUS_HR_PAL ");
+  AMIGAHW_ANNOUNCE(AGNUS_HR_NTSC, "AGNUS_HR_NTSC ");
+  AMIGAHW_ANNOUNCE(ALICE_PAL, "ALICE_PAL ");
+  AMIGAHW_ANNOUNCE(ALICE_NTSC, "ALICE_NTSC ");
+  AMIGAHW_ANNOUNCE(MAGIC_REKICK, "MAGIC_REKICK ");
+  AMIGAHW_ANNOUNCE(PCMCIA, "PCMCIA ");
+  if (AMIGAHW_PRESENT(ZORRO))
+    printk("ZORRO%s ", AMIGAHW_PRESENT(ZORRO3) ? "3" : "");
+  printk("\n");
+
+#undef AMIGAHW_ANNOUNCE
+}
+
+    /*
+     *  Setup the Amiga configuration info
+     */
+
+void __init config_amiga(void)
+{
+  int i;
+
+  amiga_debug_init();
+  amiga_identify();
+
+  /* Yuk, we don't have PCI memory */
+  iomem_resource.name = "Memory";
+  for (i = 0; i < 4; i++)
+    request_resource(&iomem_resource, &((struct resource *)&mb_resources)[i]);
+
+  mach_sched_init      = amiga_sched_init;
+  mach_init_IRQ        = amiga_init_IRQ;
+  mach_default_handler = &amiga_default_handler;
+  mach_request_irq     = amiga_request_irq;
+  mach_free_irq        = amiga_free_irq;
+  enable_irq           = amiga_enable_irq;
+  disable_irq          = amiga_disable_irq;
+  mach_get_model       = amiga_get_model;
+  mach_get_hardware_list = amiga_get_hardware_list;
+  mach_get_irq_list    = show_amiga_interrupts;
+  mach_gettimeoffset   = amiga_gettimeoffset;
+  if (AMIGAHW_PRESENT(A3000_CLK)){
+    mach_hwclk         = a3000_hwclk;
+    rtc_resource.name = "A3000 RTC";
+    request_resource(&iomem_resource, &rtc_resource);
+  }
+  else{ /* if (AMIGAHW_PRESENT(A2000_CLK)) */
+    mach_hwclk         = a2000_hwclk;
+    rtc_resource.name = "A2000 RTC";
+    request_resource(&iomem_resource, &rtc_resource);
+  }
+
+  mach_max_dma_address = 0xffffffff; /*
+				      * default MAX_DMA=0xffffffff
+				      * on all machines. If we don't
+				      * do so, the SCSI code will not
+				      * be able to allocate any mem
+				      * for transfers, unless we are
+				      * dealing with a Z2 mem only
+				      * system.                  /Jes
+				      */
+
+  mach_set_clock_mmss  = amiga_set_clock_mmss;
+  mach_get_ss          = amiga_get_ss;
+#ifdef CONFIG_AMIGA_FLOPPY
+  mach_floppy_setup    = amiga_floppy_setup;
+#endif
+  mach_reset           = amiga_reset;
+#ifdef CONFIG_DUMMY_CONSOLE
+  conswitchp           = &dummy_con;
+#endif
+#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
+  mach_beep            = amiga_mksound;
+#endif
+
+#ifdef CONFIG_HEARTBEAT
+  mach_heartbeat = amiga_heartbeat;
+#endif
+
+  /* Fill in the clock values (based on the 700 kHz E-Clock) */
+  amiga_masterclock = 40*amiga_eclock;	/* 28 MHz */
+  amiga_colorclock = 5*amiga_eclock;	/* 3.5 MHz */
+
+  /* clear all DMA bits */
+  custom.dmacon = DMAF_ALL;
+  /* ensure that the DMA master bit is set */
+  custom.dmacon = DMAF_SETCLR | DMAF_MASTER;
+
+  /* don't use Z2 RAM as system memory on Z3 capable machines */
+  if (AMIGAHW_PRESENT(ZORRO3)) {
+    int i, j;
+    u32 disabled_z2mem = 0;
+    for (i = 0; i < m68k_num_memory; i++)
+      if (m68k_memory[i].addr < 16*1024*1024) {
+	if (i == 0) {
+	  /* don't cut off the branch we're sitting on */
+	  printk("Warning: kernel runs in Zorro II memory\n");
+	  continue;
+	}
+	disabled_z2mem += m68k_memory[i].size;
+	m68k_num_memory--;
+	for (j = i; j < m68k_num_memory; j++)
+	  m68k_memory[j] = m68k_memory[j+1];
+	i--;
+      }
+    if (disabled_z2mem)
+      printk("%dK of Zorro II memory will not be used as system memory\n",
+	     disabled_z2mem>>10);
+  }
+
+  /* request all RAM */
+  for (i = 0; i < m68k_num_memory; i++) {
+    ram_resource[i].name =
+      (m68k_memory[i].addr >= 0x01000000) ? "32-bit Fast RAM" :
+      (m68k_memory[i].addr < 0x00c00000) ? "16-bit Fast RAM" :
+      "16-bit Slow RAM";
+    ram_resource[i].start = m68k_memory[i].addr;
+    ram_resource[i].end = m68k_memory[i].addr+m68k_memory[i].size-1;
+    request_resource(&iomem_resource, &ram_resource[i]);
+  }
+
+  /* initialize chipram allocator */
+  amiga_chip_init ();
+
+  /* debugging using chipram */
+  if (!strcmp( m68k_debug_device, "mem" )){
+	  if (!AMIGAHW_PRESENT(CHIP_RAM))
+		  printk("Warning: no chipram present for debugging\n");
+	  else {
+		  amiga_savekmsg_init();
+		  amiga_console_driver.write = amiga_mem_console_write;
+		  register_console(&amiga_console_driver);
+	  }
+  }
+
+  /* our beloved beeper */
+  if (AMIGAHW_PRESENT(AMI_AUDIO))
+	  amiga_init_sound();
+
+  /*
+   * if it is an A3000, set the magic bit that forces
+   * a hard rekick
+   */
+  if (AMIGAHW_PRESENT(MAGIC_REKICK))
+	  *(unsigned char *)ZTWO_VADDR(0xde0002) |= 0x80;
+}
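Editor's note: the Zorro II exclusion loop in config_amiga() above compacts m68k_memory[] in place: when a chunk below 16 MB is dropped, the remaining entries are shifted down and the same index is examined again. A standalone sketch of that pattern on a plain array (illustration only; the names are made up):

#include <stdio.h>

struct chunk { unsigned long addr, size; };

/* Drop all chunks below 'limit', keeping the first entry untouched. */
static int drop_low_chunks(struct chunk *mem, int num, unsigned long limit)
{
	int i, j;

	for (i = 1; i < num; i++)		/* index 0 is never removed */
		if (mem[i].addr < limit) {
			num--;
			for (j = i; j < num; j++)
				mem[j] = mem[j + 1];
			i--;			/* re-check the slot we just filled */
		}
	return num;
}

int main(void)
{
	struct chunk mem[] = {
		{ 0x08000000, 0x400000 },	/* 32-bit fast RAM, kept */
		{ 0x00200000, 0x200000 },	/* Zorro II RAM, dropped */
		{ 0x07000000, 0x100000 },	/* kept */
	};
	int n = drop_low_chunks(mem, 3, 16UL * 1024 * 1024);

	printf("%d chunks remain\n", n);	/* prints "2 chunks remain" */
	return 0;
}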
+
+static unsigned short jiffy_ticks;
+
+static void __init amiga_sched_init(irqreturn_t (*timer_routine)(int, void *,
+							  struct pt_regs *))
+{
+	static struct resource sched_res = {
+	    .name = "timer", .start = 0x00bfd400, .end = 0x00bfd5ff,
+	};
+	jiffy_ticks = (amiga_eclock+HZ/2)/HZ;
+
+	if (request_resource(&mb_resources._ciab, &sched_res))
+	    printk("Cannot allocate ciab.ta{lo,hi}\n");
+	ciab.cra &= 0xC0;   /* turn off timer A, continuous mode, from Eclk */
+	ciab.talo = jiffy_ticks % 256;
+	ciab.tahi = jiffy_ticks / 256;
+
+	/* install interrupt service routine for CIAB Timer A
+	 *
+	 * Please don't change this to use ciaa, as it interferes with the
+	 * SCSI code. We'll have to take a look at this later
+	 */
+	request_irq(IRQ_AMIGA_CIAB_TA, timer_routine, 0, "timer", NULL);
+	/* start timer */
+	ciab.cra |= 0x11;
+}
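Editor's note: to make the reload value above concrete, with the default E-clock of 709379 Hz (see amiga_identify()) and an assumed HZ of 100, the rounded division and the low/high byte split work out as follows (arithmetic illustration only):

#include <stdio.h>

int main(void)
{
	unsigned long eclock = 709379;	/* default E-clock, as in amiga_identify() */
	unsigned int hz = 100;		/* assumed HZ for this illustration */
	unsigned int ticks = (eclock + hz / 2) / hz;	/* rounded: 7094 */

	/* CIA timers are loaded as a low byte and a high byte */
	printf("jiffy_ticks = %u, talo = 0x%02x, tahi = 0x%02x\n",
	       ticks, ticks % 256, ticks / 256);	/* 7094, 0xb6, 0x1b */
	return 0;
}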
+
+#define TICK_SIZE 10000
+
+/* This is always executed with interrupts disabled.  */
+static unsigned long amiga_gettimeoffset (void)
+{
+	unsigned short hi, lo, hi2;
+	unsigned long ticks, offset = 0;
+
+	/* read CIA B timer A current value */
+	hi  = ciab.tahi;
+	lo  = ciab.talo;
+	hi2 = ciab.tahi;
+
+	if (hi != hi2) {
+		lo = ciab.talo;
+		hi = hi2;
+	}
+
+	ticks = hi << 8 | lo;
+
+	if (ticks > jiffy_ticks / 2)
+		/* check for pending interrupt */
+		if (cia_set_irq(&ciab_base, 0) & CIA_ICR_TA)
+			offset = 10000;
+
+	ticks = jiffy_ticks - ticks;
+	ticks = (10000 * ticks) / jiffy_ticks;
+
+	return ticks + offset;
+}
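Editor's note: the double read of tahi guards against the high byte changing between the two accesses, and the final arithmetic scales what is left on the down-counter into microseconds of the current 10 ms tick; the extra 10000 covers a timer wrap whose interrupt has not been serviced yet. A worked number, assuming jiffy_ticks = 7094 as in the previous sketch:

#include <stdio.h>

/* Illustration of the scaling in amiga_gettimeoffset(). */
int main(void)
{
	unsigned long jiffy_ticks = 7094;
	unsigned long counter = 3547;		/* sample value read from ciab timer A */
	unsigned long elapsed = jiffy_ticks - counter;
	unsigned long usecs = (10000 * elapsed) / jiffy_ticks;

	printf("%lu us into the current tick\n", usecs);	/* 5000 us */
	return 0;
}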
+
+static int a3000_hwclk(int op, struct rtc_time *t)
+{
+	tod_3000.cntrl1 = TOD3000_CNTRL1_HOLD;
+
+	if (!op) { /* read */
+		t->tm_sec  = tod_3000.second1 * 10 + tod_3000.second2;
+		t->tm_min  = tod_3000.minute1 * 10 + tod_3000.minute2;
+		t->tm_hour = tod_3000.hour1   * 10 + tod_3000.hour2;
+		t->tm_mday = tod_3000.day1    * 10 + tod_3000.day2;
+		t->tm_wday = tod_3000.weekday;
+		t->tm_mon  = tod_3000.month1  * 10 + tod_3000.month2 - 1;
+		t->tm_year = tod_3000.year1   * 10 + tod_3000.year2;
+		if (t->tm_year <= 69)
+			t->tm_year += 100;
+	} else {
+		tod_3000.second1 = t->tm_sec / 10;
+		tod_3000.second2 = t->tm_sec % 10;
+		tod_3000.minute1 = t->tm_min / 10;
+		tod_3000.minute2 = t->tm_min % 10;
+		tod_3000.hour1   = t->tm_hour / 10;
+		tod_3000.hour2   = t->tm_hour % 10;
+		tod_3000.day1    = t->tm_mday / 10;
+		tod_3000.day2    = t->tm_mday % 10;
+		if (t->tm_wday != -1)
+			tod_3000.weekday = t->tm_wday;
+		tod_3000.month1  = (t->tm_mon + 1) / 10;
+		tod_3000.month2  = (t->tm_mon + 1) % 10;
+		if (t->tm_year >= 100)
+			t->tm_year -= 100;
+		tod_3000.year1   = t->tm_year / 10;
+		tod_3000.year2   = t->tm_year % 10;
+	}
+
+	tod_3000.cntrl1 = TOD3000_CNTRL1_FREE;
+
+	return 0;
+}
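Editor's note: both hwclk routines (this one and a2000_hwclk() below) split every field into a tens and a ones register and use the same two-digit year pivot: tm_year counts years since 1900, so RTC years 00-69 are taken as 2000-2069. A tiny arithmetic illustration:

#include <stdio.h>

int main(void)
{
	int rtc_year = 5;		/* two-digit value read from the RTC, e.g. "05" */
	int tm_year = rtc_year;

	if (tm_year <= 69)		/* same pivot as the hwclk routines */
		tm_year += 100;		/* tm_year is years since 1900 */

	printf("RTC %02d -> %d\n", rtc_year, 1900 + tm_year);	/* prints 2005 */
	return 0;
}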
+
+static int a2000_hwclk(int op, struct rtc_time *t)
+{
+	int cnt = 5;
+
+	tod_2000.cntrl1 = TOD2000_CNTRL1_HOLD;
+
+	while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt--)
+	{
+	        tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
+	        udelay(70);
+	        tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
+	}
+
+	if (!cnt)
+		printk(KERN_INFO "hwclk: timed out waiting for RTC (0x%x)\n", tod_2000.cntrl1);
+
+	if (!op) { /* read */
+		t->tm_sec  = tod_2000.second1     * 10 + tod_2000.second2;
+		t->tm_min  = tod_2000.minute1     * 10 + tod_2000.minute2;
+		t->tm_hour = (tod_2000.hour1 & 3) * 10 + tod_2000.hour2;
+		t->tm_mday = tod_2000.day1        * 10 + tod_2000.day2;
+		t->tm_wday = tod_2000.weekday;
+		t->tm_mon  = tod_2000.month1      * 10 + tod_2000.month2 - 1;
+		t->tm_year = tod_2000.year1       * 10 + tod_2000.year2;
+		if (t->tm_year <= 69)
+			t->tm_year += 100;
+
+		if (!(tod_2000.cntrl3 & TOD2000_CNTRL3_24HMODE)){
+			if (!(tod_2000.hour1 & TOD2000_HOUR1_PM) && t->tm_hour == 12)
+				t->tm_hour = 0;
+			else if ((tod_2000.hour1 & TOD2000_HOUR1_PM) && t->tm_hour != 12)
+				t->tm_hour += 12;
+		}
+	} else {
+		tod_2000.second1 = t->tm_sec / 10;
+		tod_2000.second2 = t->tm_sec % 10;
+		tod_2000.minute1 = t->tm_min / 10;
+		tod_2000.minute2 = t->tm_min % 10;
+		if (tod_2000.cntrl3 & TOD2000_CNTRL3_24HMODE)
+			tod_2000.hour1 = t->tm_hour / 10;
+		else if (t->tm_hour >= 12)
+			tod_2000.hour1 = TOD2000_HOUR1_PM +
+				(t->tm_hour - 12) / 10;
+		else
+			tod_2000.hour1 = t->tm_hour / 10;
+		tod_2000.hour2   = t->tm_hour % 10;
+		tod_2000.day1    = t->tm_mday / 10;
+		tod_2000.day2    = t->tm_mday % 10;
+		if (t->tm_wday != -1)
+			tod_2000.weekday = t->tm_wday;
+		tod_2000.month1  = (t->tm_mon + 1) / 10;
+		tod_2000.month2  = (t->tm_mon + 1) % 10;
+		if (t->tm_year >= 100)
+			t->tm_year -= 100;
+		tod_2000.year1   = t->tm_year / 10;
+		tod_2000.year2   = t->tm_year % 10;
+	}
+
+	tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
+
+	return 0;
+}
+
+static int amiga_set_clock_mmss (unsigned long nowtime)
+{
+	short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
+
+	if (AMIGAHW_PRESENT(A3000_CLK)) {
+		tod_3000.cntrl1 = TOD3000_CNTRL1_HOLD;
+
+		tod_3000.second1 = real_seconds / 10;
+		tod_3000.second2 = real_seconds % 10;
+		tod_3000.minute1 = real_minutes / 10;
+		tod_3000.minute2 = real_minutes % 10;
+
+		tod_3000.cntrl1 = TOD3000_CNTRL1_FREE;
+	} else /* if (AMIGAHW_PRESENT(A2000_CLK)) */ {
+		int cnt = 5;
+
+		tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
+
+		while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt--)
+		{
+			tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
+			udelay(70);
+			tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
+		}
+
+		if (!cnt)
+			printk(KERN_INFO "set_clock_mmss: timed out waiting for RTC (0x%x)\n", tod_2000.cntrl1);
+
+		tod_2000.second1 = real_seconds / 10;
+		tod_2000.second2 = real_seconds % 10;
+		tod_2000.minute1 = real_minutes / 10;
+		tod_2000.minute2 = real_minutes % 10;
+
+		tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
+	}
+
+	return 0;
+}
+
+static unsigned int amiga_get_ss( void )
+{
+	unsigned int s;
+
+	if (AMIGAHW_PRESENT(A3000_CLK)) {
+		tod_3000.cntrl1 = TOD3000_CNTRL1_HOLD;
+		s = tod_3000.second1 * 10 + tod_3000.second2;
+		tod_3000.cntrl1 = TOD3000_CNTRL1_FREE;
+	} else /* if (AMIGAHW_PRESENT(A2000_CLK)) */ {
+		s = tod_2000.second1 * 10 + tod_2000.second2;
+	}
+	return s;
+}
+
+static NORET_TYPE void amiga_reset( void )
+    ATTRIB_NORET;
+
+static void amiga_reset (void)
+{
+  unsigned long jmp_addr040 = virt_to_phys(&&jmp_addr_label040);
+  unsigned long jmp_addr = virt_to_phys(&&jmp_addr_label);
+
+  local_irq_disable();
+  if (CPU_IS_040_OR_060)
+    /* Setup transparent translation registers for mapping
+     * of 16 MB kernel segment before disabling translation
+     */
+    __asm__ __volatile__
+      ("movel    %0,%/d0\n\t"
+       "andl     #0xff000000,%/d0\n\t"
+       "orw      #0xe020,%/d0\n\t"   /* map 16 MB, enable, cacheable */
+       ".chip    68040\n\t"
+       "movec    %%d0,%%itt0\n\t"
+       "movec    %%d0,%%dtt0\n\t"
+       ".chip    68k\n\t"
+       "jmp      %0@\n\t"
+       : /* no outputs */
+       : "a" (jmp_addr040));
+  else
+    /* for 680[23]0, just disable translation and jump to the physical
+     * address of the label
+     */
+    __asm__ __volatile__
+      ("pmove  %/tc,%@\n\t"
+       "bclr   #7,%@\n\t"
+       "pmove  %@,%/tc\n\t"
+       "jmp    %0@\n\t"
+       : /* no outputs */
+       : "a" (jmp_addr));
+ jmp_addr_label040:
+  /* disable translation on '040 now */
+  __asm__ __volatile__
+    ("moveq #0,%/d0\n\t"
+     ".chip 68040\n\t"
+     "movec %%d0,%%tc\n\t"	/* disable MMU */
+     ".chip 68k\n\t"
+     : /* no outputs */
+     : /* no inputs */
+     : "d0");
+
+ jmp_addr_label:
+  /* pickup reset address from AmigaOS ROM, reset devices and jump
+   * to reset address
+   */
+  __asm__ __volatile__
+    ("movew #0x2700,%/sr\n\t"
+     "leal  0x01000000,%/a0\n\t"
+     "subl  %/a0@(-0x14),%/a0\n\t"
+     "movel %/a0@(4),%/a0\n\t"
+     "subql #2,%/a0\n\t"
+     "bra   1f\n\t"
+     /* align on a longword boundary */
+     __ALIGN_STR "\n"
+     "1:\n\t"
+     "reset\n\t"
+     "jmp   %/a0@" : /* Just that gcc scans it for % escapes */ );
+
+  for (;;);
+
+}
+
+
+    /*
+     *  Debugging
+     */
+
+#define SAVEKMSG_MAXMEM		128*1024
+
+#define SAVEKMSG_MAGIC1		0x53415645	/* 'SAVE' */
+#define SAVEKMSG_MAGIC2		0x4B4D5347	/* 'KMSG' */
+
+struct savekmsg {
+    unsigned long magic1;		/* SAVEKMSG_MAGIC1 */
+    unsigned long magic2;		/* SAVEKMSG_MAGIC2 */
+    unsigned long magicptr;		/* address of magic1 */
+    unsigned long size;
+    char data[0];
+};
+
+static struct savekmsg *savekmsg;
+
+static void amiga_mem_console_write(struct console *co, const char *s,
+				    unsigned int count)
+{
+    if (savekmsg->size+count <= SAVEKMSG_MAXMEM-sizeof(struct savekmsg)) {
+        memcpy(savekmsg->data+savekmsg->size, s, count);
+        savekmsg->size += count;
+    }
+}
+
+static void amiga_savekmsg_init(void)
+{
+    static struct resource debug_res = { .name = "Debug" };
+
+    savekmsg = amiga_chip_alloc_res(SAVEKMSG_MAXMEM, &debug_res);
+    savekmsg->magic1 = SAVEKMSG_MAGIC1;
+    savekmsg->magic2 = SAVEKMSG_MAGIC2;
+    savekmsg->magicptr = ZTWO_PADDR(savekmsg);
+    savekmsg->size = 0;
+}
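Editor's note: the magic words and the self-referencing magicptr are what make the buffer recoverable after a crash; a post-mortem tool can scan a chip RAM dump for the pair and cross-check that magicptr matches the physical address of the hit. A hedged, self-contained sketch of such a scan (the 32-bit big-endian layout mirrors the struct above; everything else is illustrative):

#include <stdint.h>
#include <stddef.h>

#define SAVEKMSG_MAGIC1	0x53415645u	/* 'SAVE' */
#define SAVEKMSG_MAGIC2	0x4B4D5347u	/* 'KMSG' */

static uint32_t be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Scan a chip RAM dump for the message buffer; phys_base is where the dump
 * starts in the machine's physical address space.  Returns the offset of the
 * header, or -1 if nothing was found. */
static long find_savekmsg(const unsigned char *dump, size_t len,
			  uint32_t phys_base)
{
	size_t off;

	for (off = 0; off + 16 <= len; off += 4)	/* header is 4 x 32 bit */
		if (be32(dump + off) == SAVEKMSG_MAGIC1 &&
		    be32(dump + off + 4) == SAVEKMSG_MAGIC2 &&
		    be32(dump + off + 8) == phys_base + (uint32_t)off)
			return (long)off;
	return -1;
}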
+
+static void amiga_serial_putc(char c)
+{
+    /* data in bits 0-7, stop bit in bit 8 */
+    custom.serdat = (unsigned char)c | 0x100;
+    /* wait for the transmit buffer empty flag (TBE, bit 13 of serdatr) */
+    while (!(custom.serdatr & 0x2000))
+	;
+}
+
+void amiga_serial_console_write(struct console *co, const char *s,
+				       unsigned int count)
+{
+    while (count--) {
+	if (*s == '\n')
+	    amiga_serial_putc('\r');
+	amiga_serial_putc(*s++);
+    }
+}
+
+#ifdef CONFIG_SERIAL_CONSOLE
+void amiga_serial_puts(const char *s)
+{
+    amiga_serial_console_write(NULL, s, strlen(s));
+}
+
+int amiga_serial_console_wait_key(struct console *co)
+{
+    int ch;
+
+    while (!(custom.intreqr & IF_RBF))
+	barrier();
+    ch = custom.serdatr & 0xff;
+    /* clear the interrupt, so that another character can be read */
+    custom.intreq = IF_RBF;
+    return ch;
+}
+
+void amiga_serial_gets(struct console *co, char *s, int len)
+{
+    int ch, cnt = 0;
+
+    while (1) {
+	ch = amiga_serial_console_wait_key(co);
+
+	/* Check for backspace. */
+	if (ch == 8 || ch == 127) {
+	    if (cnt == 0) {
+		amiga_serial_putc('\007');
+		continue;
+	    }
+	    cnt--;
+	    amiga_serial_puts("\010 \010");
+	    continue;
+	}
+
+	/* Check for enter. */
+	if (ch == 10 || ch == 13)
+	    break;
+
+	/* See if line is too long. */
+	if (cnt >= len + 1) {
+	    amiga_serial_putc(7);
+	    cnt--;
+	    continue;
+	}
+
+	/* Store and echo character. */
+	s[cnt++] = ch;
+	amiga_serial_putc(ch);
+    }
+    /* Print enter. */
+    amiga_serial_puts("\r\n");
+    s[cnt] = 0;
+}
+#endif
+
+static void __init amiga_debug_init(void)
+{
+	if (!strcmp( m68k_debug_device, "ser" )) {
+		/* no initialization required (?) */
+		amiga_console_driver.write = amiga_serial_console_write;
+		register_console(&amiga_console_driver);
+	}
+}
+
+#ifdef CONFIG_HEARTBEAT
+static void amiga_heartbeat(int on)
+{
+    if (on)
+	ciaa.pra &= ~2;
+    else
+	ciaa.pra |= 2;
+}
+#endif
+
+    /*
+     *  Amiga specific parts of /proc
+     */
+
+static void amiga_get_model(char *model)
+{
+    strcpy(model, amiga_model_name);
+}
+
+
+static int amiga_get_hardware_list(char *buffer)
+{
+    int len = 0;
+
+    if (AMIGAHW_PRESENT(CHIP_RAM))
+	len += sprintf(buffer+len, "Chip RAM:\t%ldK\n", amiga_chip_size>>10);
+    len += sprintf(buffer+len, "PS Freq:\t%dHz\nEClock Freq:\t%ldHz\n",
+		   amiga_psfreq, amiga_eclock);
+    if (AMIGAHW_PRESENT(AMI_VIDEO)) {
+	char *type;
+	switch(amiga_chipset) {
+	    case CS_OCS:
+		type = "OCS";
+		break;
+	    case CS_ECS:
+		type = "ECS";
+		break;
+	    case CS_AGA:
+		type = "AGA";
+		break;
+	    default:
+		type = "Old or Unknown";
+		break;
+	}
+	len += sprintf(buffer+len, "Graphics:\t%s\n", type);
+    }
+
+#define AMIGAHW_ANNOUNCE(name, str)			\
+    if (AMIGAHW_PRESENT(name))				\
+	len += sprintf (buffer+len, "\t%s\n", str)
+
+    len += sprintf (buffer + len, "Detected hardware:\n");
+
+    AMIGAHW_ANNOUNCE(AMI_VIDEO, "Amiga Video");
+    AMIGAHW_ANNOUNCE(AMI_BLITTER, "Blitter");
+    AMIGAHW_ANNOUNCE(AMBER_FF, "Amber Flicker Fixer");
+    AMIGAHW_ANNOUNCE(AMI_AUDIO, "Amiga Audio");
+    AMIGAHW_ANNOUNCE(AMI_FLOPPY, "Floppy Controller");
+    AMIGAHW_ANNOUNCE(A3000_SCSI, "SCSI Controller WD33C93 (A3000 style)");
+    AMIGAHW_ANNOUNCE(A4000_SCSI, "SCSI Controller NCR53C710 (A4000T style)");
+    AMIGAHW_ANNOUNCE(A1200_IDE, "IDE Interface (A1200 style)");
+    AMIGAHW_ANNOUNCE(A4000_IDE, "IDE Interface (A4000 style)");
+    AMIGAHW_ANNOUNCE(CD_ROM, "Internal CD ROM drive");
+    AMIGAHW_ANNOUNCE(AMI_KEYBOARD, "Keyboard");
+    AMIGAHW_ANNOUNCE(AMI_MOUSE, "Mouse Port");
+    AMIGAHW_ANNOUNCE(AMI_SERIAL, "Serial Port");
+    AMIGAHW_ANNOUNCE(AMI_PARALLEL, "Parallel Port");
+    AMIGAHW_ANNOUNCE(A2000_CLK, "Hardware Clock (A2000 style)");
+    AMIGAHW_ANNOUNCE(A3000_CLK, "Hardware Clock (A3000 style)");
+    AMIGAHW_ANNOUNCE(CHIP_RAM, "Chip RAM");
+    AMIGAHW_ANNOUNCE(PAULA, "Paula 8364");
+    AMIGAHW_ANNOUNCE(DENISE, "Denise 8362");
+    AMIGAHW_ANNOUNCE(DENISE_HR, "Denise 8373");
+    AMIGAHW_ANNOUNCE(LISA, "Lisa 8375");
+    AMIGAHW_ANNOUNCE(AGNUS_PAL, "Normal/Fat PAL Agnus 8367/8371");
+    AMIGAHW_ANNOUNCE(AGNUS_NTSC, "Normal/Fat NTSC Agnus 8361/8370");
+    AMIGAHW_ANNOUNCE(AGNUS_HR_PAL, "Fat Hires PAL Agnus 8372");
+    AMIGAHW_ANNOUNCE(AGNUS_HR_NTSC, "Fat Hires NTSC Agnus 8372");
+    AMIGAHW_ANNOUNCE(ALICE_PAL, "PAL Alice 8374");
+    AMIGAHW_ANNOUNCE(ALICE_NTSC, "NTSC Alice 8374");
+    AMIGAHW_ANNOUNCE(MAGIC_REKICK, "Magic Hard Rekick");
+    AMIGAHW_ANNOUNCE(PCMCIA, "PCMCIA Slot");
+#ifdef CONFIG_ZORRO
+    if (AMIGAHW_PRESENT(ZORRO))
+	len += sprintf(buffer+len, "\tZorro II%s AutoConfig: %d Expansion "
+				   "Device%s\n",
+		       AMIGAHW_PRESENT(ZORRO3) ? "I" : "",
+		       zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
+#endif /* CONFIG_ZORRO */
+
+#undef AMIGAHW_ANNOUNCE
+
+    return(len);
+}
diff --git a/arch/m68k/amiga/pcmcia.c b/arch/m68k/amiga/pcmcia.c
new file mode 100644
index 0000000..fc57c6e
--- /dev/null
+++ b/arch/m68k/amiga/pcmcia.c
@@ -0,0 +1,113 @@
+/*
+** arch/m68k/amiga/pcmcia.c -- Amiga Linux PCMCIA support
+**                      most information was found by disassembling card.resource
+**                      I'm still looking for an official doc !
+**
+** Copyright 1997 by Alain Malek
+**
+** This file is subject to the terms and conditions of the GNU General Public
+** License.  See the file COPYING in the main directory of this archive
+** for more details.
+**
+** Created: 12/10/97 by Alain Malek
+*/
+
+#include <linux/types.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <asm/amigayle.h>
+#include <asm/amipcmcia.h>
+
+/* gayle config byte for program voltage and access speed */
+static unsigned char cfg_byte = GAYLE_CFG_0V|GAYLE_CFG_150NS;
+
+void pcmcia_reset(void)
+{
+	unsigned long reset_start_time = jiffies;
+	unsigned char b;
+
+	gayle_reset = 0x00;
+	while (time_before(jiffies, reset_start_time + 1*HZ/100));
+	b = gayle_reset;
+}
+
+
+/* copy a tuple, including the tuple header; returns the number of bytes copied */
+/* be careful, as this may trigger a GAYLE_IRQ_WR interrupt! */
+
+int pcmcia_copy_tuple(unsigned char tuple_id, void *tuple, int max_len)
+{
+	unsigned char id, *dest;
+	int cnt, pos, len;
+
+	dest = tuple;
+	pos = 0;
+
+	id = gayle_attribute[pos];
+
+	while((id != CISTPL_END) && (pos < 0x10000)) {
+		len = (int)gayle_attribute[pos+2] + 2;
+		if (id == tuple_id) {
+			len = (len > max_len)?max_len:len;
+			for (cnt = 0; cnt < len; cnt++) {
+				*dest++ = gayle_attribute[pos+(cnt<<1)];
+			}
+
+			return len;
+		}
+		pos += len<<1;
+		id = gayle_attribute[pos];
+	}
+
+	return 0;
+}
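Editor's note: attribute memory is byte-interleaved, hence the <<1 stepping, and each tuple is laid out as id, link byte, then 'link' bytes of payload. A hedged usage sketch; CISTPL_VERS_1 (the standard CIS code 0x15 for the version/product string) is assumed to be defined next to CISTPL_END in <asm/amipcmcia.h>, substitute the numeric value if it is not:

/* Hedged usage sketch, not part of the patch: fetch the card's version tuple. */
static int example_read_version(unsigned char *buf, int buflen)
{
	int n = pcmcia_copy_tuple(CISTPL_VERS_1, buf, buflen);

	/* buf[0] = tuple id, buf[1] = link byte, buf[2..n-1] = payload */
	return n > 2 ? n - 2 : 0;	/* number of payload bytes */
}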
+
+void pcmcia_program_voltage(int voltage)
+{
+	unsigned char v;
+
+	switch (voltage) {
+	case PCMCIA_0V:
+		v = GAYLE_CFG_0V;
+		break;
+	case PCMCIA_5V:
+		v = GAYLE_CFG_5V;
+		break;
+	case PCMCIA_12V:
+		v = GAYLE_CFG_12V;
+		break;
+	default:
+		v = GAYLE_CFG_0V;
+	}
+
+	cfg_byte = (cfg_byte & 0xfc) | v;
+	gayle.config = cfg_byte;
+
+}
+
+void pcmcia_access_speed(int speed)
+{
+	unsigned char s;
+
+	if (speed <= PCMCIA_SPEED_100NS)
+		s = GAYLE_CFG_100NS;
+	else if (speed <= PCMCIA_SPEED_150NS)
+		s = GAYLE_CFG_150NS;
+	else if (speed <= PCMCIA_SPEED_250NS)
+		s = GAYLE_CFG_250NS;
+	else
+		s = GAYLE_CFG_720NS;
+
+	cfg_byte = (cfg_byte & 0xf3) | s;
+	gayle.config = cfg_byte;
+}
+
+void pcmcia_write_enable(void)
+{
+	gayle.cardstatus = GAYLE_CS_WR|GAYLE_CS_DA;
+}
+
+void pcmcia_write_disable(void)
+{
+	gayle.cardstatus = 0;
+}
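Editor's note: voltage and access speed each own a two-bit field of the cached Gayle config byte (bits 0-1 and bits 2-3 respectively), so they can be set independently. An illustrative bring-up sequence for a 5 V card, using only the helpers defined in this file (not part of the patch):

/* Illustrative card bring-up, not part of the patch. */
static void example_card_setup(void)
{
	pcmcia_reset();				/* pulse the Gayle reset line */
	pcmcia_program_voltage(PCMCIA_5V);	/* bits 0-1 of the config byte */
	pcmcia_access_speed(PCMCIA_SPEED_250NS);/* bits 2-3 of the config byte */
	pcmcia_write_enable();			/* allow writes to the card */
}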
diff --git a/arch/m68k/apollo/Makefile b/arch/m68k/apollo/Makefile
new file mode 100644
index 0000000..39264f3
--- /dev/null
+++ b/arch/m68k/apollo/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Linux arch/m68k/apollo source directory
+#
+
+obj-y		:= config.o dn_ints.o dma.o
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
new file mode 100644
index 0000000..2649294
--- /dev/null
+++ b/arch/m68k/apollo/config.c
@@ -0,0 +1,305 @@
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/rtc.h>
+#include <linux/vt_kern.h>
+#include <linux/interrupt.h>
+
+#include <asm/setup.h>
+#include <asm/bootinfo.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/apollohw.h>
+#include <asm/irq.h>
+#include <asm/rtc.h>
+#include <asm/machdep.h>
+
+u_long sio01_physaddr;
+u_long sio23_physaddr;
+u_long rtc_physaddr;
+u_long pica_physaddr;
+u_long picb_physaddr;
+u_long cpuctrl_physaddr;
+u_long timer_physaddr;
+u_long apollo_model;
+
+extern void dn_sched_init(irqreturn_t (*handler)(int,void *,struct pt_regs *));
+extern void dn_init_IRQ(void);
+extern int dn_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long flags, const char *devname, void *dev_id);
+extern void dn_free_irq(unsigned int irq, void *dev_id);
+extern void dn_enable_irq(unsigned int);
+extern void dn_disable_irq(unsigned int);
+extern int show_dn_interrupts(struct seq_file *, void *);
+extern unsigned long dn_gettimeoffset(void);
+extern int dn_dummy_hwclk(int, struct rtc_time *);
+extern int dn_dummy_set_clock_mmss(unsigned long);
+extern void dn_dummy_reset(void);
+extern void dn_dummy_waitbut(void);
+extern struct fb_info *dn_fb_init(long *);
+extern void dn_dummy_debug_init(void);
+extern void dn_dummy_video_setup(char *,int *);
+extern irqreturn_t dn_process_int(int irq, struct pt_regs *fp);
+#ifdef CONFIG_HEARTBEAT
+static void dn_heartbeat(int on);
+#endif
+static irqreturn_t dn_timer_int(int irq,void *, struct pt_regs *);
+static irqreturn_t (*sched_timer_handler)(int, void *, struct pt_regs *)=NULL;
+static void dn_get_model(char *model);
+static const char *apollo_models[] = {
+	[APOLLO_DN3000-APOLLO_DN3000] = "DN3000 (Otter)",
+	[APOLLO_DN3010-APOLLO_DN3000] = "DN3010 (Otter)",
+	[APOLLO_DN3500-APOLLO_DN3000] = "DN3500 (Cougar II)",
+	[APOLLO_DN4000-APOLLO_DN3000] = "DN4000 (Mink)",
+	[APOLLO_DN4500-APOLLO_DN3000] = "DN4500 (Roadrunner)"
+};
+
+int apollo_parse_bootinfo(const struct bi_record *record) {
+
+	int unknown = 0;
+	const unsigned long *data = record->data;
+
+	switch(record->tag) {
+		case BI_APOLLO_MODEL:
+			apollo_model=*data;
+			break;
+
+		default:
+			 unknown=1;
+	}
+
+	return unknown;
+}
+
+void dn_setup_model(void) {
+
+
+	printk("Apollo hardware found: ");
+	printk("[%s]\n", apollo_models[apollo_model - APOLLO_DN3000]);
+
+	switch(apollo_model) {
+		case APOLLO_UNKNOWN:
+			panic("Unknown apollo model");
+			break;
+		case APOLLO_DN3000:
+		case APOLLO_DN3010:
+			sio01_physaddr=SAU8_SIO01_PHYSADDR;
+			rtc_physaddr=SAU8_RTC_PHYSADDR;
+			pica_physaddr=SAU8_PICA;
+			picb_physaddr=SAU8_PICB;
+			cpuctrl_physaddr=SAU8_CPUCTRL;
+			timer_physaddr=SAU8_TIMER;
+			break;
+		case APOLLO_DN4000:
+			sio01_physaddr=SAU7_SIO01_PHYSADDR;
+			sio23_physaddr=SAU7_SIO23_PHYSADDR;
+			rtc_physaddr=SAU7_RTC_PHYSADDR;
+			pica_physaddr=SAU7_PICA;
+			picb_physaddr=SAU7_PICB;
+			cpuctrl_physaddr=SAU7_CPUCTRL;
+			timer_physaddr=SAU7_TIMER;
+			break;
+		case APOLLO_DN4500:
+			panic("Apollo model not yet supported");
+			break;
+		case APOLLO_DN3500:
+			sio01_physaddr=SAU7_SIO01_PHYSADDR;
+			sio23_physaddr=SAU7_SIO23_PHYSADDR;
+			rtc_physaddr=SAU7_RTC_PHYSADDR;
+			pica_physaddr=SAU7_PICA;
+			picb_physaddr=SAU7_PICB;
+			cpuctrl_physaddr=SAU7_CPUCTRL;
+			timer_physaddr=SAU7_TIMER;
+			break;
+		default:
+			panic("Undefined apollo model");
+			break;
+	}
+
+
+}
+
+int dn_serial_console_wait_key(struct console *co) {
+
+	while(!(sio01.srb_csrb & 1))
+		barrier();
+	return sio01.rhrb_thrb;
+}
+
+void dn_serial_console_write (struct console *co, const char *str, unsigned int count)
+{
+	while (count--) {
+		if (*str == '\n') {
+			sio01.rhrb_thrb = (unsigned char)'\r';
+			while (!(sio01.srb_csrb & 0x4))
+				;
+		}
+		sio01.rhrb_thrb = (unsigned char)*str++;
+		while (!(sio01.srb_csrb & 0x4))
+			;
+	}
+}
+
+void dn_serial_print (const char *str)
+{
+    while (*str) {
+        if (*str == '\n') {
+            sio01.rhrb_thrb = (unsigned char)'\r';
+            while (!(sio01.srb_csrb & 0x4))
+                ;
+        }
+        sio01.rhrb_thrb = (unsigned char)*str++;
+        while (!(sio01.srb_csrb & 0x4))
+            ;
+    }
+}
+
+void config_apollo(void) {
+
+	int i;
+
+	dn_setup_model();
+
+	mach_sched_init=dn_sched_init; /* */
+	mach_init_IRQ=dn_init_IRQ;
+	mach_default_handler=NULL;
+	mach_request_irq     = dn_request_irq;
+	mach_free_irq        = dn_free_irq;
+	enable_irq      = dn_enable_irq;
+	disable_irq     = dn_disable_irq;
+	mach_get_irq_list    = show_dn_interrupts;
+	mach_gettimeoffset   = dn_gettimeoffset;
+	mach_max_dma_address = 0xffffffff;
+	mach_hwclk           = dn_dummy_hwclk; /* */
+	mach_set_clock_mmss  = dn_dummy_set_clock_mmss; /* */
+	mach_process_int     = dn_process_int;
+	mach_reset	     = dn_dummy_reset;  /* */
+#ifdef CONFIG_DUMMY_CONSOLE
+        conswitchp           = &dummy_con;
+#endif
+#ifdef CONFIG_HEARTBEAT
+	mach_heartbeat = dn_heartbeat;
+#endif
+	mach_get_model       = dn_get_model;
+
+	cpuctrl=0xaa00;
+
+	/* clear DMA translation table */
+	for(i=0;i<0x400;i++)
+		addr_xlat_map[i]=0;
+
+}
+
+irqreturn_t dn_timer_int(int irq, void *dev_id, struct pt_regs *fp) {
+
+	volatile unsigned char x;
+
+	sched_timer_handler(irq,dev_id,fp);
+
+	x=*(volatile unsigned char *)(timer+3);
+	x=*(volatile unsigned char *)(timer+5);
+
+	return IRQ_HANDLED;
+}
+
+void dn_sched_init(irqreturn_t (*timer_routine)(int, void *, struct pt_regs *)) {
+
+	/* program timer 1 */
+	*(volatile unsigned char *)(timer+3)=0x01;
+	*(volatile unsigned char *)(timer+1)=0x40;
+	*(volatile unsigned char *)(timer+5)=0x09;
+	*(volatile unsigned char *)(timer+7)=0xc4;
+
+	/* enable IRQ of PIC B */
+	*(volatile unsigned char *)(pica+1)&=(~8);
+
+#if 0
+	printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
+	printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
+#endif
+
+	sched_timer_handler=timer_routine;
+	request_irq(0,dn_timer_int,0,NULL,NULL);
+
+}
+
+unsigned long dn_gettimeoffset(void) {
+
+	return 0xdeadbeef;
+
+}
+
+int dn_dummy_hwclk(int op, struct rtc_time *t) {
+
+
+  if(!op) { /* read */
+    t->tm_sec=rtc->second;
+    t->tm_min=rtc->minute;
+    t->tm_hour=rtc->hours;
+    t->tm_mday=rtc->day_of_month;
+    t->tm_wday=rtc->day_of_week;
+    t->tm_mon=rtc->month;
+    t->tm_year=rtc->year;
+  } else {
+    rtc->second=t->tm_sec;
+    rtc->minute=t->tm_min;
+    rtc->hours=t->tm_hour;
+    rtc->day_of_month=t->tm_mday;
+    if(t->tm_wday!=-1)
+      rtc->day_of_week=t->tm_wday;
+    rtc->month=t->tm_mon;
+    rtc->year=t->tm_year;
+  }
+
+  return 0;
+
+}
+
+int dn_dummy_set_clock_mmss(unsigned long nowtime) {
+
+  printk("set_clock_mmss\n");
+
+  return 0;
+
+}
+
+void dn_dummy_reset(void) {
+
+  dn_serial_print("The end !\n");
+
+  for(;;);
+
+}
+
+void dn_dummy_waitbut(void) {
+
+  dn_serial_print("waitbut\n");
+
+}
+
+static void dn_get_model(char *model)
+{
+    strcpy(model, "Apollo ");
+    if (apollo_model >= APOLLO_DN3000 && apollo_model <= APOLLO_DN4500)
+        strcat(model, apollo_models[apollo_model - APOLLO_DN3000]);
+}
+
+#ifdef CONFIG_HEARTBEAT
+static int dn_cpuctrl=0xff00;
+
+static void dn_heartbeat(int on) {
+
+	if(on) {
+		dn_cpuctrl&=~0x100;
+		cpuctrl=dn_cpuctrl;
+	}
+	else {
+		dn_cpuctrl|=0x100;
+		cpuctrl=dn_cpuctrl;
+	}
+}
+#endif
+
diff --git a/arch/m68k/apollo/dma.c b/arch/m68k/apollo/dma.c
new file mode 100644
index 0000000..aed8be1
--- /dev/null
+++ b/arch/m68k/apollo/dma.c
@@ -0,0 +1,50 @@
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/kd.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+
+#include <asm/setup.h>
+#include <asm/bootinfo.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/apollodma.h>
+#include <asm/io.h>
+
+/* note: only works for 16-bit, single-page DMAs */
+
+static unsigned short next_free_xlat_entry=0;
+
+unsigned short dma_map_page(unsigned long phys_addr,int count,int type) {
+
+	unsigned long page_aligned_addr=phys_addr & (~((1<<12)-1));
+	unsigned short start_map_addr=page_aligned_addr >> 10;
+	unsigned short free_xlat_entry, *xlat_map_entry;
+	int i;
+
+	free_xlat_entry=next_free_xlat_entry;
+	for(i=0,xlat_map_entry=addr_xlat_map+(free_xlat_entry<<2);i<8;i++,xlat_map_entry++) {
+#if 0
+		printk("phys_addr: %x, page_aligned_addr: %x, start_map_addr: %x\n",phys_addr,page_aligned_addr,start_map_addr+i);
+#endif
+		out_be16(xlat_map_entry, start_map_addr+i);
+	}
+
+	next_free_xlat_entry+=2;
+	if(next_free_xlat_entry>125)
+		next_free_xlat_entry=0;
+
+#if 0
+	printk("next_free_xlat_entry: %d\n",next_free_xlat_entry);
+#endif
+
+	return free_xlat_entry<<10;
+}
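Editor's note: each translation-table entry covers 1 KB of physical space (the >> 10) and a mapping fills eight consecutive entries starting at a 4 KB-aligned page, so one call describes an 8 KB window. A trace of the arithmetic for one sample address (illustration only; what the hardware does with the returned value is not spelled out here):

#include <stdio.h>

/* Trace of the arithmetic in dma_map_page() for one sample address. */
int main(void)
{
	unsigned long phys_addr = 0x00123456;
	unsigned long page_aligned = phys_addr & ~((1UL << 12) - 1);
	unsigned short start_map = page_aligned >> 10;
	unsigned short free_entry = 4;	/* sample value of next_free_xlat_entry */

	printf("page_aligned = 0x%06lx\n", page_aligned);	/* 0x123000 */
	printf("table entries %d..%d get values 0x%x..0x%x\n",
	       free_entry << 2, (free_entry << 2) + 7,
	       start_map, start_map + 7);			/* 0x48c..0x493 */
	printf("dma_map_page() returns 0x%x\n", free_entry << 10);
	return 0;
}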
+
+void dma_unmap_page(unsigned short dma_addr) {
+
+	return ;
+
+}
+
diff --git a/arch/m68k/apollo/dn_ints.c b/arch/m68k/apollo/dn_ints.c
new file mode 100644
index 0000000..a312593
--- /dev/null
+++ b/arch/m68k/apollo/dn_ints.c
@@ -0,0 +1,125 @@
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/kernel_stat.h>
+#include <linux/timer.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/page.h>
+#include <asm/machdep.h>
+#include <asm/apollohw.h>
+#include <asm/errno.h>
+
+static irq_handler_t dn_irqs[16];
+
+irqreturn_t dn_process_int(int irq, struct pt_regs *fp)
+{
+  irqreturn_t res = IRQ_NONE;
+
+  if(dn_irqs[irq-160].handler) {
+    res = dn_irqs[irq-160].handler(irq,dn_irqs[irq-160].dev_id,fp);
+  } else {
+    printk("spurious irq %d occurred\n",irq);
+  }
+
+  /* send a non-specific end-of-interrupt (0x20) to both interrupt controllers */
+  *(volatile unsigned char *)(pica)=0x20;
+  *(volatile unsigned char *)(picb)=0x20;
+
+  return res;
+}
+
+void dn_init_IRQ(void) {
+
+  int i;
+
+  for(i=0;i<16;i++) {
+    dn_irqs[i].handler=NULL;
+    dn_irqs[i].flags=IRQ_FLG_STD;
+    dn_irqs[i].dev_id=NULL;
+    dn_irqs[i].devname=NULL;
+  }
+
+}
+
+int dn_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long flags, const char *devname, void *dev_id) {
+
+  if((irq<0) || (irq>15)) {
+    printk("Trying to request invalid IRQ\n");
+    return -ENXIO;
+  }
+
+  if(!dn_irqs[irq].handler) {
+    dn_irqs[irq].handler=handler;
+    dn_irqs[irq].flags=IRQ_FLG_STD;
+    dn_irqs[irq].dev_id=dev_id;
+    dn_irqs[irq].devname=devname;
+    if(irq<8)
+      *(volatile unsigned char *)(pica+1)&=~(1<<irq);
+    else
+      *(volatile unsigned char *)(picb+1)&=~(1<<(irq-8));
+
+    return 0;
+  }
+  else {
+    printk("Trying to request already assigned irq %d\n",irq);
+    return -ENXIO;
+  }
+
+}
+
+void dn_free_irq(unsigned int irq, void *dev_id) {
+
+  if((irq<0) || (irq>15)) {
+    printk("Trying to free invalid IRQ\n");
+    return ;
+  }
+
+  if(irq<8)
+    *(volatile unsigned char *)(pica+1)|=(1<<irq);
+  else
+    *(volatile unsigned char *)(picb+1)|=(1<<(irq-8));
+
+  dn_irqs[irq].handler=NULL;
+  dn_irqs[irq].flags=IRQ_FLG_STD;
+  dn_irqs[irq].dev_id=NULL;
+  dn_irqs[irq].devname=NULL;
+
+  return ;
+
+}
+
+void dn_enable_irq(unsigned int irq) {
+
+  printk("dn enable irq\n");
+
+}
+
+void dn_disable_irq(unsigned int irq) {
+
+  printk("dn disable irq\n");
+
+}
+
+int show_dn_interrupts(struct seq_file *p, void *v) {
+
+  printk("dn get irq list\n");
+
+  return 0;
+
+}
+
+struct fb_info *dn_dummy_fb_init(long *mem_start) {
+
+  printk("fb init\n");
+
+  return NULL;
+
+}
+
+void dn_dummy_video_setup(char *options,int *ints) {
+
+  printk("no video yet\n");
+
+}
diff --git a/arch/m68k/atari/Makefile b/arch/m68k/atari/Makefile
new file mode 100644
index 0000000..8cb6236
--- /dev/null
+++ b/arch/m68k/atari/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for Linux arch/m68k/atari source directory
+#
+
+obj-y		:= config.o time.o debug.o ataints.o stdma.o \
+			atasound.o stram.o atari_ksyms.o
+
+ifeq ($(CONFIG_PCI),y)
+obj-$(CONFIG_HADES)	+= hades-pci.o
+endif
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
new file mode 100644
index 0000000..076f479
--- /dev/null
+++ b/arch/m68k/atari/ataints.c
@@ -0,0 +1,648 @@
+/*
+ * arch/m68k/atari/ataints.c -- Atari Linux interrupt handling code
+ *
+ * 5/2/94 Roman Hodek:
+ *  Added support for TT interrupts; setup for TT SCU (maybe someone has
+ *  twiddled there and we won't get the right interrupts :-()
+ *
+ *  Major change: The device-independent code in m68k/ints.c didn't know
+ *  about non-autovec ints yet. It hardcoded the number of possible ints to
+ *  7 (IRQ1...IRQ7). But the Atari has lots of non-autovec ints! I made the
+ *  number of possible ints a constant defined in interrupt.h, which is
+ *  47 for the Atari. So we can call request_irq() for all Atari interrupts
+ *  just the normal way. Additionally, all vectors >= 48 are initialized to
+ *  call trap() instead of inthandler(). This must be changed here, too.
+ *
+ * 1995-07-16 Lars Brinkhoff <f93labr@dd.chalmers.se>:
+ *  Corrected a bug in atari_add_isr() which rejected all SCC
+ *  interrupt sources if there were no TT MFP!
+ *
+ * 12/13/95: New interface functions atari_level_triggered_int() and
+ *  atari_register_vme_int() as support for level triggered VME interrupts.
+ *
+ * 02/12/96: (Roman)
+ *  Total rewrite of Atari interrupt handling, for new scheme see comments
+ *  below.
+ *
+ * 1996-09-03 lars brinkhoff <f93labr@dd.chalmers.se>:
+ *  Added new function atari_unregister_vme_int(), and
+ *  modified atari_register_vme_int() as well as IS_VALID_INTNO()
+ *  to work with it.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+
+#include <asm/system.h>
+#include <asm/traps.h>
+
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/atari_stdma.h>
+#include <asm/irq.h>
+#include <asm/entry.h>
+
+
+/*
+ * Atari interrupt handling scheme:
+ * --------------------------------
+ *
+ * All interrupt sources have an internal number (defined in
+ * <asm/atariints.h>): Autovector interrupts are 1..7, then follow ST-MFP,
+ * TT-MFP, SCC, and finally VME interrupts. Vector numbers for the latter can
+ * be allocated by atari_register_vme_int().
+ *
+ * Each interrupt can be of three types:
+ *
+ *  - SLOW: The handler runs with all interrupts enabled, except the one it
+ *    was called by (to avoid reentering). This should be the usual method.
+ *    But it is currently possible only for MFP ints, since only the MFP
+ *    offers an easy way to mask interrupts.
+ *
+ *  - FAST: The handler runs with all interrupts disabled. This should be used
+ *    only for really fast handlers that just do the immediately necessary
+ *    actions and defer the rest to a bottom half or task queue.
+ *
+ *  - PRIORITIZED: The handler can be interrupted by higher-level ints
+ *    (greater IPL, no MFP priorities!). This is the method of choice for ints
+ *    which should be slow, but are not from an MFP.
+ *
+ * The feature of more than one handler for one int source is still there, but
+ * only applicable if all handlers are of the same type. To not slow down
+ * processing of ints with only one handler by the chaining feature, the list
+ * calling function atari_call_irq_list() is only plugged in at the time the
+ * second handler is registered.
+ *
+ * Implementation notes: For fast-as-possible int handling, there are separate
+ * entry points for each type (slow/fast/prio). The assembler handler calls
+ * the irq directly in the usual case, no C wrapper is involved. In case of
+ * multiple handlers, atari_call_irq_list() is registered as handler and calls
+ * in turn the real irq's. To ease access from assembler level to the irq
+ * function pointer and accompanying data, these two are stored in a separate
+ * array, irq_handler[]. The rest of the data (type, name) is put into a second
+ * array, irq_param, that is accessed from C only. For each slow interrupt (32
+ * in all) there are separate handler functions, which makes it possible to
+ * hard-code the MFP register address and value that are necessary to mask the
+ * int. If there'd be only one generic function, lots of calculations would be
+ * needed to determine MFP register and int mask from the vector number :-(
+ *
+ * Furthermore, slow ints may not lower the IPL below its previous value
+ * (before the int happened). This is needed so that an int of class PRIO, on
+ * top of which this int may be stacked, cannot be reentered. This feature is
+ * implemented as follows: If the stack frame format is 1 (throwaway), the int
+ * is not stacked, and the IPL is anded with 0xfbff, resulting in a new level
+ * 2, which still blocks the HSYNC, but no interrupts of interest. If the
+ * frame format is 0, the int is nested, and the old IPL value can be found in
+ * the sr copy in the frame.
+ */
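Editor's note on the 0xfbff trick described above: the IPL sits in bits 8-10 of the SR, so ANDing a level-6 status register with 0xfbff clears bit 10 and leaves level 2. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
	unsigned short sr = 0x2600;		/* supervisor, IPL 6 */
	unsigned short lowered = sr & 0xfbff;	/* clear bit 10 of the IPL field */

	printf("IPL %d -> IPL %d\n", (sr >> 8) & 7, (lowered >> 8) & 7);  /* 6 -> 2 */
	return 0;
}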
+
+
+#define	NUM_INT_SOURCES	(8 + NUM_ATARI_SOURCES)
+
+typedef void (*asm_irq_handler)(void);
+
+struct irqhandler {
+	irqreturn_t (*handler)(int, void *, struct pt_regs *);
+	void	*dev_id;
+};
+
+struct irqparam {
+	unsigned long	flags;
+	const char	*devname;
+};
+
+/*
+ * Array with irq's and their parameter data. This array is accessed from low
+ * level assembler code, so an element size of 8 allows usage of index scaling
+ * addressing mode.
+ */
+static struct irqhandler irq_handler[NUM_INT_SOURCES];
+
+/*
+ * This array hold the rest of parameters of int handlers: type
+ * (slow,fast,prio) and the name of the handler. These values are only
+ * accessed from C
+ */
+static struct irqparam irq_param[NUM_INT_SOURCES];
+
+/*
+ * Bitmap for free interrupt vector numbers
+ * (new vectors starting from 0x70 can be allocated by
+ * atari_register_vme_int())
+ */
+static int free_vme_vec_bitmap;
+
+/* check for valid int number (complex, sigh...) */
+#define	IS_VALID_INTNO(n)											\
+	((n) > 0 &&														\
+	 /* autovec and ST-MFP ok anyway */								\
+	 (((n) < TTMFP_SOURCE_BASE) ||									\
+	  /* TT-MFP ok if present */									\
+	  ((n) >= TTMFP_SOURCE_BASE && (n) < SCC_SOURCE_BASE &&			\
+	   ATARIHW_PRESENT(TT_MFP)) ||									\
+	  /* SCC ok if present and number even */						\
+	  ((n) >= SCC_SOURCE_BASE && (n) < VME_SOURCE_BASE &&			\
+	   !((n) & 1) && ATARIHW_PRESENT(SCC)) ||						\
+	  /* greater numbers ok if they are registered VME vectors */		\
+	  ((n) >= VME_SOURCE_BASE && (n) < VME_SOURCE_BASE + VME_MAX_SOURCES && \
+		  free_vme_vec_bitmap & (1 << ((n) - VME_SOURCE_BASE)))))
+
+
+/*
+ * Here start the assembler entry points for interrupts
+ */
+
+#define IRQ_NAME(nr) atari_slow_irq_##nr##_handler(void)
+
+#define	BUILD_SLOW_IRQ(n)						   \
+asmlinkage void IRQ_NAME(n);						   \
+/* Dummy function to allow asm with operands.  */			   \
+void atari_slow_irq_##n##_dummy (void) {				   \
+__asm__ (__ALIGN_STR "\n"						   \
+"atari_slow_irq_" #n "_handler:\t"					   \
+"	addl	%6,%5\n"	/* preempt_count() += HARDIRQ_OFFSET */	   \
+	SAVE_ALL_INT "\n"						   \
+	GET_CURRENT(%%d0) "\n"						   \
+"	andb	#~(1<<(%c3&7)),%a4:w\n"	/* mask this interrupt */	   \
+	/* get old IPL from stack frame */				   \
+"	bfextu	%%sp@(%c2){#5,#3},%%d0\n"				   \
+"	movew	%%sr,%%d1\n"						   \
+"	bfins	%%d0,%%d1{#21,#3}\n"					   \
+"	movew	%%d1,%%sr\n"		/* set IPL = previous value */	   \
+"	addql	#1,%a0\n"						   \
+"	lea	%a1,%%a0\n"						   \
+"	pea	%%sp@\n"		/* push addr of frame */	   \
+"	movel	%%a0@(4),%%sp@-\n"	/* push handler data */		   \
+"	pea	(%c3+8)\n"		/* push int number */		   \
+"	movel	%%a0@,%%a0\n"						   \
+"	jbsr	%%a0@\n"		/* call the handler */		   \
+"	addql	#8,%%sp\n"						   \
+"	addql	#4,%%sp\n"						   \
+"	orw	#0x0600,%%sr\n"						   \
+"	andw	#0xfeff,%%sr\n"		/* set IPL = 6 again */		   \
+"	orb	#(1<<(%c3&7)),%a4:w\n"	/* now unmask the int again */	   \
+"	jbra	ret_from_interrupt\n"					   \
+	 : : "i" (&kstat_cpu(0).irqs[n+8]), "i" (&irq_handler[n+8]),	   \
+	     "n" (PT_OFF_SR), "n" (n),					   \
+	     "i" (n & 8 ? (n & 16 ? &tt_mfp.int_mk_a : &mfp.int_mk_a)	   \
+		        : (n & 16 ? &tt_mfp.int_mk_b : &mfp.int_mk_b)),	   \
+	     "m" (preempt_count()), "di" (HARDIRQ_OFFSET)		   \
+);									   \
+	for (;;);			/* fake noreturn */		   \
+}
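Editor's note, reading the last asm operand of BUILD_SLOW_IRQ(): bit 4 of the slow source number selects the TT MFP over the ST MFP, bit 3 selects mask register A over B, and the bit within the register is 1 << (n & 7). A hedged C restatement of that mapping, for reading the assembler rather than for use:

#include <stdio.h>

/* Which mask register and bit does slow interrupt source n use?
 * Derived from the operand list of BUILD_SLOW_IRQ() above. */
static void describe_slow_irq(int n)
{
	const char *chip = (n & 16) ? "tt_mfp" : "mfp";
	const char *reg  = (n & 8) ? "int_mk_a" : "int_mk_b";

	printf("slow irq %2d -> %s.%s, bit mask 0x%02x\n",
	       n, chip, reg, 1 << (n & 7));
}

int main(void)
{
	describe_slow_irq(5);	/* mfp.int_mk_b, bit 0x20 */
	describe_slow_irq(13);	/* mfp.int_mk_a, bit 0x20 */
	describe_slow_irq(29);	/* tt_mfp.int_mk_a, bit 0x20 */
	return 0;
}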
+
+BUILD_SLOW_IRQ(0);
+BUILD_SLOW_IRQ(1);
+BUILD_SLOW_IRQ(2);
+BUILD_SLOW_IRQ(3);
+BUILD_SLOW_IRQ(4);
+BUILD_SLOW_IRQ(5);
+BUILD_SLOW_IRQ(6);
+BUILD_SLOW_IRQ(7);
+BUILD_SLOW_IRQ(8);
+BUILD_SLOW_IRQ(9);
+BUILD_SLOW_IRQ(10);
+BUILD_SLOW_IRQ(11);
+BUILD_SLOW_IRQ(12);
+BUILD_SLOW_IRQ(13);
+BUILD_SLOW_IRQ(14);
+BUILD_SLOW_IRQ(15);
+BUILD_SLOW_IRQ(16);
+BUILD_SLOW_IRQ(17);
+BUILD_SLOW_IRQ(18);
+BUILD_SLOW_IRQ(19);
+BUILD_SLOW_IRQ(20);
+BUILD_SLOW_IRQ(21);
+BUILD_SLOW_IRQ(22);
+BUILD_SLOW_IRQ(23);
+BUILD_SLOW_IRQ(24);
+BUILD_SLOW_IRQ(25);
+BUILD_SLOW_IRQ(26);
+BUILD_SLOW_IRQ(27);
+BUILD_SLOW_IRQ(28);
+BUILD_SLOW_IRQ(29);
+BUILD_SLOW_IRQ(30);
+BUILD_SLOW_IRQ(31);
+
+asm_irq_handler slow_handlers[32] = {
+	[0]	= atari_slow_irq_0_handler,
+	[1]	= atari_slow_irq_1_handler,
+	[2]	= atari_slow_irq_2_handler,
+	[3]	= atari_slow_irq_3_handler,
+	[4]	= atari_slow_irq_4_handler,
+	[5]	= atari_slow_irq_5_handler,
+	[6]	= atari_slow_irq_6_handler,
+	[7]	= atari_slow_irq_7_handler,
+	[8]	= atari_slow_irq_8_handler,
+	[9]	= atari_slow_irq_9_handler,
+	[10]	= atari_slow_irq_10_handler,
+	[11]	= atari_slow_irq_11_handler,
+	[12]	= atari_slow_irq_12_handler,
+	[13]	= atari_slow_irq_13_handler,
+	[14]	= atari_slow_irq_14_handler,
+	[15]	= atari_slow_irq_15_handler,
+	[16]	= atari_slow_irq_16_handler,
+	[17]	= atari_slow_irq_17_handler,
+	[18]	= atari_slow_irq_18_handler,
+	[19]	= atari_slow_irq_19_handler,
+	[20]	= atari_slow_irq_20_handler,
+	[21]	= atari_slow_irq_21_handler,
+	[22]	= atari_slow_irq_22_handler,
+	[23]	= atari_slow_irq_23_handler,
+	[24]	= atari_slow_irq_24_handler,
+	[25]	= atari_slow_irq_25_handler,
+	[26]	= atari_slow_irq_26_handler,
+	[27]	= atari_slow_irq_27_handler,
+	[28]	= atari_slow_irq_28_handler,
+	[29]	= atari_slow_irq_29_handler,
+	[30]	= atari_slow_irq_30_handler,
+	[31]	= atari_slow_irq_31_handler
+};
+
+asmlinkage void atari_fast_irq_handler( void );
+asmlinkage void atari_prio_irq_handler( void );
+
+/* Dummy function to allow asm with operands.  */
+void atari_fast_prio_irq_dummy (void) {
+__asm__ (__ALIGN_STR "\n"
+"atari_fast_irq_handler:\n\t"
+	"orw	#0x700,%%sr\n"		/* disable all interrupts */
+"atari_prio_irq_handler:\n\t"
+	"addl	%3,%2\n\t"		/* preempt_count() += HARDIRQ_OFFSET */
+	SAVE_ALL_INT "\n\t"
+	GET_CURRENT(%%d0) "\n\t"
+	/* get vector number from stack frame and convert to source */
+	"bfextu	%%sp@(%c1){#4,#10},%%d0\n\t"
+	"subw	#(0x40-8),%%d0\n\t"
+	"jpl	1f\n\t"
+	"addw	#(0x40-8-0x18),%%d0\n"
+    "1:\tlea	%a0,%%a0\n\t"
+	"addql	#1,%%a0@(%%d0:l:4)\n\t"
+	"lea	irq_handler,%%a0\n\t"
+	"lea	%%a0@(%%d0:l:8),%%a0\n\t"
+	"pea	%%sp@\n\t"		/* push frame address */
+	"movel	%%a0@(4),%%sp@-\n\t"	/* push handler data */
+	"movel	%%d0,%%sp@-\n\t"	/* push int number */
+	"movel	%%a0@,%%a0\n\t"
+	"jsr	%%a0@\n\t"		/* and call the handler */
+	"addql	#8,%%sp\n\t"
+	"addql	#4,%%sp\n\t"
+	"jbra	ret_from_interrupt"
+	 : : "i" (&kstat_cpu(0).irqs), "n" (PT_OFF_FORMATVEC),
+	     "m" (preempt_count()), "di" (HARDIRQ_OFFSET)
+);
+	for (;;);
+}
+
+/* GK:
+ * HBL IRQ handler for Falcon. Nobody needs it :-)
+ * ++andreas: raise ipl to disable further HBLANK interrupts.
+ */
+asmlinkage void falcon_hblhandler(void);
+asm(".text\n"
+__ALIGN_STR "\n\t"
+"falcon_hblhandler:\n\t"
+	"orw	#0x200,%sp@\n\t"	/* set saved ipl to 2 */
+	"rte");
+
+/* Defined in entry.S; only increments 'num_spurious' */
+asmlinkage void bad_interrupt(void);
+
+extern void atari_microwire_cmd( int cmd );
+
+extern int atari_SCC_reset_done;
+
+/*
+ * void atari_init_IRQ (void)
+ *
+ * Parameters:	None
+ *
+ * Returns:	Nothing
+ *
+ * This function should be called during kernel startup to initialize
+ * the atari IRQ handling routines.
+ */
+
+void __init atari_init_IRQ(void)
+{
+	int i;
+
+	/* initialize the vector table */
+	for (i = 0; i < NUM_INT_SOURCES; ++i) {
+		vectors[IRQ_SOURCE_TO_VECTOR(i)] = bad_interrupt;
+	}
+
+	/* Initialize the MFP(s) */
+
+#ifdef ATARI_USE_SOFTWARE_EOI
+	mfp.vec_adr  = 0x48;	/* Software EOI-Mode */
+#else
+	mfp.vec_adr  = 0x40;	/* Automatic EOI-Mode */
+#endif
+	mfp.int_en_a = 0x00;	/* turn off MFP-Ints */
+	mfp.int_en_b = 0x00;
+	mfp.int_mk_a = 0xff;	/* no Masking */
+	mfp.int_mk_b = 0xff;
+
+	if (ATARIHW_PRESENT(TT_MFP)) {
+#ifdef ATARI_USE_SOFTWARE_EOI
+		tt_mfp.vec_adr  = 0x58;		/* Software EOI-Mode */
+#else
+		tt_mfp.vec_adr  = 0x50;		/* Automatic EOI-Mode */
+#endif
+		tt_mfp.int_en_a = 0x00;		/* turn off MFP-Ints */
+		tt_mfp.int_en_b = 0x00;
+		tt_mfp.int_mk_a = 0xff;		/* no Masking */
+		tt_mfp.int_mk_b = 0xff;
+	}
+
+	if (ATARIHW_PRESENT(SCC) && !atari_SCC_reset_done) {
+		scc.cha_a_ctrl = 9;
+		MFPDELAY();
+		scc.cha_a_ctrl = (char) 0xc0; /* hardware reset */
+	}
+
+	if (ATARIHW_PRESENT(SCU)) {
+		/* init the SCU if present */
+		tt_scu.sys_mask = 0x10;		/* enable VBL (for the cursor) and
+									 * disable HSYNC interrupts (who
+									 * needs them?)  MFP and SCC are
+									 * enabled in VME mask
+									 */
+		tt_scu.vme_mask = 0x60;		/* enable MFP and SCC ints */
+	}
+	else {
+		/* If no SCU and no Hades, the HSYNC interrupt needs to be
+		 * disabled this way. (Else _inthandler in kernel/sys_call.S
+		 * gets overruns)
+		 */
+
+		if (!MACH_IS_HADES)
+			vectors[VEC_INT2] = falcon_hblhandler;
+	}
+
+	if (ATARIHW_PRESENT(PCM_8BIT) && ATARIHW_PRESENT(MICROWIRE)) {
+		/* Initialize the LM1992 Sound Controller to enable
+		   the PSG sound.  This is misplaced here, it should
+		   be in an atasound_init(), that doesn't exist yet. */
+		atari_microwire_cmd(MW_LM1992_PSG_HIGH);
+	}
+
+	stdma_init();
+
+	/* Initialize the PSG: all sounds off, both ports output */
+	sound_ym.rd_data_reg_sel = 7;
+	sound_ym.wd_data = 0xff;
+}
+
+
+static irqreturn_t atari_call_irq_list( int irq, void *dev_id, struct pt_regs *fp )
+{
+	irq_node_t *node;
+
+	for (node = (irq_node_t *)dev_id; node; node = node->next)
+		node->handler(irq, node->dev_id, fp);
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * atari_request_irq : add an interrupt service routine for a particular
+ *                     machine specific interrupt source.
+ *                     If the addition was successful, it returns 0.
+ */
+
+int atari_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                      unsigned long flags, const char *devname, void *dev_id)
+{
+	int vector;
+	unsigned long oflags = flags;
+
+	/*
+	 * The following is a hack to make some PCI card drivers work,
+	 * which set the SA_SHIRQ flag.
+	 */
+
+	flags &= ~SA_SHIRQ;
+
+	if (flags == SA_INTERRUPT) {
+		printk ("%s: SA_INTERRUPT changed to IRQ_TYPE_SLOW for %s\n",
+			__FUNCTION__, devname);
+		flags = IRQ_TYPE_SLOW;
+	}
+	if (flags < IRQ_TYPE_SLOW || flags > IRQ_TYPE_PRIO) {
+		printk ("%s: Bad irq type 0x%lx <0x%lx> requested from %s\n",
+		        __FUNCTION__, flags, oflags, devname);
+		return -EINVAL;
+	}
+	if (!IS_VALID_INTNO(irq)) {
+		printk ("%s: Unknown irq %d requested from %s\n",
+		        __FUNCTION__, irq, devname);
+		return -ENXIO;
+	}
+	vector = IRQ_SOURCE_TO_VECTOR(irq);
+
+	/*
+	 * Check type/source combination: slow ints are (currently)
+	 * only possible for MFP-interrupts.
+	 */
+	if (flags == IRQ_TYPE_SLOW &&
+		(irq < STMFP_SOURCE_BASE || irq >= SCC_SOURCE_BASE)) {
+		printk ("%s: Slow irq requested for non-MFP source %d from %s\n",
+		        __FUNCTION__, irq, devname);
+		return -EINVAL;
+	}
+
+	if (vectors[vector] == bad_interrupt) {
+		/* int has no handler yet */
+		irq_handler[irq].handler = handler;
+		irq_handler[irq].dev_id  = dev_id;
+		irq_param[irq].flags   = flags;
+		irq_param[irq].devname = devname;
+		vectors[vector] =
+			(flags == IRQ_TYPE_SLOW) ? slow_handlers[irq-STMFP_SOURCE_BASE] :
+			(flags == IRQ_TYPE_FAST) ? atari_fast_irq_handler :
+			                          atari_prio_irq_handler;
+		/* If MFP int, also enable and unmask it */
+		atari_turnon_irq(irq);
+		atari_enable_irq(irq);
+
+		return 0;
+	}
+	else if (irq_param[irq].flags == flags) {
+		/* old handler is of same type -> handlers can be chained */
+		irq_node_t *node;
+		unsigned long flags;
+
+		local_irq_save(flags);
+
+		if (irq_handler[irq].handler != atari_call_irq_list) {
+			/* Only one handler yet, make a node for this first one */
+			if (!(node = new_irq_node())) {
+				/* don't leave interrupts disabled on failure */
+				local_irq_restore(flags);
+				return -ENOMEM;
+			}
+			node->handler = irq_handler[irq].handler;
+			node->dev_id  = irq_handler[irq].dev_id;
+			node->devname = irq_param[irq].devname;
+			node->next = NULL;
+
+			irq_handler[irq].handler = atari_call_irq_list;
+			irq_handler[irq].dev_id  = node;
+			irq_param[irq].devname   = "chained";
+		}
+
+		if (!(node = new_irq_node())) {
+			/* don't leave interrupts disabled on failure */
+			local_irq_restore(flags);
+			return -ENOMEM;
+		}
+		node->handler = handler;
+		node->dev_id  = dev_id;
+		node->devname = devname;
+		/* new handlers are put in front of the queue */
+		node->next = irq_handler[irq].dev_id;
+		irq_handler[irq].dev_id = node;
+
+		local_irq_restore(flags);
+		return 0;
+	} else {
+		printk ("%s: Irq %d allocated by other type int (call from %s)\n",
+		        __FUNCTION__, irq, devname);
+		return -EBUSY;
+	}
+}
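+/*
+ * Illustrative sketch only (not part of this patch): a driver would hook an
+ * MFP interrupt source roughly like this.  The handler, the device name and
+ * the use of IRQ_MFP_ACIA are hypothetical placeholders.
+ */
+#if 0
+static irqreturn_t my_irq_handler(int irq, void *dev_id, struct pt_regs *fp)
+{
+	/* service the device here */
+	return IRQ_HANDLED;
+}
+
+static int __init my_driver_init(void)
+{
+	int err;
+
+	err = atari_request_irq(IRQ_MFP_ACIA, my_irq_handler, IRQ_TYPE_SLOW,
+				"my-driver", NULL);
+	if (err)
+		return err;
+	/* ... and on teardown: atari_free_irq(IRQ_MFP_ACIA, NULL); */
+	return 0;
+}
+#endif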
+
+void atari_free_irq(unsigned int irq, void *dev_id)
+{
+	unsigned long flags;
+	int vector;
+	irq_node_t **list, *node;
+
+	if (!IS_VALID_INTNO(irq)) {
+		printk("%s: Unknown irq %d\n", __FUNCTION__, irq);
+		return;
+	}
+
+	vector = IRQ_SOURCE_TO_VECTOR(irq);
+	if (vectors[vector] == bad_interrupt)
+		goto not_found;
+
+	local_irq_save(flags);
+
+	if (irq_handler[irq].handler != atari_call_irq_list) {
+		/* It's the only handler for the interrupt */
+		if (irq_handler[irq].dev_id != dev_id) {
+			local_irq_restore(flags);
+			goto not_found;
+		}
+		irq_handler[irq].handler = NULL;
+		irq_handler[irq].dev_id  = NULL;
+		irq_param[irq].devname   = NULL;
+		vectors[vector] = bad_interrupt;
+		/* If MFP int, also disable it */
+		atari_disable_irq(irq);
+		atari_turnoff_irq(irq);
+
+		local_irq_restore(flags);
+		return;
+	}
+
+	/* The interrupt is chained, find the irq on the list */
+	for(list = (irq_node_t **)&irq_handler[irq].dev_id; *list; list = &(*list)->next) {
+		if ((*list)->dev_id == dev_id) break;
+	}
+	if (!*list) {
+		local_irq_restore(flags);
+		goto not_found;
+	}
+
+	(*list)->handler = NULL; /* Mark it as free for reallocation */
+	*list = (*list)->next;
+
+	/* If there's now only one handler, unchain the interrupt, i.e. plug in
+	 * the handler directly again and omit atari_call_irq_list */
+	node = (irq_node_t *)irq_handler[irq].dev_id;
+	if (node && !node->next) {
+		irq_handler[irq].handler = node->handler;
+		irq_handler[irq].dev_id  = node->dev_id;
+		irq_param[irq].devname   = node->devname;
+		node->handler = NULL; /* Mark it as free for reallocation */
+	}
+
+	local_irq_restore(flags);
+	return;
+
+not_found:
+	printk("%s: tried to remove invalid irq\n", __FUNCTION__);
+	return;
+}
+
+
+/*
+ * atari_register_vme_int() returns the number of a free interrupt vector for
+ * hardware with a programmable int vector (probably a VME board).
+ */
+
+unsigned long atari_register_vme_int(void)
+{
+	int i;
+
+	for(i = 0; i < 32; i++)
+		if((free_vme_vec_bitmap & (1 << i)) == 0)
+			break;
+
+	if(i == 16)
+		return 0;
+
+	free_vme_vec_bitmap |= 1 << i;
+	return (VME_SOURCE_BASE + i);
+}
+
+
+void atari_unregister_vme_int(unsigned long irq)
+{
+	if(irq >= VME_SOURCE_BASE && irq < VME_SOURCE_BASE + VME_MAX_SOURCES) {
+		irq -= VME_SOURCE_BASE;
+		free_vme_vec_bitmap &= ~(1 << irq);
+	}
+}
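+/*
+ * Illustrative sketch only (not part of this patch): a driver for a VME
+ * board with a programmable interrupt vector would combine the two calls
+ * above with atari_request_irq().  Handler and names are hypothetical.
+ */
+#if 0
+	unsigned long vme_irq = atari_register_vme_int();
+
+	if (!vme_irq)
+		return -EBUSY;	/* no free VME interrupt source */
+	/* tell the board to raise vector IRQ_SOURCE_TO_VECTOR(vme_irq) ... */
+	if (atari_request_irq(vme_irq, my_vme_handler, IRQ_TYPE_PRIO,
+			      "my-vme-board", NULL)) {
+		atari_unregister_vme_int(vme_irq);
+		return -EBUSY;
+	}
+#endif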
+
+
+int show_atari_interrupts(struct seq_file *p, void *v)
+{
+	int i;
+
+	for (i = 0; i < NUM_INT_SOURCES; ++i) {
+		if (vectors[IRQ_SOURCE_TO_VECTOR(i)] == bad_interrupt)
+			continue;
+		if (i < STMFP_SOURCE_BASE)
+			seq_printf(p, "auto %2d: %10u ",
+				       i, kstat_cpu(0).irqs[i]);
+		else
+			seq_printf(p, "vec $%02x: %10u ",
+				       IRQ_SOURCE_TO_VECTOR(i),
+				       kstat_cpu(0).irqs[i]);
+
+		if (irq_handler[i].handler != atari_call_irq_list) {
+			seq_printf(p, "%s\n", irq_param[i].devname);
+		}
+		else {
+			irq_node_t *n;
+			for( n = (irq_node_t *)irq_handler[i].dev_id; n; n = n->next ) {
+				seq_printf(p, "%s\n", n->devname);
+				if (n->next)
+					seq_puts(p, "                    " );
+			}
+		}
+	}
+	if (num_spurious)
+		seq_printf(p, "spurio.: %10u\n", num_spurious);
+
+	return 0;
+}
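+/*
+ * For illustration only (not part of this patch): with the format strings
+ * above, the interrupt listing produced via mach_get_irq_list looks
+ * roughly like
+ *
+ *	auto  4:     123456 some-autovec-user
+ *	vec $40:        789 driver-a
+ *	                    driver-b
+ *
+ * where auto-vector sources print their source number, all other sources
+ * print their vector, and chained sources list every attached device name.
+ * Counts and device names here are made up.
+ */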
+
+
diff --git a/arch/m68k/atari/atari_ksyms.c b/arch/m68k/atari/atari_ksyms.c
new file mode 100644
index 0000000..a047571
--- /dev/null
+++ b/arch/m68k/atari/atari_ksyms.c
@@ -0,0 +1,35 @@
+#include <linux/module.h>
+
+#include <asm/ptrace.h>
+#include <asm/traps.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/atarikb.h>
+#include <asm/atari_joystick.h>
+#include <asm/atari_stdma.h>
+#include <asm/atari_stram.h>
+
+extern void atari_microwire_cmd( int cmd );
+extern int atari_MFP_init_done;
+extern int atari_SCC_init_done;
+extern int atari_SCC_reset_done;
+
+EXPORT_SYMBOL(atari_mch_cookie);
+EXPORT_SYMBOL(atari_mch_type);
+EXPORT_SYMBOL(atari_hw_present);
+EXPORT_SYMBOL(atari_switches);
+EXPORT_SYMBOL(atari_dont_touch_floppy_select);
+EXPORT_SYMBOL(atari_register_vme_int);
+EXPORT_SYMBOL(atari_unregister_vme_int);
+EXPORT_SYMBOL(stdma_lock);
+EXPORT_SYMBOL(stdma_release);
+EXPORT_SYMBOL(stdma_others_waiting);
+EXPORT_SYMBOL(stdma_islocked);
+EXPORT_SYMBOL(atari_stram_alloc);
+EXPORT_SYMBOL(atari_stram_free);
+
+EXPORT_SYMBOL(atari_MFP_init_done);
+EXPORT_SYMBOL(atari_SCC_init_done);
+EXPORT_SYMBOL(atari_SCC_reset_done);
+
+EXPORT_SYMBOL(atari_microwire_cmd);
diff --git a/arch/m68k/atari/atasound.c b/arch/m68k/atari/atasound.c
new file mode 100644
index 0000000..ee04250
--- /dev/null
+++ b/arch/m68k/atari/atasound.c
@@ -0,0 +1,109 @@
+/*
+ * linux/arch/m68k/atari/atasound.c
+ *
+ * ++Geert: Moved almost all stuff to linux/drivers/sound/
+ *
+ * The author of atari_nosound, atari_mksound and atari_microwire_cmd is
+ * unknown. (++roman: That's me... :-)
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * 1998-05-31 ++andreas: atari_mksound rewritten to always use the envelope,
+ *			 no timer, atari_nosound removed.
+ *
+ */
+
+
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/major.h>
+#include <linux/fcntl.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+
+#include <asm/atarihw.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/pgtable.h>
+#include <asm/atariints.h>
+
+
+/*
+ * stuff from the old atasound.c
+ */
+
+void atari_microwire_cmd (int cmd)
+{
+	tt_microwire.mask = 0x7ff;
+	tt_microwire.data = MW_LM1992_ADDR | cmd;
+
+	/* Busy wait for data being completely sent :-( */
+	while( tt_microwire.mask != 0x7ff)
+		;
+}
+
+
+/* PSG base frequency */
+#define	PSG_FREQ	125000
+/* PSG envelope base frequency times 10 */
+#define PSG_ENV_FREQ_10	78125
+
+void atari_mksound (unsigned int hz, unsigned int ticks)
+{
+	/* Generates sound of some frequency for some number of clock
+	   ticks.  */
+	unsigned long flags;
+	unsigned char tmp;
+	int period;
+
+	local_irq_save(flags);
+
+
+	/* Disable generator A in mixer control.  */
+	sound_ym.rd_data_reg_sel = 7;
+	tmp = sound_ym.rd_data_reg_sel;
+	tmp |= 011;
+	sound_ym.wd_data = tmp;
+
+	if (hz) {
+	    /* Convert from frequency value to PSG period value (base
+	       frequency 125 kHz).  */
+
+	    period = PSG_FREQ / hz;
+
+	    if (period > 0xfff) period = 0xfff;
+
+	    /* Set generator A frequency to hz.  */
+	    sound_ym.rd_data_reg_sel = 0;
+	    sound_ym.wd_data = period & 0xff;
+	    sound_ym.rd_data_reg_sel = 1;
+	    sound_ym.wd_data = (period >> 8) & 0xf;
+	    if (ticks) {
+		/* Set length of envelope (max 8 sec).  */
+		int length = (ticks * PSG_ENV_FREQ_10) / HZ / 10;
+
+		if (length > 0xffff) length = 0xffff;
+		sound_ym.rd_data_reg_sel = 11;
+		sound_ym.wd_data = length & 0xff;
+		sound_ym.rd_data_reg_sel = 12;
+		sound_ym.wd_data = length >> 8;
+		/* Envelope form: max -> min single.  */
+		sound_ym.rd_data_reg_sel = 13;
+		sound_ym.wd_data = 0;
+		/* Use envelope for generator A.  */
+		sound_ym.rd_data_reg_sel = 8;
+		sound_ym.wd_data = 0x10;
+	    } else {
+		/* Set generator A level to maximum, no envelope.  */
+		sound_ym.rd_data_reg_sel = 8;
+		sound_ym.wd_data = 15;
+	    }
+	    /* Turn on generator A in mixer control.  */
+	    sound_ym.rd_data_reg_sel = 7;
+	    tmp &= ~1;
+	    sound_ym.wd_data = tmp;
+	}
+	local_irq_restore(flags);
+}
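+/*
+ * Worked example (illustrative, not part of this patch): atari_mksound(440, HZ/2)
+ * writes a period of PSG_FREQ / 440 = 284 into PSG registers 0/1 (~440 Hz)
+ * and an envelope length of (HZ/2 * PSG_ENV_FREQ_10) / HZ / 10 = 3906 into
+ * registers 11/12, i.e. a beep of roughly half a second.
+ */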
diff --git a/arch/m68k/atari/atasound.h b/arch/m68k/atari/atasound.h
new file mode 100644
index 0000000..1362762
--- /dev/null
+++ b/arch/m68k/atari/atasound.h
@@ -0,0 +1,33 @@
+/*
+ * Minor numbers for the sound driver.
+ *
+ * Unfortunately Creative called the SB's codec chip a DSP. For this
+ * reason /dev/dsp is reserved for digitized audio use. There is a
+ * device for true DSP processors, but it will be called something else.
+ * In v3.0 it's /dev/sndproc, but this could be a temporary solution.
+ */
+
+#define SND_NDEVS	256	/* Number of supported devices */
+#define SND_DEV_CTL	0	/* Control port /dev/mixer */
+#define SND_DEV_SEQ	1	/* Sequencer output /dev/sequencer (FM
+				   synthesizer and MIDI output) */
+#define SND_DEV_MIDIN	2	/* Raw midi access */
+#define SND_DEV_DSP	3	/* Digitized voice /dev/dsp */
+#define SND_DEV_AUDIO	4	/* Sparc compatible /dev/audio */
+#define SND_DEV_DSP16	5	/* Like /dev/dsp but 16 bits/sample */
+#define SND_DEV_STATUS	6	/* /dev/sndstat */
+/* #7 not in use now. Was in 2.4. Free for use after v3.0. */
+#define SND_DEV_SEQ2	8	/* /dev/sequencer, level 2 interface */
+#define SND_DEV_SNDPROC 9	/* /dev/sndproc for programmable devices */
+#define SND_DEV_PSS	SND_DEV_SNDPROC
+
+#define DSP_DEFAULT_SPEED	8000
+
+#define ON		1
+#define OFF		0
+
+#define MAX_AUDIO_DEV	5
+#define MAX_MIXER_DEV	2
+#define MAX_SYNTH_DEV	3
+#define MAX_MIDI_DEV	6
+#define MAX_TIMER_DEV	3
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
new file mode 100644
index 0000000..9261d2d
--- /dev/null
+++ b/arch/m68k/atari/config.c
@@ -0,0 +1,726 @@
+/*
+ *  linux/arch/m68k/atari/config.c
+ *
+ *  Copyright (C) 1994 Bjoern Brauel
+ *
+ *  5/2/94 Roman Hodek:
+ *    Added setting of time_adj to get a better clock.
+ *
+ *  5/14/94 Roman Hodek:
+ *    gettod() for TT
+ *
+ *  5/15/94 Roman Hodek:
+ *    hard_reset_now() for Atari (and others?)
+ *
+ *  94/12/30 Andreas Schwab:
+ *    atari_sched_init fixed to get precise clock.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Miscellaneous atari stuff
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/vt_kern.h>
+
+#include <asm/bootinfo.h>
+#include <asm/setup.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/atari_stram.h>
+#include <asm/system.h>
+#include <asm/machdep.h>
+#include <asm/hwtest.h>
+#include <asm/io.h>
+
+u_long atari_mch_cookie;
+u_long atari_mch_type;
+struct atari_hw_present atari_hw_present;
+u_long atari_switches;
+int atari_dont_touch_floppy_select;
+int atari_rtc_year_offset;
+
+/* local function prototypes */
+static void atari_reset( void );
+#ifdef CONFIG_ATARI_FLOPPY
+extern void atari_floppy_setup(char *, int *);
+#endif
+static void atari_get_model(char *model);
+static int atari_get_hardware_list(char *buffer);
+
+/* atari specific irq functions */
+extern void atari_init_IRQ (void);
+extern int atari_request_irq (unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                              unsigned long flags, const char *devname, void *dev_id);
+extern void atari_free_irq (unsigned int irq, void *dev_id);
+extern void atari_enable_irq (unsigned int);
+extern void atari_disable_irq (unsigned int);
+extern int show_atari_interrupts (struct seq_file *, void *);
+extern void atari_mksound( unsigned int hz, unsigned int ticks );
+#ifdef CONFIG_HEARTBEAT
+static void atari_heartbeat( int on );
+#endif
+
+/* atari specific timer functions (in time.c) */
+extern void atari_sched_init(irqreturn_t (*)(int, void *, struct pt_regs *));
+extern unsigned long atari_gettimeoffset (void);
+extern int atari_mste_hwclk (int, struct rtc_time *);
+extern int atari_tt_hwclk (int, struct rtc_time *);
+extern int atari_mste_set_clock_mmss (unsigned long);
+extern int atari_tt_set_clock_mmss (unsigned long);
+
+/* atari specific debug functions (in debug.c) */
+extern void atari_debug_init(void);
+
+
+/* I've moved hwreg_present() and hwreg_present_bywrite() out into
+ * mm/hwtest.c, to avoid having multiple copies of the same routine
+ * in the kernel [I wanted them in hp300 and they were already used
+ * in the nubus code]. NB: I don't have an Atari so this might (just
+ * conceivably) break something.
+ * I've preserved the #if 0 version of hwreg_present_bywrite() here
+ * for posterity.
+ *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk>, 05/1998
+ */
+
+#if 0
+static int __init
+hwreg_present_bywrite(volatile void *regp, unsigned char val)
+{
+    int		ret;
+    long	save_sp, save_vbr;
+    static long tmp_vectors[3] = { [2] = (long)&&after_test };
+
+    __asm__ __volatile__
+	(	"movec	%/vbr,%2\n\t"	/* save vbr value            */
+                "movec	%4,%/vbr\n\t"	/* set up temporary vectors  */
+		"movel	%/sp,%1\n\t"	/* save sp                   */
+		"moveq	#0,%0\n\t"	/* assume not present        */
+		"moveb	%5,%3@\n\t"	/* write the hardware reg    */
+		"cmpb	%3@,%5\n\t"	/* compare it                */
+		"seq	%0"		/* comes here only if reg    */
+                                        /* is present                */
+		: "=d&" (ret), "=r&" (save_sp), "=r&" (save_vbr)
+		: "a" (regp), "r" (tmp_vectors), "d" (val)
+                );
+  after_test:
+    __asm__ __volatile__
+      (	"movel	%0,%/sp\n\t"		/* restore sp                */
+        "movec	%1,%/vbr"			/* restore vbr               */
+        : : "r" (save_sp), "r" (save_vbr) : "sp"
+	);
+
+    return( ret );
+}
+#endif
+
+
+/* ++roman: This is a more elaborate test for an SCC chip, since the plain
+ * Medusa board generates DTACK at the SCC's standard addresses, but a SCC
+ * board in the Medusa is possible. Also, the addresses where the ST_ESCC
+ * resides generate DTACK without the chip, too.
+ * The method is to write values into the interrupt vector register, that
+ * should be readable without trouble (from channel A!).
+ */
+
+static int __init scc_test( volatile char *ctla )
+{
+	if (!hwreg_present( ctla ))
+		return( 0 );
+	MFPDELAY();
+
+	*ctla = 2; MFPDELAY();
+	*ctla = 0x40; MFPDELAY();
+
+	*ctla = 2; MFPDELAY();
+	if (*ctla != 0x40) return( 0 );
+	MFPDELAY();
+
+	*ctla = 2; MFPDELAY();
+	*ctla = 0x60; MFPDELAY();
+
+	*ctla = 2; MFPDELAY();
+	if (*ctla != 0x60) return( 0 );
+
+	return( 1 );
+}
+
+
+    /*
+     *  Parse an Atari-specific record in the bootinfo
+     */
+
+int __init atari_parse_bootinfo(const struct bi_record *record)
+{
+    int unknown = 0;
+    const u_long *data = record->data;
+
+    switch (record->tag) {
+	case BI_ATARI_MCH_COOKIE:
+	    atari_mch_cookie = *data;
+	    break;
+	case BI_ATARI_MCH_TYPE:
+	    atari_mch_type = *data;
+	    break;
+	default:
+	    unknown = 1;
+    }
+    return(unknown);
+}
+
+
+/* Parse the Atari-specific switches= option. */
+void __init atari_switches_setup( const char *str, unsigned len )
+{
+    char switches[len+1];
+    char *p;
+    int ovsc_shift;
+    char *args = switches;
+
+    /* copy string to local array, strsep works destructively... */
+    strlcpy( switches, str, sizeof(switches) );
+    atari_switches = 0;
+
+    /* parse the options */
+    while ((p = strsep(&args, ",")) != NULL) {
+	if (!*p) continue;
+	ovsc_shift = 0;
+	if (strncmp( p, "ov_", 3 ) == 0) {
+	    p += 3;
+	    ovsc_shift = ATARI_SWITCH_OVSC_SHIFT;
+	}
+
+	if (strcmp( p, "ikbd" ) == 0) {
+	    /* RTS line of IKBD ACIA */
+	    atari_switches |= ATARI_SWITCH_IKBD << ovsc_shift;
+	}
+	else if (strcmp( p, "midi" ) == 0) {
+	    /* RTS line of MIDI ACIA */
+	    atari_switches |= ATARI_SWITCH_MIDI << ovsc_shift;
+	}
+	else if (strcmp( p, "snd6" ) == 0) {
+	    atari_switches |= ATARI_SWITCH_SND6 << ovsc_shift;
+	}
+	else if (strcmp( p, "snd7" ) == 0) {
+	    atari_switches |= ATARI_SWITCH_SND7 << ovsc_shift;
+	}
+    }
+}
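+/*
+ * For illustration only (not part of this patch): a boot option such as
+ * "switches=ikbd,ov_midi" is parsed by the loop above into
+ * atari_switches = ATARI_SWITCH_IKBD |
+ *		    (ATARI_SWITCH_MIDI << ATARI_SWITCH_OVSC_SHIFT).
+ */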
+
+
+    /*
+     *  Setup the Atari configuration info
+     */
+
+void __init config_atari(void)
+{
+    unsigned short tos_version;
+
+    memset(&atari_hw_present, 0, sizeof(atari_hw_present));
+
+    atari_debug_init();
+
+    ioport_resource.end  = 0xFFFFFFFF;  /* Change size of I/O space from 64KB
+                                           to 4GB. */
+
+    mach_sched_init      = atari_sched_init;
+    mach_init_IRQ        = atari_init_IRQ;
+    mach_request_irq     = atari_request_irq;
+    mach_free_irq        = atari_free_irq;
+    enable_irq           = atari_enable_irq;
+    disable_irq          = atari_disable_irq;
+    mach_get_model	 = atari_get_model;
+    mach_get_hardware_list = atari_get_hardware_list;
+    mach_get_irq_list	 = show_atari_interrupts;
+    mach_gettimeoffset   = atari_gettimeoffset;
+    mach_reset           = atari_reset;
+#ifdef CONFIG_ATARI_FLOPPY
+    mach_floppy_setup	 = atari_floppy_setup;
+#endif
+#ifdef CONFIG_DUMMY_CONSOLE
+    conswitchp	         = &dummy_con;
+#endif
+    mach_max_dma_address = 0xffffff;
+#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
+    mach_beep          = atari_mksound;
+#endif
+#ifdef CONFIG_HEARTBEAT
+    mach_heartbeat = atari_heartbeat;
+#endif
+
+    /* Set switches as requested by the user */
+    if (atari_switches & ATARI_SWITCH_IKBD)
+	acia.key_ctrl = ACIA_DIV64 | ACIA_D8N1S | ACIA_RHTID;
+    if (atari_switches & ATARI_SWITCH_MIDI)
+	acia.mid_ctrl = ACIA_DIV16 | ACIA_D8N1S | ACIA_RHTID;
+    if (atari_switches & (ATARI_SWITCH_SND6|ATARI_SWITCH_SND7)) {
+	sound_ym.rd_data_reg_sel = 14;
+	sound_ym.wd_data = sound_ym.rd_data_reg_sel |
+			   ((atari_switches&ATARI_SWITCH_SND6) ? 0x40 : 0) |
+			   ((atari_switches&ATARI_SWITCH_SND7) ? 0x80 : 0);
+    }
+
+    /* ++bjoern:
+     * Determine hardware present
+     */
+
+    printk( "Atari hardware found: " );
+    if (MACH_IS_MEDUSA || MACH_IS_HADES) {
+        /* There's no Atari video hardware on the Medusa, but all the
+         * addresses below generate a DTACK so no bus error occurs! */
+    }
+    else if (hwreg_present( f030_xreg )) {
+	ATARIHW_SET(VIDEL_SHIFTER);
+        printk( "VIDEL " );
+        /* This is a temporary hack: If there is Falcon video
+         * hardware, we assume that the ST-DMA serves SCSI instead of
+         * ACSI. In the future, there should be a better method for
+         * this...
+         */
+	ATARIHW_SET(ST_SCSI);
+        printk( "STDMA-SCSI " );
+    }
+    else if (hwreg_present( tt_palette )) {
+	ATARIHW_SET(TT_SHIFTER);
+        printk( "TT_SHIFTER " );
+    }
+    else if (hwreg_present( &shifter.bas_hi )) {
+        if (hwreg_present( &shifter.bas_lo ) &&
+	    (shifter.bas_lo = 0x0aau, shifter.bas_lo == 0x0aau)) {
+	    ATARIHW_SET(EXTD_SHIFTER);
+            printk( "EXTD_SHIFTER " );
+        }
+        else {
+	    ATARIHW_SET(STND_SHIFTER);
+            printk( "STND_SHIFTER " );
+        }
+    }
+    if (hwreg_present( &mfp.par_dt_reg )) {
+	ATARIHW_SET(ST_MFP);
+        printk( "ST_MFP " );
+    }
+    if (hwreg_present( &tt_mfp.par_dt_reg )) {
+	ATARIHW_SET(TT_MFP);
+        printk( "TT_MFP " );
+    }
+    if (hwreg_present( &tt_scsi_dma.dma_addr_hi )) {
+	ATARIHW_SET(SCSI_DMA);
+        printk( "TT_SCSI_DMA " );
+    }
+    if (!MACH_IS_HADES && hwreg_present( &st_dma.dma_hi )) {
+	ATARIHW_SET(STND_DMA);
+        printk( "STND_DMA " );
+    }
+    if (MACH_IS_MEDUSA || /* The ST-DMA address registers aren't readable
+			   * on all Medusas, so the test below may fail */
+        (hwreg_present( &st_dma.dma_vhi ) &&
+         (st_dma.dma_vhi = 0x55) && (st_dma.dma_hi = 0xaa) &&
+         st_dma.dma_vhi == 0x55 && st_dma.dma_hi == 0xaa &&
+         (st_dma.dma_vhi = 0xaa) && (st_dma.dma_hi = 0x55) &&
+         st_dma.dma_vhi == 0xaa && st_dma.dma_hi == 0x55)) {
+	ATARIHW_SET(EXTD_DMA);
+        printk( "EXTD_DMA " );
+    }
+    if (hwreg_present( &tt_scsi.scsi_data )) {
+	ATARIHW_SET(TT_SCSI);
+        printk( "TT_SCSI " );
+    }
+    if (hwreg_present( &sound_ym.rd_data_reg_sel )) {
+	ATARIHW_SET(YM_2149);
+        printk( "YM2149 " );
+    }
+    if (!MACH_IS_MEDUSA && !MACH_IS_HADES &&
+	hwreg_present( &tt_dmasnd.ctrl )) {
+	ATARIHW_SET(PCM_8BIT);
+        printk( "PCM " );
+    }
+    if (!MACH_IS_HADES && hwreg_present( &falcon_codec.unused5 )) {
+	ATARIHW_SET(CODEC);
+        printk( "CODEC " );
+    }
+    if (hwreg_present( &dsp56k_host_interface.icr )) {
+	ATARIHW_SET(DSP56K);
+        printk( "DSP56K " );
+    }
+    if (hwreg_present( &tt_scc_dma.dma_ctrl ) &&
+#if 0
+	/* This test sucks! Who knows some better? */
+	(tt_scc_dma.dma_ctrl = 0x01, (tt_scc_dma.dma_ctrl & 1) == 1) &&
+	(tt_scc_dma.dma_ctrl = 0x00, (tt_scc_dma.dma_ctrl & 1) == 0)
+#else
+	!MACH_IS_MEDUSA && !MACH_IS_HADES
+#endif
+	) {
+	ATARIHW_SET(SCC_DMA);
+        printk( "SCC_DMA " );
+    }
+    if (scc_test( &scc.cha_a_ctrl )) {
+	ATARIHW_SET(SCC);
+        printk( "SCC " );
+    }
+    if (scc_test( &st_escc.cha_b_ctrl )) {
+	ATARIHW_SET( ST_ESCC );
+	printk( "ST_ESCC " );
+    }
+    if (MACH_IS_HADES)
+    {
+        ATARIHW_SET( VME );
+        printk( "VME " );
+    }
+    else if (hwreg_present( &tt_scu.sys_mask )) {
+	ATARIHW_SET(SCU);
+	/* Assume a VME bus if there's a SCU */
+	ATARIHW_SET( VME );
+        printk( "VME SCU " );
+    }
+    if (hwreg_present( (void *)(0xffff9210) )) {
+	ATARIHW_SET(ANALOG_JOY);
+        printk( "ANALOG_JOY " );
+    }
+    if (!MACH_IS_HADES && hwreg_present( blitter.halftone )) {
+	ATARIHW_SET(BLITTER);
+        printk( "BLITTER " );
+    }
+    if (hwreg_present((void *)0xfff00039)) {
+	ATARIHW_SET(IDE);
+        printk( "IDE " );
+    }
+#if 1 /* This may be wrong */
+    if (!MACH_IS_MEDUSA && !MACH_IS_HADES &&
+	hwreg_present( &tt_microwire.data ) &&
+	hwreg_present( &tt_microwire.mask ) &&
+	(tt_microwire.mask = 0x7ff,
+	 udelay(1),
+	 tt_microwire.data = MW_LM1992_PSG_HIGH | MW_LM1992_ADDR,
+	 udelay(1),
+	 tt_microwire.data != 0)) {
+	ATARIHW_SET(MICROWIRE);
+	while (tt_microwire.mask != 0x7ff) ;
+        printk( "MICROWIRE " );
+    }
+#endif
+    if (hwreg_present( &tt_rtc.regsel )) {
+	ATARIHW_SET(TT_CLK);
+        printk( "TT_CLK " );
+        mach_hwclk = atari_tt_hwclk;
+        mach_set_clock_mmss = atari_tt_set_clock_mmss;
+    }
+    if (!MACH_IS_HADES && hwreg_present( &mste_rtc.sec_ones)) {
+	ATARIHW_SET(MSTE_CLK);
+        printk( "MSTE_CLK ");
+        mach_hwclk = atari_mste_hwclk;
+        mach_set_clock_mmss = atari_mste_set_clock_mmss;
+    }
+    if (!MACH_IS_MEDUSA && !MACH_IS_HADES &&
+	hwreg_present( &dma_wd.fdc_speed ) &&
+	hwreg_write( &dma_wd.fdc_speed, 0 )) {
+	    ATARIHW_SET(FDCSPEED);
+	    printk( "FDC_SPEED ");
+    }
+    if (!MACH_IS_HADES && !ATARIHW_PRESENT(ST_SCSI)) {
+	ATARIHW_SET(ACSI);
+        printk( "ACSI " );
+    }
+    printk("\n");
+
+    if (CPU_IS_040_OR_060)
+        /* Now it seems to be safe to turn off the tt0 transparent
+         * translation (the one that must not be turned off in
+         * head.S...)
+         */
+        __asm__ volatile ("moveq #0,%/d0\n\t"
+                          ".chip 68040\n\t"
+			  "movec %%d0,%%itt0\n\t"
+			  "movec %%d0,%%dtt0\n\t"
+			  ".chip 68k"
+						  : /* no outputs */
+						  : /* no inputs */
+						  : "d0");
+
+    /* allocator for memory that must reside in st-ram */
+    atari_stram_init ();
+
+    /* Set up a mapping for the VMEbus address region:
+     *
+     * VME is either at phys. 0xfexxxxxx (TT) or 0xa00000..0xdfffff
+     * (MegaSTE).  In both cases, the whole 16 MB chunk is mapped at
+     * 0xfe000000 virt., because this can be done with a single
+     * transparent translation. On the 68040, lots of often unused
+     * page tables would be needed otherwise. On a MegaSTE or similar,
+     * the highest byte is stripped off by hardware due to the 24 bit
+     * design of the bus.
+     */
+
+    if (CPU_IS_020_OR_030) {
+        unsigned long	tt1_val;
+        tt1_val = 0xfe008543;	/* Translate 0xfexxxxxx, enable, cache
+                                 * inhibit, read and write, FDC mask = 3,
+                                 * FDC val = 4 -> Supervisor only */
+        __asm__ __volatile__ ( ".chip 68030\n\t"
+				"pmove	%0@,%/tt1\n\t"
+				".chip 68k"
+				: : "a" (&tt1_val) );
+    }
+    else {
+        __asm__ __volatile__
+            ( "movel %0,%/d0\n\t"
+	      ".chip 68040\n\t"
+	      "movec %%d0,%%itt1\n\t"
+	      "movec %%d0,%%dtt1\n\t"
+	      ".chip 68k"
+              :
+              : "g" (0xfe00a040)	/* Translate 0xfexxxxxx, enable,
+                                         * supervisor only, non-cacheable/
+                                         * serialized, writable */
+              : "d0" );
+
+    }
+
+    /* Fetch the TOS version at physical address 2 */
+    /* We may not be able to access this address if the kernel is
+       loaded to st ram, since the first page is unmapped.  On the
+       Medusa this is always the case and there is nothing we can do
+       about this, so we just assume the smaller offset.  For the TT
+       we use the fact that in head.S we have set up a mapping
+       0xFFxxxxxx -> 0x00xxxxxx, so that the first 16MB is accessible
+       in the last 16MB of the address space. */
+    tos_version = (MACH_IS_MEDUSA || MACH_IS_HADES) ?
+		  0xfff : *(unsigned short *)0xff000002;
+    atari_rtc_year_offset = (tos_version < 0x306) ? 70 : 68;
+}
+
+#ifdef CONFIG_HEARTBEAT
+static void atari_heartbeat( int on )
+{
+    unsigned char tmp;
+    unsigned long flags;
+
+    if (atari_dont_touch_floppy_select)
+	return;
+
+    local_irq_save(flags);
+    sound_ym.rd_data_reg_sel = 14; /* Select PSG Port A */
+    tmp = sound_ym.rd_data_reg_sel;
+    sound_ym.wd_data = on ? (tmp & ~0x02) : (tmp | 0x02);
+    local_irq_restore(flags);
+}
+#endif
+
+/* ++roman:
+ *
+ * This function does a reset on machines that lack the ability to
+ * assert the processor's _RESET signal somehow via hardware. It is
+ * based on the fact that you can find the initial SP and PC values
+ * after a reset at physical addresses 0 and 4. This works pretty well
+ * for Atari machines, since the lowest 8 bytes of physical memory are
+ * really ROM (mapped by hardware). For other 680x0 machines: don't
+ * know if it works...
+ *
+ * To get the values at addresses 0 and 4, the MMU had better be turned
+ * off first. After that, we have to jump into physical address space
+ * (the PC before the pmove statement points to the virtual address of
+ * the code). Getting that physical address is not hard, but the code
+ * becomes a bit complex since I've tried to ensure that the jump
+ * statement after the pmove is in the cache already (otherwise the
+ * processor can't fetch it!). For that, the code first jumps to the
+ * jump statement with the (virtual) address of the pmove section in
+ * an address register. The jump statement is surely in the cache
+ * now. After that, the physical address of the reset code is loaded
+ * into the same address register, pmove is done and the same jump
+ * statement goes to the reset code. Since there are not many
+ * statements between the two jumps, I hope it stays in the cache.
+ *
+ * The C code makes heavy use of the GCC features that you can get the
+ * address of a C label. No hope to compile this with another compiler
+ * than GCC!
+ */
+
+/* ++andreas: no need for complicated code, just depend on prefetch */
+
+static void atari_reset (void)
+{
+    long tc_val = 0;
+    long reset_addr;
+
+    /* On the Medusa, phys. 0x4 may contain garbage because it's not
+       ROM.  See above for an explanation of why we cannot use PTOV(4). */
+    reset_addr = MACH_IS_HADES ? 0x7fe00030 :
+                 MACH_IS_MEDUSA || MACH_IS_AB40 ? 0xe00030 :
+		 *(unsigned long *) 0xff000004;
+
+    /* reset the ACIAs to switch off OverScan, if it's active */
+    if (atari_switches & ATARI_SWITCH_OVSC_IKBD)
+	acia.key_ctrl = ACIA_RESET;
+    if (atari_switches & ATARI_SWITCH_OVSC_MIDI)
+	acia.mid_ctrl = ACIA_RESET;
+
+    /* processor independent: turn off interrupts and reset the VBR;
+     * the caches must be left enabled, else prefetching the final jump
+     * instruction doesn't work. */
+    local_irq_disable();
+    __asm__ __volatile__
+	("moveq	#0,%/d0\n\t"
+	 "movec	%/d0,%/vbr"
+	 : : : "d0" );
+
+    if (CPU_IS_040_OR_060) {
+        unsigned long jmp_addr040 = virt_to_phys(&&jmp_addr_label040);
+	if (CPU_IS_060) {
+	    /* 68060: clear PCR to turn off superscalar operation */
+	    __asm__ __volatile__
+		("moveq	#0,%/d0\n\t"
+		 ".chip 68060\n\t"
+		 "movec %%d0,%%pcr\n\t"
+		 ".chip 68k"
+		 : : : "d0" );
+	}
+
+        __asm__ __volatile__
+            ("movel    %0,%/d0\n\t"
+             "andl     #0xff000000,%/d0\n\t"
+             "orw      #0xe020,%/d0\n\t"   /* map 16 MB, enable, cacheable */
+             ".chip 68040\n\t"
+	     "movec    %%d0,%%itt0\n\t"
+             "movec    %%d0,%%dtt0\n\t"
+	     ".chip 68k\n\t"
+             "jmp   %0@\n\t"
+             : /* no outputs */
+             : "a" (jmp_addr040)
+             : "d0" );
+      jmp_addr_label040:
+        __asm__ __volatile__
+          ("moveq #0,%/d0\n\t"
+	   "nop\n\t"
+	   ".chip 68040\n\t"
+	   "cinva %%bc\n\t"
+	   "nop\n\t"
+	   "pflusha\n\t"
+	   "nop\n\t"
+	   "movec %%d0,%%tc\n\t"
+	   "nop\n\t"
+	   /* the following setup of transparent translations is needed on the
+	    * Afterburner040 to successfully reboot. Other machines shouldn't
+	    * care about a different tt regs setup, they also didn't care in
+	    * the past that the regs weren't turned off. */
+	   "movel #0xffc000,%%d0\n\t" /* whole insn space cacheable */
+	   "movec %%d0,%%itt0\n\t"
+	   "movec %%d0,%%itt1\n\t"
+	   "orw   #0x40,%/d0\n\t" /* whole data space non-cacheable/ser. */
+	   "movec %%d0,%%dtt0\n\t"
+	   "movec %%d0,%%dtt1\n\t"
+	   ".chip 68k\n\t"
+           "jmp %0@"
+           : /* no outputs */
+           : "a" (reset_addr)
+           : "d0");
+    }
+    else
+        __asm__ __volatile__
+            ("pmove %0@,%/tc\n\t"
+             "jmp %1@"
+             : /* no outputs */
+             : "a" (&tc_val), "a" (reset_addr));
+}
+
+
+static void atari_get_model(char *model)
+{
+    strcpy(model, "Atari ");
+    switch (atari_mch_cookie >> 16) {
+	case ATARI_MCH_ST:
+	    if (ATARIHW_PRESENT(MSTE_CLK))
+		strcat (model, "Mega ST");
+	    else
+		strcat (model, "ST");
+	    break;
+	case ATARI_MCH_STE:
+	    if (MACH_IS_MSTE)
+		strcat (model, "Mega STE");
+	    else
+		strcat (model, "STE");
+	    break;
+	case ATARI_MCH_TT:
+	    if (MACH_IS_MEDUSA)
+		/* Medusa has TT _MCH cookie */
+		strcat (model, "Medusa");
+	    else if (MACH_IS_HADES)
+		strcat(model, "Hades");
+	    else
+		strcat (model, "TT");
+	    break;
+	case ATARI_MCH_FALCON:
+	    strcat (model, "Falcon");
+	    if (MACH_IS_AB40)
+		strcat (model, " (with Afterburner040)");
+	    break;
+	default:
+	    sprintf (model + strlen (model), "(unknown mach cookie 0x%lx)",
+		     atari_mch_cookie);
+	    break;
+    }
+}
+
+
+static int atari_get_hardware_list(char *buffer)
+{
+    int len = 0, i;
+
+    for (i = 0; i < m68k_num_memory; i++)
+	len += sprintf (buffer+len, "\t%3ld MB at 0x%08lx (%s)\n",
+			m68k_memory[i].size >> 20, m68k_memory[i].addr,
+			(m68k_memory[i].addr & 0xff000000 ?
+			 "alternate RAM" : "ST-RAM"));
+
+#define ATARIHW_ANNOUNCE(name,str)				\
+    if (ATARIHW_PRESENT(name))			\
+	len += sprintf (buffer + len, "\t%s\n", str)
+
+    len += sprintf (buffer + len, "Detected hardware:\n");
+    ATARIHW_ANNOUNCE(STND_SHIFTER, "ST Shifter");
+    ATARIHW_ANNOUNCE(EXTD_SHIFTER, "STe Shifter");
+    ATARIHW_ANNOUNCE(TT_SHIFTER, "TT Shifter");
+    ATARIHW_ANNOUNCE(VIDEL_SHIFTER, "Falcon Shifter");
+    ATARIHW_ANNOUNCE(YM_2149, "Programmable Sound Generator");
+    ATARIHW_ANNOUNCE(PCM_8BIT, "PCM 8 Bit Sound");
+    ATARIHW_ANNOUNCE(CODEC, "CODEC Sound");
+    ATARIHW_ANNOUNCE(TT_SCSI, "SCSI Controller NCR5380 (TT style)");
+    ATARIHW_ANNOUNCE(ST_SCSI, "SCSI Controller NCR5380 (Falcon style)");
+    ATARIHW_ANNOUNCE(ACSI, "ACSI Interface");
+    ATARIHW_ANNOUNCE(IDE, "IDE Interface");
+    ATARIHW_ANNOUNCE(FDCSPEED, "8/16 MHz Switch for FDC");
+    ATARIHW_ANNOUNCE(ST_MFP, "Multi Function Peripheral MFP 68901");
+    ATARIHW_ANNOUNCE(TT_MFP, "Second Multi Function Peripheral MFP 68901");
+    ATARIHW_ANNOUNCE(SCC, "Serial Communications Controller SCC 8530");
+    ATARIHW_ANNOUNCE(ST_ESCC, "Extended Serial Communications Controller SCC 85230");
+    ATARIHW_ANNOUNCE(ANALOG_JOY, "Paddle Interface");
+    ATARIHW_ANNOUNCE(MICROWIRE, "MICROWIRE(tm) Interface");
+    ATARIHW_ANNOUNCE(STND_DMA, "DMA Controller (24 bit)");
+    ATARIHW_ANNOUNCE(EXTD_DMA, "DMA Controller (32 bit)");
+    ATARIHW_ANNOUNCE(SCSI_DMA, "DMA Controller for NCR5380");
+    ATARIHW_ANNOUNCE(SCC_DMA, "DMA Controller for SCC");
+    ATARIHW_ANNOUNCE(TT_CLK, "Clock Chip MC146818A");
+    ATARIHW_ANNOUNCE(MSTE_CLK, "Clock Chip RP5C15");
+    ATARIHW_ANNOUNCE(SCU, "System Control Unit");
+    ATARIHW_ANNOUNCE(BLITTER, "Blitter");
+    ATARIHW_ANNOUNCE(VME, "VME Bus");
+    ATARIHW_ANNOUNCE(DSP56K, "DSP56001 processor");
+
+    return(len);
+}
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  tab-width: 8
+ * End:
+ */
diff --git a/arch/m68k/atari/debug.c b/arch/m68k/atari/debug.c
new file mode 100644
index 0000000..ace05f7
--- /dev/null
+++ b/arch/m68k/atari/debug.c
@@ -0,0 +1,347 @@
+/*
+ * linux/arch/m68k/atari/debug.c
+ *
+ * Atari debugging and serial console stuff
+ *
+ * Assembled from parts of the former atari/config.c 97-12-18 by Roman Hodek
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+
+extern char m68k_debug_device[];
+
+/* Flag that Modem1 port is already initialized and used */
+int atari_MFP_init_done;
+/* Flag that the SCC (Modem2) port is already initialized and used */
+int atari_SCC_init_done;
+/* Can be set somewhere, if an SCC master reset has already been done and
+ * should not be repeated; used by kgdb */
+int atari_SCC_reset_done;
+
+static struct console atari_console_driver = {
+	.name =		"debug",
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+
+static inline void ata_mfp_out (char c)
+{
+    while (!(mfp.trn_stat & 0x80)) /* wait for tx buf empty */
+	barrier ();
+    mfp.usart_dta = c;
+}
+
+void atari_mfp_console_write (struct console *co, const char *str,
+			      unsigned int count)
+{
+    while (count--) {
+	if (*str == '\n')
+	    ata_mfp_out( '\r' );
+	ata_mfp_out( *str++ );
+    }
+}
+
+static inline void ata_scc_out (char c)
+{
+    do {
+	MFPDELAY();
+    } while (!(scc.cha_b_ctrl & 0x04)); /* wait for tx buf empty */
+    MFPDELAY();
+    scc.cha_b_data = c;
+}
+
+void atari_scc_console_write (struct console *co, const char *str,
+			      unsigned int count)
+{
+    while (count--) {
+	if (*str == '\n')
+	    ata_scc_out( '\r' );
+	ata_scc_out( *str++ );
+    }
+}
+
+static inline void ata_midi_out (char c)
+{
+    while (!(acia.mid_ctrl & ACIA_TDRE)) /* wait for tx buf empty */
+	barrier ();
+    acia.mid_data = c;
+}
+
+void atari_midi_console_write (struct console *co, const char *str,
+			       unsigned int count)
+{
+    while (count--) {
+	if (*str == '\n')
+	    ata_midi_out( '\r' );
+	ata_midi_out( *str++ );
+    }
+}
+
+static int ata_par_out (char c)
+{
+    unsigned char tmp;
+    /* This is a several-second timeout in case no printer is connected */
+    unsigned long i = loops_per_jiffy > 1 ? loops_per_jiffy : 10000000/HZ;
+
+    while( (mfp.par_dt_reg & 1) && --i ) /* wait for BUSY == L */
+	;
+    if (!i) return( 0 );
+
+    sound_ym.rd_data_reg_sel = 15;  /* select port B */
+    sound_ym.wd_data = c;           /* put char onto port */
+    sound_ym.rd_data_reg_sel = 14;  /* select port A */
+    tmp = sound_ym.rd_data_reg_sel;
+    sound_ym.wd_data = tmp & ~0x20; /* set strobe L */
+    MFPDELAY();                     /* wait a bit */
+    sound_ym.wd_data = tmp | 0x20;  /* set strobe H */
+    return( 1 );
+}
+
+static void atari_par_console_write (struct console *co, const char *str,
+				     unsigned int count)
+{
+    static int printer_present = 1;
+
+    if (!printer_present)
+	return;
+
+    while (count--) {
+	if (*str == '\n')
+	    if (!ata_par_out( '\r' )) {
+		printer_present = 0;
+		return;
+	    }
+	if (!ata_par_out( *str++ )) {
+	    printer_present = 0;
+	    return;
+	}
+    }
+}
+
+#ifdef CONFIG_SERIAL_CONSOLE
+int atari_mfp_console_wait_key(struct console *co)
+{
+    while( !(mfp.rcv_stat & 0x80) ) /* wait for rx buf filled */
+	barrier();
+    return( mfp.usart_dta );
+}
+
+int atari_scc_console_wait_key(struct console *co)
+{
+    do {
+	MFPDELAY();
+    } while( !(scc.cha_b_ctrl & 0x01) ); /* wait for rx buf filled */
+    MFPDELAY();
+    return( scc.cha_b_data );
+}
+
+int atari_midi_console_wait_key(struct console *co)
+{
+    while( !(acia.mid_ctrl & ACIA_RDRF) ) /* wait for rx buf filled */
+	barrier();
+    return( acia.mid_data );
+}
+#endif
+
+/* The following two functions do a quick'n'dirty initialization of the MFP or
+ * SCC serial ports. They're used by the debugging interface, kgdb, and the
+ * serial console code. */
+#ifndef CONFIG_SERIAL_CONSOLE
+static void __init atari_init_mfp_port( int cflag )
+#else
+void atari_init_mfp_port( int cflag )
+#endif
+{
+    /* timer values for 1200...115200 bps; > 38400 select 110, 134, or 150
+     * bps, resp., and work correctly only if there's a RSVE or RSSPEED */
+    static int baud_table[9] = { 16, 11, 8, 4, 2, 1, 175, 143, 128 };
+    int baud = cflag & CBAUD;
+    int parity = (cflag & PARENB) ? ((cflag & PARODD) ? 0x04 : 0x06) : 0;
+    int csize = ((cflag & CSIZE) == CS7) ? 0x20 : 0x00;
+
+    if (cflag & CBAUDEX)
+	baud += B38400;
+    if (baud < B1200 || baud > B38400+2)
+	baud = B9600; /* use default 9600bps for non-implemented rates */
+    baud -= B1200; /* baud_table[] starts at 1200bps */
+
+    mfp.trn_stat &= ~0x01; /* disable TX */
+    mfp.usart_ctr = parity | csize | 0x88; /* 1:16 clk mode, 1 stop bit */
+    mfp.tim_ct_cd &= 0x70;  /* stop timer D */
+    mfp.tim_dt_d = baud_table[baud];
+    mfp.tim_ct_cd |= 0x01;  /* start timer D, 1:4 */
+    mfp.trn_stat |= 0x01;  /* enable TX */
+
+    atari_MFP_init_done = 1;
+}
+
+#define SCC_WRITE(reg,val)				\
+    do {						\
+	scc.cha_b_ctrl = (reg);				\
+	MFPDELAY();					\
+	scc.cha_b_ctrl = (val);				\
+	MFPDELAY();					\
+    } while(0)
+
+/* loops_per_jiffy isn't initialized yet, so we can't use udelay(). This does a
+ * delay of ~ 60us. */
+#define LONG_DELAY()				\
+    do {					\
+	int i;					\
+	for( i = 100; i > 0; --i )		\
+	    MFPDELAY();				\
+    } while(0)
+
+#ifndef CONFIG_SERIAL_CONSOLE
+static void __init atari_init_scc_port( int cflag )
+#else
+void atari_init_scc_port( int cflag )
+#endif
+{
+    extern int atari_SCC_reset_done;
+    static int clksrc_table[9] =
+	/* reg 11: 0x50 = BRG, 0x00 = RTxC, 0x28 = TRxC */
+	{ 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x00, 0x00 };
+    static int brgsrc_table[9] =
+	/* reg 14: 0 = RTxC, 2 = PCLK */
+	{ 2, 2, 2, 2, 2, 2, 0, 2, 2 };
+    static int clkmode_table[9] =
+	/* reg 4: 0x40 = x16, 0x80 = x32, 0xc0 = x64 */
+	{ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0xc0, 0x80 };
+    static int div_table[9] =
+	/* reg12 (BRG low) */
+	{ 208, 138, 103, 50, 24, 11, 1, 0, 0 };
+
+    int baud = cflag & CBAUD;
+    int clksrc, clkmode, div, reg3, reg5;
+
+    if (cflag & CBAUDEX)
+	baud += B38400;
+    if (baud < B1200 || baud > B38400+2)
+	baud = B9600; /* use default 9600bps for non-implemented rates */
+    baud -= B1200; /* tables start at 1200bps */
+
+    clksrc  = clksrc_table[baud];
+    clkmode = clkmode_table[baud];
+    div     = div_table[baud];
+    if (ATARIHW_PRESENT(TT_MFP) && baud >= 6) {
+	/* special treatment for TT, where rates >= 38400 are done via TRxC */
+	clksrc = 0x28; /* TRxC */
+	clkmode = baud == 6 ? 0xc0 :
+		  baud == 7 ? 0x80 : /* really 76800bps */
+			      0x40;  /* really 153600bps */
+	div = 0;
+    }
+
+    reg3 = (cflag & CSIZE) == CS8 ? 0xc0 : 0x40;
+    reg5 = ((cflag & CSIZE) == CS8 ? 0x60 : 0x20) | 0x82 /* assert DTR/RTS */;
+
+    (void)scc.cha_b_ctrl;	/* reset reg pointer */
+    SCC_WRITE( 9, 0xc0 );	/* reset */
+    LONG_DELAY();		/* extra delay after WR9 access */
+    SCC_WRITE( 4, ((cflag & PARENB) ? ((cflag & PARODD) ? 0x01 : 0x03) : 0) |
+		  0x04 /* 1 stopbit */ |
+		  clkmode );
+    SCC_WRITE( 3, reg3 );
+    SCC_WRITE( 5, reg5 );
+    SCC_WRITE( 9, 0 );		/* no interrupts */
+    LONG_DELAY();		/* extra delay after WR9 access */
+    SCC_WRITE( 10, 0 );		/* NRZ mode */
+    SCC_WRITE( 11, clksrc );	/* main clock source */
+    SCC_WRITE( 12, div );	/* BRG value */
+    SCC_WRITE( 13, 0 );		/* BRG high byte */
+    SCC_WRITE( 14, brgsrc_table[baud] );
+    SCC_WRITE( 14, brgsrc_table[baud] | (div ? 1 : 0) );
+    SCC_WRITE( 3, reg3 | 1 );
+    SCC_WRITE( 5, reg5 | 8 );
+
+    atari_SCC_reset_done = 1;
+    atari_SCC_init_done = 1;
+}
+
+#ifndef CONFIG_SERIAL_CONSOLE
+static void __init atari_init_midi_port( int cflag )
+#else
+void atari_init_midi_port( int cflag )
+#endif
+{
+    int baud = cflag & CBAUD;
+    int csize = ((cflag & CSIZE) == CS8) ? 0x10 : 0x00;
+    /* Warning: 7N1 isn't possible! (7O2 is used instead...) */
+    int parity = (cflag & PARENB) ? ((cflag & PARODD) ? 0x0c : 0x08) : 0x04;
+    int div;
+
+    /* 4800 selects 7812.5, 115200 selects 500000, all others (incl. 9600 as
+     * the default) select the standard MIDI speed 31250. */
+    if (cflag & CBAUDEX)
+	baud += B38400;
+    if (baud == B4800)
+	div = ACIA_DIV64; /* really 7812.5 bps */
+    else if (baud == B38400+2 /* 115200 */)
+	div = ACIA_DIV1; /* really 500 kbps (does that work??) */
+    else
+	div = ACIA_DIV16; /* 31250 bps, standard for MIDI */
+
+    /* RTS low, ints disabled */
+    acia.mid_ctrl = div | csize | parity |
+		    ((atari_switches & ATARI_SWITCH_MIDI) ?
+		     ACIA_RHTID : ACIA_RLTID);
+}
+
+void __init atari_debug_init(void)
+{
+    if (!strcmp( m68k_debug_device, "ser" )) {
+	/* defaults to ser2 for a Falcon and ser1 otherwise */
+	strcpy( m68k_debug_device, MACH_IS_FALCON ? "ser2" : "ser1" );
+    }
+
+    if (!strcmp( m68k_debug_device, "ser1" )) {
+	/* ST-MFP Modem1 serial port */
+	atari_init_mfp_port( B9600|CS8 );
+	atari_console_driver.write = atari_mfp_console_write;
+    }
+    else if (!strcmp( m68k_debug_device, "ser2" )) {
+	/* SCC Modem2 serial port */
+	atari_init_scc_port( B9600|CS8 );
+	atari_console_driver.write = atari_scc_console_write;
+    }
+    else if (!strcmp( m68k_debug_device, "midi" )) {
+	/* MIDI port */
+	atari_init_midi_port( B9600|CS8 );
+	atari_console_driver.write = atari_midi_console_write;
+    }
+    else if (!strcmp( m68k_debug_device, "par" )) {
+	/* parallel printer */
+	atari_turnoff_irq( IRQ_MFP_BUSY ); /* avoid ints */
+	sound_ym.rd_data_reg_sel = 7;  /* select mixer control */
+	sound_ym.wd_data = 0xff;       /* sound off, ports are output */
+	sound_ym.rd_data_reg_sel = 15; /* select port B */
+	sound_ym.wd_data = 0;          /* no char */
+	sound_ym.rd_data_reg_sel = 14; /* select port A */
+	sound_ym.wd_data = sound_ym.rd_data_reg_sel | 0x20; /* strobe H */
+	atari_console_driver.write = atari_par_console_write;
+    }
+    if (atari_console_driver.write)
+	register_console(&atari_console_driver);
+}
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  tab-width: 8
+ * End:
+ */
diff --git a/arch/m68k/atari/hades-pci.c b/arch/m68k/atari/hades-pci.c
new file mode 100644
index 0000000..8888deb
--- /dev/null
+++ b/arch/m68k/atari/hades-pci.c
@@ -0,0 +1,444 @@
+/*
+ * hades-pci.c - Hardware specific PCI BIOS functions for the Hades Atari clone.
+ *
+ * Written by Wout Klaren.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+
+#if 0
+# define DBG_DEVS(args)		printk args
+#else
+# define DBG_DEVS(args)
+#endif
+
+#if defined(CONFIG_PCI) && defined(CONFIG_HADES)
+
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/byteorder.h>
+#include <asm/pci.h>
+
+#define HADES_MEM_BASE		0x80000000
+#define HADES_MEM_SIZE		0x20000000
+#define HADES_CONFIG_BASE	0xA0000000
+#define HADES_CONFIG_SIZE	0x10000000
+#define HADES_IO_BASE		0xB0000000
+#define HADES_IO_SIZE		0x10000000
+#define HADES_VIRT_IO_SIZE	0x00010000	/* Only 64k is remapped and actually used. */
+
+#define N_SLOTS				4			/* Number of PCI slots. */
+
+static const char pci_mem_name[] = "PCI memory space";
+static const char pci_io_name[] = "PCI I/O space";
+static const char pci_config_name[] = "PCI config space";
+
+static struct resource config_space = {
+    .name = pci_config_name,
+    .start = HADES_CONFIG_BASE,
+    .end = HADES_CONFIG_BASE + HADES_CONFIG_SIZE - 1
+};
+static struct resource io_space = {
+    .name = pci_io_name,
+    .start = HADES_IO_BASE,
+    .end = HADES_IO_BASE + HADES_IO_SIZE - 1
+};
+
+static const unsigned long pci_conf_base_phys[] = {
+    0xA0080000, 0xA0040000, 0xA0020000, 0xA0010000
+};
+static unsigned long pci_conf_base_virt[N_SLOTS];
+static unsigned long pci_io_base_virt;
+
+/*
+ * static void *mk_conf_addr(struct pci_dev *dev, int where)
+ *
+ * Calculate the address of the PCI configuration area of the given
+ * device.
+ *
+ * BUG: boards with multiple functions are probably not correctly
+ * supported.
+ */
+
+static void *mk_conf_addr(struct pci_dev *dev, int where)
+{
+	int device = dev->devfn >> 3, function = dev->devfn & 7;
+	void *result;
+
+	DBG_DEVS(("mk_conf_addr(bus=%d, devfn=0x%x, where=0x%x)\n",
+		  dev->bus->number, dev->devfn, where));
+
+	if (device > 3)
+	{
+		DBG_DEVS(("mk_conf_addr: device (%d) > 3, returning NULL\n", device));
+		return NULL;
+	}
+
+	if (dev->bus->number != 0)
+	{
+		DBG_DEVS(("mk_conf_addr: bus (%d) > 0, returning NULL\n", dev->bus->number));
+		return NULL;
+	}
+
+	result = (void *) (pci_conf_base_virt[device] | (function << 8) | (where));
+	DBG_DEVS(("mk_conf_addr: returning pci_addr 0x%lx\n", (unsigned long) result));
+	return result;
+}
+
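+/*
+ * Worked example (illustrative, not part of this patch): for devfn 0x08
+ * (device/slot 1, function 0) and where 0x10 (PCI_BASE_ADDRESS_0), the
+ * routine above yields pci_conf_base_virt[1] | (0 << 8) | 0x10, i.e. the
+ * ioremap()ed image of physical 0xA0040000, plus 0x10.
+ */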
+static int hades_read_config_byte(struct pci_dev *dev, int where, u8 *value)
+{
+	volatile unsigned char *pci_addr;
+
+	*value = 0xff;
+
+	if ((pci_addr = (unsigned char *) mk_conf_addr(dev, where)) == NULL)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	*value = *pci_addr;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int hades_read_config_word(struct pci_dev *dev, int where, u16 *value)
+{
+	volatile unsigned short *pci_addr;
+
+	*value = 0xffff;
+
+	if (where & 0x1)
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	if ((pci_addr = (unsigned short *) mk_conf_addr(dev, where)) == NULL)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	*value = le16_to_cpu(*pci_addr);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int hades_read_config_dword(struct pci_dev *dev, int where, u32 *value)
+{
+	volatile unsigned int *pci_addr;
+	unsigned char header_type;
+	int result;
+
+	*value = 0xffffffff;
+
+	if (where & 0x3)
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	if ((pci_addr = (unsigned int *) mk_conf_addr(dev, where)) == NULL)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	*value = le32_to_cpu(*pci_addr);
+
+	/*
+	 * Check if the value is an address on the bus. If true, add the
+	 * base address of the PCI memory or PCI I/O area on the Hades.
+	 */
+
+	if ((result = hades_read_config_byte(dev, PCI_HEADER_TYPE,
+					     &header_type)) != PCIBIOS_SUCCESSFUL)
+		return result;
+
+	if (((where >= PCI_BASE_ADDRESS_0) && (where <= PCI_BASE_ADDRESS_1)) ||
+	    ((header_type != PCI_HEADER_TYPE_BRIDGE) && ((where >= PCI_BASE_ADDRESS_2) &&
+							 (where <= PCI_BASE_ADDRESS_5))))
+	{
+		if ((*value & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
+		{
+			/*
+			 * Base address register that contains an I/O address. If the
+			 * address is valid on the Hades (0 <= *value < HADES_VIRT_IO_SIZE),
+			 * add 'pci_io_base_virt' to the value.
+			 */
+
+			if (*value < HADES_VIRT_IO_SIZE)
+				*value += pci_io_base_virt;
+		}
+		else
+		{
+			/*
+			 * Base address register that contains a memory address. If the
+			 * address is valid on the Hades (0 <= *value < HADES_MEM_SIZE),
+			 * add HADES_MEM_BASE to the value.
+			 */
+
+			if (*value == 0)
+			{
+				/*
+				 * Base address is 0. Test if this base
+				 * address register is used.
+				 */
+
+				*pci_addr = 0xffffffff;
+				if (*pci_addr != 0)
+				{
+					*pci_addr = *value;
+					if (*value < HADES_MEM_SIZE)
+						*value += HADES_MEM_BASE;
+				}
+			}
+			else
+			{
+				if (*value < HADES_MEM_SIZE)
+					*value += HADES_MEM_BASE;
+			}
+		}
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
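+/*
+ * Worked example (illustrative, not part of this patch): a memory BAR that
+ * reads back as 0x00100000 (below HADES_MEM_SIZE) is returned to the caller
+ * as HADES_MEM_BASE + 0x00100000 = 0x80100000; hades_write_config_dword()
+ * below performs the inverse translation before writing.
+ */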
+
+static int hades_write_config_byte(struct pci_dev *dev, int where, u8 value)
+{
+	volatile unsigned char *pci_addr;
+
+	if ((pci_addr = (unsigned char *) mk_conf_addr(dev, where)) == NULL)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	*pci_addr = value;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int hades_write_config_word(struct pci_dev *dev, int where, u16 value)
+{
+	volatile unsigned short *pci_addr;
+
+	if ((pci_addr = (unsigned short *) mk_conf_addr(dev, where)) == NULL)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	*pci_addr = cpu_to_le16(value);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int hades_write_config_dword(struct pci_dev *dev, int where, u32 value)
+{
+	volatile unsigned int *pci_addr;
+	unsigned char header_type;
+	int result;
+
+	if ((pci_addr = (unsigned int *) mk_conf_addr(dev, where)) == NULL)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/*
+	 * Check if the value is an address on the bus. If true, subtract the
+	 * base address of the PCI memory or PCI I/O area on the Hades.
+	 */
+
+	if ((result = hades_read_config_byte(dev, PCI_HEADER_TYPE,
+					     &header_type)) != PCIBIOS_SUCCESSFUL)
+		return result;
+
+	if (((where >= PCI_BASE_ADDRESS_0) && (where <= PCI_BASE_ADDRESS_1)) ||
+	    ((header_type != PCI_HEADER_TYPE_BRIDGE) && ((where >= PCI_BASE_ADDRESS_2) &&
+							 (where <= PCI_BASE_ADDRESS_5))))
+	{
+		if ((value & PCI_BASE_ADDRESS_SPACE) ==
+		    PCI_BASE_ADDRESS_SPACE_IO)
+		{
+			/*
+			 * I/O address. Check if the address is a valid address on
+			 * the Hades (pci_io_base_virt <= value < pci_io_base_virt +
+			 * HADES_VIRT_IO_SIZE) or if the value is 0xffffffff. If not
+			 * true do not write the base address register. If it is a
+			 * valid base address subtract 'pci_io_base_virt' from the value.
+			 */
+
+			if ((value >= pci_io_base_virt) && (value < (pci_io_base_virt +
+														 HADES_VIRT_IO_SIZE)))
+				value -= pci_io_base_virt;
+			else
+			{
+				if (value != 0xffffffff)
+					return PCIBIOS_SET_FAILED;
+			}
+		}
+		else
+		{
+			/*
+			 * Memory address. Check if the address is a valid address on
+			 * the Hades (HADES_MEM_BASE <= value < HADES_MEM_BASE + HADES_MEM_SIZE) or
+			 * if the value is 0xffffffff. If not true do not write
+			 * the base address register. If it is a valid base address
+			 * subtract HADES_MEM_BASE from the value.
+			 */
+
+			if ((value >= HADES_MEM_BASE) && (value < (HADES_MEM_BASE + HADES_MEM_SIZE)))
+				value -= HADES_MEM_BASE;
+			else
+			{
+				if (value != 0xffffffff)
+					return PCIBIOS_SET_FAILED;
+			}
+		}
+	}
+
+	*pci_addr = cpu_to_le32(value);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * static void __init hades_fixup(int pci_modify)
+ *
+ * Assign IRQ numbers as used by Linux to the interrupt pins
+ * of the PCI cards.
+ */
+
+static void __init hades_fixup(int pci_modify)
+{
+	char irq_tab[4] = {
+		[0] = IRQ_TT_MFP_IO0,		/* Slot 0. */
+		[1] = IRQ_TT_MFP_IO1,		/* Slot 1. */
+		[2] = IRQ_TT_MFP_SCC,		/* Slot 2. */
+		[3] = IRQ_TT_MFP_SCSIDMA	/* Slot 3. */
+	};
+	struct pci_dev *dev = NULL;
+	unsigned char slot;
+
+	/*
+	 * Go through all devices, fixing up irqs as we see fit:
+	 */
+
+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
+	{
+		if (dev->class >> 16 != PCI_BASE_CLASS_BRIDGE)
+		{
+			slot = PCI_SLOT(dev->devfn);	/* Determine slot number. */
+			dev->irq = irq_tab[slot];
+			if (pci_modify)
+				pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+		}
+	}
+}
+
+/*
+ * static void hades_conf_device(struct pci_dev *dev)
+ *
+ * Machine dependent configuration of the given device.
+ *
+ * Parameters:
+ *
+ * dev		- the pci device.
+ */
+
+static void __init hades_conf_device(struct pci_dev *dev)
+{
+	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0);
+}
+
+static struct pci_ops hades_pci_ops = {
+	.read_byte =	hades_read_config_byte,
+	.read_word =	hades_read_config_word,
+	.read_dword =	hades_read_config_dword,
+	.write_byte =	hades_write_config_byte,
+	.write_word =	hades_write_config_word,
+	.write_dword =	hades_write_config_dword
+};
+
+/*
+ * struct pci_bus_info *init_hades_pci(void)
+ *
+ * Machine specific initialisation:
+ *
+ * - Allocate and initialise a 'pci_bus_info' structure
+ * - Initialise hardware
+ *
+ * Result: pointer to 'pci_bus_info' structure.
+ */
+
+struct pci_bus_info * __init init_hades_pci(void)
+{
+	struct pci_bus_info *bus;
+	int i;
+
+	/*
+	 * Remap I/O and configuration space.
+	 */
+
+	pci_io_base_virt = (unsigned long) ioremap(HADES_IO_BASE, HADES_VIRT_IO_SIZE);
+
+	for (i = 0; i < N_SLOTS; i++)
+		pci_conf_base_virt[i] = (unsigned long) ioremap(pci_conf_base_phys[i], 0x10000);
+
+	/*
+	 * Allocate memory for bus info structure.
+	 */
+
+	bus = kmalloc(sizeof(struct pci_bus_info), GFP_KERNEL);
+	if (!bus)
+		return NULL;
+	memset(bus, 0, sizeof(struct pci_bus_info));
+
+	/*
+	 * Claim resources. The m68k has no separate I/O space; both
+	 * PCI memory space and PCI I/O space are in memory space. Therefore
+	 * the I/O resources are requested in memory space as well.
+	 */
+
+	if (request_resource(&iomem_resource, &config_space) != 0)
+	{
+		kfree(bus);
+		return NULL;
+	}
+
+	if (request_resource(&iomem_resource, &io_space) != 0)
+	{
+		release_resource(&config_space);
+		kfree(bus);
+		return NULL;
+	}
+
+	bus->mem_space.start = HADES_MEM_BASE;
+	bus->mem_space.end = HADES_MEM_BASE + HADES_MEM_SIZE - 1;
+	bus->mem_space.name = pci_mem_name;
+#if 1
+	if (request_resource(&iomem_resource, &bus->mem_space) != 0)
+	{
+		release_resource(&io_space);
+		release_resource(&config_space);
+		kfree(bus);
+		return NULL;
+	}
+#endif
+	bus->io_space.start = pci_io_base_virt;
+	bus->io_space.end = pci_io_base_virt + HADES_VIRT_IO_SIZE - 1;
+	bus->io_space.name = pci_io_name;
+#if 1
+	if (request_resource(&ioport_resource, &bus->io_space) != 0)
+	{
+		release_resource(&bus->mem_space);
+		release_resource(&io_space);
+		release_resource(&config_space);
+		kfree(bus);
+		return NULL;
+	}
+#endif
+	/*
+	 * Set hardware dependent functions.
+	 */
+
+	bus->m68k_pci_ops = &hades_pci_ops;
+	bus->fixup = hades_fixup;
+	bus->conf_device = hades_conf_device;
+
+	/*
+	 * Select high to low edge for PCI interrupts.
+	 */
+
+	tt_mfp.active_edge &= ~0x27;
+
+	return bus;
+}
+#endif
diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c
new file mode 100644
index 0000000..288f5e6
--- /dev/null
+++ b/arch/m68k/atari/stdma.c
@@ -0,0 +1,196 @@
+/*
+ *  linux/arch/m68k/atari/stdma.c
+ *
+ *  Copyright (C) 1994 Roman Hodek
+ *
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+
+/* This file contains some functions for controlling the access to the */
+/* ST-DMA chip that may be shared between devices. Currently we have:  */
+/*   TT:     Floppy and ACSI bus                                       */
+/*   Falcon: Floppy and SCSI                                           */
+/*                                                                     */
+/* The controlling functions set up a wait queue for access to the     */
+/* ST-DMA chip. Callers to stdma_lock() that cannot be granted access  */
+/* are put onto a queue and woken up later when the owner calls        */
+/* stdma_release(). Additionally, the caller gives his interrupt       */
+/* service routine to stdma_lock().                                    */
+/*                                                                     */
+/* On the Falcon, the IDE bus uses just the ACSI/Floppy interrupt, but */
+/* not the ST-DMA chip itself. So falhd.c does not need to lock the    */
+/* chip. The interrupt is routed to falhd.c if IDE is configured, the  */
+/* model is a Falcon and the interrupt was caused by the HD controller */
+/* (can be determined by looking at its status register).              */
+
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/genhd.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+
+#include <asm/atari_stdma.h>
+#include <asm/atariints.h>
+#include <asm/atarihw.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+static int stdma_locked;			/* the semaphore */
+						/* int func to be called */
+static irqreturn_t (*stdma_isr)(int, void *, struct pt_regs *);
+static void *stdma_isr_data;			/* data passed to isr */
+static DECLARE_WAIT_QUEUE_HEAD(stdma_wait);	/* wait queue for ST-DMA */
+
+
+
+
+/***************************** Prototypes *****************************/
+
+static irqreturn_t stdma_int (int irq, void *dummy, struct pt_regs *fp);
+
+/************************* End of Prototypes **************************/
+
+
+
+/*
+ * Function: void stdma_lock( isrfunc isr, void *data )
+ *
+ * Purpose: Tries to get a lock on the ST-DMA chip that is used by more
+ *   than one device driver. Waits on stdma_wait until the lock is free.
+ *   stdma_lock() may not be called from an interrupt! You have to
+ *   get the lock in your main routine and release it when your
+ *   request is finished.
+ *
+ * Inputs: An interrupt function that is called until the lock is
+ *   released.
+ *
+ * Returns: nothing
+ *
+ */
+
+void stdma_lock(irqreturn_t (*handler)(int, void *, struct pt_regs *),
+		void *data)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);		/* protect lock */
+
+	/* Since the DMA is used for file system purposes, we
+	 have to sleep uninterruptible (there may be locked
+	 buffers) */
+	wait_event(stdma_wait, !stdma_locked);
+
+	stdma_locked   = 1;
+	stdma_isr      = handler;
+	stdma_isr_data = data;
+	local_irq_restore(flags);
+}
+
+
+/*
+ * Function: void stdma_release( void )
+ *
+ * Purpose: Releases the lock on the ST-DMA chip.
+ *
+ * Inputs: none
+ *
+ * Returns: nothing
+ *
+ */
+
+void stdma_release(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	stdma_locked   = 0;
+	stdma_isr      = NULL;
+	stdma_isr_data = NULL;
+	wake_up(&stdma_wait);
+
+	local_irq_restore(flags);
+}
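+
+/*
+ * Illustrative usage sketch (not part of this file's interface): a
+ * hypothetical client driver would take the lock in process context before
+ * programming the ST-DMA chip and release it from the interrupt handler it
+ * registered.  The example_* names below are made up for the example.
+ */
+#if 0
+static irqreturn_t example_dma_done(int irq, void *data, struct pt_regs *fp)
+{
+	/* ... acknowledge the hardware and complete the request ... */
+	stdma_release();			/* hand the chip back */
+	return IRQ_HANDLED;
+}
+
+static void example_start_transfer(void *data)
+{
+	stdma_lock(example_dma_done, data);	/* may sleep until free */
+	/* ... set up the ST-DMA registers and start the transfer ... */
+}
+#endif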
+
+
+/*
+ * Function: int stdma_others_waiting( void )
+ *
+ * Purpose: Check if someone waits for the ST-DMA lock.
+ *
+ * Inputs: none
+ *
+ * Returns: 0 if no one is waiting, != 0 otherwise
+ *
+ */
+
+int stdma_others_waiting(void)
+{
+	return waitqueue_active(&stdma_wait);
+}
+
+
+/*
+ * Function: int stdma_islocked( void )
+ *
+ * Purpose: Check if the ST-DMA is currently locked.
+ * Note: Returned status is only valid if ints are disabled while calling and
+ *       as long as they remain disabled.
+ *       If called with ints enabled, status can change only from locked to
+ *       unlocked, because ints may not lock the ST-DMA.
+ *
+ * Inputs: none
+ *
+ * Returns: != 0 if locked, 0 otherwise
+ *
+ */
+
+int stdma_islocked(void)
+{
+	return stdma_locked;
+}
+
+
+/*
+ * Function: void stdma_init( void )
+ *
+ * Purpose: Initialize the ST-DMA chip access controlling.
+ *   It sets up the interrupt and its service routine. The int is registered
+ *   as slow int, client devices have to live with that (no problem
+ *   currently).
+ *
+ * Inputs: none
+ *
+ * Return: nothing
+ *
+ */
+
+void __init stdma_init(void)
+{
+	stdma_isr = NULL;
+	request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW,
+	            "ST-DMA: floppy/ACSI/IDE/Falcon-SCSI", stdma_int);
+}
+
+
+/*
+ * Function: void stdma_int()
+ *
+ * Purpose: The interrupt routine for the ST-DMA. It calls the isr
+ *   registered by stdma_lock().
+ *
+ */
+
+static irqreturn_t stdma_int(int irq, void *dummy, struct pt_regs *fp)
+{
+  if (stdma_isr)
+      (*stdma_isr)(irq, stdma_isr_data, fp);
+  return IRQ_HANDLED;
+}
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
new file mode 100644
index 0000000..5a3c106
--- /dev/null
+++ b/arch/m68k/atari/stram.c
@@ -0,0 +1,1247 @@
+/*
+ * arch/m68k/atari/stram.c: Functions for ST-RAM allocations
+ *
+ * Copyright 1994-97 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/init.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/shm.h>
+#include <linux/bootmem.h>
+#include <linux/mount.h>
+#include <linux/blkdev.h>
+
+#include <asm/setup.h>
+#include <asm/machdep.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/atarihw.h>
+#include <asm/atari_stram.h>
+#include <asm/io.h>
+#include <asm/semaphore.h>
+
+#include <linux/swapops.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define	DPRINTK(fmt,args...) printk( fmt, ##args )
+#else
+#define DPRINTK(fmt,args...)
+#endif
+
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_STRAM_PROC)
+/* abbrev for the && above... */
+#define DO_PROC
+#include <linux/proc_fs.h>
+#endif
+
+/* Pre-swapping comments:
+ *
+ * ++roman:
+ *
+ * New version of ST-Ram buffer allocation. Instead of using the
+ * 1 MB - 4 KB that remain when the ST-Ram chunk starts at $1000
+ * (1 MB granularity!), such buffers are reserved like this:
+ *
+ *  - If the kernel resides in ST-Ram anyway, we can take the buffer
+ *    from behind the current kernel data space the normal way
+ *    (incrementing start_mem).
+ *
+ *  - If the kernel is in TT-Ram, stram_init() initializes start and
+ *    end of the available region. Buffers are allocated from there
+ *    and mem_init() later marks the pages used this way as reserved.
+ *    Since each TT-Ram chunk is at least 4 MB in size, I hope there
+ *    won't be an overrun of the ST-Ram region by normal kernel data
+ *    space.
+ *
+ * For that, ST-Ram may only be allocated while kernel initialization
+ * is going on, or exactly: before mem_init() is called. There is also
+ * no provision now for freeing ST-Ram buffers. It seems that isn't
+ * really needed.
+ *
+ */
+
+/*
+ * New Nov 1997: Use ST-RAM as swap space!
+ *
+ * In the past, there were often problems with modules that require ST-RAM
+ * buffers. Such drivers have to use __get_dma_pages(), which unfortunately
+ * often isn't very successful in allocating more than 1 page :-( [1] The net
+ * result was that most of the time you couldn't insmod such modules (ataflop,
+ * ACSI, SCSI on Falcon, Atari internal framebuffer, not to speak of acsi_slm,
+ * which needs a 1 MB buffer... :-).
+ *
+ * To overcome this limitation, ST-RAM can now be turned into a very
+ * high-speed swap space. If a request for an ST-RAM buffer comes, the kernel
+ * now tries to unswap some pages on that swap device to make some free (and
+ * contiguous) space. This works much better in comparison to
+ * __get_dma_pages(), since used swap pages can be selectively freed by either
+ * moving them to somewhere else in swap space, or by reading them back into
+ * system memory. OK, the operation of unswapping isn't really cheap (for
+ * each page, one has to go through the page tables of all processes), but it
+ * doesn't happen that often (only when allocating ST-RAM, i.e. when loading a
+ * module that needs ST-RAM). But it at least makes it possible to load such
+ * modules!
+ *
+ * It could also be that overall system performance increases a bit due to
+ * ST-RAM swapping, since slow ST-RAM isn't used anymore for holding data or
+ * executing code. It's then just a (very fast, compared to disk) backing
+ * store for not-so-often needed data. (But this effect must be compared
+ * with the loss of total memory...) Don't know if the effect is already
+ * visible on a TT, where the speed difference between ST- and TT-RAM isn't
+ * that dramatic, but it should on machines where TT-RAM is really much faster
+ * (e.g. Afterburner).
+ *
+ *   [1]: __get_free_pages() does a fine job if you only want one page, but if
+ * you want more (contiguous) pages, it can give you such a block only if
+ * there's already a free one. The algorithm can't try to free buffers or swap
+ * out something in order to make more free space, since all that page-freeing
+ * mechanisms work "target-less", i.e. they just free something, but not in a
+ * specific place. I.e., __get_free_pages() can't do anything to free
+ * *adjacent* pages :-( This situation becomes even worse for DMA memory,
+ * since the freeing algorithms are also blind to DMA capability of pages.
+ */
+
+/* 1998-10-20: ++andreas
+   unswap_by_move disabled because it does not handle swapped shm pages.
+*/
+
+/* 2000-05-01: ++andreas
+   Integrated with bootmem.  Remove all traces of unswap_by_move.
+*/
+
+#ifdef CONFIG_STRAM_SWAP
+#define ALIGN_IF_SWAP(x)	PAGE_ALIGN(x)
+#else
+#define ALIGN_IF_SWAP(x)	(x)
+#endif
+
+/* get index of swap page at address 'addr' */
+#define SWAP_NR(addr)		(((addr) - swap_start) >> PAGE_SHIFT)
+
+/* get address of swap page #'nr' */
+#define SWAP_ADDR(nr)		(swap_start + ((nr) << PAGE_SHIFT))
+
+/* get number of pages for 'n' bytes (already page-aligned) */
+#define N_PAGES(n)			((n) >> PAGE_SHIFT)
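+
+/* For example (assuming 4 kB pages): SWAP_NR(swap_start + 0x3000) is 3,
+ * SWAP_ADDR(3) maps back to swap_start + 0x3000, and N_PAGES(0x3000) is 3. */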
+
+/* The following two numbers define the maximum fraction of ST-RAM in total
+ * memory, below that the kernel would automatically use ST-RAM as swap
+ * space. This decision can be overridden with stram_swap= */
+#define MAX_STRAM_FRACTION_NOM		1
+#define MAX_STRAM_FRACTION_DENOM	3
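+
+/* E.g. on a machine with 4 MB of ST-RAM and 20 MB of memory in total, the
+ * ST-RAM share is 1/5 <= 1/3, so swapping would be enabled by default. */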
+
+/* Start and end (virtual) of ST-RAM */
+static void *stram_start, *stram_end;
+
+/* set after memory_init() executed and allocations via start_mem aren't
+ * possible anymore */
+static int mem_init_done;
+
+/* set if kernel is in ST-RAM */
+static int kernel_in_stram;
+
+typedef struct stram_block {
+	struct stram_block *next;
+	void *start;
+	unsigned long size;
+	unsigned flags;
+	const char *owner;
+} BLOCK;
+
+/* values for flags field */
+#define BLOCK_FREE		0x01	/* free structure in the BLOCKs pool */
+#define BLOCK_KMALLOCED	0x02	/* structure allocated by kmalloc() */
+#define BLOCK_GFP		0x08	/* block allocated with __get_dma_pages() */
+#define BLOCK_INSWAP	0x10	/* block allocated in swap space */
+
+/* list of allocated blocks */
+static BLOCK *alloc_list;
+
+/* We can't always use kmalloc() to allocate BLOCK structures, since
+ * stram_alloc() can be called rather early. So we need some pool of
+ * statically allocated structures. 20 of them are more than enough, so in
+ * most cases we should never need to call kmalloc(). */
+#define N_STATIC_BLOCKS	20
+static BLOCK static_blocks[N_STATIC_BLOCKS];
+
+#ifdef CONFIG_STRAM_SWAP
+/* max. number of bytes to use for swapping
+ *  0 = no ST-RAM swapping
+ * -1 = do swapping (to whole ST-RAM) if it's less than MAX_STRAM_FRACTION of
+ *      total memory
+ */
+static int max_swap_size = -1;
+
+/* start and end of swapping area */
+static void *swap_start, *swap_end;
+
+/* The ST-RAM's swap info structure */
+static struct swap_info_struct *stram_swap_info;
+
+/* The ST-RAM's swap type */
+static int stram_swap_type;
+
+/* Semaphore for get_stram_region.  */
+static DECLARE_MUTEX(stram_swap_sem);
+
+/* major and minor device number of the ST-RAM device; for the major, we use
+ * the same as the Amiga z2ram driver, which is really similar and cannot
+ * exist on an Atari, and for the minor a relatively odd number to keep the
+ * user from creating and using that device. */
+#define	STRAM_MAJOR		Z2RAM_MAJOR
+#define	STRAM_MINOR		13
+
+/* Some impossible pointer value */
+#define MAGIC_FILE_P	(struct file *)0xffffdead
+
+#ifdef DO_PROC
+static unsigned stat_swap_read;
+static unsigned stat_swap_write;
+static unsigned stat_swap_force;
+#endif /* DO_PROC */
+
+#endif /* CONFIG_STRAM_SWAP */
+
+/***************************** Prototypes *****************************/
+
+#ifdef CONFIG_STRAM_SWAP
+static int swap_init(void *start_mem, void *swap_data);
+static void *get_stram_region( unsigned long n_pages );
+static void free_stram_region( unsigned long offset, unsigned long n_pages
+			       );
+static int in_some_region(void *addr);
+static unsigned long find_free_region( unsigned long n_pages, unsigned long
+				       *total_free, unsigned long
+				       *region_free );
+static void do_stram_request(request_queue_t *);
+static int stram_open( struct inode *inode, struct file *filp );
+static int stram_release( struct inode *inode, struct file *filp );
+static void reserve_region(void *start, void *end);
+#endif
+static BLOCK *add_region( void *addr, unsigned long size );
+static BLOCK *find_region( void *addr );
+static int remove_region( BLOCK *block );
+
+/************************* End of Prototypes **************************/
+
+
+/* ------------------------------------------------------------------------ */
+/*							   Public Interface								*/
+/* ------------------------------------------------------------------------ */
+
+/*
+ * This init function is called very early by atari/config.c
+ * It initializes some internal variables needed for stram_alloc()
+ */
+void __init atari_stram_init(void)
+{
+	int i;
+
+	/* initialize static blocks */
+	for( i = 0; i < N_STATIC_BLOCKS; ++i )
+		static_blocks[i].flags = BLOCK_FREE;
+
+	/* determine whether kernel code resides in ST-RAM (then ST-RAM is the
+	 * first memory block at virtual 0x0) */
+	stram_start = phys_to_virt(0);
+	kernel_in_stram = (stram_start == 0);
+
+	for( i = 0; i < m68k_num_memory; ++i ) {
+		if (m68k_memory[i].addr == 0) {
+			/* skip first 2kB or page (supervisor-only!) */
+			stram_end = stram_start + m68k_memory[i].size;
+			return;
+		}
+	}
+	/* Should never come here! (There is always ST-Ram!) */
+	panic( "atari_stram_init: no ST-RAM found!" );
+}
+
+
+/*
+ * This function is called from setup_arch() to reserve the pages needed for
+ * ST-RAM management.
+ */
+void __init atari_stram_reserve_pages(void *start_mem)
+{
+#ifdef CONFIG_STRAM_SWAP
+	/* if max_swap_size is negative (i.e. no stram_swap= option given),
+	 * determine at run time whether to use ST-RAM swapping */
+	if (max_swap_size < 0)
+		/* Use swapping if ST-RAM doesn't make up more than MAX_STRAM_FRACTION
+		 * of total memory. In that case, the max. size is set to 16 MB,
+		 * because ST-RAM can never be bigger than that.
+		 * Also, never use swapping on a Hades, since there's no separate
+		 * ST-RAM in that machine. */
+		max_swap_size =
+			(!MACH_IS_HADES &&
+			 (N_PAGES(stram_end-stram_start)*MAX_STRAM_FRACTION_DENOM <=
+			  ((unsigned long)high_memory>>PAGE_SHIFT)*MAX_STRAM_FRACTION_NOM)) ? 16*1024*1024 : 0;
+	DPRINTK( "atari_stram_reserve_pages: max_swap_size = %d\n", max_swap_size );
+#endif
+
+	/* always reserve first page of ST-RAM, the first 2 kB are
+	 * supervisor-only! */
+	if (!kernel_in_stram)
+		reserve_bootmem (0, PAGE_SIZE);
+
+#ifdef CONFIG_STRAM_SWAP
+	{
+		void *swap_data;
+
+		start_mem = (void *) PAGE_ALIGN ((unsigned long) start_mem);
+		/* determine first page to use as swap: if the kernel is
+		   in TT-RAM, this is the first page of (usable) ST-RAM;
+		   otherwise just use the end of kernel data (= start_mem) */
+		swap_start = !kernel_in_stram ? stram_start + PAGE_SIZE : start_mem;
+		/* decrement by one page, rest of kernel assumes that first swap page
+		 * is always reserved and maybe doesn't handle swp_entry == 0
+		 * correctly */
+		swap_start -= PAGE_SIZE;
+		swap_end = stram_end;
+		if (swap_end-swap_start > max_swap_size)
+			swap_end =  swap_start + max_swap_size;
+		DPRINTK( "atari_stram_reserve_pages: swapping enabled; "
+				 "swap=%p-%p\n", swap_start, swap_end);
+
+		/* reserve some amount of memory for maintenance of
+		 * swapping itself: one page for each 2048 (PAGE_SIZE/2)
+		 * swap pages. (2 bytes for each page) */
+		swap_data = start_mem;
+		start_mem += ((SWAP_NR(swap_end) + PAGE_SIZE/2 - 1)
+			      >> (PAGE_SHIFT-1)) << PAGE_SHIFT;
+		/* correct swap_start if necessary */
+		if (swap_start + PAGE_SIZE == swap_data)
+			swap_start = start_mem - PAGE_SIZE;
+
+		if (!swap_init( start_mem, swap_data )) {
+			printk( KERN_ERR "ST-RAM swap space initialization failed\n" );
+			max_swap_size = 0;
+			return;
+		}
+		/* reserve region for swapping meta-data */
+		reserve_region(swap_data, start_mem);
+		/* reserve swapping area itself */
+		reserve_region(swap_start + PAGE_SIZE, swap_end);
+
+		/*
+		 * If the whole ST-RAM is used for swapping, there are no allocatable
+		 * dma pages left. But unfortunately, some shared parts of the kernel
+		 * (particularly the SCSI mid-level) call __get_dma_pages()
+		 * unconditionally :-( These calls then fail, and scsi.c doesn't even
+		 * check for NULL return values and just crashes. The quick fix for
+		 * this (instead of doing much clean up work in the SCSI code) is to
+		 * pretend all pages are DMA-able by setting mach_max_dma_address to
+		 * ULONG_MAX. This doesn't change any functionality so far, since
+		 * get_dma_pages() shouldn't be used on Atari anyway anymore (better
+		 * use atari_stram_alloc()), and the Atari SCSI drivers don't need DMA
+		 * memory. But unfortunately there's now no warning of any kind (not even
+		 * a NULL return value) if you use get_dma_pages() nevertheless :-(
+		 * You just will get non-DMA-able memory...
+		 */
+		mach_max_dma_address = 0xffffffff;
+	}
+#endif
+}
+
+void atari_stram_mem_init_hook (void)
+{
+	mem_init_done = 1;
+}
+
+
+/*
+ * This is main public interface: somehow allocate a ST-RAM block
+ * There are three strategies:
+ *
+ *  - If we're before mem_init(), we have to make a static allocation. The
+ *    region is taken in the kernel data area (if the kernel is in ST-RAM) or
+ *    from the start of ST-RAM (if the kernel is in TT-RAM) and added to the
+ *    rsvd_stram_* region. The ST-RAM is somewhere in the middle of kernel
+ *    address space in the latter case.
+ *
+ *  - If mem_init() already has been called and ST-RAM swapping is enabled,
+ *    try to get the memory from the (pseudo) swap-space, either free already
+ *    or by moving some other pages out of the swap.
+ *
+ *  - If mem_init() already has been called, and ST-RAM swapping is not
+ *    enabled, the only possibility is to try with __get_dma_pages(). This has
+ *    the disadvantage that it's very hard to get more than 1 page, and it is
+ *    likely to fail :-(
+ *
+ */
+void *atari_stram_alloc(long size, const char *owner)
+{
+	void *addr = NULL;
+	BLOCK *block;
+	int flags;
+
+	DPRINTK("atari_stram_alloc(size=%08lx,owner=%s)\n", size, owner);
+
+	size = ALIGN_IF_SWAP(size);
+	DPRINTK( "atari_stram_alloc: rounded size = %08lx\n", size );
+#ifdef CONFIG_STRAM_SWAP
+	if (max_swap_size) {
+		/* If swapping is active: make some free space in the swap
+		   "device". */
+		DPRINTK( "atari_stram_alloc: after mem_init, swapping ok, "
+				 "calling get_region\n" );
+		addr = get_stram_region( N_PAGES(size) );
+		flags = BLOCK_INSWAP;
+	}
+	else
+#endif
+	if (!mem_init_done)
+		return alloc_bootmem_low(size);
+	else {
+		/* After mem_init() and no swapping: can only resort to
+		 * __get_dma_pages() */
+		addr = (void *)__get_dma_pages(GFP_KERNEL, get_order(size));
+		flags = BLOCK_GFP;
+		DPRINTK( "atari_stram_alloc: after mem_init, swapping off, "
+				 "get_pages=%p\n", addr );
+	}
+
+	if (addr) {
+		if (!(block = add_region( addr, size ))) {
+			/* out of memory for BLOCK structure :-( */
+			DPRINTK( "atari_stram_alloc: out of mem for BLOCK -- "
+					 "freeing again\n" );
+#ifdef CONFIG_STRAM_SWAP
+			if (flags == BLOCK_INSWAP)
+				free_stram_region( SWAP_NR(addr), N_PAGES(size) );
+			else
+#endif
+				free_pages((unsigned long)addr, get_order(size));
+			return( NULL );
+		}
+		block->owner = owner;
+		block->flags |= flags;
+	}
+	return( addr );
+}
+
+void atari_stram_free( void *addr )
+
+{
+	BLOCK *block;
+
+	DPRINTK( "atari_stram_free(addr=%p)\n", addr );
+
+	if (!(block = find_region( addr ))) {
+		printk( KERN_ERR "Attempt to free non-allocated ST-RAM block at %p "
+				"from %p\n", addr, __builtin_return_address(0) );
+		return;
+	}
+	DPRINTK( "atari_stram_free: found block (%p): size=%08lx, owner=%s, "
+			 "flags=%02x\n", block, block->size, block->owner, block->flags );
+
+#ifdef CONFIG_STRAM_SWAP
+	if (!max_swap_size) {
+#endif
+		if (block->flags & BLOCK_GFP) {
+			DPRINTK("atari_stram_free: is kmalloced, order_size=%d\n",
+				get_order(block->size));
+			free_pages((unsigned long)addr, get_order(block->size));
+		}
+		else
+			goto fail;
+#ifdef CONFIG_STRAM_SWAP
+	}
+	else if (block->flags & BLOCK_INSWAP) {
+		DPRINTK( "atari_stram_free: is swap-alloced\n" );
+		free_stram_region( SWAP_NR(block->start), N_PAGES(block->size) );
+	}
+	else
+		goto fail;
+#endif
+	remove_region( block );
+	return;
+
+  fail:
+	printk( KERN_ERR "atari_stram_free: cannot free block at %p "
+			"(called from %p)\n", addr, __builtin_return_address(0) );
+}
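+
+/*
+ * Illustrative usage sketch (not part of this file): a hypothetical driver
+ * needing a DMA-capable buffer in ST-RAM would pair the two calls above like
+ * this.  The buffer size and owner string are made up for the example.
+ */
+#if 0
+static void *example_buf;
+
+static int example_init(void)
+{
+	example_buf = atari_stram_alloc(4 * PAGE_SIZE, "example-driver");
+	if (!example_buf)
+		return -ENOMEM;		/* ST-RAM full or too fragmented */
+	return 0;
+}
+
+static void example_exit(void)
+{
+	atari_stram_free(example_buf);
+}
+#endif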
+
+
+#ifdef CONFIG_STRAM_SWAP
+
+
+/* ------------------------------------------------------------------------ */
+/*						   Main Swapping Functions							*/
+/* ------------------------------------------------------------------------ */
+
+
+/*
+ * Initialize ST-RAM swap device
+ * (lots copied and modified from sys_swapon() in mm/swapfile.c)
+ */
+static int __init swap_init(void *start_mem, void *swap_data)
+{
+	static struct dentry fake_dentry;
+	static struct vfsmount fake_vfsmnt;
+	struct swap_info_struct *p;
+	struct inode swap_inode;
+	unsigned int type;
+	void *addr;
+	int i, j, k, prev;
+
+	DPRINTK("swap_init(start_mem=%p, swap_data=%p)\n",
+		start_mem, swap_data);
+
+	/* need at least one page for swapping to (and this also isn't very
+	 * much... :-) */
+	if (swap_end - swap_start < 2*PAGE_SIZE) {
+		printk( KERN_WARNING "stram_swap_init: swap space too small\n" );
+		return( 0 );
+	}
+
+	/* find free slot in swap_info */
+	for( p = swap_info, type = 0; type < nr_swapfiles; type++, p++ )
+		if (!(p->flags & SWP_USED))
+			break;
+	if (type >= MAX_SWAPFILES) {
+		printk( KERN_WARNING "stram_swap_init: max. number of "
+				"swap devices exhausted\n" );
+		return( 0 );
+	}
+	if (type >= nr_swapfiles)
+		nr_swapfiles = type+1;
+
+	stram_swap_info = p;
+	stram_swap_type = type;
+
+	/* fake some dir cache entries to give us some name in /proc/swaps */
+	fake_dentry.d_parent = &fake_dentry;
+	fake_dentry.d_name.name = "stram (internal)";
+	fake_dentry.d_name.len = 16;
+	fake_vfsmnt.mnt_parent = &fake_vfsmnt;
+
+	p->flags        = SWP_USED;
+	p->swap_file    = &fake_dentry;
+	p->swap_vfsmnt  = &fake_vfsmnt;
+	p->swap_map	= swap_data;
+	p->cluster_nr   = 0;
+	p->next         = -1;
+	p->prio         = 0x7ff0;	/* a rather high priority, but not the highest
+								 * to give the user a chance to override */
+
+	/* call stram_open() directly, avoids at least the overhead in
+	 * constructing a dummy file structure... */
+	swap_inode.i_rdev = MKDEV( STRAM_MAJOR, STRAM_MINOR );
+	stram_open( &swap_inode, MAGIC_FILE_P );
+	p->max = SWAP_NR(swap_end);
+
+	/* initialize swap_map: set regions that are already allocated or belong
+	 * to kernel data space to SWAP_MAP_BAD, otherwise to free */
+	j = 0; /* # of free pages */
+	k = 0; /* # of already allocated pages (from pre-mem_init stram_alloc()) */
+	p->lowest_bit = 0;
+	p->highest_bit = 0;
+	for( i = 1, addr = SWAP_ADDR(1); i < p->max;
+		 i++, addr += PAGE_SIZE ) {
+		if (in_some_region( addr )) {
+			p->swap_map[i] = SWAP_MAP_BAD;
+			++k;
+		}
+		else if (kernel_in_stram && addr < start_mem ) {
+			p->swap_map[i] = SWAP_MAP_BAD;
+		}
+		else {
+			p->swap_map[i] = 0;
+			++j;
+			if (!p->lowest_bit) p->lowest_bit = i;
+			p->highest_bit = i;
+		}
+	}
+	/* first page always reserved (and doesn't really belong to swap space) */
+	p->swap_map[0] = SWAP_MAP_BAD;
+
+	/* now swapping to this device ok */
+	p->pages = j + k;
+	swap_list_lock();
+	nr_swap_pages += j;
+	p->flags = SWP_WRITEOK;
+
+	/* insert swap space into swap_list */
+	prev = -1;
+	for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
+		if (p->prio >= swap_info[i].prio) {
+			break;
+		}
+		prev = i;
+	}
+	p->next = i;
+	if (prev < 0) {
+		swap_list.head = swap_list.next = p - swap_info;
+	} else {
+		swap_info[prev].next = p - swap_info;
+	}
+	swap_list_unlock();
+
+	printk( KERN_INFO "Using %dk (%d pages) of ST-RAM as swap space.\n",
+			p->pages << 2, p->pages );
+	return( 1 );
+}
+
+
+/*
+ * The swap entry has been read in advance, and we return 1 to indicate
+ * that the page has been used or is no longer needed.
+ *
+ * Always set the resulting pte to be nowrite (the same as COW pages
+ * after one process has exited).  We don't know just how many PTEs will
+ * share this swap entry, so be cautious and let do_wp_page work out
+ * what to do if a write is requested later.
+ */
+static inline void unswap_pte(struct vm_area_struct * vma, unsigned long
+			      address, pte_t *dir, swp_entry_t entry,
+			      struct page *page)
+{
+	pte_t pte = *dir;
+
+	if (pte_none(pte))
+		return;
+	if (pte_present(pte)) {
+		/* If this entry is swap-cached, then page must already
+                   hold the right address for any copies in physical
+                   memory */
+		if (pte_page(pte) != page)
+			return;
+		/* We will be removing the swap cache in a moment, so... */
+		set_pte(dir, pte_mkdirty(pte));
+		return;
+	}
+	if (pte_val(pte) != entry.val)
+		return;
+
+	DPRINTK("unswap_pte: replacing entry %08lx by new page %p",
+		entry.val, page);
+	set_pte(dir, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
+	swap_free(entry);
+	get_page(page);
+	inc_mm_counter(vma->vm_mm, rss);
+}
+
+static inline void unswap_pmd(struct vm_area_struct * vma, pmd_t *dir,
+			      unsigned long address, unsigned long size,
+			      unsigned long offset, swp_entry_t entry,
+			      struct page *page)
+{
+	pte_t * pte;
+	unsigned long end;
+
+	if (pmd_none(*dir))
+		return;
+	if (pmd_bad(*dir)) {
+		pmd_ERROR(*dir);
+		pmd_clear(dir);
+		return;
+	}
+	pte = pte_offset_kernel(dir, address);
+	offset += address & PMD_MASK;
+	address &= ~PMD_MASK;
+	end = address + size;
+	if (end > PMD_SIZE)
+		end = PMD_SIZE;
+	do {
+		unswap_pte(vma, offset+address-vma->vm_start, pte, entry, page);
+		address += PAGE_SIZE;
+		pte++;
+	} while (address < end);
+}
+
+static inline void unswap_pgd(struct vm_area_struct * vma, pgd_t *dir,
+			      unsigned long address, unsigned long size,
+			      swp_entry_t entry, struct page *page)
+{
+	pmd_t * pmd;
+	unsigned long offset, end;
+
+	if (pgd_none(*dir))
+		return;
+	if (pgd_bad(*dir)) {
+		pgd_ERROR(*dir);
+		pgd_clear(dir);
+		return;
+	}
+	pmd = pmd_offset(dir, address);
+	offset = address & PGDIR_MASK;
+	address &= ~PGDIR_MASK;
+	end = address + size;
+	if (end > PGDIR_SIZE)
+		end = PGDIR_SIZE;
+	do {
+		unswap_pmd(vma, pmd, address, end - address, offset, entry,
+			   page);
+		address = (address + PMD_SIZE) & PMD_MASK;
+		pmd++;
+	} while (address < end);
+}
+
+static void unswap_vma(struct vm_area_struct * vma, pgd_t *pgdir,
+		       swp_entry_t entry, struct page *page)
+{
+	unsigned long start = vma->vm_start, end = vma->vm_end;
+
+	do {
+		unswap_pgd(vma, pgdir, start, end - start, entry, page);
+		start = (start + PGDIR_SIZE) & PGDIR_MASK;
+		pgdir++;
+	} while (start < end);
+}
+
+static void unswap_process(struct mm_struct * mm, swp_entry_t entry,
+			   struct page *page)
+{
+	struct vm_area_struct* vma;
+
+	/*
+	 * Go through process' page directory.
+	 */
+	if (!mm)
+		return;
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		pgd_t * pgd = pgd_offset(mm, vma->vm_start);
+		unswap_vma(vma, pgd, entry, page);
+	}
+}
+
+
+static int unswap_by_read(unsigned short *map, unsigned long max,
+			  unsigned long start, unsigned long n_pages)
+{
+	struct task_struct *p;
+	struct page *page;
+	swp_entry_t entry;
+	unsigned long i;
+
+	DPRINTK( "unswapping %lu..%lu by reading in\n",
+			 start, start+n_pages-1 );
+
+	for( i = start; i < start+n_pages; ++i ) {
+		if (map[i] == SWAP_MAP_BAD) {
+			printk( KERN_ERR "get_stram_region: page %lu already "
+					"reserved??\n", i );
+			continue;
+		}
+
+		if (map[i]) {
+			entry = swp_entry(stram_swap_type, i);
+			DPRINTK("unswap: map[i=%lu]=%u nr_swap=%ld\n",
+				i, map[i], nr_swap_pages);
+
+			swap_device_lock(stram_swap_info);
+			map[i]++;
+			swap_device_unlock(stram_swap_info);
+			/* Get a page for the entry, using the existing
+			   swap cache page if there is one.  Otherwise,
+			   get a clean page and read the swap into it. */
+			page = read_swap_cache_async(entry, NULL, 0);
+			if (!page) {
+				swap_free(entry);
+				return -ENOMEM;
+			}
+			read_lock(&tasklist_lock);
+			for_each_process(p)
+				unswap_process(p->mm, entry, page);
+			read_unlock(&tasklist_lock);
+			shmem_unuse(entry, page);
+			/* Now get rid of the extra reference to the
+			   temporary page we've been using. */
+			if (PageSwapCache(page))
+				delete_from_swap_cache(page);
+			__free_page(page);
+	#ifdef DO_PROC
+			stat_swap_force++;
+	#endif
+		}
+
+		DPRINTK( "unswap: map[i=%lu]=%u nr_swap=%ld\n",
+				 i, map[i], nr_swap_pages );
+		swap_list_lock();
+		swap_device_lock(stram_swap_info);
+		map[i] = SWAP_MAP_BAD;
+		if (stram_swap_info->lowest_bit == i)
+			stram_swap_info->lowest_bit++;
+		if (stram_swap_info->highest_bit == i)
+			stram_swap_info->highest_bit--;
+		--nr_swap_pages;
+		swap_device_unlock(stram_swap_info);
+		swap_list_unlock();
+	}
+
+	return 0;
+}
+
+/*
+ * reserve a region in ST-RAM swap space for an allocation
+ */
+static void *get_stram_region( unsigned long n_pages )
+{
+	unsigned short *map = stram_swap_info->swap_map;
+	unsigned long max = stram_swap_info->max;
+	unsigned long start, total_free, region_free;
+	int err;
+	void *ret = NULL;
+
+	DPRINTK( "get_stram_region(n_pages=%lu)\n", n_pages );
+
+	down(&stram_swap_sem);
+
+	/* disallow writing to the swap device now */
+	stram_swap_info->flags = SWP_USED;
+
+	/* find a region of n_pages pages in the swap space including as many free
+	 * pages as possible (and excluding any already-reserved pages). */
+	if (!(start = find_free_region( n_pages, &total_free, &region_free )))
+		goto end;
+	DPRINTK( "get_stram_region: region starts at %lu, has %lu free pages\n",
+			 start, region_free );
+
+	err = unswap_by_read(map, max, start, n_pages);
+	if (err)
+		goto end;
+
+	ret = SWAP_ADDR(start);
+  end:
+	/* allow using swap device again */
+	stram_swap_info->flags = SWP_WRITEOK;
+	up(&stram_swap_sem);
+	DPRINTK( "get_stram_region: returning %p\n", ret );
+	return( ret );
+}
+
+
+/*
+ * free a reserved region in ST-RAM swap space
+ */
+static void free_stram_region( unsigned long offset, unsigned long n_pages )
+{
+	unsigned short *map = stram_swap_info->swap_map;
+	unsigned long i;
+
+	DPRINTK( "free_stram_region(offset=%lu,n_pages=%lu)\n", offset, n_pages );
+
+	if (offset < 1 || offset + n_pages > stram_swap_info->max) {
+		printk( KERN_ERR "free_stram_region: Trying to free non-ST-RAM\n" );
+		return;
+	}
+
+	swap_list_lock();
+	swap_device_lock(stram_swap_info);
+	/* un-reserve the freed pages; don't modify offset/n_pages here, they
+	 * are still needed for the meta-data update below */
+	for( i = 0; i < n_pages; ++i ) {
+		if (map[offset+i] != SWAP_MAP_BAD)
+			printk( KERN_ERR "free_stram_region: Swap page %lu was not "
+					"reserved\n", offset+i );
+		map[offset+i] = 0;
+	}
+
+	/* update swapping meta-data */
+	if (offset < stram_swap_info->lowest_bit)
+		stram_swap_info->lowest_bit = offset;
+	if (offset+n_pages-1 > stram_swap_info->highest_bit)
+		stram_swap_info->highest_bit = offset+n_pages-1;
+	if (stram_swap_info->prio > swap_info[swap_list.next].prio)
+		swap_list.next = swap_list.head;
+	nr_swap_pages += n_pages;
+	swap_device_unlock(stram_swap_info);
+	swap_list_unlock();
+}
+
+
+/* ------------------------------------------------------------------------ */
+/*						Utility Functions for Swapping						*/
+/* ------------------------------------------------------------------------ */
+
+
+/* is addr in some of the allocated regions? */
+static int in_some_region(void *addr)
+{
+	BLOCK *p;
+
+	for( p = alloc_list; p; p = p->next ) {
+		if (p->start <= addr && addr < p->start + p->size)
+			return( 1 );
+	}
+	return( 0 );
+}
+
+
+static unsigned long find_free_region(unsigned long n_pages,
+				      unsigned long *total_free,
+				      unsigned long *region_free)
+{
+	unsigned short *map = stram_swap_info->swap_map;
+	unsigned long max = stram_swap_info->max;
+	unsigned long head, tail, max_start;
+	long nfree, max_free;
+
+	/* first scan the swap space for a suitable place for the allocation */
+	head = 1;
+	max_start = 0;
+	max_free = -1;
+	*total_free = 0;
+
+  start_over:
+	/* increment tail until final window size reached, and count free pages */
+	nfree = 0;
+	for( tail = head; tail-head < n_pages && tail < max; ++tail ) {
+		if (map[tail] == SWAP_MAP_BAD) {
+			head = tail+1;
+			goto start_over;
+		}
+		if (!map[tail]) {
+			++nfree;
+			++*total_free;
+		}
+	}
+	if (tail-head < n_pages)
+		goto out;
+	if (nfree > max_free) {
+		max_start = head;
+		max_free  = nfree;
+		if (max_free >= n_pages)
+			/* don't need more free pages... :-) */
+			goto out;
+	}
+
+	/* now shift the window and look for the area where as many pages as
+	 * possible are free */
+	while( tail < max ) {
+		nfree -= (map[head++] == 0);
+		if (map[tail] == SWAP_MAP_BAD) {
+			head = tail+1;
+			goto start_over;
+		}
+		if (!map[tail]) {
+			++nfree;
+			++*total_free;
+		}
+		++tail;
+		if (nfree > max_free) {
+			max_start = head;
+			max_free  = nfree;
+			if (max_free >= n_pages)
+				/* don't need more free pages... :-) */
+				goto out;
+		}
+	}
+
+  out:
+	if (max_free < 0) {
+		printk( KERN_NOTICE "get_stram_region: ST-RAM too full or fragmented "
+				"-- can't allocate %lu pages\n", n_pages );
+		return( 0 );
+	}
+
+	*region_free = max_free;
+	return( max_start );
+}
+
+
+/* setup parameters from command line */
+void __init stram_swap_setup(char *str, int *ints)
+{
+	if (ints[0] >= 1)
+		max_swap_size = ((ints[1] < 0 ? 0 : ints[1]) * 1024) & PAGE_MASK;
+}
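+
+/* E.g. booting with "stram_swap=4096" limits the swap area to 4 MB, and
+ * "stram_swap=0" disables ST-RAM swapping; the value is given in kB and is
+ * rounded down to a page boundary. */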
+
+
+/* ------------------------------------------------------------------------ */
+/*								ST-RAM device								*/
+/* ------------------------------------------------------------------------ */
+
+static int refcnt;
+
+static void do_stram_request(request_queue_t *q)
+{
+	struct request *req;
+
+	while ((req = elv_next_request(q)) != NULL) {
+		void *start = swap_start + (req->sector << 9);
+		unsigned long len = req->current_nr_sectors << 9;
+		if ((start + len) > swap_end) {
+			printk( KERN_ERR "stram: bad access beyond end of device: "
+					"block=%ld, count=%d\n",
+					req->sector,
+					req->current_nr_sectors );
+			end_request(req, 0);
+			continue;
+		}
+
+		if (req->cmd == READ) {
+			memcpy(req->buffer, start, len);
+#ifdef DO_PROC
+			stat_swap_read += N_PAGES(len);
+#endif
+		}
+		else {
+			memcpy(start, req->buffer, len);
+#ifdef DO_PROC
+			stat_swap_write += N_PAGES(len);
+#endif
+		}
+		end_request(req, 1);
+	}
+}
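+
+/* Sectors are 512 bytes, so e.g. a request starting at sector 16 reads or
+ * writes at swap_start + 8 kB ((16 << 9) == 8192). */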
+
+
+static int stram_open( struct inode *inode, struct file *filp )
+{
+	if (filp != MAGIC_FILE_P) {
+		printk( KERN_NOTICE "Only kernel can open ST-RAM device\n" );
+		return( -EPERM );
+	}
+	if (refcnt)
+		return( -EBUSY );
+	++refcnt;
+	return( 0 );
+}
+
+static int stram_release( struct inode *inode, struct file *filp )
+{
+	if (filp != MAGIC_FILE_P) {
+		printk( KERN_NOTICE "Only kernel can close ST-RAM device\n" );
+		return( -EPERM );
+	}
+	if (refcnt > 0)
+		--refcnt;
+	return( 0 );
+}
+
+
+static struct block_device_operations stram_fops = {
+	.open =		stram_open,
+	.release =	stram_release,
+};
+
+static struct gendisk *stram_disk;
+static struct request_queue *stram_queue;
+static DEFINE_SPINLOCK(stram_lock);
+
+int __init stram_device_init(void)
+{
+	if (!MACH_IS_ATARI)
+		/* no point in initializing this, I hope */
+		return -ENXIO;
+
+	if (!max_swap_size)
+		/* swapping not enabled */
+		return -ENXIO;
+	stram_disk = alloc_disk(1);
+	if (!stram_disk)
+		return -ENOMEM;
+
+	if (register_blkdev(STRAM_MAJOR, "stram")) {
+		put_disk(stram_disk);
+		return -ENXIO;
+	}
+
+	stram_queue = blk_init_queue(do_stram_request, &stram_lock);
+	if (!stram_queue) {
+		unregister_blkdev(STRAM_MAJOR, "stram");
+		put_disk(stram_disk);
+		return -ENOMEM;
+	}
+
+	stram_disk->major = STRAM_MAJOR;
+	stram_disk->first_minor = STRAM_MINOR;
+	stram_disk->fops = &stram_fops;
+	stram_disk->queue = stram_queue;
+	sprintf(stram_disk->disk_name, "stram");
+	set_capacity(stram_disk, (swap_end - swap_start)/512);
+	add_disk(stram_disk);
+	return 0;
+}
+
+
+
+/* ------------------------------------------------------------------------ */
+/*							Misc Utility Functions							*/
+/* ------------------------------------------------------------------------ */
+
+/* reserve a range of pages */
+static void reserve_region(void *start, void *end)
+{
+	reserve_bootmem (virt_to_phys(start), end - start);
+}
+
+#endif /* CONFIG_STRAM_SWAP */
+
+
+/* ------------------------------------------------------------------------ */
+/*							  Region Management								*/
+/* ------------------------------------------------------------------------ */
+
+
+/* insert a region into the alloced list (sorted) */
+static BLOCK *add_region( void *addr, unsigned long size )
+{
+	BLOCK **p, *n = NULL;
+	int i;
+
+	for( i = 0; i < N_STATIC_BLOCKS; ++i ) {
+		if (static_blocks[i].flags & BLOCK_FREE) {
+			n = &static_blocks[i];
+			n->flags = 0;
+			break;
+		}
+	}
+	if (!n && mem_init_done) {
+		/* if the static block pool is exhausted and we can already call
+		 * kmalloc() (after mem_init()), try that */
+		n = kmalloc( sizeof(BLOCK), GFP_KERNEL );
+		if (n)
+			n->flags = BLOCK_KMALLOCED;
+	}
+	if (!n) {
+		printk( KERN_ERR "Out of memory for ST-RAM descriptor blocks\n" );
+		return( NULL );
+	}
+	n->start = addr;
+	n->size  = size;
+
+	for( p = &alloc_list; *p; p = &((*p)->next) )
+		if ((*p)->start > addr) break;
+	n->next = *p;
+	*p = n;
+
+	return( n );
+}
+
+
+/* find a region (by start addr) in the alloced list */
+static BLOCK *find_region( void *addr )
+{
+	BLOCK *p;
+
+	for( p = alloc_list; p; p = p->next ) {
+		if (p->start == addr)
+			return( p );
+		if (p->start > addr)
+			break;
+	}
+	return( NULL );
+}
+
+
+/* remove a block from the alloced list */
+static int remove_region( BLOCK *block )
+{
+	BLOCK **p;
+
+	for( p = &alloc_list; *p; p = &((*p)->next) )
+		if (*p == block) break;
+	if (!*p)
+		return( 0 );
+
+	*p = block->next;
+	if (block->flags & BLOCK_KMALLOCED)
+		kfree( block );
+	else
+		block->flags |= BLOCK_FREE;
+	return( 1 );
+}
+
+
+
+/* ------------------------------------------------------------------------ */
+/*						 /proc statistics file stuff						*/
+/* ------------------------------------------------------------------------ */
+
+#ifdef DO_PROC
+
+#define	PRINT_PROC(fmt,args...) len += sprintf( buf+len, fmt, ##args )
+
+int get_stram_list( char *buf )
+{
+	int len = 0;
+	BLOCK *p;
+#ifdef CONFIG_STRAM_SWAP
+	int i;
+	unsigned short *map = stram_swap_info->swap_map;
+	unsigned long max = stram_swap_info->max;
+	unsigned free = 0, used = 0, rsvd = 0;
+#endif
+
+#ifdef CONFIG_STRAM_SWAP
+	if (max_swap_size) {
+		for( i = 1; i < max; ++i ) {
+			if (!map[i])
+				++free;
+			else if (map[i] == SWAP_MAP_BAD)
+				++rsvd;
+			else
+				++used;
+		}
+		PRINT_PROC(
+			"Total ST-RAM:      %8u kB\n"
+			"Total ST-RAM swap: %8lu kB\n"
+			"Free swap:         %8u kB\n"
+			"Used swap:         %8u kB\n"
+			"Allocated swap:    %8u kB\n"
+			"Swap Reads:        %8u\n"
+			"Swap Writes:       %8u\n"
+			"Swap Forced Reads: %8u\n",
+			(stram_end - stram_start) >> 10,
+			(max-1) << (PAGE_SHIFT-10),
+			free << (PAGE_SHIFT-10),
+			used << (PAGE_SHIFT-10),
+			rsvd << (PAGE_SHIFT-10),
+			stat_swap_read,
+			stat_swap_write,
+			stat_swap_force );
+	}
+	else {
+#endif
+		PRINT_PROC( "ST-RAM swapping disabled\n" );
+		PRINT_PROC("Total ST-RAM:      %8u kB\n",
+			   (stram_end - stram_start) >> 10);
+#ifdef CONFIG_STRAM_SWAP
+	}
+#endif
+
+	PRINT_PROC( "Allocated regions:\n" );
+	for( p = alloc_list; p; p = p->next ) {
+		if (len + 50 >= PAGE_SIZE)
+			break;
+		PRINT_PROC("0x%08lx-0x%08lx: %s (",
+			   virt_to_phys(p->start),
+			   virt_to_phys(p->start+p->size-1),
+			   p->owner);
+		if (p->flags & BLOCK_GFP)
+			PRINT_PROC( "page-alloced)\n" );
+		else if (p->flags & BLOCK_INSWAP)
+			PRINT_PROC( "in swap)\n" );
+		else
+			PRINT_PROC( "??)\n" );
+	}
+
+	return( len );
+}
+
+#endif
+
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  tab-width: 4
+ * End:
+ */
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
new file mode 100644
index 0000000..6df7fb6
--- /dev/null
+++ b/arch/m68k/atari/time.c
@@ -0,0 +1,348 @@
+/*
+ * linux/arch/m68k/atari/time.c
+ *
+ * Atari time and real time clock stuff
+ *
+ * Assembled from parts of the former atari/config.c, 97-12-18, by Roman Hodek
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/mc146818rtc.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+
+#include <asm/atariints.h>
+
+void __init
+atari_sched_init(irqreturn_t (*timer_routine)(int, void *, struct pt_regs *))
+{
+    /* set Timer C data Register */
+    mfp.tim_dt_c = INT_TICKS;
+    /* start timer C, div = 1:100 */
+    mfp.tim_ct_cd = (mfp.tim_ct_cd & 15) | 0x60;
+    /* install interrupt service routine for MFP Timer C */
+    request_irq(IRQ_MFP_TIMC, timer_routine, IRQ_TYPE_SLOW,
+                "timer", timer_routine);
+}
+
+/* ++andreas: gettimeoffset fixed to check for pending interrupt */
+
+#define TICK_SIZE 10000
+
+/* This is always executed with interrupts disabled.  */
+unsigned long atari_gettimeoffset (void)
+{
+  unsigned long ticks, offset = 0;
+
+  /* read MFP timer C current value */
+  ticks = mfp.tim_dt_c;
+  /* The probability of underflow is less than 2% */
+  if (ticks > INT_TICKS - INT_TICKS / 50)
+    /* Check for pending timer interrupt */
+    if (mfp.int_pn_b & (1 << 5))
+      offset = TICK_SIZE;
+
+  ticks = INT_TICKS - ticks;
+  ticks = ticks * 10000L / INT_TICKS;
+
+  return ticks + offset;
+}
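+
+/* The value returned above is the time since the last tick in microseconds,
+ * e.g. roughly 5000 when timer C is about halfway through its 10 ms period
+ * (HZ = 100, TICK_SIZE = 10000). */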
+
+
+static void mste_read(struct MSTE_RTC *val)
+{
+#define COPY(v) val->v=(mste_rtc.v & 0xf)
+	do {
+		COPY(sec_ones) ; COPY(sec_tens) ; COPY(min_ones) ;
+		COPY(min_tens) ; COPY(hr_ones) ; COPY(hr_tens) ;
+		COPY(weekday) ; COPY(day_ones) ; COPY(day_tens) ;
+		COPY(mon_ones) ; COPY(mon_tens) ; COPY(year_ones) ;
+		COPY(year_tens) ;
+	/* prevent reading the clock while it is changing */
+	} while (val->sec_ones != (mste_rtc.sec_ones & 0xf));
+#undef COPY
+}
+
+static void mste_write(struct MSTE_RTC *val)
+{
+#define COPY(v) mste_rtc.v=val->v
+	do {
+		COPY(sec_ones) ; COPY(sec_tens) ; COPY(min_ones) ;
+		COPY(min_tens) ; COPY(hr_ones) ; COPY(hr_tens) ;
+		COPY(weekday) ; COPY(day_ones) ; COPY(day_tens) ;
+		COPY(mon_ones) ; COPY(mon_tens) ; COPY(year_ones) ;
+		COPY(year_tens) ;
+	/* prevent writing the clock while it is changing */
+	} while (val->sec_ones != (mste_rtc.sec_ones & 0xf));
+#undef COPY
+}
+
+#define	RTC_READ(reg)				\
+    ({	unsigned char	__val;			\
+		(void) atari_writeb(reg,&tt_rtc.regsel);	\
+		__val = tt_rtc.data;		\
+		__val;				\
+	})
+
+#define	RTC_WRITE(reg,val)			\
+    do {					\
+		atari_writeb(reg,&tt_rtc.regsel);	\
+		tt_rtc.data = (val);		\
+	} while(0)
+
+
+#define HWCLK_POLL_INTERVAL	5
+
+int atari_mste_hwclk( int op, struct rtc_time *t )
+{
+    int hour, year;
+    int hr24=0;
+    struct MSTE_RTC val;
+
+    mste_rtc.mode=(mste_rtc.mode | 1);
+    hr24=mste_rtc.mon_tens & 1;
+    mste_rtc.mode=(mste_rtc.mode & ~1);
+
+    if (op) {
+        /* write: prepare values */
+
+        val.sec_ones = t->tm_sec % 10;
+        val.sec_tens = t->tm_sec / 10;
+        val.min_ones = t->tm_min % 10;
+        val.min_tens = t->tm_min / 10;
+        hour = t->tm_hour;
+        if (!hr24) {
+	    if (hour > 11)
+		hour += 20 - 12;
+	    if (hour == 0 || hour == 20)
+		hour += 12;
+        }
+        val.hr_ones = hour % 10;
+        val.hr_tens = hour / 10;
+        val.day_ones = t->tm_mday % 10;
+        val.day_tens = t->tm_mday / 10;
+        val.mon_ones = (t->tm_mon+1) % 10;
+        val.mon_tens = (t->tm_mon+1) / 10;
+        year = t->tm_year - 80;
+        val.year_ones = year % 10;
+        val.year_tens = year / 10;
+        val.weekday = t->tm_wday;
+        mste_write(&val);
+        mste_rtc.mode=(mste_rtc.mode | 1);
+        val.year_ones = (year % 4);	/* leap year register */
+        mste_rtc.mode=(mste_rtc.mode & ~1);
+    }
+    else {
+        mste_read(&val);
+        t->tm_sec = val.sec_ones + val.sec_tens * 10;
+        t->tm_min = val.min_ones + val.min_tens * 10;
+        hour = val.hr_ones + val.hr_tens * 10;
+	if (!hr24) {
+	    if (hour == 12 || hour == 12 + 20)
+		hour -= 12;
+	    if (hour >= 20)
+                hour += 12 - 20;
+        }
+	t->tm_hour = hour;
+	t->tm_mday = val.day_ones + val.day_tens * 10;
+        t->tm_mon  = val.mon_ones + val.mon_tens * 10 - 1;
+        t->tm_year = val.year_ones + val.year_tens * 10 + 80;
+        t->tm_wday = val.weekday;
+    }
+    return 0;
+}
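+
+/* E.g. writing 14:35:09 (with the RTC in 24-hour mode) stores hr_tens=1,
+ * hr_ones=4, min_tens=3, min_ones=5, sec_tens=0, sec_ones=9 above. */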
+
+int atari_tt_hwclk( int op, struct rtc_time *t )
+{
+    int sec=0, min=0, hour=0, day=0, mon=0, year=0, wday=0;
+    unsigned long	flags;
+    unsigned char	ctrl;
+    int pm = 0;
+
+    ctrl = RTC_READ(RTC_CONTROL); /* control registers are
+                                   * independent from the UIP */
+
+    if (op) {
+        /* write: prepare values */
+
+        sec  = t->tm_sec;
+        min  = t->tm_min;
+        hour = t->tm_hour;
+        day  = t->tm_mday;
+        mon  = t->tm_mon + 1;
+        year = t->tm_year - atari_rtc_year_offset;
+        wday = t->tm_wday + (t->tm_wday >= 0);
+
+        if (!(ctrl & RTC_24H)) {
+	    if (hour > 11) {
+		pm = 0x80;
+		if (hour != 12)
+		    hour -= 12;
+	    }
+	    else if (hour == 0)
+		hour = 12;
+        }
+
+        if (!(ctrl & RTC_DM_BINARY)) {
+            BIN_TO_BCD(sec);
+            BIN_TO_BCD(min);
+            BIN_TO_BCD(hour);
+            BIN_TO_BCD(day);
+            BIN_TO_BCD(mon);
+            BIN_TO_BCD(year);
+            if (wday >= 0) BIN_TO_BCD(wday);
+        }
+    }
+
+    /* Reading/writing the clock registers is a bit critical due to
+     * the regular update cycle of the RTC. While an update is in
+     * progress, registers 0..9 shouldn't be touched.
+     * The problem is solved like this: If an update is currently in
+     * progress (the UIP bit is set), the process sleeps for a while
+     * (50ms). This really should be enough, since the update cycle
+     * normally needs 2 ms.
+     * If the UIP bit reads as 0, we have at least 244 usecs until the
+     * update starts. This should be enough... But to be sure,
+     * additionally the RTC_SET bit is set to prevent an update cycle.
+     */
+
+    while( RTC_READ(RTC_FREQ_SELECT) & RTC_UIP ) {
+        current->state = TASK_INTERRUPTIBLE;
+        schedule_timeout(HWCLK_POLL_INTERVAL);
+    }
+
+    local_irq_save(flags);
+    RTC_WRITE( RTC_CONTROL, ctrl | RTC_SET );
+    if (!op) {
+        sec  = RTC_READ( RTC_SECONDS );
+        min  = RTC_READ( RTC_MINUTES );
+        hour = RTC_READ( RTC_HOURS );
+        day  = RTC_READ( RTC_DAY_OF_MONTH );
+        mon  = RTC_READ( RTC_MONTH );
+        year = RTC_READ( RTC_YEAR );
+        wday = RTC_READ( RTC_DAY_OF_WEEK );
+    }
+    else {
+        RTC_WRITE( RTC_SECONDS, sec );
+        RTC_WRITE( RTC_MINUTES, min );
+        RTC_WRITE( RTC_HOURS, hour + pm);
+        RTC_WRITE( RTC_DAY_OF_MONTH, day );
+        RTC_WRITE( RTC_MONTH, mon );
+        RTC_WRITE( RTC_YEAR, year );
+        if (wday >= 0) RTC_WRITE( RTC_DAY_OF_WEEK, wday );
+    }
+    RTC_WRITE( RTC_CONTROL, ctrl & ~RTC_SET );
+    local_irq_restore(flags);
+
+    if (!op) {
+        /* read: adjust values */
+
+        if (hour & 0x80) {
+	    hour &= ~0x80;
+	    pm = 1;
+	}
+
+	if (!(ctrl & RTC_DM_BINARY)) {
+            BCD_TO_BIN(sec);
+            BCD_TO_BIN(min);
+            BCD_TO_BIN(hour);
+            BCD_TO_BIN(day);
+            BCD_TO_BIN(mon);
+            BCD_TO_BIN(year);
+            BCD_TO_BIN(wday);
+        }
+
+        if (!(ctrl & RTC_24H)) {
+	    if (!pm && hour == 12)
+		hour = 0;
+	    else if (pm && hour != 12)
+		hour += 12;
+        }
+
+        t->tm_sec  = sec;
+        t->tm_min  = min;
+        t->tm_hour = hour;
+        t->tm_mday = day;
+        t->tm_mon  = mon - 1;
+        t->tm_year = year + atari_rtc_year_offset;
+        t->tm_wday = wday - 1;
+    }
+
+    return( 0 );
+}
+
+
+int atari_mste_set_clock_mmss (unsigned long nowtime)
+{
+    short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
+    struct MSTE_RTC val;
+    unsigned char rtc_minutes;
+
+    mste_read(&val);
+    rtc_minutes= val.min_ones + val.min_tens * 10;
+    if ((rtc_minutes < real_minutes
+         ? real_minutes - rtc_minutes
+         : rtc_minutes - real_minutes) < 30)
+    {
+        val.sec_ones = real_seconds % 10;
+        val.sec_tens = real_seconds / 10;
+        val.min_ones = real_minutes % 10;
+        val.min_tens = real_minutes / 10;
+        mste_write(&val);
+    }
+    else
+        return -1;
+    return 0;
+}
+
+int atari_tt_set_clock_mmss (unsigned long nowtime)
+{
+    int retval = 0;
+    short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
+    unsigned char save_control, save_freq_select, rtc_minutes;
+
+    save_control = RTC_READ (RTC_CONTROL); /* tell the clock it's being set */
+    RTC_WRITE (RTC_CONTROL, save_control | RTC_SET);
+
+    save_freq_select = RTC_READ (RTC_FREQ_SELECT); /* stop and reset prescaler */
+    RTC_WRITE (RTC_FREQ_SELECT, save_freq_select | RTC_DIV_RESET2);
+
+    rtc_minutes = RTC_READ (RTC_MINUTES);
+    if (!(save_control & RTC_DM_BINARY))
+        BCD_TO_BIN (rtc_minutes);
+
+    /* Since we're only adjusting minutes and seconds, don't interfere
+       with hour overflow.  This avoids messing with unknown time zones
+       but requires your RTC not to be off by more than 30 minutes.  */
+    if ((rtc_minutes < real_minutes
+         ? real_minutes - rtc_minutes
+         : rtc_minutes - real_minutes) < 30)
+        {
+            if (!(save_control & RTC_DM_BINARY))
+                {
+                    BIN_TO_BCD (real_seconds);
+                    BIN_TO_BCD (real_minutes);
+                }
+            RTC_WRITE (RTC_SECONDS, real_seconds);
+            RTC_WRITE (RTC_MINUTES, real_minutes);
+        }
+    else
+        retval = -1;
+
+    RTC_WRITE (RTC_FREQ_SELECT, save_freq_select);
+    RTC_WRITE (RTC_CONTROL, save_control);
+    return retval;
+}
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  tab-width: 8
+ * End:
+ */
diff --git a/arch/m68k/bvme6000/Makefile b/arch/m68k/bvme6000/Makefile
new file mode 100644
index 0000000..2348e6c
--- /dev/null
+++ b/arch/m68k/bvme6000/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Linux arch/m68k/bvme6000 source directory
+#
+
+obj-y		:= config.o bvmeints.o rtc.o
diff --git a/arch/m68k/bvme6000/bvmeints.c b/arch/m68k/bvme6000/bvmeints.c
new file mode 100644
index 0000000..298a8df
--- /dev/null
+++ b/arch/m68k/bvme6000/bvmeints.c
@@ -0,0 +1,160 @@
+/*
+ * arch/m68k/bvme6000/bvmeints.c
+ *
+ * Copyright (C) 1997 Richard Hirst [richard@sleepie.demon.co.uk]
+ *
+ * based on amiints.c -- Amiga Linux interrupt handling code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file README.legal in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/seq_file.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+
+static irqreturn_t bvme6000_defhand (int irq, void *dev_id, struct pt_regs *fp);
+
+/*
+ * This should ideally be 4 elements only, for speed.
+ */
+
+static struct {
+	irqreturn_t	(*handler)(int, void *, struct pt_regs *);
+	unsigned long	flags;
+	void		*dev_id;
+	const char	*devname;
+	unsigned	count;
+} irq_tab[256];
+
+/*
+ * void bvme6000_init_IRQ (void)
+ *
+ * Parameters:	None
+ *
+ * Returns:	Nothing
+ *
+ * This function is called during kernel startup to initialize
+ * the bvme6000 IRQ handling routines.
+ */
+
+void bvme6000_init_IRQ (void)
+{
+	int i;
+
+	for (i = 0; i < 256; i++) {
+		irq_tab[i].handler = bvme6000_defhand;
+		irq_tab[i].flags = IRQ_FLG_STD;
+		irq_tab[i].dev_id = NULL;
+		irq_tab[i].devname = NULL;
+		irq_tab[i].count = 0;
+	}
+}
+
+int bvme6000_request_irq(unsigned int irq,
+		irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                unsigned long flags, const char *devname, void *dev_id)
+{
+	if (irq > 255) {
+		printk("%s: Incorrect IRQ %d from %s\n", __FUNCTION__, irq, devname);
+		return -ENXIO;
+	}
+#if 0
+	/* Nothing special about auto-vectored devices for the BVME6000,
+	 * but treat them specially to avoid changes elsewhere.
+	 */
+
+	if (irq >= VEC_INT1 && irq <= VEC_INT7)
+		return cpu_request_irq(irq - VEC_SPUR, handler, flags,
+						devname, dev_id);
+#endif
+	if (!(irq_tab[irq].flags & IRQ_FLG_STD)) {
+		if (irq_tab[irq].flags & IRQ_FLG_LOCK) {
+			printk("%s: IRQ %d from %s is not replaceable\n",
+			       __FUNCTION__, irq, irq_tab[irq].devname);
+			return -EBUSY;
+		}
+		if (flags & IRQ_FLG_REPLACE) {
+			printk("%s: %s can't replace IRQ %d from %s\n",
+			       __FUNCTION__, devname, irq, irq_tab[irq].devname);
+			return -EBUSY;
+		}
+	}
+	irq_tab[irq].handler = handler;
+	irq_tab[irq].flags   = flags;
+	irq_tab[irq].dev_id  = dev_id;
+	irq_tab[irq].devname = devname;
+	return 0;
+}
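+
+/*
+ * Illustrative usage sketch (not part of this file): a hypothetical on-board
+ * driver would register its handler for a VMEbus vector like this.  The
+ * vector number, handler and device name are made up for the example.
+ */
+#if 0
+static irqreturn_t example_handler(int vec, void *dev_id, struct pt_regs *fp)
+{
+	/* ... service the device ... */
+	return IRQ_HANDLED;
+}
+
+static int example_attach(void)
+{
+	return bvme6000_request_irq(0x40, example_handler, 0, "example", NULL);
+}
+#endif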
+
+void bvme6000_free_irq(unsigned int irq, void *dev_id)
+{
+	if (irq > 255) {
+		printk("%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
+		return;
+	}
+#if 0
+	if (irq >= VEC_INT1 && irq <= VEC_INT7) {
+		cpu_free_irq(irq - VEC_SPUR, dev_id);
+		return;
+	}
+#endif
+	if (irq_tab[irq].dev_id != dev_id)
+		printk("%s: Removing probably wrong IRQ %d from %s\n",
+		       __FUNCTION__, irq, irq_tab[irq].devname);
+
+	irq_tab[irq].handler = bvme6000_defhand;
+	irq_tab[irq].flags   = IRQ_FLG_STD;
+	irq_tab[irq].dev_id  = NULL;
+	irq_tab[irq].devname = NULL;
+}
+
+irqreturn_t bvme6000_process_int (unsigned long vec, struct pt_regs *fp)
+{
+	if (vec > 255) {
+		printk ("bvme6000_process_int: Illegal vector %ld", vec);
+		return IRQ_NONE;
+	} else {
+		irq_tab[vec].count++;
+		irq_tab[vec].handler(vec, irq_tab[vec].dev_id, fp);
+		return IRQ_HANDLED;
+	}
+}
+
+int show_bvme6000_interrupts(struct seq_file *p, void *v)
+{
+	int i;
+
+	for (i = 0; i < 256; i++) {
+		if (irq_tab[i].count)
+			seq_printf(p, "Vec 0x%02x: %8d  %s\n",
+			    i, irq_tab[i].count,
+			    irq_tab[i].devname ? irq_tab[i].devname : "free");
+	}
+	return 0;
+}
+
+
+static irqreturn_t bvme6000_defhand (int irq, void *dev_id, struct pt_regs *fp)
+{
+	printk ("Unknown interrupt 0x%02x\n", irq);
+	return IRQ_NONE;
+}
+
+void bvme6000_enable_irq (unsigned int irq)
+{
+}
+
+
+void bvme6000_disable_irq (unsigned int irq)
+{
+}
+
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
new file mode 100644
index 0000000..3ffc84f
--- /dev/null
+++ b/arch/m68k/bvme6000/config.c
@@ -0,0 +1,380 @@
+/*
+ *  arch/m68k/bvme6000/config.c
+ *
+ *  Copyright (C) 1997 Richard Hirst [richard@sleepie.demon.co.uk]
+ *
+ * Based on:
+ *
+ *  linux/amiga/config.c
+ *
+ *  Copyright (C) 1993 Hamish Macdonald
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file README.legal in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/major.h>
+#include <linux/genhd.h>
+#include <linux/rtc.h>
+#include <linux/interrupt.h>
+
+#include <asm/bootinfo.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/rtc.h>
+#include <asm/machdep.h>
+#include <asm/bvme6000hw.h>
+
+extern irqreturn_t bvme6000_process_int (int level, struct pt_regs *regs);
+extern void bvme6000_init_IRQ (void);
+extern void bvme6000_free_irq (unsigned int, void *);
+extern int  show_bvme6000_interrupts(struct seq_file *, void *);
+extern void bvme6000_enable_irq (unsigned int);
+extern void bvme6000_disable_irq (unsigned int);
+static void bvme6000_get_model(char *model);
+static int  bvme6000_get_hardware_list(char *buffer);
+extern int  bvme6000_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long flags, const char *devname, void *dev_id);
+extern void bvme6000_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
+extern unsigned long bvme6000_gettimeoffset (void);
+extern int bvme6000_hwclk (int, struct rtc_time *);
+extern int bvme6000_set_clock_mmss (unsigned long);
+extern void bvme6000_reset (void);
+extern void bvme6000_waitbut(void);
+void bvme6000_set_vectors (void);
+
+static unsigned char bcd2bin (unsigned char b);
+static unsigned char bin2bcd (unsigned char b);
+
+/* Save tick handler routine pointer, will point to do_timer() in
+ * kernel/sched.c, called via bvme6000_process_int() */
+
+static irqreturn_t (*tick_handler)(int, void *, struct pt_regs *);
+
+
+int bvme6000_parse_bootinfo(const struct bi_record *bi)
+{
+	if (bi->tag == BI_VME_TYPE)
+		return 0;
+	else
+		return 1;
+}
+
+void bvme6000_reset(void)
+{
+	volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE;
+
+	printk ("\r\n\nCalled bvme6000_reset\r\n"
+			"\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r");
+	/* The string of returns is to delay the reset until the whole
+	 * message is output. */
+	/* Enable the watchdog, via PIT port C bit 4 */
+
+	pit->pcddr	|= 0x10;	/* WDOG enable */
+
+	while(1)
+		;
+}
+
+static void bvme6000_get_model(char *model)
+{
+    sprintf(model, "BVME%d000", m68k_cputype == CPU_68060 ? 6 : 4);
+}
+
+
+/* No hardware options on BVME6000? */
+
+static int bvme6000_get_hardware_list(char *buffer)
+{
+    *buffer = '\0';
+    return 0;
+}
+
+
+void __init config_bvme6000(void)
+{
+    volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE;
+
+    /* Board type is only set by newer versions of vmelilo/tftplilo */
+    if (!vme_brdtype) {
+	if (m68k_cputype == CPU_68060)
+	    vme_brdtype = VME_TYPE_BVME6000;
+	else
+	    vme_brdtype = VME_TYPE_BVME4000;
+    }
+#if 0
+    /* Call bvme6000_set_vectors() so ABORT will work, along with BVMBug
+     * debugger.  Note trap_init() will splat the abort vector, but
+     * bvme6000_init_IRQ() will put it back again.  Hopefully. */
+
+    bvme6000_set_vectors();
+#endif
+
+    mach_max_dma_address = 0xffffffff;
+    mach_sched_init      = bvme6000_sched_init;
+    mach_init_IRQ        = bvme6000_init_IRQ;
+    mach_gettimeoffset   = bvme6000_gettimeoffset;
+    mach_hwclk           = bvme6000_hwclk;
+    mach_set_clock_mmss	 = bvme6000_set_clock_mmss;
+    mach_reset		 = bvme6000_reset;
+    mach_free_irq	 = bvme6000_free_irq;
+    mach_process_int	 = bvme6000_process_int;
+    mach_get_irq_list	 = show_bvme6000_interrupts;
+    mach_request_irq	 = bvme6000_request_irq;
+    enable_irq		 = bvme6000_enable_irq;
+    disable_irq          = bvme6000_disable_irq;
+    mach_get_model       = bvme6000_get_model;
+    mach_get_hardware_list = bvme6000_get_hardware_list;
+
+    printk ("Board is %sconfigured as a System Controller\n",
+		*config_reg_ptr & BVME_CONFIG_SW1 ? "" : "not ");
+
+    /* Now do the PIT configuration */
+
+    pit->pgcr	= 0x00;	/* Unidirectional 8 bit, no handshake for now */
+    pit->psrr	= 0x18;	/* PIACK and PIRQ functions enabled */
+    pit->pacr	= 0x00;	/* Sub Mode 00, H2 i/p, no DMA */
+    pit->padr	= 0x00;	/* Just to be tidy! */
+    pit->paddr	= 0x00;	/* All inputs for now (safest) */
+    pit->pbcr	= 0x80;	/* Sub Mode 1x, H4 i/p, no DMA */
+    pit->pbdr	= 0xbc | (*config_reg_ptr & BVME_CONFIG_SW1 ? 0 : 0x40);
+			/* PRI, SYSCON?, Level3, SCC clks from xtal */
+    pit->pbddr	= 0xf3;	/* Mostly outputs */
+    pit->pcdr	= 0x01;	/* PA transceiver disabled */
+    pit->pcddr	= 0x03;	/* WDOG disable */
+
+    /* Disable snooping for Ethernet and VME accesses */
+
+    bvme_acr_addrctl = 0;
+}
+
+
+irqreturn_t bvme6000_abort_int (int irq, void *dev_id, struct pt_regs *fp)
+{
+        unsigned long *new = (unsigned long *)vectors;
+        unsigned long *old = (unsigned long *)0xf8000000;
+
+        /* Wait for button release */
+        while (*(volatile unsigned char *)BVME_LOCAL_IRQ_STAT & BVME_ABORT_STATUS)
+                ;
+
+        *(new+4) = *(old+4);            /* Illegal instruction */
+        *(new+9) = *(old+9);            /* Trace */
+        *(new+47) = *(old+47);          /* Trap #15 */
+        *(new+0x1f) = *(old+0x1f);      /* ABORT switch */
+	return IRQ_HANDLED;
+}
+
+
+static irqreturn_t bvme6000_timer_int (int irq, void *dev_id, struct pt_regs *fp)
+{
+    volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
+    unsigned char msr = rtc->msr & 0xc0;
+
+    rtc->msr = msr | 0x20;		/* Ack the interrupt */
+
+    return tick_handler(irq, dev_id, fp);
+}
+
+/*
+ * Set up the RTC timer 1 to mode 2, so T1 output toggles every 5ms
+ * (40000 x 125ns).  It will interrupt every 10ms, when T1 goes low.
+ * So, when reading the elapsed time, you should read timer1,
+ * subtract it from 39999, and then add 40000 if T1 is high.
+ * That gives you the number of 125ns ticks into the 10ms period,
+ * so divide by 8 to get the microsecond result.
+ */
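+/*
+ * A worked example of the arithmetic above, using an arbitrary sample
+ * value: if the latched timer1 count is 25000, then 39999 - 25000 =
+ * 14999 ticks of 125ns have elapsed, i.e. 14999 / 8 = 1874 microseconds
+ * into the current 10ms period.  The half-cycle adjustment of 40000
+ * ticks (5ms) and the extra 10ms for a pending interrupt are applied by
+ * bvme6000_gettimeoffset() below.
+ */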
+
+void bvme6000_sched_init (irqreturn_t (*timer_routine)(int, void *, struct pt_regs *))
+{
+    volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
+    unsigned char msr = rtc->msr & 0xc0;
+
+    rtc->msr = 0;	/* Ensure timer registers accessible */
+
+    tick_handler = timer_routine;
+    if (request_irq(BVME_IRQ_RTC, bvme6000_timer_int, 0,
+				"timer", bvme6000_timer_int))
+	panic ("Couldn't register timer int");
+
+    rtc->t1cr_omr = 0x04;	/* Mode 2, ext clk */
+    rtc->t1msb = 39999 >> 8;
+    rtc->t1lsb = 39999 & 0xff;
+    rtc->irr_icr1 &= 0xef;	/* Route timer 1 to INTR pin */
+    rtc->msr = 0x40;		/* Access int.cntrl, etc */
+    rtc->pfr_icr0 = 0x80;	/* Just timer 1 ints enabled */
+    rtc->irr_icr1 = 0;
+    rtc->t1cr_omr = 0x0a;	/* INTR+T1 active lo, push-pull */
+    rtc->t0cr_rtmr &= 0xdf;	/* Stop timers in standby */
+    rtc->msr = 0;		/* Access timer 1 control */
+    rtc->t1cr_omr = 0x05;	/* Mode 2, ext clk, GO */
+
+    rtc->msr = msr;
+
+    if (request_irq(BVME_IRQ_ABORT, bvme6000_abort_int, 0,
+				"abort", bvme6000_abort_int))
+	panic ("Couldn't register abort int");
+}
+
+
+/* This is always executed with interrupts disabled.  */
+
+/*
+ * NOTE:  Don't accept any readings within 5us of rollover, as
+ * the T1INT bit may be a little slow getting set.  There is also
+ * a fault in the chip, meaning that reads may produce invalid
+ * results...
+ */
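+/*
+ * This appears to be why the sampling loop below retries raw counts
+ * above 39960 (39999 - 39960 = 39 ticks of 125ns, i.e. just under 5us
+ * after rollover) and also retries when successive reads differ by more
+ * than 80 ticks (10us), which would suggest an invalid read.
+ */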
+
+unsigned long bvme6000_gettimeoffset (void)
+{
+    volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
+    volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE;
+    unsigned char msr = rtc->msr & 0xc0;
+    unsigned char t1int, t1op;
+    unsigned long v = 800000, ov;
+
+    rtc->msr = 0;	/* Ensure timer registers accessible */
+
+    do {
+	ov = v;
+	t1int = rtc->msr & 0x20;
+	t1op  = pit->pcdr & 0x04;
+	rtc->t1cr_omr |= 0x40;		/* Latch timer1 */
+	v = rtc->t1msb << 8;		/* Read timer1 */
+	v |= rtc->t1lsb;		/* Read timer1 */
+    } while (t1int != (rtc->msr & 0x20) ||
+		t1op != (pit->pcdr & 0x04) ||
+			abs(ov-v) > 80 ||
+				v > 39960);
+
+    v = 39999 - v;
+    if (!t1op)				/* If in second half cycle.. */
+	v += 40000;
+    v /= 8;				/* Convert ticks to microseconds */
+    if (t1int)
+	v += 10000;			/* Int pending, + 10ms */
+    rtc->msr = msr;
+
+    return v;
+}
+
+static unsigned char bcd2bin (unsigned char b)
+{
+	return ((b>>4)*10 + (b&15));
+}
+
+static unsigned char bin2bcd (unsigned char b)
+{
+	return (((b/10)*16) + (b%10));
+}
+
+
+/*
+ * Looks like op is non-zero for setting the clock, and zero for
+ * reading the clock.
+ *
+ *  struct hwclk_time {
+ *         unsigned        sec;       0..59
+ *         unsigned        min;       0..59
+ *         unsigned        hour;      0..23
+ *         unsigned        day;       1..31
+ *         unsigned        mon;       0..11
+ *         unsigned        year;      00...
+ *         int             wday;      0..6, 0 is Sunday, -1 means unknown/don't set
+ * };
+ */
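+/*
+ * A minimal usage sketch (hypothetical caller, for illustration only):
+ *
+ *	struct rtc_time t;
+ *
+ *	bvme6000_hwclk(0, &t);		op == 0: read the RTC into t
+ *	t.tm_min = 30;
+ *	bvme6000_hwclk(1, &t);		op != 0: write t back to the RTC
+ */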
+
+int bvme6000_hwclk(int op, struct rtc_time *t)
+{
+	volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
+	unsigned char msr = rtc->msr & 0xc0;
+
+	rtc->msr = 0x40;	/* Ensure clock and real-time-mode-register
+				 * are accessible */
+	if (op)
+	{	/* Write.... */
+		rtc->t0cr_rtmr = t->tm_year%4;
+		rtc->bcd_tenms = 0;
+		rtc->bcd_sec = bin2bcd(t->tm_sec);
+		rtc->bcd_min = bin2bcd(t->tm_min);
+		rtc->bcd_hr  = bin2bcd(t->tm_hour);
+		rtc->bcd_dom = bin2bcd(t->tm_mday);
+		rtc->bcd_mth = bin2bcd(t->tm_mon + 1);
+		rtc->bcd_year = bin2bcd(t->tm_year%100);
+		if (t->tm_wday >= 0)
+			rtc->bcd_dow = bin2bcd(t->tm_wday+1);
+		rtc->t0cr_rtmr = t->tm_year%4 | 0x08;
+	}
+	else
+	{	/* Read....  */
+		do {
+			t->tm_sec  = bcd2bin(rtc->bcd_sec);
+			t->tm_min  = bcd2bin(rtc->bcd_min);
+			t->tm_hour = bcd2bin(rtc->bcd_hr);
+			t->tm_mday = bcd2bin(rtc->bcd_dom);
+			t->tm_mon  = bcd2bin(rtc->bcd_mth)-1;
+			t->tm_year = bcd2bin(rtc->bcd_year);
+			if (t->tm_year < 70)
+				t->tm_year += 100;
+			t->tm_wday = bcd2bin(rtc->bcd_dow)-1;
+		} while (t->tm_sec != bcd2bin(rtc->bcd_sec));
+	}
+
+	rtc->msr = msr;
+
+	return 0;
+}
+
+/*
+ * Set the minutes and seconds from seconds value 'nowtime'.  Fail if
+ * clock is out by > 30 minutes.  Logic lifted from atari code.
+ * Algorithm is to wait for the 10ms register to change, and then to
+ * wait a short while, and then set it.
+ */
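+/*
+ * Waiting for the 10ms register to change presumably synchronises the
+ * update with a fresh 10ms tick, so the minute/second writes below are
+ * unlikely to race a carry out of the sub-second counters; the short
+ * busy loop is the "wait a short while" mentioned above.
+ */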
+
+int bvme6000_set_clock_mmss (unsigned long nowtime)
+{
+	int retval = 0;
+	short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
+	unsigned char rtc_minutes, rtc_tenms;
+	volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
+	unsigned char msr = rtc->msr & 0xc0;
+	unsigned long flags;
+	volatile int i;
+
+	rtc->msr = 0;		/* Ensure clock accessible */
+	rtc_minutes = bcd2bin (rtc->bcd_min);
+
+	if ((rtc_minutes < real_minutes
+		? real_minutes - rtc_minutes
+			: rtc_minutes - real_minutes) < 30)
+	{
+		local_irq_save(flags);
+		rtc_tenms = rtc->bcd_tenms;
+		while (rtc_tenms == rtc->bcd_tenms)
+			;
+		for (i = 0; i < 1000; i++)
+			;
+		rtc->bcd_min = bin2bcd(real_minutes);
+		rtc->bcd_sec = bin2bcd(real_seconds);
+		local_irq_restore(flags);
+	}
+	else
+		retval = -1;
+
+	rtc->msr = msr;
+
+	return retval;
+}
+
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c
new file mode 100644
index 0000000..c6b2a41
--- /dev/null
+++ b/arch/m68k/bvme6000/rtc.c
@@ -0,0 +1,182 @@
+/*
+ *	Real Time Clock interface for Linux on the BVME6000
+ *
+ * Based on the PC driver by Paul Gortmaker.
+ */
+
+#define RTC_VERSION		"1.00"
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/mc146818rtc.h>	/* For struct rtc_time and ioctls, etc */
+#include <linux/smp_lock.h>
+#include <asm/bvme6000hw.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/setup.h>
+
+/*
+ *	We sponge a minor off of the misc major. No need to slurp
+ *	up another valuable major dev number for this. If you add
+ *	an ioctl, make sure you don't conflict with SPARC's RTC
+ *	ioctls.
+ */
+
+#define BCD2BIN(val) (((val)&15) + ((val)>>4)*10)
+#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10)
+
+static unsigned char days_in_mo[] =
+{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+
+static char rtc_status;
+
+static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+		     unsigned long arg)
+{
+	volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
+	unsigned char msr;
+	unsigned long flags;
+	struct rtc_time wtime;
+
+	switch (cmd) {
+	case RTC_RD_TIME:	/* Read the time/date from RTC	*/
+	{
+		local_irq_save(flags);
+		/* Ensure clock and real-time-mode-register are accessible */
+		msr = rtc->msr & 0xc0;
+		rtc->msr = 0x40;
+		memset(&wtime, 0, sizeof(struct rtc_time));
+		do {
+			wtime.tm_sec =  BCD2BIN(rtc->bcd_sec);
+			wtime.tm_min =  BCD2BIN(rtc->bcd_min);
+			wtime.tm_hour = BCD2BIN(rtc->bcd_hr);
+			wtime.tm_mday =  BCD2BIN(rtc->bcd_dom);
+			wtime.tm_mon =  BCD2BIN(rtc->bcd_mth)-1;
+			wtime.tm_year = BCD2BIN(rtc->bcd_year);
+			if (wtime.tm_year < 70)
+				wtime.tm_year += 100;
+			wtime.tm_wday = BCD2BIN(rtc->bcd_dow)-1;
+		} while (wtime.tm_sec != BCD2BIN(rtc->bcd_sec));
+		rtc->msr = msr;
+		local_irq_restore(flags);
+		return copy_to_user((void *)arg, &wtime, sizeof wtime) ?
+								-EFAULT : 0;
+	}
+	case RTC_SET_TIME:	/* Set the RTC */
+	{
+		struct rtc_time rtc_tm;
+		unsigned char mon, day, hrs, min, sec, leap_yr;
+		unsigned int yrs;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
+
+		if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
+				   sizeof(struct rtc_time)))
+			return -EFAULT;
+
+		yrs = rtc_tm.tm_year;
+		if (yrs < 1900)
+			yrs += 1900;
+		mon = rtc_tm.tm_mon + 1;   /* tm_mon starts at zero */
+		day = rtc_tm.tm_mday;
+		hrs = rtc_tm.tm_hour;
+		min = rtc_tm.tm_min;
+		sec = rtc_tm.tm_sec;
+
+		leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
+
+		if ((mon > 12) || (mon < 1) || (day == 0))
+			return -EINVAL;
+
+		if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
+			return -EINVAL;
+
+		if ((hrs >= 24) || (min >= 60) || (sec >= 60))
+			return -EINVAL;
+
+		if (yrs >= 2070)
+			return -EINVAL;
+
+		local_irq_save(flags);
+		/* Ensure clock and real-time-mode-register are accessible */
+		msr = rtc->msr & 0xc0;
+		rtc->msr = 0x40;
+
+		rtc->t0cr_rtmr = yrs%4;
+		rtc->bcd_tenms = 0;
+		rtc->bcd_sec   = BIN2BCD(sec);
+		rtc->bcd_min   = BIN2BCD(min);
+		rtc->bcd_hr    = BIN2BCD(hrs);
+		rtc->bcd_dom   = BIN2BCD(day);
+		rtc->bcd_mth   = BIN2BCD(mon);
+		rtc->bcd_year  = BIN2BCD(yrs%100);
+		if (rtc_tm.tm_wday >= 0)
+			rtc->bcd_dow = BIN2BCD(rtc_tm.tm_wday+1);
+		rtc->t0cr_rtmr = yrs%4 | 0x08;
+
+		rtc->msr = msr;
+		local_irq_restore(flags);
+		return 0;
+	}
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ *	We enforce only one user at a time here with the open/close.
+ *	This driver has no interrupt handling, so there is no interrupt
+ *	data to clear on open; release simply marks the device free again.
+ */
+
+static int rtc_open(struct inode *inode, struct file *file)
+{
+	if(rtc_status)
+		return -EBUSY;
+
+	rtc_status = 1;
+	return 0;
+}
+
+static int rtc_release(struct inode *inode, struct file *file)
+{
+	lock_kernel();
+	rtc_status = 0;
+	unlock_kernel();
+	return 0;
+}
+
+/*
+ *	The various file operations we support.
+ */
+
+static struct file_operations rtc_fops = {
+	.ioctl =	rtc_ioctl,
+	.open =		rtc_open,
+	.release =	rtc_release,
+};
+
+static struct miscdevice rtc_dev = {
+	.minor =	RTC_MINOR,
+	.name =		"rtc",
+	.fops =		&rtc_fops
+};
+
+int __init rtc_DP8570A_init(void)
+{
+	if (!MACH_IS_BVME6000)
+		return -ENODEV;
+
+	printk(KERN_INFO "DP8570A Real Time Clock Driver v%s\n", RTC_VERSION);
+	return misc_register(&rtc_dev);
+}
+
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
new file mode 100644
index 0000000..c1b2333
--- /dev/null
+++ b/arch/m68k/configs/amiga_defconfig
@@ -0,0 +1,968 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-m68k
+# Sun Dec 26 11:22:54 2004
+#
+CONFIG_M68K=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-amiga"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Platform dependent setup
+#
+# CONFIG_SUN3 is not set
+CONFIG_AMIGA=y
+# CONFIG_ATARI is not set
+# CONFIG_MAC is not set
+# CONFIG_APOLLO is not set
+# CONFIG_VME is not set
+# CONFIG_HP300 is not set
+# CONFIG_SUN3X is not set
+# CONFIG_Q40 is not set
+
+#
+# Processor type
+#
+CONFIG_M68020=y
+CONFIG_M68030=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_MMU_MOTOROLA=y
+CONFIG_M68KFPU_EMU=y
+CONFIG_M68KFPU_EMU_EXTRAPREC=y
+# CONFIG_M68KFPU_EMU_ONLY is not set
+# CONFIG_ADVANCED is not set
+
+#
+# General setup
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+CONFIG_ZORRO=y
+CONFIG_AMIGA_PCMCIA=y
+# CONFIG_HEARTBEAT is not set
+CONFIG_PROC_HARDWARE=y
+CONFIG_ISA=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_ZORRO_NAMES=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+CONFIG_PARPORT=m
+# CONFIG_PARPORT_PC is not set
+CONFIG_PARPORT_AMIGA=m
+CONFIG_PARPORT_MFC3=m
+# CONFIG_PARPORT_OTHER is not set
+CONFIG_PARPORT_1284=y
+
+#
+# Plug and Play support
+#
+# CONFIG_PNP is not set
+
+#
+# Block devices
+#
+CONFIG_AMIGA_FLOPPY=y
+CONFIG_AMIGA_Z2RAM=y
+# CONFIG_BLK_DEV_XD is not set
+# CONFIG_PARIDE is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDETAPE=m
+CONFIG_BLK_DEV_IDEFLOPPY=m
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_GAYLE=y
+CONFIG_BLK_DEV_IDEDOUBLER=y
+CONFIG_BLK_DEV_BUDDHA=y
+# CONFIG_IDE_CHIPSETS is not set
+# CONFIG_BLK_DEV_IDEDMA is not set
+# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_DEBUG is not set
+CONFIG_A3000_SCSI=y
+CONFIG_A2091_SCSI=y
+CONFIG_GVP11_SCSI=y
+CONFIG_CYBERSTORM_SCSI=y
+CONFIG_CYBERSTORMII_SCSI=y
+CONFIG_BLZ2060_SCSI=y
+CONFIG_BLZ1230_SCSI=y
+CONFIG_FASTLANE_SCSI=y
+CONFIG_OKTAGON_SCSI=y
+
+#
+# Old CD-ROM drivers (not SCSI, not IDE)
+#
+# CONFIG_CD_NO_IDESCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TUNNEL=m
+CONFIG_IP_TCPDIAG=m
+CONFIG_IP_TCPDIAG_IPV6=y
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+CONFIG_IP_NF_CONNTRACK_MARK=y
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_REALM=m
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+CONFIG_IP_NF_MATCH_CONNMARK=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_CONNMARK=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_COMPAT_IPFWADM=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+CONFIG_NET_CLS_ROUTE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+CONFIG_EQUALIZER=m
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_ARIADNE=m
+CONFIG_A2065=m
+CONFIG_HYDRA=m
+CONFIG_ZORRO8390=m
+CONFIG_APNE=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_LANCE is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+# CONFIG_NET_PCI is not set
+# CONFIG_NET_POCKET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+CONFIG_PLIP=m
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_SHAPER=m
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=m
+CONFIG_SERIO_SERPORT=m
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_KEYBOARD_AMIGA=y
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_INPORT is not set
+# CONFIG_MOUSE_LOGIBM is not set
+# CONFIG_MOUSE_PC110PAD is not set
+CONFIG_MOUSE_AMIGA=y
+# CONFIG_MOUSE_VSXXXAA is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDDLER is not set
+# CONFIG_JOYSTICK_DB9 is not set
+# CONFIG_JOYSTICK_GAMECON is not set
+# CONFIG_JOYSTICK_TURBOGRAFX is not set
+CONFIG_JOYSTICK_AMIGA=m
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+CONFIG_A2232=m
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_PRINTER=m
+# CONFIG_LP_CONSOLE is not set
+# CONFIG_PPDEV is not set
+# CONFIG_TIPAR is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_GEN_RTC=m
+CONFIG_GEN_RTC_X=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_TILEBLITTING is not set
+CONFIG_FB_CIRRUS=m
+CONFIG_FB_AMIGA=y
+CONFIG_FB_AMIGA_OCS=y
+CONFIG_FB_AMIGA_ECS=y
+CONFIG_FB_AMIGA_AGA=y
+CONFIG_FB_FM2=y
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_FONT_PEARL_8x8=y
+
+#
+# Logo configuration
+#
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+
+#
+# Sound
+#
+CONFIG_SOUND=m
+CONFIG_DMASOUND_PAULA=m
+CONFIG_DMASOUND=m
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Character devices
+#
+CONFIG_AMIGA_BUILTIN_SERIAL=y
+# CONFIG_WHIPPET_SERIAL is not set
+CONFIG_MULTIFACE_III_TTY=m
+# CONFIG_SERIAL_CONSOLE is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_XFS_POSIX_ACL is not set
+CONFIG_MINIX_FS=y
+# CONFIG_ROMFS_FS is not set
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_CRAMFS=m
+# CONFIG_VXFS_FS is not set
+CONFIG_HPFS_FS=m
+# CONFIG_QNX4FS_FS is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_RPCSEC_GSS_KRB5=m
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+# CONFIG_CIFS is not set
+CONFIG_NCP_FS=m
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+CONFIG_NCPFS_NFS_NS=y
+CONFIG_NCPFS_OS2_NS=y
+# CONFIG_NCPFS_SMALLDOS is not set
+CONFIG_NCPFS_NLS=y
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_CODA_FS=m
+# CONFIG_CODA_FS_OLD_API is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
new file mode 100644
index 0000000..648361b
--- /dev/null
+++ b/arch/m68k/configs/apollo_defconfig
@@ -0,0 +1,825 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-m68k
+# Sun Dec 26 11:22:58 2004
+#
+CONFIG_M68K=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-apollo"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Platform dependent setup
+#
+# CONFIG_SUN3 is not set
+# CONFIG_AMIGA is not set
+# CONFIG_ATARI is not set
+# CONFIG_MAC is not set
+CONFIG_APOLLO=y
+# CONFIG_VME is not set
+# CONFIG_HP300 is not set
+# CONFIG_SUN3X is not set
+# CONFIG_Q40 is not set
+
+#
+# Processor type
+#
+CONFIG_M68020=y
+CONFIG_M68030=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_MMU_MOTOROLA=y
+CONFIG_M68KFPU_EMU=y
+CONFIG_M68KFPU_EMU_EXTRAPREC=y
+# CONFIG_M68KFPU_EMU_ONLY is not set
+# CONFIG_ADVANCED is not set
+
+#
+# General setup
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TUNNEL=m
+CONFIG_IP_TCPDIAG=m
+CONFIG_IP_TCPDIAG_IPV6=y
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+CONFIG_IP_NF_CONNTRACK_MARK=y
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_REALM=m
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+CONFIG_IP_NF_MATCH_CONNMARK=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_CONNMARK=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_COMPAT_IPFWADM=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+CONFIG_NET_CLS_ROUTE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+CONFIG_EQUALIZER=m
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_APOLLO_ELPLUS=y
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_SHAPER=m
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=m
+CONFIG_SERIO_SERPORT=m
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_SERIAL=m
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_GEN_RTC=y
+CONFIG_GEN_RTC_X=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Character devices
+#
+CONFIG_DN_SERIAL=y
+CONFIG_SERIAL_CONSOLE=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_XFS_POSIX_ACL is not set
+CONFIG_MINIX_FS=y
+# CONFIG_ROMFS_FS is not set
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_CRAMFS=m
+# CONFIG_VXFS_FS is not set
+CONFIG_HPFS_FS=m
+# CONFIG_QNX4FS_FS is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+# CONFIG_CIFS is not set
+CONFIG_NCP_FS=m
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+CONFIG_NCPFS_NFS_NS=y
+CONFIG_NCPFS_OS2_NS=y
+# CONFIG_NCPFS_SMALLDOS is not set
+CONFIG_NCPFS_NLS=y
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_CODA_FS=m
+# CONFIG_CODA_FS_OLD_API is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
new file mode 100644
index 0000000..1fb25c0
--- /dev/null
+++ b/arch/m68k/configs/atari_defconfig
@@ -0,0 +1,880 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-m68k
+# Sun Dec 26 11:23:11 2004
+#
+CONFIG_M68K=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-atari"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Platform dependent setup
+#
+# CONFIG_SUN3 is not set
+# CONFIG_AMIGA is not set
+CONFIG_ATARI=y
+# CONFIG_MAC is not set
+# CONFIG_APOLLO is not set
+# CONFIG_VME is not set
+# CONFIG_HP300 is not set
+# CONFIG_SUN3X is not set
+# CONFIG_Q40 is not set
+
+#
+# Processor type
+#
+CONFIG_M68020=y
+CONFIG_M68030=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_MMU_MOTOROLA=y
+CONFIG_M68KFPU_EMU=y
+CONFIG_M68KFPU_EMU_EXTRAPREC=y
+# CONFIG_M68KFPU_EMU_ONLY is not set
+# CONFIG_ADVANCED is not set
+
+#
+# General setup
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+CONFIG_STRAM_PROC=y
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+CONFIG_PARPORT=m
+# CONFIG_PARPORT_PC is not set
+CONFIG_PARPORT_ATARI=m
+# CONFIG_PARPORT_OTHER is not set
+CONFIG_PARPORT_1284=y
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+CONFIG_ATARI_FLOPPY=y
+# CONFIG_PARIDE is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDETAPE=m
+CONFIG_BLK_DEV_IDEFLOPPY=m
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_FALCON_IDE=y
+# CONFIG_BLK_DEV_IDEDMA is not set
+# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TUNNEL=m
+CONFIG_IP_TCPDIAG=m
+CONFIG_IP_TCPDIAG_IPV6=y
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+CONFIG_IP_NF_CONNTRACK_MARK=y
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_REALM=m
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+CONFIG_IP_NF_MATCH_CONNMARK=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_CONNMARK=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_COMPAT_IPFWADM=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+CONFIG_NET_CLS_ROUTE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+CONFIG_EQUALIZER=m
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_ATARILANCE=m
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PLIP is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_SHAPER=m
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_M68K_BEEP=m
+CONFIG_INPUT_UINPUT=m
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_PRINTER=m
+# CONFIG_LP_CONSOLE is not set
+# CONFIG_PPDEV is not set
+# CONFIG_TIPAR is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_NVRAM=y
+CONFIG_GEN_RTC=y
+CONFIG_GEN_RTC_X=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_TILEBLITTING is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+# CONFIG_LOGO is not set
+
+#
+# Sound
+#
+CONFIG_SOUND=m
+CONFIG_DMASOUND_ATARI=m
+CONFIG_DMASOUND=m
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Character devices
+#
+CONFIG_ATARI_MFPSER=m
+CONFIG_ATARI_SCC=y
+CONFIG_ATARI_SCC_DMA=y
+CONFIG_ATARI_MIDI=m
+CONFIG_ATARI_DSP56K=m
+# CONFIG_SERIAL_CONSOLE is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+CONFIG_REISERFS_PROC_INFO=y
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_XFS_FS=m
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_XFS_POSIX_ACL is not set
+CONFIG_MINIX_FS=y
+# CONFIG_ROMFS_FS is not set
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_CRAMFS=m
+# CONFIG_VXFS_FS is not set
+CONFIG_HPFS_FS=m
+# CONFIG_QNX4FS_FS is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V4 is not set
+CONFIG_NFSD_TCP=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=m
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+# CONFIG_CIFS is not set
+CONFIG_NCP_FS=m
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+CONFIG_NCPFS_NFS_NS=y
+CONFIG_NCPFS_OS2_NS=y
+# CONFIG_NCPFS_SMALLDOS is not set
+CONFIG_NCPFS_NLS=y
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_CODA_FS=m
+# CONFIG_CODA_FS_OLD_API is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_ATARI_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
new file mode 100644
index 0000000..f1f2cf0
--- /dev/null
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -0,0 +1,824 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-m68k
+# Sun Dec 26 11:23:15 2004
+#
+CONFIG_M68K=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-bvme6000"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Platform dependent setup
+#
+# CONFIG_SUN3 is not set
+# CONFIG_AMIGA is not set
+# CONFIG_ATARI is not set
+# CONFIG_MAC is not set
+# CONFIG_APOLLO is not set
+CONFIG_VME=y
+# CONFIG_MVME147 is not set
+# CONFIG_MVME16x is not set
+CONFIG_BVME6000=y
+# CONFIG_HP300 is not set
+# CONFIG_SUN3X is not set
+# CONFIG_Q40 is not set
+
+#
+# Processor type
+#
+# CONFIG_M68020 is not set
+# CONFIG_M68030 is not set
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_MMU_MOTOROLA=y
+# CONFIG_M68KFPU_EMU is not set
+# CONFIG_ADVANCED is not set
+
+#
+# General setup
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+CONFIG_PROC_HARDWARE=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TUNNEL=m
+CONFIG_IP_TCPDIAG=m
+CONFIG_IP_TCPDIAG_IPV6=y
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+CONFIG_IP_NF_CONNTRACK_MARK=y
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_REALM=m
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+CONFIG_IP_NF_MATCH_CONNMARK=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_CONNMARK=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_COMPAT_IPFWADM=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+CONFIG_ATALK=m
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+CONFIG_NET_CLS_ROUTE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+CONFIG_EQUALIZER=m
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_BVME6000_NET=y
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_SHAPER=m
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=m
+CONFIG_SERIO_SERPORT=m
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_SERIAL=m
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_GEN_RTC=m
+CONFIG_GEN_RTC_X=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Character devices
+#
+CONFIG_BVME6000_SCC=y
+CONFIG_SERIAL_CONSOLE=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_XFS_POSIX_ACL is not set
+CONFIG_MINIX_FS=y
+# CONFIG_ROMFS_FS is not set
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_CRAMFS=m
+# CONFIG_VXFS_FS is not set
+CONFIG_HPFS_FS=m
+# CONFIG_QNX4FS_FS is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+# CONFIG_CIFS is not set
+CONFIG_NCP_FS=m
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+CONFIG_NCPFS_NFS_NS=y
+CONFIG_NCPFS_OS2_NS=y
+# CONFIG_NCPFS_SMALLDOS is not set
+CONFIG_NCPFS_NLS=y
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_CODA_FS=m
+# CONFIG_CODA_FS_OLD_API is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=m
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
new file mode 100644
index 0000000..53dde43
--- /dev/null
+++ b/arch/m68k/configs/hp300_defconfig
@@ -0,0 +1,824 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-m68k
+# Sun Dec 26 11:23:40 2004
+#
+CONFIG_M68K=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-hp300"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Platform dependent setup
+#
+# CONFIG_SUN3 is not set
+# CONFIG_AMIGA is not set
+# CONFIG_ATARI is not set
+# CONFIG_MAC is not set
+# CONFIG_APOLLO is not set
+# CONFIG_VME is not set
+CONFIG_HP300=y
+CONFIG_DIO=y
+# CONFIG_SUN3X is not set
+# CONFIG_Q40 is not set
+
+#
+# Processor type
+#
+CONFIG_M68020=y
+CONFIG_M68030=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_MMU_MOTOROLA=y
+CONFIG_M68KFPU_EMU=y
+CONFIG_M68KFPU_EMU_EXTRAPREC=y
+# CONFIG_M68KFPU_EMU_ONLY is not set
+# CONFIG_ADVANCED is not set
+
+#
+# General setup
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TUNNEL=m
+CONFIG_IP_TCPDIAG=m
+CONFIG_IP_TCPDIAG_IPV6=y
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+CONFIG_IP_NF_CONNTRACK_MARK=y
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_REALM=m
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+CONFIG_IP_NF_MATCH_CONNMARK=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_CONNMARK=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_COMPAT_IPFWADM=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+CONFIG_NET_CLS_ROUTE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+CONFIG_EQUALIZER=m
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_HPLANCE=y
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_SHAPER=m
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=m
+CONFIG_SERIO_SERPORT=m
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_SERIAL=m
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_GEN_RTC=y
+CONFIG_GEN_RTC_X=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Character devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_XFS_POSIX_ACL is not set
+CONFIG_MINIX_FS=y
+# CONFIG_ROMFS_FS is not set
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_CRAMFS=m
+# CONFIG_VXFS_FS is not set
+CONFIG_HPFS_FS=m
+# CONFIG_QNX4FS_FS is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+# CONFIG_CIFS is not set
+CONFIG_NCP_FS=m
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+CONFIG_NCPFS_NFS_NS=y
+CONFIG_NCPFS_OS2_NS=y
+# CONFIG_NCPFS_SMALLDOS is not set
+CONFIG_NCPFS_NLS=y
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_CODA_FS=m
+# CONFIG_CODA_FS_OLD_API is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
new file mode 100644
index 0000000..2452dac
--- /dev/null
+++ b/arch/m68k/configs/mac_defconfig
@@ -0,0 +1,903 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-m68k
+# Sun Dec 26 11:23:44 2004
+#
+CONFIG_M68K=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-mac"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Platform dependent setup
+#
+# CONFIG_SUN3 is not set
+# CONFIG_AMIGA is not set
+# CONFIG_ATARI is not set
+CONFIG_MAC=y
+CONFIG_NUBUS=y
+CONFIG_M68K_L2_CACHE=y
+# CONFIG_APOLLO is not set
+# CONFIG_VME is not set
+# CONFIG_HP300 is not set
+# CONFIG_SUN3X is not set
+# CONFIG_Q40 is not set
+
+#
+# Processor type
+#
+CONFIG_M68020=y
+CONFIG_M68030=y
+CONFIG_M68040=y
+# CONFIG_M68060 is not set
+CONFIG_MMU_MOTOROLA=y
+CONFIG_M68KFPU_EMU=y
+CONFIG_M68KFPU_EMU_EXTRAPREC=y
+# CONFIG_M68KFPU_EMU_ONLY is not set
+# CONFIG_ADVANCED is not set
+
+#
+# General setup
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+# CONFIG_HEARTBEAT is not set
+CONFIG_PROC_HARDWARE=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDETAPE=m
+CONFIG_BLK_DEV_IDEFLOPPY=m
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_MAC_IDE=y
+# CONFIG_BLK_DEV_IDEDMA is not set
+# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+CONFIG_MAC_SCSI=y
+CONFIG_SCSI_MAC_ESP=y
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Macintosh device drivers
+#
+CONFIG_ADB=y
+CONFIG_ADB_MACII=y
+CONFIG_ADB_MACIISI=y
+CONFIG_ADB_IOP=y
+CONFIG_ADB_PMU68K=y
+CONFIG_ADB_CUDA=y
+CONFIG_INPUT_ADBHID=y
+CONFIG_MAC_EMUMOUSEBTN=y
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TUNNEL=m
+CONFIG_IP_TCPDIAG=m
+CONFIG_IP_TCPDIAG_IPV6=y
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+CONFIG_IP_NF_CONNTRACK_MARK=y
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_REALM=m
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+CONFIG_IP_NF_MATCH_CONNMARK=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_CONNMARK=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_COMPAT_IPFWADM=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=y
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+CONFIG_NET_CLS_ROUTE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+CONFIG_EQUALIZER=m
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+CONFIG_MAC8390=y
+CONFIG_MAC89x0=y
+CONFIG_MACSONIC=y
+CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_SMC is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+# CONFIG_PPP_ASYNC is not set
+# CONFIG_PPP_SYNC_TTY is not set
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_SHAPER=m
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=m
+CONFIG_SERIO_SERPORT=m
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_SERIAL=m
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_GEN_RTC=m
+CONFIG_GEN_RTC_X=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_TILEBLITTING is not set
+CONFIG_FB_VALKYRIE=y
+CONFIG_FB_MAC=y
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_FONT_6x11=y
+
+#
+# Logo configuration
+#
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_LOGO_MAC_CLUT224=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Character devices
+#
+CONFIG_MAC_SCC=y
+CONFIG_MAC_HID=y
+CONFIG_MAC_ADBKEYCODES=y
+CONFIG_SERIAL_CONSOLE=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_XFS_POSIX_ACL is not set
+CONFIG_MINIX_FS=y
+# CONFIG_ROMFS_FS is not set
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=y
+CONFIG_HFSPLUS_FS=y
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_CRAMFS=m
+# CONFIG_VXFS_FS is not set
+CONFIG_HPFS_FS=m
+# CONFIG_QNX4FS_FS is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_RPCSEC_GSS_KRB5=m
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+# CONFIG_CIFS is not set
+CONFIG_NCP_FS=m
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+CONFIG_NCPFS_NFS_NS=y
+CONFIG_NCPFS_OS2_NS=y
+# CONFIG_NCPFS_SMALLDOS is not set
+CONFIG_NCPFS_NLS=y
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_CODA_FS=m
+# CONFIG_CODA_FS_OLD_API is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+CONFIG_LDM_DEBUG=y
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
new file mode 100644
index 0000000..ea38e87
--- /dev/null
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -0,0 +1,843 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-m68k
+# Sun Dec 26 11:23:49 2004
+#
+CONFIG_M68K=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-mvme147"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Platform dependent setup
+#
+# CONFIG_SUN3 is not set
+# CONFIG_AMIGA is not set
+# CONFIG_ATARI is not set
+# CONFIG_MAC is not set
+# CONFIG_APOLLO is not set
+CONFIG_VME=y
+CONFIG_MVME147=y
+# CONFIG_MVME16x is not set
+# CONFIG_BVME6000 is not set
+# CONFIG_HP300 is not set
+# CONFIG_SUN3X is not set
+# CONFIG_Q40 is not set
+
+#
+# Processor type
+#
+# CONFIG_M68020 is not set
+CONFIG_M68030=y
+# CONFIG_M68040 is not set
+# CONFIG_M68060 is not set
+CONFIG_MMU_MOTOROLA=y
+# CONFIG_M68KFPU_EMU is not set
+# CONFIG_ADVANCED is not set
+
+#
+# General setup
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+CONFIG_PROC_HARDWARE=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+CONFIG_MVME147_SCSI=y
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TUNNEL=m
+CONFIG_IP_TCPDIAG=m
+CONFIG_IP_TCPDIAG_IPV6=y
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+CONFIG_IP_NF_CONNTRACK_MARK=y
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_REALM=m
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+CONFIG_IP_NF_MATCH_CONNMARK=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_CONNMARK=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_COMPAT_IPFWADM=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+CONFIG_NET_CLS_ROUTE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+CONFIG_EQUALIZER=m
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_MVME147_NET=y
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_SHAPER=m
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=m
+CONFIG_SERIO_SERPORT=m
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_SERIAL=m
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_GEN_RTC=m
+CONFIG_GEN_RTC_X=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_TILEBLITTING is not set
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Character devices
+#
+CONFIG_MVME147_SCC=y
+CONFIG_SERIAL_CONSOLE=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_XFS_POSIX_ACL is not set
+CONFIG_MINIX_FS=y
+# CONFIG_ROMFS_FS is not set
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_DEVFS_FS=y
+# CONFIG_DEVFS_MOUNT is not set
+# CONFIG_DEVFS_DEBUG is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_CRAMFS=m
+# CONFIG_VXFS_FS is not set
+CONFIG_HPFS_FS=m
+# CONFIG_QNX4FS_FS is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+# CONFIG_CIFS is not set
+CONFIG_NCP_FS=m
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+CONFIG_NCPFS_NLS=y
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_CODA_FS=m
+# CONFIG_CODA_FS_OLD_API is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
new file mode 100644
index 0000000..f931a64
--- /dev/null
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -0,0 +1,842 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-m68k
+# Sun Dec 26 11:23:53 2004
+#
+CONFIG_M68K=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-mvme16x"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Platform dependent setup
+#
+# CONFIG_SUN3 is not set
+# CONFIG_AMIGA is not set
+# CONFIG_ATARI is not set
+# CONFIG_MAC is not set
+# CONFIG_APOLLO is not set
+CONFIG_VME=y
+# CONFIG_MVME147 is not set
+CONFIG_MVME16x=y
+# CONFIG_BVME6000 is not set
+# CONFIG_HP300 is not set
+# CONFIG_SUN3X is not set
+# CONFIG_Q40 is not set
+
+#
+# Processor type
+#
+# CONFIG_M68020 is not set
+# CONFIG_M68030 is not set
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_MMU_MOTOROLA=y
+# CONFIG_M68KFPU_EMU is not set
+# CONFIG_ADVANCED is not set
+
+#
+# General setup
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+CONFIG_PROC_HARDWARE=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TUNNEL=m
+CONFIG_IP_TCPDIAG=m
+CONFIG_IP_TCPDIAG_IPV6=y
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+CONFIG_IP_NF_CONNTRACK_MARK=y
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_REALM=m
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+CONFIG_IP_NF_MATCH_CONNMARK=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_CONNMARK=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_COMPAT_IPFWADM=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+CONFIG_NET_CLS_ROUTE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+CONFIG_EQUALIZER=m
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_MVME16x_NET=y
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_SHAPER=m
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=m
+CONFIG_SERIO_SERPORT=m
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_SERIAL=m
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_GEN_RTC=m
+CONFIG_GEN_RTC_X=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_TILEBLITTING is not set
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Character devices
+#
+CONFIG_MVME162_SCC=y
+CONFIG_SERIAL_CONSOLE=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_XFS_POSIX_ACL is not set
+CONFIG_MINIX_FS=y
+# CONFIG_ROMFS_FS is not set
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_DEVFS_FS=y
+# CONFIG_DEVFS_MOUNT is not set
+# CONFIG_DEVFS_DEBUG is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_CRAMFS=m
+# CONFIG_VXFS_FS is not set
+CONFIG_HPFS_FS=m
+# CONFIG_QNX4FS_FS is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+# CONFIG_CIFS is not set
+CONFIG_NCP_FS=m
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+CONFIG_NCPFS_NLS=y
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_CODA_FS=m
+# CONFIG_CODA_FS_OLD_API is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
new file mode 100644
index 0000000..713020c
--- /dev/null
+++ b/arch/m68k/configs/q40_defconfig
@@ -0,0 +1,915 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-m68k
+# Sun Dec 26 11:23:57 2004
+#
+CONFIG_M68K=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-q40"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Platform dependent setup
+#
+# CONFIG_SUN3 is not set
+# CONFIG_AMIGA is not set
+# CONFIG_ATARI is not set
+# CONFIG_MAC is not set
+# CONFIG_APOLLO is not set
+# CONFIG_VME is not set
+# CONFIG_HP300 is not set
+# CONFIG_SUN3X is not set
+CONFIG_Q40=y
+
+#
+# Processor type
+#
+# CONFIG_M68020 is not set
+# CONFIG_M68030 is not set
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_MMU_MOTOROLA=y
+CONFIG_M68KFPU_EMU=y
+CONFIG_M68KFPU_EMU_EXTRAPREC=y
+# CONFIG_M68KFPU_EMU_ONLY is not set
+# CONFIG_ADVANCED is not set
+
+#
+# General setup
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+# CONFIG_HEARTBEAT is not set
+CONFIG_PROC_HARDWARE=y
+CONFIG_ISA=y
+CONFIG_GENERIC_ISA_DMA=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+# CONFIG_PNP is not set
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_XD is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDETAPE=m
+CONFIG_BLK_DEV_IDEFLOPPY=m
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_Q40IDE=y
+# CONFIG_IDE_CHIPSETS is not set
+# CONFIG_BLK_DEV_IDEDMA is not set
+# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Old CD-ROM drivers (not SCSI, not IDE)
+#
+# CONFIG_CD_NO_IDESCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TUNNEL=m
+CONFIG_IP_TCPDIAG=m
+CONFIG_IP_TCPDIAG_IPV6=y
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+CONFIG_IP_NF_CONNTRACK_MARK=y
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_REALM=m
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+CONFIG_IP_NF_MATCH_CONNMARK=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_CONNMARK=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_COMPAT_IPFWADM=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+CONFIG_NET_CLS_ROUTE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+CONFIG_EQUALIZER=m
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_LANCE is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+CONFIG_NE2000=m
+# CONFIG_NET_PCI is not set
+# CONFIG_NET_POCKET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_SHAPER=m
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=m
+CONFIG_SERIO_SERPORT=m
+# CONFIG_SERIO_CT82C710 is not set
+CONFIG_SERIO_Q40KBD=m
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_SERIAL=m
+# CONFIG_MOUSE_INPORT is not set
+# CONFIG_MOUSE_LOGIBM is not set
+# CONFIG_MOUSE_PC110PAD is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_GEN_RTC=m
+CONFIG_GEN_RTC_X=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_TILEBLITTING is not set
+CONFIG_FB_Q40=y
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+
+#
+# Sound
+#
+CONFIG_SOUND=y
+CONFIG_DMASOUND_Q40=y
+CONFIG_DMASOUND=y
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Character devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_XFS_POSIX_ACL is not set
+CONFIG_MINIX_FS=y
+# CONFIG_ROMFS_FS is not set
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_CRAMFS=m
+# CONFIG_VXFS_FS is not set
+CONFIG_HPFS_FS=m
+# CONFIG_QNX4FS_FS is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_UFS_FS_WRITE=y
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+# CONFIG_CIFS is not set
+CONFIG_NCP_FS=m
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+CONFIG_NCPFS_NLS=y
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_CODA_FS=m
+# CONFIG_CODA_FS_OLD_API is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
new file mode 100644
index 0000000..33c4db6
--- /dev/null
+++ b/arch/m68k/configs/sun3_defconfig
@@ -0,0 +1,831 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-m68k
+# Sun Dec 26 11:24:01 2004
+#
+CONFIG_M68K=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-sun3"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Platform dependent setup
+#
+CONFIG_SUN3=y
+
+#
+# Processor type
+#
+CONFIG_M68020=y
+CONFIG_MMU_SUN3=y
+CONFIG_M68KFPU_EMU=y
+CONFIG_M68KFPU_EMU_EXTRAPREC=y
+# CONFIG_M68KFPU_EMU_ONLY is not set
+# CONFIG_ADVANCED is not set
+
+#
+# General setup
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+CONFIG_PROC_HARDWARE=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+CONFIG_SUN3_SCSI=y
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TUNNEL=m
+CONFIG_IP_TCPDIAG=m
+CONFIG_IP_TCPDIAG_IPV6=y
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+CONFIG_IP_NF_CONNTRACK_MARK=y
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_REALM=m
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+CONFIG_IP_NF_MATCH_CONNMARK=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_CONNMARK=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_COMPAT_IPFWADM=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+CONFIG_NET_CLS_ROUTE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+CONFIG_EQUALIZER=m
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_SUN3LANCE=y
+CONFIG_SUN3_82586=y
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_SHAPER=m
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=m
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_SUNKBD=y
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_SERIAL=m
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_GEN_RTC=y
+CONFIG_GEN_RTC_X=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_TILEBLITTING is not set
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Character devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_XFS_POSIX_ACL is not set
+CONFIG_MINIX_FS=y
+# CONFIG_ROMFS_FS is not set
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_DEVFS_FS=y
+CONFIG_DEVFS_MOUNT=y
+# CONFIG_DEVFS_DEBUG is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_CRAMFS=m
+# CONFIG_VXFS_FS is not set
+CONFIG_HPFS_FS=m
+# CONFIG_QNX4FS_FS is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_UFS_FS_WRITE=y
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+# CONFIG_CIFS is not set
+CONFIG_NCP_FS=m
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+CONFIG_NCPFS_NLS=y
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_CODA_FS=m
+# CONFIG_CODA_FS_OLD_API is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_SUN_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
new file mode 100644
index 0000000..fe008c9
--- /dev/null
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -0,0 +1,841 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-m68k
+# Sun Dec 26 11:24:05 2004
+#
+CONFIG_M68K=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-sun3x"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_SYSCTL=y
+CONFIG_AUDIT=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Platform dependent setup
+#
+# CONFIG_SUN3 is not set
+# CONFIG_AMIGA is not set
+# CONFIG_ATARI is not set
+# CONFIG_MAC is not set
+# CONFIG_APOLLO is not set
+# CONFIG_VME is not set
+# CONFIG_HP300 is not set
+CONFIG_SUN3X=y
+# CONFIG_Q40 is not set
+
+#
+# Processor type
+#
+# CONFIG_M68020 is not set
+CONFIG_M68030=y
+# CONFIG_M68040 is not set
+# CONFIG_M68060 is not set
+CONFIG_MMU_MOTOROLA=y
+CONFIG_M68KFPU_EMU=y
+CONFIG_M68KFPU_EMU_EXTRAPREC=y
+# CONFIG_M68KFPU_EMU_ONLY is not set
+# CONFIG_ADVANCED is not set
+
+#
+# General setup
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+CONFIG_PROC_HARDWARE=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+CONFIG_SUN3X_ESP=y
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_NETLINK_DEV=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TUNNEL=m
+CONFIG_IP_TCPDIAG=m
+CONFIG_IP_TCPDIAG_IPV6=y
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+CONFIG_IP_NF_CONNTRACK_MARK=y
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_REALM=m
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+CONFIG_IP_NF_MATCH_CONNMARK=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_DSCP=m
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_CLASSIFY=m
+CONFIG_IP_NF_TARGET_CONNMARK=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_COMPAT_IPFWADM=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+CONFIG_IP6_NF_MATCH_MAC=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_MULTIPORT=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_MARK=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AHESP=m
+CONFIG_IP6_NF_MATCH_LENGTH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+CONFIG_NET_CLS_ROUTE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+# CONFIG_BONDING is not set
+CONFIG_EQUALIZER=m
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_SUN3LANCE=y
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+CONFIG_SHAPER=m
+CONFIG_NETCONSOLE=m
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=m
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_SUNKBD=y
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_SERIAL=m
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_GEN_RTC=y
+CONFIG_GEN_RTC_X=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_TILEBLITTING is not set
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Character devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_XFS_POSIX_ACL is not set
+CONFIG_MINIX_FS=y
+# CONFIG_ROMFS_FS is not set
+CONFIG_QUOTA=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=m
+CONFIG_AUTOFS4_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_DEVFS_FS=y
+CONFIG_DEVFS_MOUNT=y
+# CONFIG_DEVFS_DEBUG is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_CRAMFS=m
+# CONFIG_VXFS_FS is not set
+CONFIG_HPFS_FS=m
+# CONFIG_QNX4FS_FS is not set
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_UFS_FS_WRITE=y
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+# CONFIG_CIFS is not set
+CONFIG_NCP_FS=m
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+CONFIG_NCPFS_NLS=y
+# CONFIG_NCPFS_EXTRAS is not set
+CONFIG_CODA_FS=m
+# CONFIG_CODA_FS_OLD_API is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_SUN_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
diff --git a/arch/m68k/defconfig b/arch/m68k/defconfig
new file mode 100644
index 0000000..78f57d3
--- /dev/null
+++ b/arch/m68k/defconfig
@@ -0,0 +1,629 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-m68k
+# Sun Dec 26 11:23:36 2004
+#
+CONFIG_M68K=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# Platform dependent setup
+#
+# CONFIG_SUN3 is not set
+CONFIG_AMIGA=y
+# CONFIG_ATARI is not set
+# CONFIG_MAC is not set
+# CONFIG_APOLLO is not set
+# CONFIG_VME is not set
+# CONFIG_HP300 is not set
+# CONFIG_SUN3X is not set
+# CONFIG_Q40 is not set
+
+#
+# Processor type
+#
+CONFIG_M68020=y
+CONFIG_M68030=y
+CONFIG_M68040=y
+# CONFIG_M68060 is not set
+CONFIG_MMU_MOTOROLA=y
+# CONFIG_M68KFPU_EMU is not set
+# CONFIG_ADVANCED is not set
+
+#
+# General setup
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_ZORRO=y
+# CONFIG_AMIGA_PCMCIA is not set
+# CONFIG_HEARTBEAT is not set
+CONFIG_PROC_HARDWARE=y
+# CONFIG_ZORRO_NAMES is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+CONFIG_AMIGA_FLOPPY=y
+# CONFIG_AMIGA_Z2RAM is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CDROM_PKTCDVD=y
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+# CONFIG_BLK_DEV_SR_VENDOR is not set
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+CONFIG_A3000_SCSI=y
+CONFIG_A2091_SCSI=y
+CONFIG_GVP11_SCSI=y
+# CONFIG_CYBERSTORM_SCSI is not set
+# CONFIG_CYBERSTORMII_SCSI is not set
+# CONFIG_BLZ2060_SCSI is not set
+# CONFIG_BLZ1230_SCSI is not set
+# CONFIG_FASTLANE_SCSI is not set
+# CONFIG_OKTAGON_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_IP_TCPDIAG=y
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_AMIGA is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_AMIGA is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_A2232 is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_TILEBLITTING is not set
+# CONFIG_FB_CIRRUS is not set
+CONFIG_FB_AMIGA=y
+CONFIG_FB_AMIGA_OCS=y
+CONFIG_FB_AMIGA_ECS=y
+CONFIG_FB_AMIGA_AGA=y
+# CONFIG_FB_FM2 is not set
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+
+#
+# Logo configuration
+#
+# CONFIG_LOGO is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Character devices
+#
+CONFIG_AMIGA_BUILTIN_SERIAL=y
+# CONFIG_MULTIFACE_III_TTY is not set
+# CONFIG_GVPIOEXT is not set
+# CONFIG_SERIAL_CONSOLE is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+CONFIG_MINIX_FS=y
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+# CONFIG_VFAT_FS is not set
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+# CONFIG_EXPORTFS is not set
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_DEBUG_KERNEL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
diff --git a/arch/m68k/fpsp040/Makefile b/arch/m68k/fpsp040/Makefile
new file mode 100644
index 0000000..0214d2f
--- /dev/null
+++ b/arch/m68k/fpsp040/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for Linux arch/m68k/fpsp040 source directory
+#
+
+obj-y    := bindec.o binstr.o decbin.o do_func.o gen_except.o get_op.o \
+	    kernel_ex.o res_func.o round.o sacos.o sasin.o satan.o satanh.o \
+	    scosh.o setox.o sgetem.o sint.o slog2.o slogn.o \
+	    smovecr.o srem_mod.o scale.o \
+	    ssin.o ssinh.o stan.o stanh.o sto_res.o stwotox.o tbldo.o util.o \
+	    x_bsun.o x_fline.o x_operr.o x_ovfl.o x_snan.o x_store.o \
+	    x_unfl.o x_unimp.o x_unsupp.o bugfix.o skeleton.o
+
+EXTRA_AFLAGS := -traditional
+EXTRA_LDFLAGS := -x
+
+$(OS_OBJS): fpsp.h
diff --git a/arch/m68k/fpsp040/README b/arch/m68k/fpsp040/README
new file mode 100644
index 0000000..f574944
--- /dev/null
+++ b/arch/m68k/fpsp040/README
@@ -0,0 +1,30 @@
+
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68040 Software Package
+
+M68040 Software Package Copyright (c) 1993, 1994 Motorola Inc.
+All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A
+PARTICULAR PURPOSE and any warranty against infringement with
+regard to the SOFTWARE (INCLUDING ANY MODIFIED VERSIONS THEREOF)
+and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS
+PROFITS, BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR
+OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OR INABILITY TO USE THE
+SOFTWARE.  Motorola assumes no responsibility for the maintenance
+and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and
+distribute the SOFTWARE so long as this entire notice is retained
+without alteration in any modified and/or redistributed versions,
+and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise
+under any patents or trademarks of Motorola, Inc.
diff --git a/arch/m68k/fpsp040/bindec.S b/arch/m68k/fpsp040/bindec.S
new file mode 100644
index 0000000..3ba446a9
--- /dev/null
+++ b/arch/m68k/fpsp040/bindec.S
@@ -0,0 +1,920 @@
+|
+|	bindec.sa 3.4 1/3/91
+|
+|	bindec
+|
+|	Description:
+|		Converts an input in extended precision format
+|		to bcd format.
+|
+|	Input:
+|		a0 points to the input extended precision value
+|		value in memory; d0 contains the k-factor sign-extended
+|		to 32-bits.  The input may be either normalized,
+|		unnormalized, or denormalized.
+|
+|	Output:	result in the FP_SCR1 space on the stack.
+|
+|	Saves and Modifies: D2-D7,A2,FP2
+|
+|	Algorithm:
+|
+|	A1.	Set RM and size ext;  Set SIGMA = sign of input.
+|		The k-factor is saved for use in d7. Clear the
+|		BINDEC_FLG for separating normalized/denormalized
+|		input.  If input is unnormalized or denormalized,
+|		normalize it.
+|
+|	A2.	Set X = abs(input).
+|
+|	A3.	Compute ILOG.
+|		ILOG is the log base 10 of the input value.  It is
+|		approximated by adding e + 0.f when the original
+|		value is viewed as 2^^e * 1.f in extended precision.
+|		This value is stored in d6.
+|
+|	A4.	Clr INEX bit.
+|		The operation in A3 above may have set INEX2.
+|
+|	A5.	Set ICTR = 0;
+|		ICTR is a flag used in A13.  It must be set before the
+|		loop entry A6.
+|
+|	A6.	Calculate LEN.
+|		LEN is the number of digits to be displayed.  The
+|		k-factor can dictate either the total number of digits,
+|		if it is a positive number, or the number of digits
+|		after the decimal point which are to be included as
+|		significant.  See the 68882 manual for examples.
+|		If LEN is computed to be greater than 17, set OPERR in
+|		USER_FPSR.  LEN is stored in d4.
+|
+|	A7.	Calculate SCALE.
+|		SCALE is equal to 10^ISCALE, where ISCALE is the number
+|		of decimal places needed to insure LEN integer digits
+|		of decimal places needed to ensure LEN integer digits
+|		sign of ISCALE, used in A9. Fp1 contains
+|		10^^(abs(ISCALE)) using a rounding mode which is a
+|		function of the original rounding mode and the signs
+|		of ISCALE and X.  A table is given in the code.
+|
+|	A8.	Clr INEX; Force RZ.
+|		The operation in A3 above may have set INEX2.
+|		RZ mode is forced for the scaling operation to ensure
+|		only one rounding error.  The grs bits are collected in
+|		the INEX flag for use in A10.
+|
+|	A9.	Scale X -> Y.
+|		The mantissa is scaled to the desired number of
+|		significant digits.  The excess digits are collected
+|		in INEX2.
+|
+|	A10.	Or in INEX.
+|		If INEX is set, a rounding error occurred.  This is
+|		compensated for by 'or-ing' in the INEX2 flag to
+|		the lsb of Y.
+|
+|	A11.	Restore original FPCR; set size ext.
+|		Perform FINT operation in the user's rounding mode.
+|		Keep the size to extended.
+|
+|	A12.	Calculate YINT = FINT(Y) according to user's rounding
+|		mode.  The FPSP routine sintd0 is used.  The output
+|		is in fp0.
+|
+|	A13.	Check for LEN digits.
+|		If the int operation results in more than LEN digits,
+|		or less than LEN - 1 digits, adjust ILOG and repeat from
+|		A6.  This test occurs only on the first pass.  If the
+|		result is exactly 10^LEN, decrement ILOG and divide
+|		the mantissa by 10.
+|
+|	A14.	Convert the mantissa to bcd.
+|		The binstr routine is used to convert the LEN digit
+|		mantissa to bcd in memory.  The input to binstr is
+|		to be a fraction; i.e. (mantissa)/10^LEN and adjusted
+|		such that the decimal point is to the left of bit 63.
+|		The bcd digits are stored in the correct position in
+|		the final string area in memory.
+|
+|	A15.	Convert the exponent to bcd.
+|		As in A14 above, the exp is converted to bcd and the
+|		digits are stored in the final string.
+|		Test the length of the final exponent string.  If the
+|		length is 4, set operr.
+|
+|	A16.	Write sign bits to final string.
+|
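+|	A worked illustration (numbers chosen here for clarity, not
+|	taken from the 68882 manual):  for X = 314.15 and k = -3,
+|	A3 gives ILOG = 2; A6 gives LEN = ILOG + 1 - k = 6 since
+|	k <= 0; A7 gives ISCALE = LEN - 1 - ILOG = 3, so the mantissa
+|	is scaled by 10^3; A9/A12 give YINT = 314150 (ignoring the
+|	binary representation error in 314.15); A14-A16 then store
+|	the bcd digits 314150, the exponent 2 and the sign bits,
+|	i.e. the packed decimal equivalent of 3.14150E+2.
+|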
+|	Implementation Notes:
+|
+|	The registers are used as follows:
+|
+|		d0: scratch; LEN input to binstr
+|		d1: scratch
+|		d2: upper 32-bits of mantissa for binstr
+|		d3: scratch;lower 32-bits of mantissa for binstr
+|		d4: LEN
+|		d5: LAMBDA/ICTR
+|		d6: ILOG
+|		d7: k-factor
+|		a0: ptr for original operand/final result
+|		a1: scratch pointer
+|		a2: pointer to FP_X; abs(original value) in ext
+|		fp0: scratch
+|		fp1: scratch
+|		fp2: scratch
+|		F_SCR1:
+|		F_SCR2:
+|		L_SCR1:
+|		L_SCR2:
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|BINDEC    idnt    2,1 | Motorola 040 Floating Point Software Package
+
+#include "fpsp.h"
+
+	|section	8
+
+| Constants in extended precision
+LOG2:	.long	0x3FFD0000,0x9A209A84,0xFBCFF798,0x00000000
+LOG2UP1:	.long	0x3FFD0000,0x9A209A84,0xFBCFF799,0x00000000
+
+| Constants in single precision
+FONE:	.long	0x3F800000,0x00000000,0x00000000,0x00000000
+FTWO:	.long	0x40000000,0x00000000,0x00000000,0x00000000
+FTEN:	.long	0x41200000,0x00000000,0x00000000,0x00000000
+F4933:	.long	0x459A2800,0x00000000,0x00000000,0x00000000
+
+RBDTBL:	.byte	0,0,0,0
+	.byte	3,3,2,2
+	.byte	3,2,2,3
+	.byte	2,3,3,2
+
+	|xref	binstr
+	|xref	sintdo
+	|xref	ptenrn,ptenrm,ptenrp
+
+	.global	bindec
+	.global	sc_mul
+bindec:
+	moveml	%d2-%d7/%a2,-(%a7)
+	fmovemx %fp0-%fp2,-(%a7)
+
+| A1. Set RM and size ext. Set SIGMA = sign of input;
+|     The k-factor is saved for use in d7.  Clear BINDEC_FLG for
+|     separating  normalized/denormalized input.  If the input
+|     is a denormalized number, set the BINDEC_FLG memory word
+|     to signal denorm.  If the input is unnormalized, normalize
+|     the input and test for denormalized result.
+|
+	fmovel	#rm_mode,%FPCR	|set RM and ext
+	movel	(%a0),L_SCR2(%a6)	|save exponent for sign check
+	movel	%d0,%d7		|move k-factor to d7
+	clrb	BINDEC_FLG(%a6)	|clr norm/denorm flag
+	movew	STAG(%a6),%d0	|get stag
+	andiw	#0xe000,%d0	|isolate stag bits
+	beq	A2_str		|if zero, input is norm
+|
+| Normalize the denorm
+|
+un_de_norm:
+	movew	(%a0),%d0
+	andiw	#0x7fff,%d0	|strip sign of normalized exp
+	movel	4(%a0),%d1
+	movel	8(%a0),%d2
+norm_loop:
+	subw	#1,%d0
+	lsll	#1,%d2
+	roxll	#1,%d1
+	tstl	%d1
+	bges	norm_loop
+|
+| Test if the normalized input is denormalized
+|
+	tstw	%d0
+	bgts	pos_exp		|if greater than zero, it is a norm
+	st	BINDEC_FLG(%a6)	|set flag for denorm
+pos_exp:
+	andiw	#0x7fff,%d0	|strip sign of normalized exp
+	movew	%d0,(%a0)
+	movel	%d1,4(%a0)
+	movel	%d2,8(%a0)
+
+| A2. Set X = abs(input).
+|
+A2_str:
+	movel	(%a0),FP_SCR2(%a6) | move input to work space
+	movel	4(%a0),FP_SCR2+4(%a6) | move input to work space
+	movel	8(%a0),FP_SCR2+8(%a6) | move input to work space
+	andil	#0x7fffffff,FP_SCR2(%a6) |create abs(X)
+
+| A3. Compute ILOG.
+|     ILOG is the log base 10 of the input value.  It is approx-
+|     imated by adding e + 0.f when the original value is viewed
+|     as 2^^e * 1.f in extended precision.  This value is stored
+|     in d6.
+|
+| Register usage:
+|	Input/Output
+|	d0: k-factor/exponent
+|	d2: x/x
+|	d3: x/x
+|	d4: x/x
+|	d5: x/x
+|	d6: x/ILOG
+|	d7: k-factor/Unchanged
+|	a0: ptr for original operand/final result
+|	a1: x/x
+|	a2: x/x
+|	fp0: x/float(ILOG)
+|	fp1: x/x
+|	fp2: x/x
+|	F_SCR1:x/x
+|	F_SCR2:Abs(X)/Abs(X) with $3fff exponent
+|	L_SCR1:x/x
+|	L_SCR2:first word of X packed/Unchanged
+
+	tstb	BINDEC_FLG(%a6)	|check for denorm
+	beqs	A3_cont		|if clr, continue with norm
+	movel	#-4933,%d6	|force ILOG = -4933
+	bras	A4_str
+A3_cont:
+	movew	FP_SCR2(%a6),%d0	|move exp to d0
+	movew	#0x3fff,FP_SCR2(%a6) |replace exponent with 0x3fff
+	fmovex	FP_SCR2(%a6),%fp0	|now fp0 has 1.f
+	subw	#0x3fff,%d0	|strip off bias
+	faddw	%d0,%fp0		|add in exp
+	fsubs	FONE,%fp0	|subtract off 1.0
+	fbge	pos_res		|if pos, branch
+	fmulx	LOG2UP1,%fp0	|if neg, mul by LOG2UP1
+	fmovel	%fp0,%d6		|put ILOG in d6 as a lword
+	bras	A4_str		|go move out ILOG
+pos_res:
+	fmulx	LOG2,%fp0	|if pos, mul by LOG2
+	fmovel	%fp0,%d6		|put ILOG in d6 as a lword
+
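+|
+| As a rough C sketch of the ILOG estimate above (illustrative only; the
+| names are ad hoc, not from the FPSP sources), with the input viewed as
+| 2^^e * 1.f:
+|
+|	#include <math.h>
+|
+|	/* log10(x) is approximated by (e + 0.f) * log10(2); the directed
+|	 * LOG2/LOG2UP1 constants and the RM rounding mode make the integer
+|	 * conversion round toward minus infinity.  The estimate can still
+|	 * be off by one digit, which A13 detects and corrects by looping
+|	 * back to A6.
+|	 */
+|	static int ilog_estimate(int e, double f)	/* 0 <= f < 1 */
+|	{
+|		return (int)floor(((double)e + f) * 0.3010299956639812);
+|	}
+|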
+
+| A4. Clr INEX bit.
+|     The operation in A3 above may have set INEX2.
+
+A4_str:
+	fmovel	#0,%FPSR		|zero all of fpsr - nothing needed
+
+
+| A5. Set ICTR = 0;
+|     ICTR is a flag used in A13.  It must be set before the
+|     loop entry A6. The lower word of d5 is used for ICTR.
+
+	clrw	%d5		|clear ICTR
+
+
+| A6. Calculate LEN.
+|     LEN is the number of digits to be displayed.  The k-factor
+|     can dictate either the total number of digits, if it is
+|     a positive number, or the number of digits after the
+|     original decimal point which are to be included as
+|     significant.  See the 68882 manual for examples.
+|     If LEN is computed to be greater than 17, set OPERR in
+|     USER_FPSR.  LEN is stored in d4.
+|
+| Register usage:
+|	Input/Output
+|	d0: exponent/Unchanged
+|	d2: x/x/scratch
+|	d3: x/x
+|	d4: exc picture/LEN
+|	d5: ICTR/Unchanged
+|	d6: ILOG/Unchanged
+|	d7: k-factor/Unchanged
+|	a0: ptr for original operand/final result
+|	a1: x/x
+|	a2: x/x
+|	fp0: float(ILOG)/Unchanged
+|	fp1: x/x
+|	fp2: x/x
+|	F_SCR1:x/x
+|	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+|	L_SCR1:x/x
+|	L_SCR2:first word of X packed/Unchanged
+
+A6_str:
+	tstl	%d7		|branch on sign of k
+	bles	k_neg		|if k <= 0, LEN = ILOG + 1 - k
+	movel	%d7,%d4		|if k > 0, LEN = k
+	bras	len_ck		|skip to LEN check
+k_neg:
+	movel	%d6,%d4		|first load ILOG to d4
+	subl	%d7,%d4		|subtract off k
+	addql	#1,%d4		|add in the 1
+len_ck:
+	tstl	%d4		|LEN check: branch on sign of LEN
+	bles	LEN_ng		|if neg, set LEN = 1
+	cmpl	#17,%d4		|test if LEN > 17
+	bles	A7_str		|if not, forget it
+	movel	#17,%d4		|set max LEN = 17
+	tstl	%d7		|if negative, never set OPERR
+	bles	A7_str		|if positive, continue
+	orl	#opaop_mask,USER_FPSR(%a6) |set OPERR & AIOP in USER_FPSR
+	bras	A7_str		|finished here
+LEN_ng:
+	moveql	#1,%d4		|min LEN is 1
+
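+|
+| A rough C sketch of the LEN computation above (illustrative only;
+| ad hoc names):
+|
+|	/* Returns LEN; sets *operr when a positive k forces LEN past 17. */
+|	static int calc_len(int k, int ilog, int *operr)
+|	{
+|		int len = (k > 0) ? k : ilog + 1 - k;
+|
+|		*operr = 0;
+|		if (len < 1)
+|			len = 1;
+|		if (len > 17) {
+|			len = 17;
+|			if (k > 0)	/* only a positive k-factor signals OPERR */
+|				*operr = 1;
+|		}
+|		return len;
+|	}
+|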
+
+| A7. Calculate SCALE.
+|     SCALE is equal to 10^ISCALE, where ISCALE is the number
+|     of decimal places needed to insure LEN integer digits
+|     in the output before conversion to bcd. LAMBDA is the sign
+|     of ISCALE, used in A9.  Fp1 contains 10^^(abs(ISCALE)) using
+|     the rounding mode as given in the following table (see
+|     Coonen, p. 7.23 as ref.; however, the SCALE variable is
+|     of opposite sign in bindec.sa from Coonen).
+|
+|	Initial					USE
+|	FPCR[6:5]	LAMBDA	SIGN(X)		FPCR[6:5]
+|	----------------------------------------------
+|	 RN	00	   0	   0		00/0	RN
+|	 RN	00	   0	   1		00/0	RN
+|	 RN	00	   1	   0		00/0	RN
+|	 RN	00	   1	   1		00/0	RN
+|	 RZ	01	   0	   0		11/3	RP
+|	 RZ	01	   0	   1		11/3	RP
+|	 RZ	01	   1	   0		10/2	RM
+|	 RZ	01	   1	   1		10/2	RM
+|	 RM	10	   0	   0		11/3	RP
+|	 RM	10	   0	   1		10/2	RM
+|	 RM	10	   1	   0		10/2	RM
+|	 RM	10	   1	   1		11/3	RP
+|	 RP	11	   0	   0		10/2	RM
+|	 RP	11	   0	   1		11/3	RP
+|	 RP	11	   1	   0		11/3	RP
+|	 RP	11	   1	   1		10/2	RM
+|
+| Register usage:
+|	Input/Output
+|	d0: exponent/scratch - final is 0
+|	d2: x/0 or 24 for A9
+|	d3: x/scratch - offset ptr into PTENRM array
+|	d4: LEN/Unchanged
+|	d5: 0/ICTR:LAMBDA
+|	d6: ILOG/ILOG or k if ((k<=0)&(ILOG<k))
+|	d7: k-factor/Unchanged
+|	a0: ptr for original operand/final result
+|	a1: x/ptr to PTENRM array
+|	a2: x/x
+|	fp0: float(ILOG)/Unchanged
+|	fp1: x/10^ISCALE
+|	fp2: x/x
+|	F_SCR1:x/x
+|	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+|	L_SCR1:x/x
+|	L_SCR2:first word of X packed/Unchanged
+
+A7_str:
+	tstl	%d7		|test sign of k
+	bgts	k_pos		|if pos and > 0, skip this
+	cmpl	%d6,%d7		|test k - ILOG
+	blts	k_pos		|if ILOG >= k, skip this
+	movel	%d7,%d6		|if ((k<0) & (ILOG < k)) ILOG = k
+k_pos:
+	movel	%d6,%d0		|calc ILOG + 1 - LEN in d0
+	addql	#1,%d0		|add the 1
+	subl	%d4,%d0		|sub off LEN
+	swap	%d5		|use upper word of d5 for LAMBDA
+	clrw	%d5		|set it zero initially
+	clrw	%d2		|set up d2 for very small case
+	tstl	%d0		|test sign of ISCALE
+	bges	iscale		|if pos, skip next inst
+	addqw	#1,%d5		|if neg, set LAMBDA true
+	cmpl	#0xffffecd4,%d0	|test iscale <= -4908
+	bgts	no_inf		|if false, skip rest
+	addil	#24,%d0		|add in 24 to iscale
+	movel	#24,%d2		|put 24 in d2 for A9
+no_inf:
+	negl	%d0		|and take abs of ISCALE
+iscale:
+	fmoves	FONE,%fp1	|init fp1 to 1
+	bfextu	USER_FPCR(%a6){#26:#2},%d1 |get initial rmode bits
+	lslw	#1,%d1		|put them in bits 2:1
+	addw	%d5,%d1		|add in LAMBDA
+	lslw	#1,%d1		|put them in bits 3:1
+	tstl	L_SCR2(%a6)	|test sign of original x
+	bges	x_pos		|if pos, don't set bit 0
+	addql	#1,%d1		|if neg, set bit 0
+x_pos:
+	leal	RBDTBL,%a2	|load rbdtbl base
+	moveb	(%a2,%d1),%d3	|load d3 with new rmode
+	lsll	#4,%d3		|put bits in proper position
+	fmovel	%d3,%fpcr		|load bits into fpu
+	lsrl	#4,%d3		|put bits in proper position
+	tstb	%d3		|decode new rmode for pten table
+	bnes	not_rn		|if zero, it is RN
+	leal	PTENRN,%a1	|load a1 with RN table base
+	bras	rmode		|exit decode
+not_rn:
+	lsrb	#1,%d3		|get lsb in carry
+	bccs	not_rp		|if carry clear, it is RM
+	leal	PTENRP,%a1	|load a1 with RP table base
+	bras	rmode		|exit decode
+not_rp:
+	leal	PTENRM,%a1	|load a1 with RM table base
+rmode:
+	clrl	%d3		|clr table index
+e_loop:
+	lsrl	#1,%d0		|shift next bit into carry
+	bccs	e_next		|if zero, skip the mul
+	fmulx	(%a1,%d3),%fp1	|mul by 10**(d3_bit_no)
+e_next:
+	addl	#12,%d3		|inc d3 to next pwrten table entry
+	tstl	%d0		|test if ISCALE is zero
+	bnes	e_loop		|if not, loop
+
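+|
+| A rough C sketch of A7 (illustrative only; ad hoc names).  tbl[] is
+| assumed to hold the 10^(2^i) entries in the rounding mode selected
+| from the table above, mirroring PTENRN/PTENRM/PTENRP:
+|
+|	static long double scale_factor(int ilog, int len, int fpcr_rm,
+|					int sign_x, int *lambda, int *extra24,
+|					const long double tbl[13])
+|	{
+|		static const unsigned char rbdtbl[16] = {
+|			0,0,0,0, 3,3,2,2, 3,2,2,3, 2,3,3,2 };
+|		int iscale = ilog + 1 - len;
+|		long double f = 1.0L;
+|		int i, rmode;
+|
+|		*lambda = (iscale < 0);
+|		*extra24 = 0;
+|		if (iscale < 0) {
+|			if (iscale <= -4908) {	/* 10^|iscale| would overflow */
+|				iscale += 24;	/* 10^24 is folded back in at A9 */
+|				*extra24 = 1;
+|			}
+|			iscale = -iscale;
+|		}
+|		rmode = rbdtbl[(fpcr_rm << 2) | (*lambda << 1) | sign_x];
+|		(void)rmode;		/* selects which 10^(2^i) table to walk */
+|		for (i = 0; iscale != 0; iscale >>= 1, i++)
+|			if (iscale & 1)
+|				f *= tbl[i];	/* binary exponentiation */
+|		return f;
+|	}
+|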
+
+| A8. Clr INEX; Force RZ.
+|     The operation in A3 above may have set INEX2.
+|     RZ mode is forced for the scaling operation to insure
+|     only one rounding error.  The grs bits are collected in
+|     the INEX flag for use in A10.
+|
+| Register usage:
+|	Input/Output
+
+	fmovel	#0,%FPSR		|clr INEX
+	fmovel	#rz_mode,%FPCR	|set RZ rounding mode
+
+
+| A9. Scale X -> Y.
+|     The mantissa is scaled to the desired number of significant
+|     digits.  The excess digits are collected in INEX2.  In the
+|     multiply case, check d2 for an extra power-of-ten factor.  If it
+|     is non-zero, the iscale value would have caused the pwrten
+|     calculation to overflow.  Only a negative iscale can cause this,
+|     so the extra factor, which here can only be 24, is applied as a
+|     multiply by 10^8 and 10^16; this is exact since 10^24 is
+|     exact.  If the input was denormalized, we must
+|     create a busy stack frame with the mul command and the
+|     two operands, and allow the fpu to complete the multiply.
+|
+| Register usage:
+|	Input/Output
+|	d0: FPCR with RZ mode/Unchanged
+|	d2: 0 or 24/unchanged
+|	d3: x/x
+|	d4: LEN/Unchanged
+|	d5: ICTR:LAMBDA
+|	d6: ILOG/Unchanged
+|	d7: k-factor/Unchanged
+|	a0: ptr for original operand/final result
+|	a1: ptr to PTENRM array/Unchanged
+|	a2: x/x
+|	fp0: float(ILOG)/X adjusted for SCALE (Y)
+|	fp1: 10^ISCALE/Unchanged
+|	fp2: x/x
+|	F_SCR1:x/x
+|	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+|	L_SCR1:x/x
+|	L_SCR2:first word of X packed/Unchanged
+
+A9_str:
+	fmovex	(%a0),%fp0	|load X from memory
+	fabsx	%fp0		|use abs(X)
+	tstw	%d5		|LAMBDA is in lower word of d5
+	bne	sc_mul		|if neg (LAMBDA = 1), scale by mul
+	fdivx	%fp1,%fp0		|calculate X / SCALE -> Y to fp0
+	bras	A10_st		|branch to A10
+
+sc_mul:
+	tstb	BINDEC_FLG(%a6)	|check for denorm
+	beqs	A9_norm		|if norm, continue with mul
+	fmovemx %fp1-%fp1,-(%a7)	|load ETEMP with 10^ISCALE
+	movel	8(%a0),-(%a7)	|load FPTEMP with input arg
+	movel	4(%a0),-(%a7)
+	movel	(%a0),-(%a7)
+	movel	#18,%d3		|load count for busy stack
+A9_loop:
+	clrl	-(%a7)		|clear lword on stack
+	dbf	%d3,A9_loop
+	moveb	VER_TMP(%a6),(%a7) |write current version number
+	moveb	#BUSY_SIZE-4,1(%a7) |write current busy size
+	moveb	#0x10,0x44(%a7)	|set fcefpte[15] bit
+	movew	#0x0023,0x40(%a7)	|load cmdreg1b with mul command
+	moveb	#0xfe,0x8(%a7)	|load all 1s to cu savepc
+	frestore (%a7)+		|restore frame to fpu for completion
+	fmulx	36(%a1),%fp0	|multiply fp0 by 10^8
+	fmulx	48(%a1),%fp0	|multiply fp0 by 10^16
+	bras	A10_st
+A9_norm:
+	tstw	%d2		|test for small exp case
+	beqs	A9_con		|if zero, continue as normal
+	fmulx	36(%a1),%fp0	|multiply fp0 by 10^8
+	fmulx	48(%a1),%fp0	|multiply fp0 by 10^16
+A9_con:
+	fmulx	%fp1,%fp0		|calculate X * SCALE -> Y to fp0
+
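+|
+| Roughly, in C (illustrative only; ad hoc names; the denormalized
+| busy-frame path is omitted since it is specific to the 040 fsave
+| frame):
+|
+|	#include <math.h>
+|
+|	static long double scale_x(long double x, long double scale,
+|				   int lambda, int extra24)
+|	{
+|		long double y = fabsl(x);
+|
+|		if (lambda) {		/* iscale was negative: multiply */
+|			if (extra24)	/* iscale was offset by +24 in A7 */
+|				y = y * 1e8L * 1e16L;	/* 10^24, exact */
+|			y *= scale;
+|		} else {		/* iscale >= 0: divide */
+|			y /= scale;
+|		}
+|		return y;
+|	}
+|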
+
+| A10. Or in INEX.
+|      If INEX is set, round error occurred.  This is compensated
+|      for by 'or-ing' in the INEX2 flag to the lsb of Y.
+|
+| Register usage:
+|	Input/Output
+|	d0: FPCR with RZ mode/FPSR with INEX2 isolated
+|	d2: x/x
+|	d3: x/x
+|	d4: LEN/Unchanged
+|	d5: ICTR:LAMBDA
+|	d6: ILOG/Unchanged
+|	d7: k-factor/Unchanged
+|	a0: ptr for original operand/final result
+|	a1: ptr to PTENxx array/Unchanged
+|	a2: x/ptr to FP_SCR2(a6)
+|	fp0: Y/Y with lsb adjusted
+|	fp1: 10^ISCALE/Unchanged
+|	fp2: x/x
+
+A10_st:
+	fmovel	%FPSR,%d0		|get FPSR
+	fmovex	%fp0,FP_SCR2(%a6)	|move Y to memory
+	leal	FP_SCR2(%a6),%a2	|load a2 with ptr to FP_SCR2
+	btstl	#9,%d0		|check if INEX2 set
+	beqs	A11_st		|if clear, skip rest
+	oril	#1,8(%a2)	|or in 1 to lsb of mantissa
+	fmovex	FP_SCR2(%a6),%fp0	|write adjusted Y back to fpu
+
+
+| A11. Restore original FPCR; set size ext.
+|      Perform FINT operation in the user's rounding mode.  Keep
+|      the size to extended.  The sintdo entry point in the sint
+|      routine expects the FPCR value to be in USER_FPCR for
+|      mode and precision.  The original FPCR is saved in L_SCR1.
+
+A11_st:
+	movel	USER_FPCR(%a6),L_SCR1(%a6) |save it for later
+	andil	#0x00000030,USER_FPCR(%a6) |set size to ext,
+|					;block exceptions
+
+
+| A12. Calculate YINT = FINT(Y) according to user's rounding mode.
+|      The FPSP routine sintd0 is used.  The output is in fp0.
+|
+| Register usage:
+|	Input/Output
+|	d0: FPSR with AINEX cleared/FPCR with size set to ext
+|	d2: x/x/scratch
+|	d3: x/x
+|	d4: LEN/Unchanged
+|	d5: ICTR:LAMBDA/Unchanged
+|	d6: ILOG/Unchanged
+|	d7: k-factor/Unchanged
+|	a0: ptr for original operand/src ptr for sintdo
+|	a1: ptr to PTENxx array/Unchanged
+|	a2: ptr to FP_SCR2(a6)/Unchanged
+|	a6: temp pointer to FP_SCR2(a6) - orig value saved and restored
+|	fp0: Y/YINT
+|	fp1: 10^ISCALE/Unchanged
+|	fp2: x/x
+|	F_SCR1:x/x
+|	F_SCR2:Y adjusted for inex/Y with original exponent
+|	L_SCR1:x/original USER_FPCR
+|	L_SCR2:first word of X packed/Unchanged
+
+A12_st:
+	moveml	%d0-%d1/%a0-%a1,-(%a7)	|save regs used by sintd0
+	movel	L_SCR1(%a6),-(%a7)
+	movel	L_SCR2(%a6),-(%a7)
+	leal	FP_SCR2(%a6),%a0		|a0 is ptr to F_SCR2(a6)
+	fmovex	%fp0,(%a0)		|move Y to memory at FP_SCR2(a6)
+	tstl	L_SCR2(%a6)		|test sign of original operand
+	bges	do_fint			|if pos, use Y
+	orl	#0x80000000,(%a0)		|if neg, use -Y
+do_fint:
+	movel	USER_FPSR(%a6),-(%a7)
+	bsr	sintdo			|sint routine returns int in fp0
+	moveb	(%a7),USER_FPSR(%a6)
+	addl	#4,%a7
+	movel	(%a7)+,L_SCR2(%a6)
+	movel	(%a7)+,L_SCR1(%a6)
+	moveml	(%a7)+,%d0-%d1/%a0-%a1	|restore regs used by sint
+	movel	L_SCR2(%a6),FP_SCR2(%a6)	|restore original exponent
+	movel	L_SCR1(%a6),USER_FPCR(%a6) |restore user's FPCR
+
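+|
+| Roughly the C library equivalent of A11/A12 (illustrative only; ad hoc
+| names; the sign fix-up via L_SCR2 and the sintdo call itself are
+| omitted):
+|
+|	#include <fenv.h>
+|	#include <math.h>
+|
+|	static long double fint_user_mode(long double y, int user_round)
+|	{
+|		int saved = fegetround();
+|		long double yint;
+|
+|		fesetround(user_round);	/* FE_TONEAREST, FE_UPWARD, ... */
+|		yint = nearbyintl(y);	/* round to integer, no inexact trap */
+|		fesetround(saved);
+|		return yint;
+|	}
+|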
+
+| A13. Check for LEN digits.
+|      If the int operation results in more than LEN digits,
+|      or less than LEN -1 digits, adjust ILOG and repeat from
+|      A6.  This test occurs only on the first pass.  If the
+|      result is exactly 10^LEN, decrement ILOG and divide
+|      the mantissa by 10.  The calculation of 10^LEN cannot
+|      be inexact, since all powers of ten up to 10^27 are exact
+|      in extended precision, so the use of a previous power-of-ten
+|      table will introduce no error.
+|
+|
+| Register usage:
+|	Input/Output
+|	d0: FPCR with size set to ext/scratch final = 0
+|	d2: x/x
+|	d3: x/scratch final = x
+|	d4: LEN/LEN adjusted
+|	d5: ICTR:LAMBDA/LAMBDA:ICTR
+|	d6: ILOG/ILOG adjusted
+|	d7: k-factor/Unchanged
+|	a0: pointer into memory for packed bcd string formation
+|	a1: ptr to PTENxx array/Unchanged
+|	a2: ptr to FP_SCR2(a6)/Unchanged
+|	fp0: int portion of Y/abs(YINT) adjusted
+|	fp1: 10^ISCALE/Unchanged
+|	fp2: x/10^LEN
+|	F_SCR1:x/x
+|	F_SCR2:Y with original exponent/Unchanged
+|	L_SCR1:original USER_FPCR/Unchanged
+|	L_SCR2:first word of X packed/Unchanged
+
+A13_st:
+	swap	%d5		|put ICTR in lower word of d5
+	tstw	%d5		|check if ICTR = 0
+	bne	not_zr		|if non-zero, go to second test
+|
+| Compute 10^(LEN-1)
+|
+	fmoves	FONE,%fp2	|init fp2 to 1.0
+	movel	%d4,%d0		|put LEN in d0
+	subql	#1,%d0		|d0 = LEN -1
+	clrl	%d3		|clr table index
+l_loop:
+	lsrl	#1,%d0		|shift next bit into carry
+	bccs	l_next		|if zero, skip the mul
+	fmulx	(%a1,%d3),%fp2	|mul by 10**(d3_bit_no)
+l_next:
+	addl	#12,%d3		|inc d3 to next pwrten table entry
+	tstl	%d0		|test if LEN is zero
+	bnes	l_loop		|if not, loop
+|
+| 10^(LEN-1) is computed for this test and A14.  If the input was
+| denormalized, check only the case in which YINT > 10^LEN.
+|
+	tstb	BINDEC_FLG(%a6)	|check if input was norm
+	beqs	A13_con		|if norm, continue with checking
+	fabsx	%fp0		|take abs of YINT
+	bra	test_2
+|
+| Compare abs(YINT) to 10^(LEN-1) and 10^LEN
+|
+A13_con:
+	fabsx	%fp0		|take abs of YINT
+	fcmpx	%fp2,%fp0		|compare abs(YINT) with 10^(LEN-1)
+	fbge	test_2		|if greater, do next test
+	subql	#1,%d6		|subtract 1 from ILOG
+	movew	#1,%d5		|set ICTR
+	fmovel	#rm_mode,%FPCR	|set rmode to RM
+	fmuls	FTEN,%fp2	|compute 10^LEN
+	bra	A6_str		|return to A6 and recompute YINT
+test_2:
+	fmuls	FTEN,%fp2	|compute 10^LEN
+	fcmpx	%fp2,%fp0		|compare abs(YINT) with 10^LEN
+	fblt	A14_st		|if less, all is ok, go to A14
+	fbgt	fix_ex		|if greater, fix and redo
+	fdivs	FTEN,%fp0	|if equal, divide by 10
+	addql	#1,%d6		| and inc ILOG
+	bras	A14_st		| and continue elsewhere
+fix_ex:
+	addql	#1,%d6		|increment ILOG by 1
+	movew	#1,%d5		|set ICTR
+	fmovel	#rm_mode,%FPCR	|set rmode to RM
+	bra	A6_str		|return to A6 and recompute YINT
+|
+| Since ICTR <> 0, we have already been through one adjustment,
+| and shouldn't have another; this is to check if abs(YINT) = 10^LEN
+| 10^LEN is again computed using whatever table is in a1 since the
+| value calculated cannot be inexact.
+|
+not_zr:
+	fmoves	FONE,%fp2	|init fp2 to 1.0
+	movel	%d4,%d0		|put LEN in d0
+	clrl	%d3		|clr table index
+z_loop:
+	lsrl	#1,%d0		|shift next bit into carry
+	bccs	z_next		|if zero, skip the mul
+	fmulx	(%a1,%d3),%fp2	|mul by 10**(d3_bit_no)
+z_next:
+	addl	#12,%d3		|inc d3 to next pwrten table entry
+	tstl	%d0		|test if LEN is zero
+	bnes	z_loop		|if not, loop
+	fabsx	%fp0		|get abs(YINT)
+	fcmpx	%fp2,%fp0		|check if abs(YINT) = 10^LEN
+	fbne	A14_st		|if not, skip this
+	fdivs	FTEN,%fp0	|divide abs(YINT) by 10
+	addql	#1,%d6		|and inc ILOG by 1
+	addql	#1,%d4		| and inc LEN
+	fmuls	FTEN,%fp2	| if LEN++, then get 10^^LEN
+
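+|
+| A rough C sketch of the first-pass check for a normalized input
+| (illustrative only; ad hoc names).  A non-zero return means A6..A12
+| must be repeated with the adjusted ILOG and RM rounding:
+|
+|	#include <math.h>
+|
+|	static int check_len(long double *ay, int *ilog, int len)
+|	{
+|		long double p = powl(10.0L, (long double)(len - 1));
+|
+|		if (*ay < p) {			/* one digit short */
+|			*ilog -= 1;
+|			return 1;
+|		}
+|		if (*ay > 10.0L * p) {		/* one digit too many */
+|			*ilog += 1;
+|			return 1;
+|		}
+|		if (*ay == 10.0L * p) {		/* exactly 10^LEN */
+|			*ay /= 10.0L;
+|			*ilog += 1;
+|		}
+|		return 0;
+|	}
+|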
+
+| A14. Convert the mantissa to bcd.
+|      The binstr routine is used to convert the LEN digit
+|      mantissa to bcd in memory.  The input to binstr is
+|      to be a fraction; i.e. (mantissa)/10^LEN and adjusted
+|      such that the decimal point is to the left of bit 63.
+|      The bcd digits are stored in the correct position in
+|      the final string area in memory.
+|
+|
+| Register usage:
+|	Input/Output
+|	d0: x/LEN call to binstr - final is 0
+|	d1: x/0
+|	d2: x/ms 32-bits of mant of abs(YINT)
+|	d3: x/ls 32-bits of mant of abs(YINT)
+|	d4: LEN/Unchanged
+|	d5: ICTR:LAMBDA/LAMBDA:ICTR
+|	d6: ILOG
+|	d7: k-factor/Unchanged
+|	a0: pointer into memory for packed bcd string formation
+|	    /ptr to first mantissa byte in result string
+|	a1: ptr to PTENxx array/Unchanged
+|	a2: ptr to FP_SCR2(a6)/Unchanged
+|	fp0: int portion of Y/abs(YINT) adjusted
+|	fp1: 10^ISCALE/Unchanged
+|	fp2: 10^LEN/Unchanged
+|	F_SCR1:x/Work area for final result
+|	F_SCR2:Y with original exponent/Unchanged
+|	L_SCR1:original USER_FPCR/Unchanged
+|	L_SCR2:first word of X packed/Unchanged
+
+A14_st:
+	fmovel	#rz_mode,%FPCR	|force rz for conversion
+	fdivx	%fp2,%fp0		|divide abs(YINT) by 10^LEN
+	leal	FP_SCR1(%a6),%a0
+	fmovex	%fp0,(%a0)	|move abs(YINT)/10^LEN to memory
+	movel	4(%a0),%d2	|move 2nd word of FP_RES to d2
+	movel	8(%a0),%d3	|move 3rd word of FP_RES to d3
+	clrl	4(%a0)		|zero word 2 of FP_RES
+	clrl	8(%a0)		|zero word 3 of FP_RES
+	movel	(%a0),%d0		|move exponent to d0
+	swap	%d0		|put exponent in lower word
+	beqs	no_sft		|if zero, don't shift
+	subil	#0x3ffd,%d0	|sub bias less 2 to make fract
+	tstl	%d0		|check if > 1
+	bgts	no_sft		|if so, don't shift
+	negl	%d0		|make exp positive
+m_loop:
+	lsrl	#1,%d2		|shift d2:d3 right, add 0s
+	roxrl	#1,%d3		|the number of places
+	dbf	%d0,m_loop	|given in d0
+no_sft:
+	tstl	%d2		|check for mantissa of zero
+	bnes	no_zr		|if not, go on
+	tstl	%d3		|continue zero check
+	beqs	zer_m		|if zero, go directly to binstr
+no_zr:
+	clrl	%d1		|put zero in d1 for addx
+	addil	#0x00000080,%d3	|inc at bit 7
+	addxl	%d1,%d2		|continue inc
+	andil	#0xffffff80,%d3	|strip off lsb not used by 882
+zer_m:
+	movel	%d4,%d0		|put LEN in d0 for binstr call
+	addql	#3,%a0		|a0 points to M16 byte in result
+	bsr	binstr		|call binstr to convert mant
+
+
+| A15. Convert the exponent to bcd.
+|      As in A14 above, the exp is converted to bcd and the
+|      digits are stored in the final string.
+|
+|      Digits are stored in L_SCR1(a6) on return from binstr as:
+|
+|	 31               16 15                0
+|	-----------------------------------------
+|	|  0 | e4 | e3 | e2 | e1 |  X |  X |  X |
+|	-----------------------------------------
+|
+| And are moved into their proper places in FP_SCR1.  If digit e4
+| is non-zero, OPERR is signaled.  In all cases, all 4 digits are
+| written as specified in the 881/882 manual for packed decimal.
+|
+| Register usage:
+|	Input/Output
+|	d0: x/LEN call to binstr - final is 0
+|	d1: x/scratch (0);shift count for final exponent packing
+|	d2: x/ms 32-bits of exp fraction/scratch
+|	d3: x/ls 32-bits of exp fraction
+|	d4: LEN/Unchanged
+|	d5: ICTR:LAMBDA/LAMBDA:ICTR
+|	d6: ILOG
+|	d7: k-factor/Unchanged
+|	a0: ptr to result string/ptr to L_SCR1(a6)
+|	a1: ptr to PTENxx array/Unchanged
+|	a2: ptr to FP_SCR2(a6)/Unchanged
+|	fp0: abs(YINT) adjusted/float(ILOG)
+|	fp1: 10^ISCALE/Unchanged
+|	fp2: 10^LEN/Unchanged
+|	F_SCR1:Work area for final result/BCD result
+|	F_SCR2:Y with original exponent/ILOG/10^4
+|	L_SCR1:original USER_FPCR/Exponent digits on return from binstr
+|	L_SCR2:first word of X packed/Unchanged
+
+A15_st:
+	tstb	BINDEC_FLG(%a6)	|check for denorm
+	beqs	not_denorm
+	ftstx	%fp0		|test for zero
+	fbeq	den_zero	|if zero, use k-factor or 4933
+	fmovel	%d6,%fp0		|float ILOG
+	fabsx	%fp0		|get abs of ILOG
+	bras	convrt
+den_zero:
+	tstl	%d7		|check sign of the k-factor
+	blts	use_ilog	|if negative, use ILOG
+	fmoves	F4933,%fp0	|force exponent to 4933
+	bras	convrt		|do it
+use_ilog:
+	fmovel	%d6,%fp0		|float ILOG
+	fabsx	%fp0		|get abs of ILOG
+	bras	convrt
+not_denorm:
+	ftstx	%fp0		|test for zero
+	fbne	not_zero	|if zero, force exponent
+	fmoves	FONE,%fp0	|force exponent to 1
+	bras	convrt		|do it
+not_zero:
+	fmovel	%d6,%fp0		|float ILOG
+	fabsx	%fp0		|get abs of ILOG
+convrt:
+	fdivx	24(%a1),%fp0	|compute ILOG/10^4
+	fmovex	%fp0,FP_SCR2(%a6)	|store fp0 in memory
+	movel	4(%a2),%d2	|move word 2 to d2
+	movel	8(%a2),%d3	|move word 3 to d3
+	movew	(%a2),%d0		|move exp to d0
+	beqs	x_loop_fin	|if zero, skip the shift
+	subiw	#0x3ffd,%d0	|subtract off bias
+	negw	%d0		|make exp positive
+x_loop:
+	lsrl	#1,%d2		|shift d2:d3 right
+	roxrl	#1,%d3		|the number of places
+	dbf	%d0,x_loop	|given in d0
+x_loop_fin:
+	clrl	%d1		|put zero in d1 for addx
+	addil	#0x00000080,%d3	|inc at bit 7
+	addxl	%d1,%d2		|continue inc
+	andil	#0xffffff80,%d3	|strip off lsb not used by 882
+	movel	#4,%d0		|put 4 in d0 for binstr call
+	leal	L_SCR1(%a6),%a0	|a0 is ptr to L_SCR1 for exp digits
+	bsr	binstr		|call binstr to convert exp
+	movel	L_SCR1(%a6),%d0	|load L_SCR1 lword to d0
+	movel	#12,%d1		|use d1 for shift count
+	lsrl	%d1,%d0		|shift d0 right by 12
+	bfins	%d0,FP_SCR1(%a6){#4:#12} |put e3:e2:e1 in FP_SCR1
+	lsrl	%d1,%d0		|shift d0 right by 12
+	bfins	%d0,FP_SCR1(%a6){#16:#4} |put e4 in FP_SCR1
+	tstb	%d0		|check if e4 is zero
+	beqs	A16_st		|if zero, skip rest
+	orl	#opaop_mask,USER_FPSR(%a6) |set OPERR & AIOP in USER_FPSR
+
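+|
+| The exponent conversion amounts to producing four bcd digits for
+| abs(ILOG) and flagging OPERR when the leading (fourth) digit is
+| non-zero.  A rough C sketch (illustrative only; ad hoc names):
+|
+|	/* digit[0] is the most significant digit; returns non-zero when
+|	 * the exponent needs all four digits (the OPERR case).
+|	 */
+|	static int exp_to_bcd(int ilog, unsigned char digit[4])
+|	{
+|		unsigned int e = (ilog < 0) ? -ilog : ilog;
+|		int i;
+|
+|		for (i = 3; i >= 0; i--) {
+|			digit[i] = e % 10;
+|			e /= 10;
+|		}
+|		return digit[0] != 0;
+|	}
+|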
+
+| A16. Write sign bits to final string.
+|	   Sigma is bit 31 of initial value; RHO is bit 31 of d6 (ILOG).
+|
+| Register usage:
+|	Input/Output
+|	d0: x/scratch - final is x
+|	d2: x/x
+|	d3: x/x
+|	d4: LEN/Unchanged
+|	d5: ICTR:LAMBDA/LAMBDA:ICTR
+|	d6: ILOG/ILOG adjusted
+|	d7: k-factor/Unchanged
+|	a0: ptr to L_SCR1(a6)/Unchanged
+|	a1: ptr to PTENxx array/Unchanged
+|	a2: ptr to FP_SCR2(a6)/Unchanged
+|	fp0: float(ILOG)/Unchanged
+|	fp1: 10^ISCALE/Unchanged
+|	fp2: 10^LEN/Unchanged
+|	F_SCR1:BCD result with correct signs
+|	F_SCR2:ILOG/10^4
+|	L_SCR1:Exponent digits on return from binstr
+|	L_SCR2:first word of X packed/Unchanged
+
+A16_st:
+	clrl	%d0		|clr d0 for collection of signs
+	andib	#0x0f,FP_SCR1(%a6) |clear first nibble of FP_SCR1
+	tstl	L_SCR2(%a6)	|check sign of original mantissa
+	bges	mant_p		|if pos, don't set SM
+	moveql	#2,%d0		|move 2 in to d0 for SM
+mant_p:
+	tstl	%d6		|check sign of ILOG
+	bges	wr_sgn		|if pos, don't set SE
+	addql	#1,%d0		|set bit 0 in d0 for SE
+wr_sgn:
+	bfins	%d0,FP_SCR1(%a6){#0:#2} |insert SM and SE into FP_SCR1
+
+| Clean up and restore all registers used.
+
+	fmovel	#0,%FPSR		|clear possible inex2/ainex bits
+	fmovemx (%a7)+,%fp0-%fp2
+	moveml	(%a7)+,%d2-%d7/%a2
+	rts
+
+	|end
diff --git a/arch/m68k/fpsp040/binstr.S b/arch/m68k/fpsp040/binstr.S
new file mode 100644
index 0000000..d53555c
--- /dev/null
+++ b/arch/m68k/fpsp040/binstr.S
@@ -0,0 +1,140 @@
+|
+|	binstr.sa 3.3 12/19/90
+|
+|
+|	Description: Converts a 64-bit binary integer to bcd.
+|
+|	Input: 64-bit binary integer in d2:d3, desired length (LEN) in
+|          d0, and a pointer to start in memory for bcd characters
+|          in a0. (This pointer must point to byte 4 of the first
+|          lword of the packed decimal memory string.)
+|
+|	Output:	LEN bcd digits representing the 64-bit integer.
+|
+|	Algorithm:
+|		The 64-bit binary is assumed to have a decimal point before
+|		bit 63.  The fraction is multiplied by 10 using a mul by 2
+|		shift and a mul by 8 shift.  The bits shifted out of the
+|		msb form a decimal digit.  This process is iterated until
+|		LEN digits are formed.
+|
+|	A1. Init d7 to 1.  D7 is the byte digit counter, and if 1, the
+|		digit formed will be assumed the least significant.  This is
+|		to force the first byte formed to have a 0 in the upper 4 bits.
+|
+|	A2. Beginning of the loop:
+|		Copy the fraction in d2:d3 to d4:d5.
+|
+|	A3. Multiply the fraction in d2:d3 by 8 using bit-field
+|		extracts and shifts.  The three msbs from d2 will go into
+|		d1.
+|
+|	A4. Multiply the fraction in d4:d5 by 2 using shifts.  The msb
+|		will be collected by the carry.
+|
+|	A5. Add using the carry the 64-bit quantities in d2:d3 and d4:d5
+|		into d2:d3.  D1 will contain the bcd digit formed.
+|
+|	A6. Test d7.  If zero, the digit formed is the ms digit.  If non-
+|		zero, it is the ls digit.  Put the digit in its place in the
+|		upper word of d0.  If it is the ls digit, write the word
+|		from d0 to memory.
+|
+|	A7. Decrement d6 (LEN counter) and repeat the loop until zero.
+|
+|	Implementation Notes:
+|
+|	The registers are used as follows:
+|
+|		d0: LEN counter
+|		d1: temp used to form the digit
+|		d2: upper 32-bits of fraction for mul by 8
+|		d3: lower 32-bits of fraction for mul by 8
+|		d4: upper 32-bits of fraction for mul by 2
+|		d5: lower 32-bits of fraction for mul by 2
+|		d6: temp for bit-field extracts
+|		d7: byte digit formation word;digit count {0,1}
+|		a0: pointer into memory for packed bcd string formation
+|
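+| A rough C model of the digit loop (illustrative only; ad hoc names;
+| the 128-bit intermediate is a compiler extension used here only for
+| brevity - the code below does the same thing as x*8 + x*2 with
+| explicit carries).  The 64-bit fraction x, binary point above the
+| msb, is multiplied by ten; the bits that overflow past bit 63 are
+| the next decimal digit, most significant digit first:
+|
+|	#include <stdint.h>
+|
+|	static void bin_to_bcd(uint64_t x, int len, unsigned char *digit)
+|	{
+|		int i;
+|
+|		for (i = 0; i < len; i++) {
+|			unsigned __int128 t = (unsigned __int128)x * 10;
+|			digit[i] = (unsigned char)(t >> 64);	/* overflow */
+|			x = (uint64_t)t;		/* keep the fraction */
+|		}
+|	}
+|
+| The packing of digit pairs into bytes, with a leading zero nibble,
+| is what d7 keeps track of below.
+|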
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|BINSTR    idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	.global	binstr
+binstr:
+	moveml	%d0-%d7,-(%a7)
+|
+| A1: Init d7
+|
+	moveql	#1,%d7			|init d7 for second digit
+	subql	#1,%d0			|for dbf d0 would have LEN+1 passes
+|
+| A2. Copy d2:d3 to d4:d5.  Start loop.
+|
+loop:
+	movel	%d2,%d4			|copy the fraction before muls
+	movel	%d3,%d5			|to d4:d5
+|
+| A3. Multiply d2:d3 by 8; extract msbs into d1.
+|
+	bfextu	%d2{#0:#3},%d1		|copy 3 msbs of d2 into d1
+	asll	#3,%d2			|shift d2 left by 3 places
+	bfextu	%d3{#0:#3},%d6		|copy 3 msbs of d3 into d6
+	asll	#3,%d3			|shift d3 left by 3 places
+	orl	%d6,%d2			|or in msbs from d3 into d2
+|
+| A4. Multiply d4:d5 by 2; add carry out to d1.
+|
+	asll	#1,%d5			|mul d5 by 2
+	roxll	#1,%d4			|mul d4 by 2
+	swap	%d6			|put 0 in d6 lower word
+	addxw	%d6,%d1			|add in extend from mul by 2
+|
+| A5. Add mul by 8 to mul by 2.  D1 contains the digit formed.
+|
+	addl	%d5,%d3			|add lower 32 bits
+	nop				|ERRATA ; FIX #13 (Rev. 1.2 6/6/90)
+	addxl	%d4,%d2			|add with extend upper 32 bits
+	nop				|ERRATA ; FIX #13 (Rev. 1.2 6/6/90)
+	addxw	%d6,%d1			|add in extend from add to d1
+	swap	%d6			|with d6 = 0; put 0 in upper word
+|
+| A6. Test d7 and branch.
+|
+	tstw	%d7			|if zero, store digit & to loop
+	beqs	first_d			|if non-zero, form byte & write
+sec_d:
+	swap	%d7			|bring first digit to word d7b
+	aslw	#4,%d7			|first digit in upper 4 bits d7b
+	addw	%d1,%d7			|add in ls digit to d7b
+	moveb	%d7,(%a0)+		|store d7b byte in memory
+	swap	%d7			|put LEN counter in word d7a
+	clrw	%d7			|set d7a to signal no digits done
+	dbf	%d0,loop		|do loop some more!
+	bras	end_bstr		|finished, so exit
+first_d:
+	swap	%d7			|put digit word in d7b
+	movew	%d1,%d7			|put new digit in d7b
+	swap	%d7			|put LEN counter in word d7a
+	addqw	#1,%d7			|set d7a to signal first digit done
+	dbf	%d0,loop		|do loop some more!
+	swap	%d7			|put last digit in string
+	lslw	#4,%d7			|move it to upper 4 bits
+	moveb	%d7,(%a0)+		|store it in memory string
+|
+| Clean up and return with result in fp0.
+|
+end_bstr:
+	moveml	(%a7)+,%d0-%d7
+	rts
+	|end
diff --git a/arch/m68k/fpsp040/bugfix.S b/arch/m68k/fpsp040/bugfix.S
new file mode 100644
index 0000000..942c4f6
--- /dev/null
+++ b/arch/m68k/fpsp040/bugfix.S
@@ -0,0 +1,496 @@
+|
+|	bugfix.sa 3.2 1/31/91
+|
+|
+|	This file contains workarounds for bugs in the 040
+|	relating to the Floating-Point Software Package (FPSP)
+|
+|	Fixes for bugs: 1238
+|
+|	Bug: 1238
+|
+|
+|    /* The following dirty_bit clear should be left in
+|     * the handler permanently to improve throughput.
+|     * The dirty_bits are located at bits [23:16] in
+|     * longword $08 in the busy frame $4x60.  Bit 16
+|     * corresponds to FP0, bit 17 corresponds to FP1,
+|     * and so on.
+|     */
+|    if  (E3_exception_just_serviced)   {
+|         dirty_bit[cmdreg3b[9:7]] = 0;
+|         }
+|
+|    if  (fsave_format_version != $40)  {goto NOFIX}
+|
+|    if !(E3_exception_just_serviced)   {goto NOFIX}
+|    if  (cupc == 0000000)              {goto NOFIX}
+|    if  ((cmdreg1b[15:13] != 000) &&
+|         (cmdreg1b[15:10] != 010001))  {goto NOFIX}
+|    if (((cmdreg1b[15:13] != 000) || ((cmdreg1b[12:10] != cmdreg2b[9:7]) &&
+|				      (cmdreg1b[12:10] != cmdreg3b[9:7]))  ) &&
+|	 ((cmdreg1b[ 9: 7] != cmdreg2b[9:7]) &&
+|	  (cmdreg1b[ 9: 7] != cmdreg3b[9:7])) )  {goto NOFIX}
+|
+|    /* Note: for 6d43b or 8d43b, you may want to add the following code
+|     * to get better coverage.  (If you do not insert this code, the part
+|     * won't lock up; it will simply get the wrong answer.)
+|     * Do NOT insert this code for 10d43b or later parts.
+|     *
+|     *  if (fpiarcu == integer stack return address) {
+|     *       cupc = 0000000;
+|     *       goto NOFIX;
+|     *       }
+|     */
+|
+|    if (cmdreg1b[15:13] != 000)   {goto FIX_OPCLASS2}
+|    FIX_OPCLASS0:
+|    if (((cmdreg1b[12:10] == cmdreg2b[9:7]) ||
+|	 (cmdreg1b[ 9: 7] == cmdreg2b[9:7])) &&
+|	(cmdreg1b[12:10] != cmdreg3b[9:7]) &&
+|	(cmdreg1b[ 9: 7] != cmdreg3b[9:7]))  {  /* xu conflict only */
+|	/* We execute the following code if there is an
+|	   xu conflict and NOT an nu conflict */
+|
+|	/* first save some values on the fsave frame */
+|	stag_temp     = STAG[fsave_frame];
+|	cmdreg1b_temp = CMDREG1B[fsave_frame];
+|	dtag_temp     = DTAG[fsave_frame];
+|	ete15_temp    = ETE15[fsave_frame];
+|
+|	CUPC[fsave_frame] = 0000000;
+|	FRESTORE
+|	FSAVE
+|
+|	/* If the xu instruction is exceptional, we punt.
+|	 * Otherwise, we would have to include OVFL/UNFL handler
+|	 * code here to get the correct answer.
+|	 */
+|	if (fsave_frame_format == $4060) {goto KILL_PROCESS}
+|
+|	fsave_frame = /* build a long frame of all zeros */
+|	fsave_frame_format = $4060;  /* label it as long frame */
+|
+|	/* load it with the temps we saved */
+|	STAG[fsave_frame]     =  stag_temp;
+|	CMDREG1B[fsave_frame] =  cmdreg1b_temp;
+|	DTAG[fsave_frame]     =  dtag_temp;
+|	ETE15[fsave_frame]    =  ete15_temp;
+|
+|	/* Make sure that the cmdreg3b dest reg is not going to
+|	 * be destroyed by a FMOVEM at the end of all this code.
+|	 * If it is, you should move the current value of the reg
+|	 * onto the stack so that the reg will be loaded with that value.
+|	 */
+|
+|	/* All done.  Proceed with the code below */
+|    }
+|
+|    etemp  = FP_reg_[cmdreg1b[12:10]];
+|    ete15  = ~ete14;
+|    cmdreg1b[15:10] = 010010;
+|    clear(bug_flag_procIDxxxx);
+|    FRESTORE and return;
+|
+|
+|    FIX_OPCLASS2:
+|    if ((cmdreg1b[9:7] == cmdreg2b[9:7]) &&
+|	(cmdreg1b[9:7] != cmdreg3b[9:7]))  {  /* xu conflict only */
+|	/* We execute the following code if there is an
+|	   xu conflict and NOT an nu conflict */
+|
+|	/* first save some values on the fsave frame */
+|	stag_temp     = STAG[fsave_frame];
+|	cmdreg1b_temp = CMDREG1B[fsave_frame];
+|	dtag_temp     = DTAG[fsave_frame];
+|	ete15_temp    = ETE15[fsave_frame];
+|	etemp_temp    = ETEMP[fsave_frame];
+|
+|	CUPC[fsave_frame] = 0000000;
+|	FRESTORE
+|	FSAVE
+|
+|
+|	/* If the xu instruction is exceptional, we punt.
+|	 * Otherwise, we would have to include OVFL/UNFL handler
+|	 * code here to get the correct answer.
+|	 */
+|	if (fsave_frame_format == $4060) {goto KILL_PROCESS}
+|
+|	fsave_frame = /* build a long frame of all zeros */
+|	fsave_frame_format = $4060;  /* label it as long frame */
+|
+|	/* load it with the temps we saved */
+|	STAG[fsave_frame]     =  stag_temp;
+|	CMDREG1B[fsave_frame] =  cmdreg1b_temp;
+|	DTAG[fsave_frame]     =  dtag_temp;
+|	ETE15[fsave_frame]    =  ete15_temp;
+|	ETEMP[fsave_frame]    =  etemp_temp;
+|
+|	/* Make sure that the cmdreg3b dest reg is not going to
+|	 * be destroyed by a FMOVEM at the end of all this code.
+|	 * If it is, you should move the current value of the reg
+|	 * onto the stack so that the reg will be loaded with that value.
+|	 */
+|
+|	/* All done.  Proceed with the code below */
+|    }
+|
+|    if (etemp_exponent == min_sgl)   etemp_exponent = min_dbl;
+|    if (etemp_exponent == max_sgl)   etemp_exponent = max_dbl;
+|    cmdreg1b[15:10] = 010101;
+|    clear(bug_flag_procIDxxxx);
+|    FRESTORE and return;
+|
+|
+|    NOFIX:
+|    clear(bug_flag_procIDxxxx);
+|    FRESTORE and return;
+|
+
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|BUGFIX    idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	|xref	fpsp_fmt_error
+
+	.global	b1238_fix
+b1238_fix:
+|
+| This code is entered only on completion of the handling of an
+| nu-generated ovfl, unfl, or inex exception.  If the version
+| number of the fsave is not $40, this handler is not necessary.
+| Simply branch to fix_done and exit normally.
+|
+	cmpib	#VER_40,4(%a7)
+	bne	fix_done
+|
+| Test for cu_savepc equal to zero.  If not, this is not a bug
+| #1238 case.
+|
+	moveb	CU_SAVEPC(%a6),%d0
+	andib	#0xFE,%d0
+	beq	fix_done	|if zero, this is not bug #1238
+
+|
+| Test the register conflict aspect.  If opclass0, check for
+| cu src equal to xu dest or equal to nu dest.  If so, go to
+| op0.  Else, or if opclass2, check for cu dest equal to
+| xu dest or equal to nu dest.  If so, go to tst_opcl.  Else,
+| exit, it is not the bug case.
+|
+| Check for opclass 0.  If not, go and check for opclass 2 and sgl.
+|
+	movew	CMDREG1B(%a6),%d0
+	andiw	#0xE000,%d0		|strip all but opclass
+	bne	op2sgl			|not opclass 0, check op2
+|
+| Check for cu and nu register conflict.  If one exists, this takes
+| priority over a cu and xu conflict.
+|
+	bfextu	CMDREG1B(%a6){#3:#3},%d0	|get 1st src
+	bfextu	CMDREG3B(%a6){#6:#3},%d1	|get 3rd dest
+	cmpb	%d0,%d1
+	beqs	op0			|if equal, continue bugfix
+|
+| Check for cu dest equal to nu dest.  If so, go and fix the
+| bug condition.  Otherwise, check for a cu and xu conflict.
+|
+	bfextu	CMDREG1B(%a6){#6:#3},%d0	|get 1st dest
+	cmpb	%d0,%d1			|cmp 1st dest with 3rd dest
+	beqs	op0			|if equal, continue bugfix
+|
+| Check for cu and xu register conflict.
+|
+	bfextu	CMDREG2B(%a6){#6:#3},%d1	|get 2nd dest
+	cmpb	%d0,%d1			|cmp 1st dest with 2nd dest
+	beqs	op0_xu			|if equal, continue bugfix
+	bfextu	CMDREG1B(%a6){#3:#3},%d0	|get 1st src
+	cmpb	%d0,%d1			|cmp 1st src with 2nd dest
+	beq	op0_xu
+	bne	fix_done		|if the reg checks fail, exit
+|
+| We have the opclass 0 situation.
+|
+op0:
+	bfextu	CMDREG1B(%a6){#3:#3},%d0	|get source register no
+	movel	#7,%d1
+	subl	%d0,%d1
+	clrl	%d0
+	bsetl	%d1,%d0
+	fmovemx %d0,ETEMP(%a6)		|load source to ETEMP
+
+	moveb	#0x12,%d0
+	bfins	%d0,CMDREG1B(%a6){#0:#6}	|opclass 2, extended
+|
+|	Set ETEMP exponent bit 15 as the opposite of ete14
+|
+	btst	#6,ETEMP_EX(%a6)		|check etemp exponent bit 14
+	beq	setete15
+	bclr	#etemp15_bit,STAG(%a6)
+	bra	finish
+setete15:
+	bset	#etemp15_bit,STAG(%a6)
+	bra	finish
+
+|
+| We have the case in which a conflict exists between the cu src or
+| dest and the dest of the xu.  We must clear the instruction in
+| the cu and restore the state, allowing the instruction in the
+| xu to complete.  Remember, the instruction in the nu
+| was exceptional, and was completed by the appropriate handler.
+| If the result of the xu instruction is not exceptional, we can
+| restore the instruction from the cu to the frame and continue
+| processing the original exception.  If the result is also
+| exceptional, we choose to kill the process.
+|
+|	Items saved from the stack:
+|
+|		$3c stag     - L_SCR1
+|		$40 cmdreg1b - L_SCR2
+|		$44 dtag     - L_SCR3
+|
+| The cu savepc is set to zero, and the frame is restored to the
+| fpu.
+|
+op0_xu:
+	movel	STAG(%a6),L_SCR1(%a6)
+	movel	CMDREG1B(%a6),L_SCR2(%a6)
+	movel	DTAG(%a6),L_SCR3(%a6)
+	andil	#0xe0000000,L_SCR3(%a6)
+	moveb	#0,CU_SAVEPC(%a6)
+	movel	(%a7)+,%d1		|save return address from bsr
+	frestore (%a7)+
+	fsave	-(%a7)
+|
+| Check if the instruction which just completed was exceptional.
+|
+	cmpw	#0x4060,(%a7)
+	beq	op0_xb
+|
+| It is necessary to isolate the result of the instruction in the
+| xu if it is to fp0 - fp3 and write that value to the USER_FPn
+| locations on the stack.  The correct destination register is in
+| cmdreg2b.
+|
+	bfextu	CMDREG2B(%a6){#6:#3},%d0	|get dest register no
+	cmpil	#3,%d0
+	bgts	op0_xi
+	beqs	op0_fp3
+	cmpil	#1,%d0
+	blts	op0_fp0
+	beqs	op0_fp1
+op0_fp2:
+	fmovemx %fp2-%fp2,USER_FP2(%a6)
+	bras	op0_xi
+op0_fp1:
+	fmovemx %fp1-%fp1,USER_FP1(%a6)
+	bras	op0_xi
+op0_fp0:
+	fmovemx %fp0-%fp0,USER_FP0(%a6)
+	bras	op0_xi
+op0_fp3:
+	fmovemx %fp3-%fp3,USER_FP3(%a6)
+|
+| The frame returned is idle.  We must build a busy frame to hold
+| the cu state information and setup etemp.
+|
+op0_xi:
+	movel	#22,%d0		|clear 23 lwords
+	clrl	(%a7)
+op0_loop:
+	clrl	-(%a7)
+	dbf	%d0,op0_loop
+	movel	#0x40600000,-(%a7)
+	movel	L_SCR1(%a6),STAG(%a6)
+	movel	L_SCR2(%a6),CMDREG1B(%a6)
+	movel	L_SCR3(%a6),DTAG(%a6)
+	moveb	#0x6,CU_SAVEPC(%a6)
+	movel	%d1,-(%a7)		|return bsr return address
+	bfextu	CMDREG1B(%a6){#3:#3},%d0	|get source register no
+	movel	#7,%d1
+	subl	%d0,%d1
+	clrl	%d0
+	bsetl	%d1,%d0
+	fmovemx %d0,ETEMP(%a6)		|load source to ETEMP
+
+	moveb	#0x12,%d0
+	bfins	%d0,CMDREG1B(%a6){#0:#6}	|opclass 2, extended
+|
+|	Set ETEMP exponent bit 15 as the opposite of ete14
+|
+	btst	#6,ETEMP_EX(%a6)		|check etemp exponent bit 14
+	beq	op0_sete15
+	bclr	#etemp15_bit,STAG(%a6)
+	bra	finish
+op0_sete15:
+	bset	#etemp15_bit,STAG(%a6)
+	bra	finish
+
+|
+| The frame returned is busy.  It is not possible to reconstruct
+| the code sequence to allow completion.  We will jump to
+| fpsp_fmt_error and allow the kernel to kill the process.
+|
+op0_xb:
+	jmp	fpsp_fmt_error
+
+|
+| Check for opclass 2 and single size.  If not both, exit.
+|
+op2sgl:
+	movew	CMDREG1B(%a6),%d0
+	andiw	#0xFC00,%d0		|strip all but opclass and size
+	cmpiw	#0x4400,%d0		|test for opclass 2 and size=sgl
+	bne	fix_done		|if not, it is not bug 1238
+|
+| Check for cu dest equal to nu dest or equal to xu dest, with
+| a cu and nu conflict taking priority over a cu and xu conflict.  If either,
+| go and fix the bug condition.  Otherwise, exit.
+|
+	bfextu	CMDREG1B(%a6){#6:#3},%d0	|get 1st dest
+	bfextu	CMDREG3B(%a6){#6:#3},%d1	|get 3rd dest
+	cmpb	%d0,%d1			|cmp 1st dest with 3rd dest
+	beq	op2_com			|if equal, continue bugfix
+	bfextu	CMDREG2B(%a6){#6:#3},%d1	|get 2nd dest
+	cmpb	%d0,%d1			|cmp 1st dest with 2nd dest
+	bne	fix_done		|if the reg checks fail, exit
+|
+| We have the case in which a conflict exists between the cu src or
+| dest and the dest of the xu.  We must clear the instruction in
+| the cu and restore the state, allowing the instruction in the
+| xu to complete.  Remember, the instruction in the nu
+| was exceptional, and was completed by the appropriate handler.
+| If the result of the xu instruction is not exceptional, we can
+| restore the instruction from the cu to the frame and continue
+| processing the original exception.  If the result is also
+| exceptional, we choose to kill the process.
+|
+|	Items saved from the stack:
+|
+|		$3c stag     - L_SCR1
+|		$40 cmdreg1b - L_SCR2
+|		$44 dtag     - L_SCR3
+|		etemp        - FP_SCR2
+|
+| The cu savepc is set to zero, and the frame is restored to the
+| fpu.
+|
+op2_xu:
+	movel	STAG(%a6),L_SCR1(%a6)
+	movel	CMDREG1B(%a6),L_SCR2(%a6)
+	movel	DTAG(%a6),L_SCR3(%a6)
+	andil	#0xe0000000,L_SCR3(%a6)
+	moveb	#0,CU_SAVEPC(%a6)
+	movel	ETEMP(%a6),FP_SCR2(%a6)
+	movel	ETEMP_HI(%a6),FP_SCR2+4(%a6)
+	movel	ETEMP_LO(%a6),FP_SCR2+8(%a6)
+	movel	(%a7)+,%d1		|save return address from bsr
+	frestore (%a7)+
+	fsave	-(%a7)
+|
+| Check if the instruction which just completed was exceptional.
+|
+	cmpw	#0x4060,(%a7)
+	beq	op2_xb
+|
+| It is necessary to isolate the result of the instruction in the
+| xu if it is to fp0 - fp3 and write that value to the USER_FPn
+| locations on the stack.  The correct destination register is in
+| cmdreg2b.
+|
+	bfextu	CMDREG2B(%a6){#6:#3},%d0	|get dest register no
+	cmpil	#3,%d0
+	bgts	op2_xi
+	beqs	op2_fp3
+	cmpil	#1,%d0
+	blts	op2_fp0
+	beqs	op2_fp1
+op2_fp2:
+	fmovemx %fp2-%fp2,USER_FP2(%a6)
+	bras	op2_xi
+op2_fp1:
+	fmovemx %fp1-%fp1,USER_FP1(%a6)
+	bras	op2_xi
+op2_fp0:
+	fmovemx %fp0-%fp0,USER_FP0(%a6)
+	bras	op2_xi
+op2_fp3:
+	fmovemx %fp3-%fp3,USER_FP3(%a6)
+|
+| The frame returned is idle.  We must build a busy frame to hold
+| the cu state information and fix up etemp.
+|
+op2_xi:
+	movel	#22,%d0		|clear 23 lwords
+	clrl	(%a7)
+op2_loop:
+	clrl	-(%a7)
+	dbf	%d0,op2_loop
+	movel	#0x40600000,-(%a7)
+	movel	L_SCR1(%a6),STAG(%a6)
+	movel	L_SCR2(%a6),CMDREG1B(%a6)
+	movel	L_SCR3(%a6),DTAG(%a6)
+	moveb	#0x6,CU_SAVEPC(%a6)
+	movel	FP_SCR2(%a6),ETEMP(%a6)
+	movel	FP_SCR2+4(%a6),ETEMP_HI(%a6)
+	movel	FP_SCR2+8(%a6),ETEMP_LO(%a6)
+	movel	%d1,-(%a7)
+	bra	op2_com
+
+|
+| We have the opclass 2 single source situation.
+|
+op2_com:
+	moveb	#0x15,%d0
+	bfins	%d0,CMDREG1B(%a6){#0:#6}	|opclass 2, double
+
+	cmpw	#0x407F,ETEMP_EX(%a6)	|single +max
+	bnes	case2
+	movew	#0x43FF,ETEMP_EX(%a6)	|to double +max
+	bra	finish
+case2:
+	cmpw	#0xC07F,ETEMP_EX(%a6)	|single -max
+	bnes	case3
+	movew	#0xC3FF,ETEMP_EX(%a6)	|to double -max
+	bra	finish
+case3:
+	cmpw	#0x3F80,ETEMP_EX(%a6)	|single +min
+	bnes	case4
+	movew	#0x3C00,ETEMP_EX(%a6)	|to double +min
+	bra	finish
+case4:
+	cmpw	#0xBF80,ETEMP_EX(%a6)	|single -min
+	bne	fix_done
+	movew	#0xBC00,ETEMP_EX(%a6)	|to double -min
+	bra	finish
+|
+| The frame returned is busy.  It is not possible to reconstruct
+| the code sequence to allow completion.  fpsp_fmt_error causes
+| an fline illegal instruction to be executed.
+|
+| You should replace the jump to fpsp_fmt_error with a jump
+| to the entry point used to kill a process.
+|
+op2_xb:
+	jmp	fpsp_fmt_error
+
+|
+| Enter here if the case is not one of the situations affected by
+| bug #1238, or if the fix is completed, and exit.
+|
+finish:
+fix_done:
+	rts
+
+	|end
diff --git a/arch/m68k/fpsp040/decbin.S b/arch/m68k/fpsp040/decbin.S
new file mode 100644
index 0000000..2160609
--- /dev/null
+++ b/arch/m68k/fpsp040/decbin.S
@@ -0,0 +1,506 @@
+|
+|	decbin.sa 3.3 12/19/90
+|
+|	Description: Converts normalized packed bcd value pointed to by
+|	register A6 to extended-precision value in FP0.
+|
+|	Input: Normalized packed bcd value in ETEMP(a6).
+|
+|	Output:	Exact floating-point representation of the packed bcd value.
+|
+|	Saves and Modifies: D2-D5
+|
+|	Speed: The program decbin takes ??? cycles to execute.
+|
+|	Object Size:
+|
+|	External Reference(s): None.
+|
+|	Algorithm:
+|	Expected is a normal bcd (i.e. non-exceptional; all inf, zero,
+|	and NaN operands are dispatched without entering this routine)
+|	value in 68881/882 format at location ETEMP(A6).
+|
+|	A1.	Convert the bcd exponent to binary by successive adds and muls.
+|	Set the sign according to SE. Subtract 16 to compensate
+|	for the mantissa which is to be interpreted as 17 integer
+|	digits, rather than 1 integer and 16 fraction digits.
+|	Note: this operation can never overflow.
+|
+|	A2. Convert the bcd mantissa to binary by successive
+|	adds and muls in FP0. Set the sign according to SM.
+|	The mantissa digits will be converted with the decimal point
+|	assumed following the least-significant digit.
+|	Note: this operation can never overflow.
+|
+|	A3. Count the number of leading/trailing zeros in the
+|	bcd string.  If SE is positive, count the leading zeros;
+|	if negative, count the trailing zeros.  Set the adjusted
+|	exponent equal to the exponent from A1 and the zero count
+|	added if SM = 1 and subtracted if SM = 0.  Scale the
+|	mantissa the equivalent of forcing in the bcd value:
+|
+|	SM = 0	a non-zero digit in the integer position
+|	SM = 1	a non-zero digit in Mant0, lsd of the fraction
+|
+|	this will insure that any value, regardless of its
+|	representation (ex. 0.1E2, 1E1, 10E0, 100E-1), is converted
+|	consistently.
+|
+|	A4. Calculate the factor 10^exp in FP1 using a table of
+|	10^(2^n) values.  To reduce the error in forming factors
+|	greater than 10^27, a directed rounding scheme is used with
+|	tables rounded to RN, RM, and RP, according to the table
+|	in the comments of the pwrten section.
+|
+|	A5. Form the final binary number by scaling the mantissa by
+|	the exponent factor.  This is done by multiplying the
+|	mantissa in FP0 by the factor in FP1 if the adjusted
+|	exponent sign is positive, and dividing FP0 by FP1 if
+|	it is negative.
+|
+|	Clean up and return.  Check if the final mul or div resulted
+|	in an inex2 exception.  If so, set inex1 in the fpsr and
+|	check if the inex1 exception is enabled.  If so, set d7 upper
+|	word to $0100.  This will signal unimp.sa that an enabled inex1
+|	exception occurred.  Unimp will fix the stack.
+|
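+| The exponent and mantissa conversions in A1/A2 are both plain
+| Horner-style digit accumulation.  A rough C sketch of the exponent
+| part (illustrative only; ad hoc names):
+|
+|	/* digit[0..2] are the bcd exponent digits, most significant first;
+|	 * se is the exponent sign bit SE.
+|	 */
+|	static int bcd_exp_to_bin(const unsigned char digit[3], int se)
+|	{
+|		int e = 0, i;
+|
+|		for (i = 0; i < 3; i++)
+|			e = e * 10 + digit[i];	/* successive adds and muls */
+|		if (se)
+|			e = -e;
+|		return e - 16;	/* mantissa is treated as 17 integer digits */
+|	}
+|
+| calc_m below applies the same pattern to the 17 mantissa digits,
+| accumulating in fp0.
+|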
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|DECBIN    idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+|
+|	PTENRN, PTENRM, and PTENRP are arrays of powers of 10 rounded
+|	to nearest, minus, and plus, respectively.  The tables include
+|	10**{1,2,4,8,16,32,64,128,256,512,1024,2048,4096}.  No rounding
+|	is required until the power is greater than 27, however, all
+|	tables include the first 5 for ease of indexing.
+|
+	|xref	PTENRN
+	|xref	PTENRM
+	|xref	PTENRP
+
+RTABLE:	.byte	0,0,0,0
+	.byte	2,3,2,3
+	.byte	2,3,3,2
+	.byte	3,2,2,3
+
+	.global	decbin
+	.global	calc_e
+	.global	pwrten
+	.global	calc_m
+	.global	norm
+	.global	ap_st_z
+	.global	ap_st_n
+|
+	.set	FNIBS,7
+	.set	FSTRT,0
+|
+	.set	ESTRT,4
+	.set	EDIGITS,2	|
+|
+| Constants in single precision
+FZERO:	.long	0x00000000
+FONE:	.long	0x3F800000
+FTEN:	.long	0x41200000
+
+	.set	TEN,10
+
+|
+decbin:
+	| fmovel	#0,FPCR		;clr real fpcr
+	moveml	%d2-%d5,-(%a7)
+|
+| Calculate exponent:
+|  1. Copy bcd value in memory for use as a working copy.
+|  2. Calculate absolute value of exponent in d1 by mul and add.
+|  3. Correct for exponent sign.
+|  4. Subtract 16 to compensate for interpreting the mant as all integer digits.
+|     (i.e., all digits assumed left of the decimal point.)
+|
+| Register usage:
+|
+|  calc_e:
+|	(*)  d0: temp digit storage
+|	(*)  d1: accumulator for binary exponent
+|	(*)  d2: digit count
+|	(*)  d3: offset pointer
+|	( )  d4: first word of bcd
+|	( )  a0: pointer to working bcd value
+|	( )  a6: pointer to original bcd value
+|	(*)  FP_SCR1: working copy of original bcd value
+|	(*)  L_SCR1: copy of original exponent word
+|
+calc_e:
+	movel	#EDIGITS,%d2	|# of nibbles (digits) in fraction part
+	moveql	#ESTRT,%d3	|counter to pick up digits
+	leal	FP_SCR1(%a6),%a0	|load tmp bcd storage address
+	movel	ETEMP(%a6),(%a0)	|save input bcd value
+	movel	ETEMP_HI(%a6),4(%a0) |save words 2 and 3
+	movel	ETEMP_LO(%a6),8(%a0) |and work with these
+	movel	(%a0),%d4	|get first word of bcd
+	clrl	%d1		|zero d1 for accumulator
+e_gd:
+	mulul	#TEN,%d1	|mul partial product by one digit place
+	bfextu	%d4{%d3:#4},%d0	|get the digit and zero extend into d0
+	addl	%d0,%d1		|d1 = d1 + d0
+	addqb	#4,%d3		|advance d3 to the next digit
+	dbf	%d2,e_gd	|if we have used all 3 digits, exit loop
+	btst	#30,%d4		|get SE
+	beqs	e_pos		|don't negate if pos
+	negl	%d1		|negate before subtracting
+e_pos:
+	subl	#16,%d1		|sub to compensate for shift of mant
+	bges	e_save		|if still pos, do not neg
+	negl	%d1		|now negative, make pos and set SE
+	orl	#0x40000000,%d4	|set SE in d4,
+	orl	#0x40000000,(%a0)	|and in working bcd
+e_save:
+	movel	%d1,L_SCR1(%a6)	|save exp in memory
+|
+|
+| Calculate mantissa:
+|  1. Calculate absolute value of mantissa in fp0 by mul and add.
+|  2. Correct for mantissa sign.
+|     (i.e., all digits assumed left of the decimal point.)
+|
+| Register usage:
+|
+|  calc_m:
+|	(*)  d0: temp digit storage
+|	(*)  d1: lword counter
+|	(*)  d2: digit count
+|	(*)  d3: offset pointer
+|	( )  d4: words 2 and 3 of bcd
+|	( )  a0: pointer to working bcd value
+|	( )  a6: pointer to original bcd value
+|	(*) fp0: mantissa accumulator
+|	( )  FP_SCR1: working copy of original bcd value
+|	( )  L_SCR1: copy of original exponent word
+|
+calc_m:
+	moveql	#1,%d1		|word counter, init to 1
+	fmoves	FZERO,%fp0	|accumulator
+|
+|
+|  Since the packed number has a long word between the first & second parts,
+|  get the integer digit then skip down & get the rest of the
+|  mantissa.  We will unroll the loop once.
+|
+	bfextu	(%a0){#28:#4},%d0	|integer part is ls digit in long word
+	faddb	%d0,%fp0		|add digit to sum in fp0
+|
+|
+|  Get the rest of the mantissa.
+|
+loadlw:
+	movel	(%a0,%d1.L*4),%d4	|load mantissa longword into d4
+	moveql	#FSTRT,%d3	|counter to pick up digits
+	moveql	#FNIBS,%d2	|reset number of digits per a0 ptr
+md2b:
+	fmuls	FTEN,%fp0	|fp0 = fp0 * 10
+	bfextu	%d4{%d3:#4},%d0	|get the digit and zero extend
+	faddb	%d0,%fp0	|fp0 = fp0 + digit
+|
+|
+|  If all the digits (8) in that long word have been converted (d2=0),
+|  then inc d1 (=2) to point to the next long word and reset d3 to 0
+|  to initialize the digit offset, and set d2 to 7 for the digit count;
+|  else continue with this long word.
+|
+	addqb	#4,%d3		|advance d3 to the next digit
+	dbf	%d2,md2b		|check for last digit in this lw
+nextlw:
+	addql	#1,%d1		|inc lw pointer in mantissa
+	cmpl	#2,%d1		|test for last lw
+	ble	loadlw		|if not, get last one
+
+|
+|  Check the sign of the mant and make the value in fp0 the same sign.
+|
+m_sign:
+	btst	#31,(%a0)	|test sign of the mantissa
+	beq	ap_st_z		|if clear, go to append/strip zeros
+	fnegx	%fp0		|if set, negate fp0
+
+|
+| Append/strip zeros:
+|
+|  For adjusted exponents which have an absolute value greater than 27*,
+|  this routine calculates the amount needed to normalize the mantissa
+|  for the adjusted exponent.  That number is subtracted from the exp
+|  if the exp was positive, and added if it was negative.  The purpose
+|  of this is to reduce the value of the exponent and the possibility
+|  of error in calculation of pwrten.
+|
+|  1. Branch on the sign of the adjusted exponent.
+|  2p.(positive exp)
+|   2. Check M16 and the digits in lwords 2 and 3 in descending order.
+|   3. Add one for each zero encountered until a non-zero digit.
+|   4. Subtract the count from the exp.
+|   5. Check if the exp has crossed zero in #3 above; make the exp abs
+|	   and set SE.
+|	6. Multiply the mantissa by 10**count.
+|  2n.(negative exp)
+|   2. Check the digits in lwords 3 and 2 in descending order.
+|   3. Add one for each zero encountered until a non-zero digit.
+|   4. Add the count to the exp.
+|   5. Check if the exp has crossed zero in #3 above; clear SE.
+|   6. Divide the mantissa by 10**count.
+|
+|  *Why 27?  If the adjusted exponent is within -28 < expA < 28, then
+|   any adjustment due to append/strip zeros will drive the resultant
+|   exponent towards zero.  Since all pwrten constants with a power
+|   of 27 or less are exact, there is no need to use this routine to
+|   attempt to lessen the resultant exponent.
+|
+| Register usage:
+|
+|  ap_st_z:
+|	(*)  d0: temp digit storage
+|	(*)  d1: zero count
+|	(*)  d2: digit count
+|	(*)  d3: offset pointer
+|	( )  d4: first word of bcd
+|	(*)  d5: lword counter
+|	( )  a0: pointer to working bcd value
+|	( )  FP_SCR1: working copy of original bcd value
+|	( )  L_SCR1: copy of original exponent word
+|
+|
+| First check the absolute value of the exponent to see if this
+| routine is necessary.  If so, then check the sign of the exponent
+| and do append (+) or strip (-) zeros accordingly.
+| This section handles a positive adjusted exponent.
+|
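+| A rough C sketch of the positive-exponent (append) side (illustrative
+| only; ad hoc names; digit[0] is M16, the most significant of the 17
+| mantissa digits, tbl[i] holds 10^(2^i) as in PTENRN, and the SE
+| fix-up when the exponent crosses zero is omitted):
+|
+|	static void strip_zeros(const unsigned char digit[17], int *exp,
+|				long double *mant, const long double tbl[13])
+|	{
+|		int count = 0, i;
+|
+|		while (count < 17 && digit[count] == 0)
+|			count++;	/* leading zeros of the bcd string */
+|		*exp -= count;		/* pull the exponent toward zero */
+|		for (i = 0; count != 0; count >>= 1, i++)
+|			if (count & 1)
+|				*mant *= tbl[i];	/* mantissa * 10^count */
+|	}
+|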
+ap_st_z:
+	movel	L_SCR1(%a6),%d1	|load expA for range test
+	cmpl	#27,%d1		|test is with 27
+	ble	pwrten		|if abs(expA) <28, skip ap/st zeros
+	btst	#30,(%a0)	|check sign of exp
+	bne	ap_st_n		|if neg, go to neg side
+	clrl	%d1		|zero count reg
+	movel	(%a0),%d4		|load lword 1 to d4
+	bfextu	%d4{#28:#4},%d0	|get M16 in d0
+	bnes	ap_p_fx		|if M16 is non-zero, go fix exp
+	addql	#1,%d1		|inc zero count
+	moveql	#1,%d5		|init lword counter
+	movel	(%a0,%d5.L*4),%d4	|get lword 2 to d4
+	bnes	ap_p_cl		|if lw 2 is non-zero, check its digits
+	addql	#8,%d1		|and inc count by 8
+	addql	#1,%d5		|inc lword counter
+	movel	(%a0,%d5.L*4),%d4	|get lword 3 to d4
+ap_p_cl:
+	clrl	%d3		|init offset reg
+	moveql	#7,%d2		|init digit counter
+ap_p_gd:
+	bfextu	%d4{%d3:#4},%d0	|get digit
+	bnes	ap_p_fx		|if non-zero, go to fix exp
+	addql	#4,%d3		|point to next digit
+	addql	#1,%d1		|inc digit counter
+	dbf	%d2,ap_p_gd	|get next digit
+ap_p_fx:
+	movel	%d1,%d0		|copy counter to d0
+	movel	L_SCR1(%a6),%d1	|get adjusted exp from memory
+	subl	%d0,%d1		|subtract count from exp
+	bges	ap_p_fm		|if still pos, go to pwrten
+	negl	%d1		|now its neg; get abs
+	movel	(%a0),%d4		|load lword 1 to d4
+	orl	#0x40000000,%d4	| and set SE in d4
+	orl	#0x40000000,(%a0)	| and in memory
+|
+| Calculate the mantissa multiplier to compensate for the stripping of
+| zeros from the mantissa.
+|
+ap_p_fm:
+	movel	#PTENRN,%a1	|get address of power-of-ten table
+	clrl	%d3		|init table index
+	fmoves	FONE,%fp1	|init fp1 to 1
+	moveql	#3,%d2		|init d2 to count bits in counter
+ap_p_el:
+	asrl	#1,%d0		|shift lsb into carry
+	bccs	ap_p_en		|if 1, mul fp1 by pwrten factor
+	fmulx	(%a1,%d3),%fp1	|mul by 10**(d3_bit_no)
+ap_p_en:
+	addl	#12,%d3		|inc d3 to next rtable entry
+	tstl	%d0		|check if d0 is zero
+	bnes	ap_p_el		|if not, get next bit
+	fmulx	%fp1,%fp0		|mul mantissa by 10**(no_bits_shifted)
+	bra	pwrten		|go calc pwrten
+|
+| This section handles a negative adjusted exponent.
+|
+ap_st_n:
+	clrl	%d1		|clr counter
+	moveql	#2,%d5		|set up d5 to point to lword 3
+	movel	(%a0,%d5.L*4),%d4	|get lword 3
+	bnes	ap_n_cl		|if not zero, check digits
+	subl	#1,%d5		|dec d5 to point to lword 2
+	addql	#8,%d1		|inc counter by 8
+	movel	(%a0,%d5.L*4),%d4	|get lword 2
+ap_n_cl:
+	movel	#28,%d3		|point to last digit
+	moveql	#7,%d2		|init digit counter
+ap_n_gd:
+	bfextu	%d4{%d3:#4},%d0	|get digit
+	bnes	ap_n_fx		|if non-zero, go to exp fix
+	subql	#4,%d3		|point to previous digit
+	addql	#1,%d1		|inc digit counter
+	dbf	%d2,ap_n_gd	|get next digit
+ap_n_fx:
+	movel	%d1,%d0		|copy counter to d0
+	movel	L_SCR1(%a6),%d1	|get adjusted exp from memory
+	subl	%d0,%d1		|subtract count from exp
+	bgts	ap_n_fm		|if still pos, go fix mantissa
+	negl	%d1		|take abs of exp and clr SE
+	movel	(%a0),%d4		|load lword 1 to d4
+	andl	#0xbfffffff,%d4	| and clr SE in d4
+	andl	#0xbfffffff,(%a0)	| and in memory
+|
+| Calculate the mantissa multiplier to compensate for the appending of
+| zeros to the mantissa.
+|
+ap_n_fm:
+	movel	#PTENRN,%a1	|get address of power-of-ten table
+	clrl	%d3		|init table index
+	fmoves	FONE,%fp1	|init fp1 to 1
+	moveql	#3,%d2		|init d2 to count bits in counter
+ap_n_el:
+	asrl	#1,%d0		|shift lsb into carry
+	bccs	ap_n_en		|if 1, mul fp1 by pwrten factor
+	fmulx	(%a1,%d3),%fp1	|mul by 10**(d3_bit_no)
+ap_n_en:
+	addl	#12,%d3		|inc d3 to next rtable entry
+	tstl	%d0		|check if d0 is zero
+	bnes	ap_n_el		|if not, get next bit
+	fdivx	%fp1,%fp0		|div mantissa by 10**(no_bits_shifted)
+|
+|
+| Calculate power-of-ten factor from adjusted and shifted exponent.
+|
+| Register usage:
+|
+|  pwrten:
+|	(*)  d0: temp
+|	( )  d1: exponent
+|	(*)  d2: {FPCR[6:5],SM,SE} as index in RTABLE; temp
+|	(*)  d3: FPCR work copy
+|	( )  d4: first word of bcd
+|	(*)  a1: RTABLE pointer
+|  calc_p:
+|	(*)  d0: temp
+|	( )  d1: exponent
+|	(*)  d3: PWRTxx table index
+|	( )  a0: pointer to working copy of bcd
+|	(*)  a1: PWRTxx pointer
+|	(*) fp1: power-of-ten accumulator
+|
+| Pwrten calculates the exponent factor in the selected rounding mode
+| according to the following table:
+|
+|	Sign of Mant  Sign of Exp  Rounding Mode  PWRTEN Rounding Mode
+|
+|	ANY	  ANY	RN	RN
+|
+|	 +	   +	RP	RP
+|	 -	   +	RP	RM
+|	 +	   -	RP	RM
+|	 -	   -	RP	RP
+|
+|	 +	   +	RM	RM
+|	 -	   +	RM	RP
+|	 +	   -	RM	RP
+|	 -	   -	RM	RM
+|
+|	 +	   +	RZ	RM
+|	 -	   +	RZ	RM
+|	 +	   -	RZ	RP
+|	 -	   -	RZ	RP
+|
+|
+pwrten:
+	movel	USER_FPCR(%a6),%d3 |get user's FPCR
+	bfextu	%d3{#26:#2},%d2	|isolate rounding mode bits
+	movel	(%a0),%d4		|reload 1st bcd word to d4
+	asll	#2,%d2		|format d2 to be
+	bfextu	%d4{#0:#2},%d0	| {FPCR[6],FPCR[5],SM,SE}
+	addl	%d0,%d2		|in d2 as index into RTABLE
+	leal	RTABLE,%a1	|load rtable base
+	moveb	(%a1,%d2),%d0	|load new rounding bits from table
+	clrl	%d3			|clear d3 to force no exc and extended
+	bfins	%d0,%d3{#26:#2}	|stuff new rounding bits in FPCR
+	fmovel	%d3,%FPCR		|write new FPCR
+	asrl	#1,%d0		|write correct PTENxx table
+	bccs	not_rp		|to a1
+	leal	PTENRP,%a1	|it is RP
+	bras	calc_p		|go to init section
+not_rp:
+	asrl	#1,%d0		|keep checking
+	bccs	not_rm
+	leal	PTENRM,%a1	|it is RM
+	bras	calc_p		|go to init section
+not_rm:
+	leal	PTENRN,%a1	|it is RN
+calc_p:
+	movel	%d1,%d0		|copy exp to d0;use d0
+	bpls	no_neg		|if exp is negative,
+	negl	%d0		|invert it
+	orl	#0x40000000,(%a0)	|and set SE bit
+no_neg:
+	clrl	%d3		|table index
+	fmoves	FONE,%fp1	|init fp1 to 1
+e_loop:
+	asrl	#1,%d0		|shift next bit into carry
+	bccs	e_next		|if zero, skip the mul
+	fmulx	(%a1,%d3),%fp1	|mul by 10**(d3_bit_no)
+e_next:
+	addl	#12,%d3		|inc d3 to next rtable entry
+	tstl	%d0		|check if d0 is zero
+	bnes	e_loop		|not zero, continue shifting
+|
+|
+|  Check the sign of the adjusted exp and make the value in fp0 the
+|  same sign. If the exp was pos then multiply fp1*fp0;
+|  else divide fp0/fp1.
+|
+| Register Usage:
+|  norm:
+|	( )  a0: pointer to working bcd value
+|	(*) fp0: mantissa accumulator
+|	( ) fp1: scaling factor - 10**(abs(exp))
+|
+norm:
+	btst	#30,(%a0)	|test the sign of the exponent
+	beqs	mul		|if clear, go to multiply
+div:
+	fdivx	%fp1,%fp0		|exp is negative, so divide mant by exp
+	bras	end_dec
+mul:
+	fmulx	%fp1,%fp0		|exp is positive, so multiply by exp
+|
+|
+| Clean up and return with result in fp0.
+|
+| If the final mul/div in decbin incurred an inex exception,
+| it will be inex2, but will be reported as inex1 by get_op.
+|
+end_dec:
+	fmovel	%FPSR,%d0		|get status register
+	bclrl	#inex2_bit+8,%d0	|test for inex2 and clear it
+	fmovel	%d0,%FPSR		|return status reg w/o inex2
+	beqs	no_exc		|skip this if no exc
+	orl	#inx1a_mask,USER_FPSR(%a6) |set inex1/ainex
+no_exc:
+	moveml	(%a7)+,%d2-%d5
+	rts
+	|end
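
The bit-walking loops above (ap_p_el, ap_n_el and e_loop) build the scale
factor 10**(exponent) by shifting the exponent one bit at a time and
multiplying an accumulator by the matching PTENxx entry (10^1, 10^2, 10^4,
..., one 12-byte extended-precision value per bit, hence the "addl #12,%d3"
step); the sign test at norm: then decides whether the mantissa is
multiplied or divided by that factor.  The same binary-exponentiation idea
as a rough C sketch, using double instead of extended precision (the
function name and table size are illustrative, not part of these sources):

	#include <stdio.h>

	/* pten[k] = 10^(2^k); the PTENxx tables play the same role, one
	 * 12-byte extended-precision entry per exponent bit. */
	static const double pten[] = {
		1e1, 1e2, 1e4, 1e8, 1e16, 1e32, 1e64, 1e128, 1e256
	};

	static double pow10_by_bits(unsigned int n)	/* n < 512 here */
	{
		double acc = 1.0;	/* "fmoves FONE,%fp1" */
		int k = 0;

		while (n) {		/* "tstl %d0 / bnes ..." */
			if (n & 1)	/* lsb shifted out by "asrl #1,%d0" */
				acc *= pten[k];
			n >>= 1;
			k++;		/* "addl #12,%d3": next table entry */
		}
		return acc;
	}

	int main(void)
	{
		printf("10^13 = %g\n", pow10_by_bits(13));	/* prints 1e+13 */
		return 0;
	}
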
diff --git a/arch/m68k/fpsp040/do_func.S b/arch/m68k/fpsp040/do_func.S
new file mode 100644
index 0000000..81f6a98
--- /dev/null
+++ b/arch/m68k/fpsp040/do_func.S
@@ -0,0 +1,559 @@
+|
+|	do_func.sa 3.4 2/18/91
+|
+| Do_func performs the unimplemented operation.  The operation
+| to be performed is determined from the lower 7 bits of the
+| extension word (except in the case of fmovecr and fsincos).
+| The opcode and tag bits form an index into a jump table in
+| tbldo.sa.  Cases of zero, infinity and NaN are handled in
+| do_func by forcing the default result.  Normalized and
+| denormalized (there are no unnormalized numbers at this
+| point) are passed on to the emulation code.
+|
+| CMDREG1B and STAG are extracted from the fsave frame
+| and combined to form the table index.  The function called
+| will start with a0 pointing to the ETEMP operand.  Dyadic
+| functions can find FPTEMP at -12(a0).
+|
+| Called functions return their result in fp0.  Sincos returns
+| sin(x) in fp0 and cos(x) in fp1.
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+DO_FUNC:	|idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	|xref	t_dz2
+	|xref	t_operr
+	|xref	t_inx2
+	|xref	t_resdnrm
+	|xref	dst_nan
+	|xref	src_nan
+	|xref	nrm_set
+	|xref	sto_cos
+
+	|xref	tblpre
+	|xref	slognp1,slogn,slog10,slog2
+	|xref	slognd,slog10d,slog2d
+	|xref	smod,srem
+	|xref	sscale
+	|xref	smovcr
+
+PONE:	.long	0x3fff0000,0x80000000,0x00000000	|+1
+MONE:	.long	0xbfff0000,0x80000000,0x00000000	|-1
+PZERO:	.long	0x00000000,0x00000000,0x00000000	|+0
+MZERO:	.long	0x80000000,0x00000000,0x00000000	|-0
+PINF:	.long	0x7fff0000,0x00000000,0x00000000	|+inf
+MINF:	.long	0xffff0000,0x00000000,0x00000000	|-inf
+QNAN:	.long	0x7fff0000,0xffffffff,0xffffffff	|non-signaling nan
+PPIBY2:  .long	0x3FFF0000,0xC90FDAA2,0x2168C235	|+PI/2
+MPIBY2:  .long	0xbFFF0000,0xC90FDAA2,0x2168C235	|-PI/2
+
+	.global	do_func
+do_func:
+	clrb	CU_ONLY(%a6)
+|
+| Check for fmovecr.  It does not follow the format of fp gen
+| unimplemented instructions.  The test is on the upper 6 bits;
+| if they are $17, the inst is fmovecr.  Call entry smovcr
+| directly.
+|
+	bfextu	CMDREG1B(%a6){#0:#6},%d0 |get opclass and src fields
+	cmpil	#0x17,%d0		|if op class and size fields are $17,
+|				;it is FMOVECR; if not, continue
+	bnes	not_fmovecr
+	jmp	smovcr		|fmovecr; jmp directly to emulation
+
+not_fmovecr:
+	movew	CMDREG1B(%a6),%d0
+	andl	#0x7F,%d0
+	cmpil	#0x38,%d0		|if the extension is >= $38,
+	bge	serror		|it is illegal
+	bfextu	STAG(%a6){#0:#3},%d1
+	lsll	#3,%d0		|make room for STAG
+	addl	%d1,%d0		|combine for final index into table
+	leal	tblpre,%a1	|start of monster jump table
+	movel	(%a1,%d0.w*4),%a1	|real target address
+	leal	ETEMP(%a6),%a0	|a0 is pointer to src op
+	movel	USER_FPCR(%a6),%d1
+	andl	#0xFF,%d1		| discard all but rounding mode/prec
+	fmovel	#0,%fpcr
+	jmp	(%a1)
+|
+|	ERROR
+|
+	.global	serror
+serror:
+	st	STORE_FLG(%a6)
+	rts
+|
+| These routines load forced values into fp0.  They are called
+| by index into tbldo.
+|
+| Load a signed zero to fp0 and set inex2/ainex
+|
+	.global	snzrinx
+snzrinx:
+	btstb	#sign_bit,LOCAL_EX(%a0)	|get sign of source operand
+	bnes	ld_mzinx	|if negative, branch
+	bsr	ld_pzero	|bsr so we can return and set inx
+	bra	t_inx2		|now, set the inx for the next inst
+ld_mzinx:
+	bsr	ld_mzero	|if neg, load neg zero, return here
+	bra	t_inx2		|now, set the inx for the next inst
+|
+| Load a signed zero to fp0; do not set inex2/ainex
+|
+	.global	szero
+szero:
+	btstb	#sign_bit,LOCAL_EX(%a0) |get sign of source operand
+	bne	ld_mzero	|if neg, load neg zero
+	bra	ld_pzero	|load positive zero
+|
+| Load a signed infinity to fp0; do not set inex2/ainex
+|
+	.global	sinf
+sinf:
+	btstb	#sign_bit,LOCAL_EX(%a0)	|get sign of source operand
+	bne	ld_minf			|if negative branch
+	bra	ld_pinf
+|
+| Load a signed one to fp0; do not set inex2/ainex
+|
+	.global	sone
+sone:
+	btstb	#sign_bit,LOCAL_EX(%a0)	|check sign of source
+	bne	ld_mone
+	bra	ld_pone
+|
+| Load a signed pi/2 to fp0; do not set inex2/ainex
+|
+	.global	spi_2
+spi_2:
+	btstb	#sign_bit,LOCAL_EX(%a0)	|check sign of source
+	bne	ld_mpi2
+	bra	ld_ppi2
+|
+| Load either a +0 or +inf for plus/minus operand
+|
+	.global	szr_inf
+szr_inf:
+	btstb	#sign_bit,LOCAL_EX(%a0)	|check sign of source
+	bne	ld_pzero
+	bra	ld_pinf
+|
+| Result is either an operr or +inf for plus/minus operand
+| [Used by slogn, slognp1, slog10, and slog2]
+|
+	.global	sopr_inf
+sopr_inf:
+	btstb	#sign_bit,LOCAL_EX(%a0)	|check sign of source
+	bne	t_operr
+	bra	ld_pinf
+|
+|	FLOGNP1
+|
+	.global	sslognp1
+sslognp1:
+	fmovemx (%a0),%fp0-%fp0
+	fcmpb	#-1,%fp0
+	fbgt	slognp1
+	fbeq	t_dz2		|if = -1, divide by zero exception
+	fmovel	#0,%FPSR		|clr N flag
+	bra	t_operr		|take care of operands < -1
+|
+|	FETOXM1
+|
+	.global	setoxm1i
+setoxm1i:
+	btstb	#sign_bit,LOCAL_EX(%a0)	|check sign of source
+	bne	ld_mone
+	bra	ld_pinf
+|
+|	FLOGN
+|
+| Test for 1.0 as an input argument, returning +zero.  Also check
+| the sign and return operr if negative.
+|
+	.global	sslogn
+sslogn:
+	btstb	#sign_bit,LOCAL_EX(%a0)
+	bne	t_operr		|take care of operands < 0
+	cmpiw	#0x3fff,LOCAL_EX(%a0) |test for 1.0 input
+	bne	slogn
+	cmpil	#0x80000000,LOCAL_HI(%a0)
+	bne	slogn
+	tstl	LOCAL_LO(%a0)
+	bne	slogn
+	fmovex	PZERO,%fp0
+	rts
+
+	.global	sslognd
+sslognd:
+	btstb	#sign_bit,LOCAL_EX(%a0)
+	beq	slognd
+	bra	t_operr		|take care of operands < 0
+
+|
+|	FLOG10
+|
+	.global	sslog10
+sslog10:
+	btstb	#sign_bit,LOCAL_EX(%a0)
+	bne	t_operr		|take care of operands < 0
+	cmpiw	#0x3fff,LOCAL_EX(%a0) |test for 1.0 input
+	bne	slog10
+	cmpil	#0x80000000,LOCAL_HI(%a0)
+	bne	slog10
+	tstl	LOCAL_LO(%a0)
+	bne	slog10
+	fmovex	PZERO,%fp0
+	rts
+
+	.global	sslog10d
+sslog10d:
+	btstb	#sign_bit,LOCAL_EX(%a0)
+	beq	slog10d
+	bra	t_operr		|take care of operands < 0
+
+|
+|	FLOG2
+|
+	.global	sslog2
+sslog2:
+	btstb	#sign_bit,LOCAL_EX(%a0)
+	bne	t_operr		|take care of operands < 0
+	cmpiw	#0x3fff,LOCAL_EX(%a0) |test for 1.0 input
+	bne	slog2
+	cmpil	#0x80000000,LOCAL_HI(%a0)
+	bne	slog2
+	tstl	LOCAL_LO(%a0)
+	bne	slog2
+	fmovex	PZERO,%fp0
+	rts
+
+	.global	sslog2d
+sslog2d:
+	btstb	#sign_bit,LOCAL_EX(%a0)
+	beq	slog2d
+	bra	t_operr		|take care of operands < 0
+
+|
+|	FMOD
+|
+pmodt:
+|				;$21 fmod
+|				;dtag,stag
+	.long	smod		|  00,00  norm,norm = normal
+	.long	smod_oper	|  00,01  norm,zero = nan with operr
+	.long	smod_fpn	|  00,10  norm,inf  = fpn
+	.long	smod_snan	|  00,11  norm,nan  = nan
+	.long	smod_zro	|  01,00  zero,norm = +-zero
+	.long	smod_oper	|  01,01  zero,zero = nan with operr
+	.long	smod_zro	|  01,10  zero,inf  = +-zero
+	.long	smod_snan	|  01,11  zero,nan  = nan
+	.long	smod_oper	|  10,00  inf,norm  = nan with operr
+	.long	smod_oper	|  10,01  inf,zero  = nan with operr
+	.long	smod_oper	|  10,10  inf,inf   = nan with operr
+	.long	smod_snan	|  10,11  inf,nan   = nan
+	.long	smod_dnan	|  11,00  nan,norm  = nan
+	.long	smod_dnan	|  11,01  nan,zero  = nan
+	.long	smod_dnan	|  11,10  nan,inf   = nan
+	.long	smod_dnan	|  11,11  nan,nan   = nan
+
+	.global	pmod
+pmod:
+	clrb	FPSR_QBYTE(%a6) | clear quotient field
+	bfextu	STAG(%a6){#0:#3},%d0 |stag = d0
+	bfextu	DTAG(%a6){#0:#3},%d1 |dtag = d1
+
+|
+| Alias extended denorms to norms for the jump table.
+|
+	bclrl	#2,%d0
+	bclrl	#2,%d1
+
+	lslb	#2,%d1
+	orb	%d0,%d1		|d1{3:2} = dtag, d1{1:0} = stag
+|				;Tag values:
+|				;00 = norm or denorm
+|				;01 = zero
+|				;10 = inf
+|				;11 = nan
+	lea	pmodt,%a1
+	movel	(%a1,%d1.w*4),%a1
+	jmp	(%a1)
+
+smod_snan:
+	bra	src_nan
+smod_dnan:
+	bra	dst_nan
+smod_oper:
+	bra	t_operr
+smod_zro:
+	moveb	ETEMP(%a6),%d1	|get sign of src op
+	moveb	FPTEMP(%a6),%d0	|get sign of dst op
+	eorb	%d0,%d1		|get exor of sign bits
+	btstl	#7,%d1		|test for sign
+	beqs	smod_zsn	|if clr, do not set sign bit
+	bsetb	#q_sn_bit,FPSR_QBYTE(%a6) |set q-byte sign bit
+smod_zsn:
+	btstl	#7,%d0		|test if + or -
+	beq	ld_pzero	|if pos then load +0
+	bra	ld_mzero	|else neg load -0
+
+smod_fpn:
+	moveb	ETEMP(%a6),%d1	|get sign of src op
+	moveb	FPTEMP(%a6),%d0	|get sign of dst op
+	eorb	%d0,%d1		|get exor of sign bits
+	btstl	#7,%d1		|test for sign
+	beqs	smod_fsn	|if clr, do not set sign bit
+	bsetb	#q_sn_bit,FPSR_QBYTE(%a6) |set q-byte sign bit
+smod_fsn:
+	tstb	DTAG(%a6)	|filter out denormal destination case
+	bpls	smod_nrm	|
+	leal	FPTEMP(%a6),%a0	|a0<- addr(FPTEMP)
+	bra	t_resdnrm	|force UNFL(but exact) result
+smod_nrm:
+	fmovel USER_FPCR(%a6),%fpcr |use user's rmode and precision
+	fmovex FPTEMP(%a6),%fp0	|return dest to fp0
+	rts
+
+|
+|	FREM
+|
+premt:
+|				;$25 frem
+|				;dtag,stag
+	.long	srem		|  00,00  norm,norm = normal
+	.long	srem_oper	|  00,01  norm,zero = nan with operr
+	.long	srem_fpn	|  00,10  norm,inf  = fpn
+	.long	srem_snan	|  00,11  norm,nan  = nan
+	.long	srem_zro	|  01,00  zero,norm = +-zero
+	.long	srem_oper	|  01,01  zero,zero = nan with operr
+	.long	srem_zro	|  01,10  zero,inf  = +-zero
+	.long	srem_snan	|  01,11  zero,nan  = nan
+	.long	srem_oper	|  10,00  inf,norm  = nan with operr
+	.long	srem_oper	|  10,01  inf,zero  = nan with operr
+	.long	srem_oper	|  10,10  inf,inf   = nan with operr
+	.long	srem_snan	|  10,11  inf,nan   = nan
+	.long	srem_dnan	|  11,00  nan,norm  = nan
+	.long	srem_dnan	|  11,01  nan,zero  = nan
+	.long	srem_dnan	|  11,10  nan,inf   = nan
+	.long	srem_dnan	|  11,11  nan,nan   = nan
+
+	.global	prem
+prem:
+	clrb	FPSR_QBYTE(%a6)   |clear quotient field
+	bfextu	STAG(%a6){#0:#3},%d0 |stag = d0
+	bfextu	DTAG(%a6){#0:#3},%d1 |dtag = d1
+|
+| Alias extended denorms to norms for the jump table.
+|
+	bclr	#2,%d0
+	bclr	#2,%d1
+
+	lslb	#2,%d1
+	orb	%d0,%d1		|d1{3:2} = dtag, d1{1:0} = stag
+|				;Tag values:
+|				;00 = norm or denorm
+|				;01 = zero
+|				;10 = inf
+|				;11 = nan
+	lea	premt,%a1
+	movel	(%a1,%d1.w*4),%a1
+	jmp	(%a1)
+
+srem_snan:
+	bra	src_nan
+srem_dnan:
+	bra	dst_nan
+srem_oper:
+	bra	t_operr
+srem_zro:
+	moveb	ETEMP(%a6),%d1	|get sign of src op
+	moveb	FPTEMP(%a6),%d0	|get sign of dst op
+	eorb	%d0,%d1		|get exor of sign bits
+	btstl	#7,%d1		|test for sign
+	beqs	srem_zsn	|if clr, do not set sign bit
+	bsetb	#q_sn_bit,FPSR_QBYTE(%a6) |set q-byte sign bit
+srem_zsn:
+	btstl	#7,%d0		|test if + or -
+	beq	ld_pzero	|if pos then load +0
+	bra	ld_mzero	|else neg load -0
+
+srem_fpn:
+	moveb	ETEMP(%a6),%d1	|get sign of src op
+	moveb	FPTEMP(%a6),%d0	|get sign of dst op
+	eorb	%d0,%d1		|get exor of sign bits
+	btstl	#7,%d1		|test for sign
+	beqs	srem_fsn	|if clr, do not set sign bit
+	bsetb	#q_sn_bit,FPSR_QBYTE(%a6) |set q-byte sign bit
+srem_fsn:
+	tstb	DTAG(%a6)	|filter out denormal destination case
+	bpls	srem_nrm	|
+	leal	FPTEMP(%a6),%a0	|a0<- addr(FPTEMP)
+	bra	t_resdnrm	|force UNFL(but exact) result
+srem_nrm:
+	fmovel USER_FPCR(%a6),%fpcr |use user's rmode and precision
+	fmovex FPTEMP(%a6),%fp0	|return dest to fp0
+	rts
+|
+|	FSCALE
+|
+pscalet:
+|				;$26 fscale
+|				;dtag,stag
+	.long	sscale		|  00,00  norm,norm = result
+	.long	sscale		|  00,01  norm,zero = fpn
+	.long	scl_opr		|  00,10  norm,inf  = nan with operr
+	.long	scl_snan	|  00,11  norm,nan  = nan
+	.long	scl_zro		|  01,00  zero,norm = +-zero
+	.long	scl_zro		|  01,01  zero,zero = +-zero
+	.long	scl_opr		|  01,10  zero,inf  = nan with operr
+	.long	scl_snan	|  01,11  zero,nan  = nan
+	.long	scl_inf		|  10,00  inf,norm  = +-inf
+	.long	scl_inf		|  10,01  inf,zero  = +-inf
+	.long	scl_opr		|  10,10  inf,inf   = nan with operr
+	.long	scl_snan	|  10,11  inf,nan   = nan
+	.long	scl_dnan	|  11,00  nan,norm  = nan
+	.long	scl_dnan	|  11,01  nan,zero  = nan
+	.long	scl_dnan	|  11,10  nan,inf   = nan
+	.long	scl_dnan	|  11,11  nan,nan   = nan
+
+	.global	pscale
+pscale:
+	bfextu	STAG(%a6){#0:#3},%d0 |stag in d0
+	bfextu	DTAG(%a6){#0:#3},%d1 |dtag in d1
+	bclrl	#2,%d0		|alias  denorm into norm
+	bclrl	#2,%d1		|alias  denorm into norm
+	lslb	#2,%d1
+	orb	%d0,%d1		|d1{4:2} = dtag, d1{1:0} = stag
+|				;dtag values     stag values:
+|				;000 = norm      00 = norm
+|				;001 = zero	 01 = zero
+|				;010 = inf	 10 = inf
+|				;011 = nan	 11 = nan
+|				;100 = dnrm
+|
+|
+	leal	pscalet,%a1	|load start of jump table
+	movel	(%a1,%d1.w*4),%a1	|load a1 with label depending on tag
+	jmp	(%a1)		|go to the routine
+
+scl_opr:
+	bra	t_operr
+
+scl_dnan:
+	bra	dst_nan
+
+scl_zro:
+	btstb	#sign_bit,FPTEMP_EX(%a6)	|test if + or -
+	beq	ld_pzero		|if pos then load +0
+	bra	ld_mzero		|if neg then load -0
+scl_inf:
+	btstb	#sign_bit,FPTEMP_EX(%a6)	|test if + or -
+	beq	ld_pinf			|if pos then load +inf
+	bra	ld_minf			|else neg load -inf
+scl_snan:
+	bra	src_nan
+|
+|	FSINCOS
+|
+	.global	ssincosz
+ssincosz:
+	btstb	#sign_bit,ETEMP(%a6)	|get sign
+	beqs	sincosp
+	fmovex	MZERO,%fp0
+	bras	sincoscom
+sincosp:
+	fmovex PZERO,%fp0
+sincoscom:
+	fmovemx PONE,%fp1-%fp1	|do not allow FPSR to be affected
+	bra	sto_cos		|store cosine result
+
+	.global	ssincosi
+ssincosi:
+	fmovex QNAN,%fp1	|load NAN
+	bsr	sto_cos		|store cosine result
+	fmovex QNAN,%fp0	|load NAN
+	bra	t_operr
+
+	.global	ssincosnan
+ssincosnan:
+	movel	ETEMP_EX(%a6),FP_SCR1(%a6)
+	movel	ETEMP_HI(%a6),FP_SCR1+4(%a6)
+	movel	ETEMP_LO(%a6),FP_SCR1+8(%a6)
+	bsetb	#signan_bit,FP_SCR1+4(%a6)
+	fmovemx FP_SCR1(%a6),%fp1-%fp1
+	bsr	sto_cos
+	bra	src_nan
+|
+| This code forces default values for the zero, inf, and nan cases
+| in the transcendentals code.  The CC bits must be set in the
+| stacked FPSR to be correctly reported.
+|
+|**Returns +PI/2
+	.global	ld_ppi2
+ld_ppi2:
+	fmovex PPIBY2,%fp0		|load +pi/2
+	bra	t_inx2			|set inex2 exc
+
+|**Returns -PI/2
+	.global	ld_mpi2
+ld_mpi2:
+	fmovex MPIBY2,%fp0		|load -pi/2
+	orl	#neg_mask,USER_FPSR(%a6)	|set N bit
+	bra	t_inx2			|set inex2 exc
+
+|**Returns +inf
+	.global	ld_pinf
+ld_pinf:
+	fmovex PINF,%fp0		|load +inf
+	orl	#inf_mask,USER_FPSR(%a6)	|set I bit
+	rts
+
+|**Returns -inf
+	.global	ld_minf
+ld_minf:
+	fmovex MINF,%fp0		|load -inf
+	orl	#neg_mask+inf_mask,USER_FPSR(%a6)	|set N and I bits
+	rts
+
+|**Returns +1
+	.global	ld_pone
+ld_pone:
+	fmovex PONE,%fp0		|load +1
+	rts
+
+|**Returns -1
+	.global	ld_mone
+ld_mone:
+	fmovex MONE,%fp0		|load -1
+	orl	#neg_mask,USER_FPSR(%a6)	|set N bit
+	rts
+
+|**Returns +0
+	.global	ld_pzero
+ld_pzero:
+	fmovex PZERO,%fp0		|load +0
+	orl	#z_mask,USER_FPSR(%a6)	|set Z bit
+	rts
+
+|**Returns -0
+	.global	ld_mzero
+ld_mzero:
+	fmovex MZERO,%fp0		|load -0
+	orl	#neg_mask+z_mask,USER_FPSR(%a6)	|set N and Z bits
+	rts
+
+	|end
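
pmod, prem and pscale above all dispatch through a 16-entry table indexed
by (dtag << 2) | stag after aliasing denorms to norms with "bclr #2".  A
rough C sketch of that index calculation for the fmod table (the handler
functions are stand-ins for the smod_* labels, not kernel symbols):

	#include <stdio.h>

	enum tag { TAG_NORM = 0, TAG_ZERO = 1, TAG_INF = 2, TAG_NAN = 3, TAG_DNRM = 4 };

	typedef const char *(*handler_t)(void);

	static const char *h_norm(void) { return "smod";      }
	static const char *h_oper(void) { return "smod_oper"; }
	static const char *h_fpn(void)  { return "smod_fpn";  }
	static const char *h_snan(void) { return "smod_snan"; }
	static const char *h_zro(void)  { return "smod_zro";  }
	static const char *h_dnan(void) { return "smod_dnan"; }

	static const handler_t pmod_tbl[16] = {	/* same layout as pmodt above */
		h_norm, h_oper, h_fpn,  h_snan,	/* dtag=norm */
		h_zro,  h_oper, h_zro,  h_snan,	/* dtag=zero */
		h_oper, h_oper, h_oper, h_snan,	/* dtag=inf  */
		h_dnan, h_dnan, h_dnan, h_dnan,	/* dtag=nan  */
	};

	static const char *dispatch(unsigned int dtag, unsigned int stag)
	{
		dtag &= ~4u;	/* alias denorm (100) to norm (000), "bclr #2" */
		stag &= ~4u;
		return pmod_tbl[(dtag << 2) | stag]();
	}

	int main(void)
	{
		/* denorm dest, zero src: aliased to norm,zero -> smod_oper */
		printf("%s\n", dispatch(TAG_DNRM, TAG_ZERO));
		return 0;
	}
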
diff --git a/arch/m68k/fpsp040/fpsp.h b/arch/m68k/fpsp040/fpsp.h
new file mode 100644
index 0000000..984a4eb
--- /dev/null
+++ b/arch/m68k/fpsp040/fpsp.h
@@ -0,0 +1,348 @@
+|
+|	fpsp.h 3.3 3.3
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|	fpsp.h --- stack frame offsets during FPSP exception handling
+|
+|	These equates are used to access the exception frame, the fsave
+|	frame and any local variables needed by the FPSP package.
+|
+|	All FPSP handlers begin by executing:
+|
+|		link	a6,#-LOCAL_SIZE
+|		fsave	-(a7)
+|		movem.l	d0-d1/a0-a1,USER_DA(a6)
+|		fmovem.x fp0-fp3,USER_FP0(a6)
+|		fmove.l	fpsr/fpcr/fpiar,USER_FPSR(a6)
+|
+|	After initialization, the stack looks like this:
+|
+|	A7 --->	+-------------------------------+
+|		|				|
+|		|	FPU fsave area		|
+|		|				|
+|		+-------------------------------+
+|		|				|
+|		|	FPSP Local Variables	|
+|		|	     including		|
+|		|	  saved registers	|
+|		|				|
+|		+-------------------------------+
+|	A6 --->	|	Saved A6		|
+|		+-------------------------------+
+|		|				|
+|		|	Exception Frame		|
+|		|				|
+|		|				|
+|
+|	Positive offsets from A6 refer to the exception frame.  Negative
+|	offsets refer to the Local Variable area and the fsave area.
+|	The fsave frame is also accessible from the top via A7.
+|
+|	On exit, the handlers execute:
+|
+|		movem.l	USER_DA(a6),d0-d1/a0-a1
+|		fmovem.x USER_FP0(a6),fp0-fp3
+|		fmove.l	USER_FPSR(a6),fpsr/fpcr/fpiar
+|		frestore (a7)+
+|		unlk	a6
+|
+|	and then either "bra fpsp_done" if the exception was completely
+|	handled	by the package, or "bra real_xxxx" which is an external
+|	label to a routine that will process a real exception of the
+|	type that was generated.  Some handlers may omit the "frestore"
+|	if the FPU state after the exception is idle.
+|
+|	Sometimes the exception handler will transform the fsave area
+|	because it needs to report an exception back to the user.  This
+|	can happen if the package is entered for an unimplemented float
+|	instruction that generates (say) an underflow.  Alternatively,
+|	a second fsave frame can be pushed onto the stack and the
+|	handler	exit code will reload the new frame and discard the old.
+|
+|	The registers d0, d1, a0, a1 and fp0-fp3 are always saved and
+|	restored from the "local variable" area and can be used as
+|	temporaries.  If a routine needs to change any
+|	of these registers, it should modify the saved copy and let
+|	the handler exit code restore the value.
+|
+|----------------------------------------------------------------------
+|
+|	Local Variables on the stack
+|
+	.set	LOCAL_SIZE,192		| bytes needed for local variables
+	.set	LV,-LOCAL_SIZE	| convenient base value
+|
+	.set	USER_DA,LV+0		| save space for D0-D1,A0-A1
+	.set	USER_D0,LV+0		| saved user D0
+	.set	USER_D1,LV+4		| saved user D1
+	.set	USER_A0,LV+8		| saved user A0
+	.set	USER_A1,LV+12		| saved user A1
+	.set	USER_FP0,LV+16		| saved user FP0
+	.set	USER_FP1,LV+28		| saved user FP1
+	.set	USER_FP2,LV+40		| saved user FP2
+	.set	USER_FP3,LV+52		| saved user FP3
+	.set	USER_FPCR,LV+64		| saved user FPCR
+	.set	FPCR_ENABLE,USER_FPCR+2	|	FPCR exception enable
+	.set	FPCR_MODE,USER_FPCR+3	|	FPCR rounding mode control
+	.set	USER_FPSR,LV+68		| saved user FPSR
+	.set	FPSR_CC,USER_FPSR+0	|	FPSR condition code
+	.set	FPSR_QBYTE,USER_FPSR+1	|	FPSR quotient
+	.set	FPSR_EXCEPT,USER_FPSR+2	|	FPSR exception
+	.set	FPSR_AEXCEPT,USER_FPSR+3	|	FPSR accrued exception
+	.set	USER_FPIAR,LV+72		| saved user FPIAR
+	.set	FP_SCR1,LV+76		| room for a temporary float value
+	.set	FP_SCR2,LV+92		| room for a temporary float value
+	.set	L_SCR1,LV+108		| room for a temporary long value
+	.set	L_SCR2,LV+112		| room for a temporary long value
+	.set	STORE_FLG,LV+116
+	.set	BINDEC_FLG,LV+117		| used in bindec
+	.set	DNRM_FLG,LV+118		| used in res_func
+	.set	RES_FLG,LV+119		| used in res_func
+	.set	DY_MO_FLG,LV+120		| dyadic/monadic flag
+	.set	UFLG_TMP,LV+121		| temporary for uflag errata
+	.set	CU_ONLY,LV+122		| cu-only flag
+	.set	VER_TMP,LV+123		| temp holding for version number
+	.set	L_SCR3,LV+124		| room for a temporary long value
+	.set	FP_SCR3,LV+128		| room for a temporary float value
+	.set	FP_SCR4,LV+144		| room for a temporary float value
+	.set	FP_SCR5,LV+160		| room for a temporary float value
+	.set	FP_SCR6,LV+176
+|
+|NEXT		equ	LV+192		;need to increase LOCAL_SIZE
+|
+|--------------------------------------------------------------------------
+|
+|	fsave offsets and bit definitions
+|
+|	Offsets are defined from the end of an fsave because the last 10
+|	words of a busy frame are the same as the unimplemented frame.
+|
+	.set	CU_SAVEPC,LV-92		| micro-pc for CU (1 byte)
+	.set	FPR_DIRTY_BITS,LV-91		| fpr dirty bits
+|
+	.set	WBTEMP,LV-76		| write back temp (12 bytes)
+	.set	WBTEMP_EX,WBTEMP		| wbtemp sign and exponent (2 bytes)
+	.set	WBTEMP_HI,WBTEMP+4	| wbtemp mantissa [63:32] (4 bytes)
+	.set	WBTEMP_LO,WBTEMP+8	| wbtemp mantissa [31:00] (4 bytes)
+|
+	.set	WBTEMP_SGN,WBTEMP+2	| used to store sign
+|
+	.set	FPSR_SHADOW,LV-64		| fpsr shadow reg
+|
+	.set	FPIARCU,LV-60		| Instr. addr. reg. for CU (4 bytes)
+|
+	.set	CMDREG2B,LV-52		| cmd reg for machine 2
+	.set	CMDREG3B,LV-48		| cmd reg for E3 exceptions (2 bytes)
+|
+	.set	NMNEXC,LV-44		| NMNEXC (unsup,snan bits only)
+	.set	nmn_unsup_bit,1	|
+	.set	nmn_snan_bit,0	|
+|
+	.set	NMCEXC,LV-43		| NMNEXC & NMCEXC
+	.set	nmn_operr_bit,7
+	.set	nmn_ovfl_bit,6
+	.set	nmn_unfl_bit,5
+	.set	nmc_unsup_bit,4
+	.set	nmc_snan_bit,3
+	.set	nmc_operr_bit,2
+	.set	nmc_ovfl_bit,1
+	.set	nmc_unfl_bit,0
+|
+	.set	STAG,LV-40		| source tag (1 byte)
+	.set	WBTEMP_GRS,LV-40		| alias wbtemp guard, round, sticky
+	.set	guard_bit,1		| guard bit is bit number 1
+	.set	round_bit,0		| round bit is bit number 0
+	.set	stag_mask,0xE0		| upper 3 bits are source tag type
+	.set	denorm_bit,7		| bit determines if denorm or unnorm
+	.set	etemp15_bit,4		| etemp exponent bit #15
+	.set	wbtemp66_bit,2		| wbtemp mantissa bit #66
+	.set	wbtemp1_bit,1		| wbtemp mantissa bit #1
+	.set	wbtemp0_bit,0		| wbtemp mantissa bit #0
+|
+	.set	STICKY,LV-39		| holds sticky bit
+	.set	sticky_bit,7
+|
+	.set	CMDREG1B,LV-36		| cmd reg for E1 exceptions (2 bytes)
+	.set	kfact_bit,12		| distinguishes static/dynamic k-factor
+|					;on packed move outs.  NOTE: this
+|					;equate only works when CMDREG1B is in
+|					;a register.
+|
+	.set	CMDWORD,LV-35		| command word in cmd1b
+	.set	direction_bit,5		| bit 0 in opclass
+	.set	size_bit2,12		| bit 2 in size field
+|
+	.set	DTAG,LV-32		| dest tag (1 byte)
+	.set	dtag_mask,0xE0		| upper 3 bits are dest type tag
+	.set	fptemp15_bit,4		| fptemp exponent bit #15
+|
+	.set	WB_BYTE,LV-31		| holds WBTE15 bit (1 byte)
+	.set	wbtemp15_bit,4		| wbtemp exponent bit #15
+|
+	.set	E_BYTE,LV-28		| holds E1 and E3 bits (1 byte)
+	.set	E1,2		| which bit is E1 flag
+	.set	E3,1		| which bit is E3 flag
+	.set	SFLAG,0		| which bit is S flag
+|
+	.set	T_BYTE,LV-27		| holds T and U bits (1 byte)
+	.set	XFLAG,7		| which bit is X flag
+	.set	UFLAG,5		| which bit is U flag
+	.set	TFLAG,4		| which bit is T flag
+|
+	.set	FPTEMP,LV-24		| fptemp (12 bytes)
+	.set	FPTEMP_EX,FPTEMP		| fptemp sign and exponent (2 bytes)
+	.set	FPTEMP_HI,FPTEMP+4	| fptemp mantissa [63:32] (4 bytes)
+	.set	FPTEMP_LO,FPTEMP+8	| fptemp mantissa [31:00] (4 bytes)
+|
+	.set	FPTEMP_SGN,FPTEMP+2	| used to store sign
+|
+	.set	ETEMP,LV-12		| etemp (12 bytes)
+	.set	ETEMP_EX,ETEMP		| etemp sign and exponent (2 bytes)
+	.set	ETEMP_HI,ETEMP+4		| etemp mantissa [63:32] (4 bytes)
+	.set	ETEMP_LO,ETEMP+8		| etemp mantissa [31:00] (4 bytes)
+|
+	.set	ETEMP_SGN,ETEMP+2		| used to store sign
+|
+	.set	EXC_SR,4		| exception frame status register
+	.set	EXC_PC,6		| exception frame program counter
+	.set	EXC_VEC,10		| exception frame vector (format+vector#)
+	.set	EXC_EA,12		| exception frame effective address
+|
+|--------------------------------------------------------------------------
+|
+|	FPSR/FPCR bits
+|
+	.set	neg_bit,3	|  negative result
+	.set	z_bit,2	|  zero result
+	.set	inf_bit,1	|  infinity result
+	.set	nan_bit,0	|  not-a-number result
+|
+	.set	q_sn_bit,7	|  sign bit of quotient byte
+|
+	.set	bsun_bit,7	|  branch on unordered
+	.set	snan_bit,6	|  signalling nan
+	.set	operr_bit,5	|  operand error
+	.set	ovfl_bit,4	|  overflow
+	.set	unfl_bit,3	|  underflow
+	.set	dz_bit,2	|  divide by zero
+	.set	inex2_bit,1	|  inexact result 2
+	.set	inex1_bit,0	|  inexact result 1
+|
+	.set	aiop_bit,7	|  accrued illegal operation
+	.set	aovfl_bit,6	|  accrued overflow
+	.set	aunfl_bit,5	|  accrued underflow
+	.set	adz_bit,4	|  accrued divide by zero
+	.set	ainex_bit,3	|  accrued inexact
+|
+|	FPSR individual bit masks
+|
+	.set	neg_mask,0x08000000
+	.set	z_mask,0x04000000
+	.set	inf_mask,0x02000000
+	.set	nan_mask,0x01000000
+|
+	.set	bsun_mask,0x00008000	|
+	.set	snan_mask,0x00004000
+	.set	operr_mask,0x00002000
+	.set	ovfl_mask,0x00001000
+	.set	unfl_mask,0x00000800
+	.set	dz_mask,0x00000400
+	.set	inex2_mask,0x00000200
+	.set	inex1_mask,0x00000100
+|
+	.set	aiop_mask,0x00000080	|  accrued illegal operation
+	.set	aovfl_mask,0x00000040	|  accrued overflow
+	.set	aunfl_mask,0x00000020	|  accrued underflow
+	.set	adz_mask,0x00000010	|  accrued divide by zero
+	.set	ainex_mask,0x00000008	|  accrued inexact
+|
+|	FPSR combinations used in the FPSP
+|
+	.set	dzinf_mask,inf_mask+dz_mask+adz_mask
+	.set	opnan_mask,nan_mask+operr_mask+aiop_mask
+	.set	nzi_mask,0x01ffffff	|  clears N, Z, and I
+	.set	unfinx_mask,unfl_mask+inex2_mask+aunfl_mask+ainex_mask
+	.set	unf2inx_mask,unfl_mask+inex2_mask+ainex_mask
+	.set	ovfinx_mask,ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
+	.set	inx1a_mask,inex1_mask+ainex_mask
+	.set	inx2a_mask,inex2_mask+ainex_mask
+	.set	snaniop_mask,nan_mask+snan_mask+aiop_mask
+	.set	naniop_mask,nan_mask+aiop_mask
+	.set	neginf_mask,neg_mask+inf_mask
+	.set	infaiop_mask,inf_mask+aiop_mask
+	.set	negz_mask,neg_mask+z_mask
+	.set	opaop_mask,operr_mask+aiop_mask
+	.set	unfl_inx_mask,unfl_mask+aunfl_mask+ainex_mask
+	.set	ovfl_inx_mask,ovfl_mask+aovfl_mask+ainex_mask
+|
+|--------------------------------------------------------------------------
+|
+|	FPCR rounding modes
+|
+	.set	x_mode,0x00	|  round to extended
+	.set	s_mode,0x40	|  round to single
+	.set	d_mode,0x80	|  round to double
+|
+	.set	rn_mode,0x00	|  round nearest
+	.set	rz_mode,0x10	|  round to zero
+	.set	rm_mode,0x20	|  round to minus infinity
+	.set	rp_mode,0x30	|  round to plus infinity
+|
+|--------------------------------------------------------------------------
+|
+|	Miscellaneous equates
+|
+	.set	signan_bit,6	|  signalling nan bit in mantissa
+	.set	sign_bit,7
+|
+	.set	rnd_stky_bit,29	|  round/sticky bit of mantissa
+|				this can only be used if in a data register
+	.set	sx_mask,0x01800000 |  set s and x bits in word $48
+|
+	.set	LOCAL_EX,0
+	.set	LOCAL_SGN,2
+	.set	LOCAL_HI,4
+	.set	LOCAL_LO,8
+	.set	LOCAL_GRS,12	|  valid ONLY for FP_SCR1, FP_SCR2
+|
+|
+	.set	norm_tag,0x00	|  tag bits in {7:5} position
+	.set	zero_tag,0x20
+	.set	inf_tag,0x40
+	.set	nan_tag,0x60
+	.set	dnrm_tag,0x80
+|
+|	fsave sizes and formats
+|
+	.set	VER_4,0x40		|  fpsp compatible version numbers
+|					are in the $40s {$40-$4f}
+	.set	VER_40,0x40		|  original version number
+	.set	VER_41,0x41		|  revision version number
+|
+	.set	BUSY_SIZE,100		|  size of busy frame
+	.set	BUSY_FRAME,LV-BUSY_SIZE	|  start of busy frame
+|
+	.set	UNIMP_40_SIZE,44		|  size of orig unimp frame
+	.set	UNIMP_41_SIZE,52		|  size of rev unimp frame
+|
+	.set	IDLE_SIZE,4		|  size of idle frame
+	.set	IDLE_FRAME,LV-IDLE_SIZE	|  start of idle frame
+|
+|	exception vectors
+|
+	.set	TRACE_VEC,0x2024		|  trace trap
+	.set	FLINE_VEC,0x002C		|  real F-line
+	.set	UNIMP_VEC,0x202C		|  unimplemented
+	.set	INEX_VEC,0x00C4
+|
+	.set	dbl_thresh,0x3C01
+	.set	sgl_thresh,0x3F81
+|
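
The LV+offset equates above describe a 192-byte scratch area sitting below
A6.  Purely as an illustration of that layout (this struct is not part of
the sources; it assumes a 32-bit unsigned int and the natural packing shown
by the offsets), the same map in C with a couple of compile-time checks:

	#include <stddef.h>

	struct fpsp_locals {
		unsigned int  user_da[4];	/* LV+0   saved D0-D1/A0-A1   */
		unsigned char user_fp[4][12];	/* LV+16  saved FP0-FP3       */
		unsigned int  user_fpcr;	/* LV+64                      */
		unsigned int  user_fpsr;	/* LV+68                      */
		unsigned int  user_fpiar;	/* LV+72                      */
		unsigned char fp_scr1[16];	/* LV+76  temporary float     */
		unsigned char fp_scr2[16];	/* LV+92  temporary float     */
		unsigned int  l_scr1;		/* LV+108 temporary long      */
		unsigned int  l_scr2;		/* LV+112 temporary long      */
		unsigned char store_flg;	/* LV+116                     */
		unsigned char bindec_flg;	/* LV+117 used in bindec      */
		unsigned char dnrm_flg;		/* LV+118 used in res_func    */
		unsigned char res_flg;		/* LV+119 used in res_func    */
		unsigned char dy_mo_flg;	/* LV+120 dyadic/monadic flag */
		unsigned char uflg_tmp;		/* LV+121                     */
		unsigned char cu_only;		/* LV+122                     */
		unsigned char ver_tmp;		/* LV+123                     */
		unsigned int  l_scr3;		/* LV+124 temporary long      */
		unsigned char fp_scr3[16];	/* LV+128                     */
		unsigned char fp_scr4[16];	/* LV+144                     */
		unsigned char fp_scr5[16];	/* LV+160                     */
		unsigned char fp_scr6[16];	/* LV+176                     */
	};					/* sizeof == LOCAL_SIZE == 192 */

	/* Negative array size if the layout drifts from the equates. */
	typedef char check_size[sizeof(struct fpsp_locals) == 192 ? 1 : -1];
	typedef char check_fpsr[offsetof(struct fpsp_locals, user_fpsr) == 68 ? 1 : -1];
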
diff --git a/arch/m68k/fpsp040/gen_except.S b/arch/m68k/fpsp040/gen_except.S
new file mode 100644
index 0000000..401d06f
--- /dev/null
+++ b/arch/m68k/fpsp040/gen_except.S
@@ -0,0 +1,468 @@
+|
+|	gen_except.sa 3.7 1/16/92
+|
+|	gen_except --- FPSP routine to detect reportable exceptions
+|
+|	This routine compares the exception enable byte of the
+|	user_fpcr on the stack with the exception status byte
+|	of the user_fpsr.
+|
+|	Any routine which may report an exception must load
+|	the stack frame in memory with the exceptional operand(s).
+|
+|	Priority for exceptions is:
+|
+|	Highest:	bsun
+|			snan
+|			operr
+|			ovfl
+|			unfl
+|			dz
+|			inex2
+|	Lowest:		inex1
+|
+|	Note: The IEEE standard specifies that inex2 is to be
+|	reported if ovfl occurs and the ovfl enable bit is not
+|	set but the inex2 enable bit is.
+|
+|
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+GEN_EXCEPT:    |idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section 8
+
+#include "fpsp.h"
+
+	|xref	real_trace
+	|xref	fpsp_done
+	|xref	fpsp_fmt_error
+
+exc_tbl:
+	.long	bsun_exc
+	.long	commonE1
+	.long	commonE1
+	.long	ovfl_unfl
+	.long	ovfl_unfl
+	.long	commonE1
+	.long	commonE3
+	.long	commonE3
+	.long	no_match
+
+	.global	gen_except
+gen_except:
+	cmpib	#IDLE_SIZE-4,1(%a7)	|test for idle frame
+	beq	do_check		|go handle idle frame
+	cmpib	#UNIMP_40_SIZE-4,1(%a7)	|test for orig unimp frame
+	beqs	unimp_x			|go handle unimp frame
+	cmpib	#UNIMP_41_SIZE-4,1(%a7)	|test for rev unimp frame
+	beqs	unimp_x			|go handle unimp frame
+	cmpib	#BUSY_SIZE-4,1(%a7)	|if size <> $60, fmt error
+	bnel	fpsp_fmt_error
+	leal	BUSY_SIZE+LOCAL_SIZE(%a7),%a1 |init a1 so fpsp.h
+|					;equates will work
+| Fix up the new busy frame with entries from the unimp frame
+|
+	movel	ETEMP_EX(%a6),ETEMP_EX(%a1) |copy etemp from unimp
+	movel	ETEMP_HI(%a6),ETEMP_HI(%a1) |frame to busy frame
+	movel	ETEMP_LO(%a6),ETEMP_LO(%a1)
+	movel	CMDREG1B(%a6),CMDREG1B(%a1) |set inst in frame to unimp
+	movel	CMDREG1B(%a6),%d0		|fix cmd1b to make it
+	andl	#0x03c30000,%d0		|work for cmd3b
+	bfextu	CMDREG1B(%a6){#13:#1},%d1	|extract bit 2
+	lsll	#5,%d1
+	swap	%d1
+	orl	%d1,%d0			|put it in the right place
+	bfextu	CMDREG1B(%a6){#10:#3},%d1	|extract bit 3,4,5
+	lsll	#2,%d1
+	swap	%d1
+	orl	%d1,%d0			|put them in the right place
+	movel	%d0,CMDREG3B(%a1)		|in the busy frame
+|
+| Or in the FPSR from the emulation with the USER_FPSR on the stack.
+|
+	fmovel	%FPSR,%d0
+	orl	%d0,USER_FPSR(%a6)
+	movel	USER_FPSR(%a6),FPSR_SHADOW(%a1) |set exc bits
+	orl	#sx_mask,E_BYTE(%a1)
+	bra	do_clean
+
+|
+| Frame is an unimp frame possibly resulting from an fmove <ea>,fp0
+| that caused an exception
+|
+| a1 is modified to point into the new frame allowing fpsp equates
+| to be valid.
+|
+unimp_x:
+	cmpib	#UNIMP_40_SIZE-4,1(%a7)	|test for orig unimp frame
+	bnes	test_rev
+	leal	UNIMP_40_SIZE+LOCAL_SIZE(%a7),%a1
+	bras	unimp_con
+test_rev:
+	cmpib	#UNIMP_41_SIZE-4,1(%a7)	|test for rev unimp frame
+	bnel	fpsp_fmt_error		|if not $28 or $30
+	leal	UNIMP_41_SIZE+LOCAL_SIZE(%a7),%a1
+
+unimp_con:
+|
+| Fix up the new unimp frame with entries from the old unimp frame
+|
+	movel	CMDREG1B(%a6),CMDREG1B(%a1) |set inst in frame to unimp
+|
+| Or in the FPSR from the emulation with the USER_FPSR on the stack.
+|
+	fmovel	%FPSR,%d0
+	orl	%d0,USER_FPSR(%a6)
+	bra	do_clean
+
+|
+| Frame is idle, so check for exceptions reported through
+| USER_FPSR and set the unimp frame accordingly.
+| A7 must be incremented past the idle fsave frame so that it
+| points to the unimp exception frame.
+|
+
+do_check:
+	addl	#4,%a7			|point A7 back to unimp frame
+|
+| Or in the FPSR from the emulation with the USER_FPSR on the stack.
+|
+	fmovel	%FPSR,%d0
+	orl	%d0,USER_FPSR(%a6)
+|
+| On a busy frame, we must clear the nmnexc bits.
+|
+	cmpib	#BUSY_SIZE-4,1(%a7)	|check frame type
+	bnes	check_fr		|if busy, clr nmnexc
+	clrw	NMNEXC(%a6)		|clr nmnexc & nmcexc
+	btstb	#5,CMDREG1B(%a6)		|test for fmove out
+	bnes	frame_com
+	movel	USER_FPSR(%a6),FPSR_SHADOW(%a6) |set exc bits
+	orl	#sx_mask,E_BYTE(%a6)
+	bras	frame_com
+check_fr:
+	cmpb	#UNIMP_40_SIZE-4,1(%a7)
+	beqs	frame_com
+	clrw	NMNEXC(%a6)
+frame_com:
+	moveb	FPCR_ENABLE(%a6),%d0	|get fpcr enable byte
+	andb	FPSR_EXCEPT(%a6),%d0	|and in the fpsr exc byte
+	bfffo	%d0{#24:#8},%d1		|test for first set bit
+	leal	exc_tbl,%a0		|load jmp table address
+	subib	#24,%d1			|normalize bit offset to 0-8
+	movel	(%a0,%d1.w*4),%a0		|load routine address based
+|					;on the first enabled exc
+	jmp	(%a0)			|jump to routine
+|
+| Bsun is not possible in unimp or unsupp
+|
+bsun_exc:
+	bra	do_clean
+|
+| The typical work to be done to the unimp frame to report an
+| exception is to set the E1/E3 byte and clr the U flag.
+| commonE1 does this for E1 exceptions, which are snan,
+| operr, and dz.  commonE3 does this for E3 exceptions, which
+| are inex2 and inex1, and also clears the E1 exception bit
+| left over from the unimp exception.
+|
+commonE1:
+	bsetb	#E1,E_BYTE(%a6)		|set E1 flag
+	bra	commonE			|go clean and exit
+
+commonE3:
+	tstb	UFLG_TMP(%a6)		|test flag for unsup/unimp state
+	bnes	unsE3
+uniE3:
+	bsetb	#E3,E_BYTE(%a6)		|set E3 flag
+	bclrb	#E1,E_BYTE(%a6)		|clr E1 from unimp
+	bra	commonE
+
+unsE3:
+	tstb	RES_FLG(%a6)
+	bnes	unsE3_0
+unsE3_1:
+	bsetb	#E3,E_BYTE(%a6)		|set E3 flag
+unsE3_0:
+	bclrb	#E1,E_BYTE(%a6)		|clr E1 flag
+	movel	CMDREG1B(%a6),%d0
+	andl	#0x03c30000,%d0		|work for cmd3b
+	bfextu	CMDREG1B(%a6){#13:#1},%d1	|extract bit 2
+	lsll	#5,%d1
+	swap	%d1
+	orl	%d1,%d0			|put it in the right place
+	bfextu	CMDREG1B(%a6){#10:#3},%d1	|extract bit 3,4,5
+	lsll	#2,%d1
+	swap	%d1
+	orl	%d1,%d0			|put them in the right place
+	movel	%d0,CMDREG3B(%a6)		|in the busy frame
+
+commonE:
+	bclrb	#UFLAG,T_BYTE(%a6)	|clr U flag from unimp
+	bra	do_clean		|go clean and exit
+|
+| No bits in the enable byte match existing exceptions.  Check for
+| the case of the ovfl exc without the ovfl enabled, but with
+| inex2 enabled.
+|
+no_match:
+	btstb	#inex2_bit,FPCR_ENABLE(%a6) |check for ovfl/inex2 case
+	beqs	no_exc			|if clear, exit
+	btstb	#ovfl_bit,FPSR_EXCEPT(%a6) |now check ovfl
+	beqs	no_exc			|if clear, exit
+	bras	ovfl_unfl		|go to ovfl_unfl to determine if
+|					;it is an unsupp or unimp exc
+
+| No exceptions are to be reported.  If the instruction was
+| unimplemented, no FPU restore is necessary.  If it was
+| unsupported, we must perform the restore.
+no_exc:
+	tstb	UFLG_TMP(%a6)	|test flag for unsupp/unimp state
+	beqs	uni_no_exc
+uns_no_exc:
+	tstb	RES_FLG(%a6)	|check if frestore is needed
+	bne	do_clean	|if clear, no frestore needed
+uni_no_exc:
+	moveml	USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx USER_FP0(%a6),%fp0-%fp3
+	fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	unlk	%a6
+	bra	finish_up
+|
+| Unsupported Data Type Handler:
+| Ovfl:
+|   An fmoveout that results in an overflow is reported this way.
+| Unfl:
+|   An fmoveout that results in an underflow is reported this way.
+|
+| Unimplemented Instruction Handler:
+| Ovfl:
+|   Only scosh, setox, ssinh, stwotox, and scale can set overflow in
+|   this manner.
+| Unfl:
+|   Stwotox, setox, and scale can set underflow in this manner.
+|   Any of the other Library Routines such that f(x)=x in which
+|   x is an extended denorm can report an underflow exception.
+|   It is the responsibility of the exception-causing routine
+|   to make sure that WBTEMP is correct.
+|
+|   The exceptional operand is in FP_SCR1.
+|
+ovfl_unfl:
+	tstb	UFLG_TMP(%a6)	|test flag for unsupp/unimp state
+	beqs	ofuf_con
+|
+| The caller was from an unsupported data type trap.  Test if the
+| caller set CU_ONLY.  If so, the exceptional operand is expected in
+| FPTEMP, rather than WBTEMP.
+|
+	tstb	CU_ONLY(%a6)		|test if inst is cu-only
+	beq	unsE3
+|	move.w	#$fe,CU_SAVEPC(%a6)
+	clrb	CU_SAVEPC(%a6)
+	bsetb	#E1,E_BYTE(%a6)		|set E1 exception flag
+	movew	ETEMP_EX(%a6),FPTEMP_EX(%a6)
+	movel	ETEMP_HI(%a6),FPTEMP_HI(%a6)
+	movel	ETEMP_LO(%a6),FPTEMP_LO(%a6)
+	bsetb	#fptemp15_bit,DTAG(%a6)	|set fpte15
+	bclrb	#UFLAG,T_BYTE(%a6)	|clr U flag from unimp
+	bra	do_clean		|go clean and exit
+
+ofuf_con:
+	moveb	(%a7),VER_TMP(%a6)	|save version number
+	cmpib	#BUSY_SIZE-4,1(%a7)	|check for busy frame
+	beqs	busy_fr			|if unimp, grow to busy
+	cmpib	#VER_40,(%a7)		|test for orig unimp frame
+	bnes	try_41			|if not, test for rev frame
+	moveql	#13,%d0			|need to zero 14 lwords
+	bras	ofuf_fin
+try_41:
+	cmpib	#VER_41,(%a7)		|test for rev unimp frame
+	bnel	fpsp_fmt_error		|if neither, exit with error
+	moveql	#11,%d0			|need to zero 12 lwords
+
+ofuf_fin:
+	clrl	(%a7)
+loop1:
+	clrl	-(%a7)			|clear and dec a7
+	dbra	%d0,loop1
+	moveb	VER_TMP(%a6),(%a7)
+	moveb	#BUSY_SIZE-4,1(%a7)		|write busy fmt word.
+busy_fr:
+	movel	FP_SCR1(%a6),WBTEMP_EX(%a6)	|write
+	movel	FP_SCR1+4(%a6),WBTEMP_HI(%a6)	|exceptional op to
+	movel	FP_SCR1+8(%a6),WBTEMP_LO(%a6)	|wbtemp
+	bsetb	#E3,E_BYTE(%a6)			|set E3 flag
+	bclrb	#E1,E_BYTE(%a6)			|make sure E1 is clear
+	bclrb	#UFLAG,T_BYTE(%a6)		|clr U flag
+	movel	USER_FPSR(%a6),FPSR_SHADOW(%a6)
+	orl	#sx_mask,E_BYTE(%a6)
+	movel	CMDREG1B(%a6),%d0		|fix cmd1b to make it
+	andl	#0x03c30000,%d0		|work for cmd3b
+	bfextu	CMDREG1B(%a6){#13:#1},%d1	|extract bit 2
+	lsll	#5,%d1
+	swap	%d1
+	orl	%d1,%d0			|put it in the right place
+	bfextu	CMDREG1B(%a6){#10:#3},%d1	|extract bit 3,4,5
+	lsll	#2,%d1
+	swap	%d1
+	orl	%d1,%d0			|put them in the right place
+	movel	%d0,CMDREG3B(%a6)		|in the busy frame
+
+|
+| Check if the frame to be restored is busy or unimp.
+|** NOTE *** Bug fix for errata (0d43b #3)
+| If the frame is unimp, we must create a busy frame to
+| fix the bug with the nmnexc bits in cases in which they
+| are set by a previous instruction and not cleared by
+| the save. The frame will be unimp only if the final
+| instruction in an emulation routine caused the exception
+| by doing an fmove <ea>,fp0.  The exception operand, in
+| internal format, is in fptemp.
+|
+do_clean:
+	cmpib	#UNIMP_40_SIZE-4,1(%a7)
+	bnes	do_con
+	moveql	#13,%d0			|in orig, need to zero 14 lwords
+	bras	do_build
+do_con:
+	cmpib	#UNIMP_41_SIZE-4,1(%a7)
+	bnes	do_restore		|frame must be busy
+	moveql	#11,%d0			|in rev, need to zero 12 lwords
+
+do_build:
+	moveb	(%a7),VER_TMP(%a6)
+	clrl	(%a7)
+loop2:
+	clrl	-(%a7)			|clear and dec a7
+	dbra	%d0,loop2
+|
+| Use a1 as pointer into new frame.  a6 is not correct if an unimp or
+| busy frame was created as the result of an exception on the final
+| instruction of an emulation routine.
+|
+| We need to set the nmcexc bits if the exception is E1. Otherwise,
+| the exc taken will be inex2.
+|
+	leal	BUSY_SIZE+LOCAL_SIZE(%a7),%a1	|init a1 for new frame
+	moveb	VER_TMP(%a6),(%a7)	|write busy fmt word
+	moveb	#BUSY_SIZE-4,1(%a7)
+	movel	FP_SCR1(%a6),WBTEMP_EX(%a1)	|write
+	movel	FP_SCR1+4(%a6),WBTEMP_HI(%a1)	|exceptional op to
+	movel	FP_SCR1+8(%a6),WBTEMP_LO(%a1)	|wbtemp
+|	btst.b	#E1,E_BYTE(%a1)
+|	beq.b	do_restore
+	bfextu	USER_FPSR(%a6){#17:#4},%d0	|get snan/operr/ovfl/unfl bits
+	bfins	%d0,NMCEXC(%a1){#4:#4}	|and insert them in nmcexc
+	movel	USER_FPSR(%a6),FPSR_SHADOW(%a1) |set exc bits
+	orl	#sx_mask,E_BYTE(%a1)
+
+do_restore:
+	moveml	USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx USER_FP0(%a6),%fp0-%fp3
+	fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore (%a7)+
+	tstb	RES_FLG(%a6)	|RES_FLG indicates a "continuation" frame
+	beq	cont
+	bsr	bug1384
+cont:
+	unlk	%a6
+|
+| If trace mode is enabled, go to the trace handler.  This handler
+| cannot have any fp instructions.  If there are fp inst's and an
+| exception has been restored into the machine then the exception
+| will occur upon execution of the fp inst.  This is not desirable
+| in the kernel (supervisor mode).  See MC68040 manual Section 9.3.8.
+|
+finish_up:
+	btstb	#7,(%a7)		|test T1 in SR
+	bnes	g_trace
+	btstb	#6,(%a7)		|test T0 in SR
+	bnes	g_trace
+	bral	fpsp_done
+|
+| Change integer stack to look like trace stack
+| The address of the instruction that caused the
+| exception is already in the integer stack (it is
+| the same as the saved fpiar)
+|
+| If the current frame is already a 6-word stack then all
+| that needs to be done is to change the vector# to TRACE.
+| If the frame is only a 4-word stack (meaning we got here
+| on an Unsupported data type exception), then we need to grow
+| the stack an extra 2 words and get the FPIAR from the FPU.
+|
+g_trace:
+	bftst	EXC_VEC-4(%sp){#0:#4}
+	bne	g_easy
+
+	subw	#4,%sp		| make room
+	movel	4(%sp),(%sp)
+	movel	8(%sp),4(%sp)
+	subw	#BUSY_SIZE,%sp
+	fsave	(%sp)
+	fmovel	%fpiar,BUSY_SIZE+EXC_EA-4(%sp)
+	frestore (%sp)
+	addw	#BUSY_SIZE,%sp
+
+g_easy:
+	movew	#TRACE_VEC,EXC_VEC-4(%a7)
+	bral	real_trace
+|
+|  This is a work-around for hardware bug 1384.
+|
+bug1384:
+	link	%a5,#0
+	fsave	-(%sp)
+	cmpib	#0x41,(%sp)	| check for correct frame
+	beq	frame_41
+	bgt	nofix		| if more advanced mask, do nada
+
+frame_40:
+	tstb	1(%sp)		| check to see if idle
+	bne	notidle
+idle40:
+	clrl	(%sp)		| get rid of old fsave frame
+	movel	%d1,USER_D1(%a6)	| save d1
+	movew	#8,%d1		| place unimp frame instead
+loop40:	clrl	-(%sp)
+	dbra	%d1,loop40
+	movel	USER_D1(%a6),%d1	| restore d1
+	movel	#0x40280000,-(%sp)
+	frestore (%sp)+
+	unlk	%a5
+	rts
+
+frame_41:
+	tstb	1(%sp)		| check to see if idle
+	bne	notidle
+idle41:
+	clrl	(%sp)		| get rid of old fsave frame
+	movel	%d1,USER_D1(%a6)	| save d1
+	movew	#10,%d1		| place unimp frame instead
+loop41:	clrl	-(%sp)
+	dbra	%d1,loop41
+	movel	USER_D1(%a6),%d1	| restore d1
+	movel	#0x41300000,-(%sp)
+	frestore (%sp)+
+	unlk	%a5
+	rts
+
+notidle:
+	bclrb	#etemp15_bit,-40(%a5)
+	frestore (%sp)+
+	unlk	%a5
+	rts
+
+nofix:
+	frestore (%sp)+
+	unlk	%a5
+	rts
+
+	|end
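
frame_com above ANDs the FPCR enable byte with the FPSR exception byte and
uses bfffo to pick the highest-priority enabled exception (bsun down to
inex1), then jumps through exc_tbl.  A rough C sketch of that selection,
with the table entries reduced to strings (the names mirror the labels
above; none of this is kernel code):

	#include <stdio.h>

	/* Same order as exc_tbl: bit 7 (bsun) ... bit 0 (inex1), plus no_match. */
	static const char * const exc_names[9] = {
		"bsun_exc", "commonE1", "commonE1", "ovfl_unfl",
		"ovfl_unfl", "commonE1", "commonE3", "commonE3", "no_match"
	};

	/* Returns 0 for bsun ... 7 for inex1, or 8 if nothing is both set and
	 * enabled; mirrors "bfffo %d0{#24:#8},%d1" followed by "subib #24,%d1". */
	static int first_enabled_exc(unsigned char enable, unsigned char except)
	{
		unsigned char pending = enable & except;
		int bit;

		for (bit = 7; bit >= 0; bit--)
			if (pending & (1u << bit))
				return 7 - bit;
		return 8;
	}

	int main(void)
	{
		/* ovfl (bit 4) and inex2 (bit 1) pending and enabled: ovfl wins. */
		printf("%s\n", exc_names[first_enabled_exc(0xff, 0x12)]);
		return 0;
	}
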
diff --git a/arch/m68k/fpsp040/get_op.S b/arch/m68k/fpsp040/get_op.S
new file mode 100644
index 0000000..c7c2f37
--- /dev/null
+++ b/arch/m68k/fpsp040/get_op.S
@@ -0,0 +1,676 @@
+|
+|	get_op.sa 3.6 5/19/92
+|
+|	get_op.sa 3.5 4/26/91
+|
+|  Description: This routine is called by the unsupported format/data
+| type exception handler ('unsupp' - vector 55) and the unimplemented
+| instruction exception handler ('unimp' - vector 11).  'get_op'
+| determines the opclass (0, 2, or 3) and branches to the
+| opclass handler routine.  See 68881/2 User's Manual table 4-11
+| for a description of the opclasses.
+|
+| For UNSUPPORTED data/format (exception vector 55) and for
+| UNIMPLEMENTED instructions (exception vector 11) the following
+| applies:
+|
+| - For unnormalized numbers (opclass 0, 2, or 3) the
+| number(s) is normalized and the operand type tag is updated.
+|
+| - For a packed number (opclass 2) the number is unpacked and the
+| operand type tag is updated.
+|
+| - For denormalized numbers (opclass 0 or 2) the number(s) is not
+| changed but passed to the next module.  The next module for
+| unimp is do_func, the next module for unsupp is res_func.
+|
+| For UNSUPPORTED data/format (exception vector 55) only the
+| following applies:
+|
+| - If there is a move out with a packed number (opclass 3) the
+| number is packed and written to user memory.  For the other
+| opclasses the number(s) are written back to the fsave stack
+| and the instruction is then restored back into the '040.  The
+| '040 is then able to complete the instruction.
+|
+| For example:
+| fadd.x fpm,fpn where the fpm contains an unnormalized number.
+| The '040 takes an unsupported data trap and gets to this
+| routine.  The number is normalized, put back on the stack and
+| then an frestore is done to restore the instruction back into
+| the '040.  The '040 then re-executes the fadd.x fpm,fpn with
+| a normalized number in the source and the instruction is
+| successful.
+|
+| Next consider if in the process of normalizing the un-
+| normalized number it becomes a denormalized number.  The
+| routine which converts the unnorm to a norm (called mk_norm)
+| detects this and tags the number as a denorm.  The routine
+| res_func sees the denorm tag and converts the denorm to a
+| norm.  The instruction is then restored back into the '040
+| which re-executes the instruction.
+|
+|
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+GET_OP:    |idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	.global	PIRN,PIRZRM,PIRP
+	.global	SMALRN,SMALRZRM,SMALRP
+	.global	BIGRN,BIGRZRM,BIGRP
+
+PIRN:
+	.long 0x40000000,0xc90fdaa2,0x2168c235    |pi
+PIRZRM:
+	.long 0x40000000,0xc90fdaa2,0x2168c234    |pi
+PIRP:
+	.long 0x40000000,0xc90fdaa2,0x2168c235    |pi
+
+|round to nearest
+SMALRN:
+	.long 0x3ffd0000,0x9a209a84,0xfbcff798    |log10(2)
+	.long 0x40000000,0xadf85458,0xa2bb4a9a    |e
+	.long 0x3fff0000,0xb8aa3b29,0x5c17f0bc    |log2(e)
+	.long 0x3ffd0000,0xde5bd8a9,0x37287195    |log10(e)
+	.long 0x00000000,0x00000000,0x00000000    |0.0
+| round to zero; round to negative infinity
+SMALRZRM:
+	.long 0x3ffd0000,0x9a209a84,0xfbcff798    |log10(2)
+	.long 0x40000000,0xadf85458,0xa2bb4a9a    |e
+	.long 0x3fff0000,0xb8aa3b29,0x5c17f0bb    |log2(e)
+	.long 0x3ffd0000,0xde5bd8a9,0x37287195    |log10(e)
+	.long 0x00000000,0x00000000,0x00000000    |0.0
+| round to positive infinity
+SMALRP:
+	.long 0x3ffd0000,0x9a209a84,0xfbcff799    |log10(2)
+	.long 0x40000000,0xadf85458,0xa2bb4a9b    |e
+	.long 0x3fff0000,0xb8aa3b29,0x5c17f0bc    |log2(e)
+	.long 0x3ffd0000,0xde5bd8a9,0x37287195    |log10(e)
+	.long 0x00000000,0x00000000,0x00000000    |0.0
+
+|round to nearest
+BIGRN:
+	.long 0x3ffe0000,0xb17217f7,0xd1cf79ac    |ln(2)
+	.long 0x40000000,0x935d8ddd,0xaaa8ac17    |ln(10)
+	.long 0x3fff0000,0x80000000,0x00000000    |10 ^ 0
+
+	.global	PTENRN
+PTENRN:
+	.long 0x40020000,0xA0000000,0x00000000    |10 ^ 1
+	.long 0x40050000,0xC8000000,0x00000000    |10 ^ 2
+	.long 0x400C0000,0x9C400000,0x00000000    |10 ^ 4
+	.long 0x40190000,0xBEBC2000,0x00000000    |10 ^ 8
+	.long 0x40340000,0x8E1BC9BF,0x04000000    |10 ^ 16
+	.long 0x40690000,0x9DC5ADA8,0x2B70B59E    |10 ^ 32
+	.long 0x40D30000,0xC2781F49,0xFFCFA6D5    |10 ^ 64
+	.long 0x41A80000,0x93BA47C9,0x80E98CE0    |10 ^ 128
+	.long 0x43510000,0xAA7EEBFB,0x9DF9DE8E    |10 ^ 256
+	.long 0x46A30000,0xE319A0AE,0xA60E91C7    |10 ^ 512
+	.long 0x4D480000,0xC9767586,0x81750C17    |10 ^ 1024
+	.long 0x5A920000,0x9E8B3B5D,0xC53D5DE5    |10 ^ 2048
+	.long 0x75250000,0xC4605202,0x8A20979B    |10 ^ 4096
+|round to zero; round to minus infinity
+BIGRZRM:
+	.long 0x3ffe0000,0xb17217f7,0xd1cf79ab    |ln(2)
+	.long 0x40000000,0x935d8ddd,0xaaa8ac16    |ln(10)
+	.long 0x3fff0000,0x80000000,0x00000000    |10 ^ 0
+
+	.global	PTENRM
+PTENRM:
+	.long 0x40020000,0xA0000000,0x00000000    |10 ^ 1
+	.long 0x40050000,0xC8000000,0x00000000    |10 ^ 2
+	.long 0x400C0000,0x9C400000,0x00000000    |10 ^ 4
+	.long 0x40190000,0xBEBC2000,0x00000000    |10 ^ 8
+	.long 0x40340000,0x8E1BC9BF,0x04000000    |10 ^ 16
+	.long 0x40690000,0x9DC5ADA8,0x2B70B59D    |10 ^ 32
+	.long 0x40D30000,0xC2781F49,0xFFCFA6D5    |10 ^ 64
+	.long 0x41A80000,0x93BA47C9,0x80E98CDF    |10 ^ 128
+	.long 0x43510000,0xAA7EEBFB,0x9DF9DE8D    |10 ^ 256
+	.long 0x46A30000,0xE319A0AE,0xA60E91C6    |10 ^ 512
+	.long 0x4D480000,0xC9767586,0x81750C17    |10 ^ 1024
+	.long 0x5A920000,0x9E8B3B5D,0xC53D5DE5    |10 ^ 2048
+	.long 0x75250000,0xC4605202,0x8A20979A    |10 ^ 4096
+|round to positive infinity
+BIGRP:
+	.long 0x3ffe0000,0xb17217f7,0xd1cf79ac    |ln(2)
+	.long 0x40000000,0x935d8ddd,0xaaa8ac17    |ln(10)
+	.long 0x3fff0000,0x80000000,0x00000000    |10 ^ 0
+
+	.global	PTENRP
+PTENRP:
+	.long 0x40020000,0xA0000000,0x00000000    |10 ^ 1
+	.long 0x40050000,0xC8000000,0x00000000    |10 ^ 2
+	.long 0x400C0000,0x9C400000,0x00000000    |10 ^ 4
+	.long 0x40190000,0xBEBC2000,0x00000000    |10 ^ 8
+	.long 0x40340000,0x8E1BC9BF,0x04000000    |10 ^ 16
+	.long 0x40690000,0x9DC5ADA8,0x2B70B59E    |10 ^ 32
+	.long 0x40D30000,0xC2781F49,0xFFCFA6D6    |10 ^ 64
+	.long 0x41A80000,0x93BA47C9,0x80E98CE0    |10 ^ 128
+	.long 0x43510000,0xAA7EEBFB,0x9DF9DE8E    |10 ^ 256
+	.long 0x46A30000,0xE319A0AE,0xA60E91C7    |10 ^ 512
+	.long 0x4D480000,0xC9767586,0x81750C18    |10 ^ 1024
+	.long 0x5A920000,0x9E8B3B5D,0xC53D5DE6    |10 ^ 2048
+	.long 0x75250000,0xC4605202,0x8A20979B    |10 ^ 4096
+
+	|xref	nrm_zero
+	|xref	decbin
+	|xref	round
+
+	.global    get_op
+	.global    uns_getop
+	.global    uni_getop
+get_op:
+	clrb	DY_MO_FLG(%a6)
+	tstb	UFLG_TMP(%a6)	|test flag for unsupp/unimp state
+	beq	uni_getop
+
+uns_getop:
+	btstb	#direction_bit,CMDREG1B(%a6)
+	bne	opclass3	|branch if a fmove out (any kind)
+	btstb	#6,CMDREG1B(%a6)
+	beqs	uns_notpacked
+
+	bfextu	CMDREG1B(%a6){#3:#3},%d0
+	cmpb	#3,%d0
+	beq	pack_source	|check for a packed src op, branch if so
+uns_notpacked:
+	bsr	chk_dy_mo	|set the dyadic/monadic flag
+	tstb	DY_MO_FLG(%a6)
+	beqs	src_op_ck	|if monadic, go check src op
+|				;else, check dst op (fall through)
+
+	btstb	#7,DTAG(%a6)
+	beqs	src_op_ck	|if dst op is norm, check src op
+	bras	dst_ex_dnrm	|else, handle destination unnorm/dnrm
+
+uni_getop:
+	bfextu	CMDREG1B(%a6){#0:#6},%d0 |get opclass and src fields
+	cmpil	#0x17,%d0		|if op class and size fields are $17,
+|				;it is FMOVECR; if not, continue
+|
+| If the instruction is fmovecr, exit get_op.  It is handled
+| in do_func and smovecr.sa.
+|
+	bne	not_fmovecr	|handle fmovecr as an unimplemented inst
+	rts
+
+not_fmovecr:
+	btstb	#E1,E_BYTE(%a6)	|if set, there is a packed operand
+	bne	pack_source	|check for packed src op, branch if so
+
+| The following lines are coded to optimize for normalized operands
+	moveb	STAG(%a6),%d0
+	orb	DTAG(%a6),%d0	|check if either of STAG/DTAG msb set
+	bmis	dest_op_ck	|if so, some op needs to be fixed
+	rts
+
+dest_op_ck:
+	btstb	#7,DTAG(%a6)	|check for unsupported data types in
+	beqs	src_op_ck	|the destination, if not, check src op
+	bsr	chk_dy_mo	|set dyadic/monadic flag
+	tstb	DY_MO_FLG(%a6)	|
+	beqs	src_op_ck	|if monadic, check src op
+|
+| At this point, destination has an extended denorm or unnorm.
+|
+dst_ex_dnrm:
+	movew	FPTEMP_EX(%a6),%d0 |get destination exponent
+	andiw	#0x7fff,%d0	|mask sign, check if exp = 0000
+	beqs	src_op_ck	|if denorm then check source op.
+|				;denorms are taken care of in res_func
+|				;(unsupp) or do_func (unimp)
+|				;else unnorm fall through
+	leal	FPTEMP(%a6),%a0	|point a0 to dop - used in mk_norm
+	bsr	mk_norm		|go normalize - mk_norm returns:
+|				;L_SCR1{7:5} = operand tag
+|				;	(000 = norm, 100 = denorm)
+|				;L_SCR1{4} = fpte15 or ete15
+|				;	0 = exp >  $3fff
+|				;	1 = exp <= $3fff
+|				;and puts the normalized num back
+|				;on the fsave stack
+|
+	moveb L_SCR1(%a6),DTAG(%a6) |write the new tag & fpte15
+|				;to the fsave stack and fall
+|				;through to check source operand
+|
+src_op_ck:
+	btstb	#7,STAG(%a6)
+	beq	end_getop	|check for unsupported data types on the
+|				;source operand
+	btstb	#5,STAG(%a6)
+	bnes	src_sd_dnrm	|if bit 5 set, handle sgl/dbl denorms
+|
+| At this point only unnorms or extended denorms are possible.
+|
+src_ex_dnrm:
+	movew	ETEMP_EX(%a6),%d0 |get source exponent
+	andiw	#0x7fff,%d0	|mask sign, check if exp = 0000
+	beq	end_getop	|if denorm then exit, denorms are
+|				;handled in do_func
+	leal	ETEMP(%a6),%a0	|point a0 to sop - used in mk_norm
+	bsr	mk_norm		|go normalize - mk_norm returns:
+|				;L_SCR1{7:5} = operand tag
+|				;	(000 = norm, 100 = denorm)
+|				;L_SCR1{4} = fpte15 or ete15
+|				;	0 = exp >  $3fff
+|				;	1 = exp <= $3fff
+|				;and puts the normalized num back
+|				;on the fsave stack
+|
+	moveb	L_SCR1(%a6),STAG(%a6) |write the new tag & ete15
+	rts			|end_getop
+
+|
+| At this point, only single or double denorms are possible.
+| If the inst is not fmove, normalize the source.  If it is,
+| do nothing to the input.
+|
+src_sd_dnrm:
+	btstb	#4,CMDREG1B(%a6)	|differentiate between sgl/dbl denorm
+	bnes	is_double
+is_single:
+	movew	#0x3f81,%d1	|write bias for sgl denorm
+	bras	common		|goto the common code
+is_double:
+	movew	#0x3c01,%d1	|write the bias for a dbl denorm
+common:
+	btstb	#sign_bit,ETEMP_EX(%a6) |grab sign bit of mantissa
+	beqs	pos
+	bset	#15,%d1		|set sign bit because it is negative
+pos:
+	movew	%d1,ETEMP_EX(%a6)
+|				;put exponent on stack
+
+	movew	CMDREG1B(%a6),%d1
+	andw	#0xe3ff,%d1	|clear out source specifier
+	orw	#0x0800,%d1	|set source specifier to extended prec
+	movew	%d1,CMDREG1B(%a6)	|write back to the command word in stack
+|				;this is needed to fix unsupp data stack
+	leal	ETEMP(%a6),%a0	|point a0 to sop
+
+	bsr	mk_norm		|convert sgl/dbl denorm to norm
+	moveb	L_SCR1(%a6),STAG(%a6) |put tag into source tag reg - d0
+	rts			|end_getop
+|
+| At this point, the source is definitely packed; whether the
+| instruction is dyadic or monadic is still unknown
+|
+pack_source:
+	movel	FPTEMP_LO(%a6),ETEMP(%a6)	|write ms part of packed
+|				;number to etemp slot
+	bsr	chk_dy_mo	|set dyadic/monadic flag
+	bsr	unpack
+
+	tstb	DY_MO_FLG(%a6)
+	beqs	end_getop	|if monadic, exit
+|				;else, fix FPTEMP
+pack_dya:
+	bfextu	CMDREG1B(%a6){#6:#3},%d0 |extract dest fp reg
+	movel	#7,%d1
+	subl	%d0,%d1
+	clrl	%d0
+	bsetl	%d1,%d0		|set up d0 as a dynamic register mask
+	fmovemx %d0,FPTEMP(%a6)	|write to FPTEMP
+
+	btstb	#7,DTAG(%a6)	|check dest tag for unnorm or denorm
+	bne	dst_ex_dnrm	|else, handle the unnorm or ext denorm
+|
+| Dest is not denormalized.  Check for norm, and set fpte15
+| accordingly.
+|
+	moveb	DTAG(%a6),%d0
+	andib	#0xf0,%d0		|strip to only dtag:fpte15
+	tstb	%d0		|check for normalized value
+	bnes	end_getop	|if inf/nan/zero leave get_op
+	movew	FPTEMP_EX(%a6),%d0
+	andiw	#0x7fff,%d0
+	cmpiw	#0x3fff,%d0	|check if fpte15 needs setting
+	bges	end_getop	|if >= $3fff, leave fpte15=0
+	orb	#0x10,DTAG(%a6)
+	bras	end_getop
+
+|
+| At this point, it is either an fmoveout packed, unnorm or denorm
+|
+opclass3:
+	clrb	DY_MO_FLG(%a6)	|set dyadic/monadic flag to monadic
+	bfextu	CMDREG1B(%a6){#4:#2},%d0
+	cmpib	#3,%d0
+	bne	src_ex_dnrm	|if not equal, must be unnorm or denorm
+|				;else it is a packed move out
+|				;exit
+end_getop:
+	rts
+
+|
+| Sets the DY_MO_FLG correctly. This is used only if it is an
+| unsupported data type exception.  Set if dyadic.
+|
+chk_dy_mo:
+	movew	CMDREG1B(%a6),%d0
+	btstl	#5,%d0		|testing extension command word
+	beqs	set_mon		|if bit 5 = 0 then monadic
+	btstl	#4,%d0		|know that bit 5 = 1
+	beqs	set_dya		|if bit 4 = 0 then dyadic
+	andiw	#0x007f,%d0	|get rid of all but extension bits {6:0}
+	cmpiw	#0x0038,%d0	|if extension = $38 then fcmp (dyadic)
+	bnes	set_mon
+set_dya:
+	st	DY_MO_FLG(%a6)	|set the inst flag type to dyadic
+	rts
+set_mon:
+	clrb	DY_MO_FLG(%a6)	|set the inst flag type to monadic
+	rts
+|
+|	MK_NORM
+|
+| Normalizes unnormalized numbers, sets tag to norm or denorm, sets unfl
+| exception if denorm.
+|
+| CASE opclass 0x0 unsupp
+|	mk_norm till msb set
+|	set tag = norm
+|
+| CASE opclass 0x0 unimp
+|	mk_norm till msb set or exp = 0
+|	if integer bit = 0
+|	   tag = denorm
+|	else
+|	   tag = norm
+|
+| CASE opclass 011 unsupp
+|	mk_norm till msb set or exp = 0
+|	if integer bit = 0
+|	   tag = denorm
+|	   set unfl_nmcexe = 1
+|	else
+|	   tag = norm
+|
+| if exp <= $3fff
+|   set ete15 or fpte15 = 1
+| else set ete15 or fpte15 = 0
+
+| input:
+|	a0 = points to operand to be normalized
+| output:
+|	L_SCR1{7:5} = operand tag (000 = norm, 100 = denorm)
+|	L_SCR1{4}   = fpte15 or ete15 (0 = exp > $3fff, 1 = exp <=$3fff)
+|	the normalized operand is placed back on the fsave stack
+mk_norm:
+	clrl	L_SCR1(%a6)
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)	|transform into internal extended format
+
+	cmpib	#0x2c,1+EXC_VEC(%a6) |check if unimp
+	bnes	uns_data	|branch if unsupp
+	bsr	uni_inst	|call if unimp (opclass 0x0)
+	bras	reload
+uns_data:
+	btstb	#direction_bit,CMDREG1B(%a6) |check transfer direction
+	bnes	bit_set		|branch if set (opclass 011)
+	bsr	uns_opx		|call if opclass 0x0
+	bras	reload
+bit_set:
+	bsr	uns_op3		|opclass 011
+reload:
+	cmpw	#0x3fff,LOCAL_EX(%a0) |if exp > $3fff
+	bgts	end_mk		|   fpte15/ete15 already set to 0
+	bsetb	#4,L_SCR1(%a6)	|else set fpte15/ete15 to 1
+|				;calling routine actually sets the
+|				;value on the stack (along with the
+|				;tag), since this routine doesn't
+|				;know if it should set ete15 or fpte15
+|				;ie, it doesn't know if this is the
+|				;src op or dest op.
+end_mk:
+	bfclr	LOCAL_SGN(%a0){#0:#8}
+	beqs	end_mk_pos
+	bsetb	#sign_bit,LOCAL_EX(%a0) |convert back to IEEE format
+end_mk_pos:
+	rts
+|
+|     CASE opclass 011 unsupp
+|
+uns_op3:
+	bsr	nrm_zero	|normalize till msb = 1 or exp = zero
+	btstb	#7,LOCAL_HI(%a0)	|if msb = 1
+	bnes	no_unfl		|then branch
+set_unfl:
+	orw	#dnrm_tag,L_SCR1(%a6) |set denorm tag
+	bsetb	#unfl_bit,FPSR_EXCEPT(%a6) |set unfl exception bit
+no_unfl:
+	rts
+|
+|     CASE opclass 0x0 unsupp
+|
+uns_opx:
+	bsr	nrm_zero	|normalize the number
+	btstb	#7,LOCAL_HI(%a0)	|check if integer bit (j-bit) is set
+	beqs	uns_den		|if clear then now have a denorm
+uns_nrm:
+	orb	#norm_tag,L_SCR1(%a6) |set tag to norm
+	rts
+uns_den:
+	orb	#dnrm_tag,L_SCR1(%a6) |set tag to denorm
+	rts
+|
+|     CASE opclass 0x0 unimp
+|
+uni_inst:
+	bsr	nrm_zero
+	btstb	#7,LOCAL_HI(%a0)	|check if integer bit (j-bit) is set
+	beqs	uni_den		|if clear then now have a denorm
+uni_nrm:
+	orb	#norm_tag,L_SCR1(%a6) |set tag to norm
+	rts
+uni_den:
+	orb	#dnrm_tag,L_SCR1(%a6) |set tag to denorm
+	rts
+
+|
+|	Decimal to binary conversion
+|
+| Special cases of inf and NaNs are completed outside of decbin.
+| If the input is an snan, the snan bit is not set.
+|
+| input:
+|	ETEMP(a6)	- points to packed decimal string in memory
+| output:
+|	fp0	- contains packed string converted to extended precision
+|	ETEMP	- same as fp0
+unpack:
+	movew	CMDREG1B(%a6),%d0	|examine command word, looking for fmove's
+	andw	#0x3b,%d0
+	beq	move_unpack	|special handling for fmove: must set FPSR_CC
+
+	movew	ETEMP(%a6),%d0	|get word with inf information
+	bfextu	%d0{#20:#12},%d1	|get exponent into d1
+	cmpiw	#0x0fff,%d1	|test for inf or NaN
+	bnes	try_zero	|if not equal, it is not special
+	bfextu	%d0{#17:#3},%d1	|get SE and y bits into d1
+	cmpiw	#7,%d1		|SE and y bits must be on for special
+	bnes	try_zero	|if not on, it is not special
+|input is of the special cases of inf and NaN
+	tstl	ETEMP_HI(%a6)	|check ms mantissa
+	bnes	fix_nan		|if non-zero, it is a NaN
+	tstl	ETEMP_LO(%a6)	|check ls mantissa
+	bnes	fix_nan		|if non-zero, it is a NaN
+	bra	finish		|special already on stack
+fix_nan:
+	btstb	#signan_bit,ETEMP_HI(%a6) |test for snan
+	bne	finish
+	orl	#snaniop_mask,USER_FPSR(%a6) |always set snan if it is so
+	bra	finish
+try_zero:
+	movew	ETEMP_EX+2(%a6),%d0 |get word 4
+	andiw	#0x000f,%d0	|clear all but last ni(y)bble
+	tstw	%d0		|check for zero.
+	bne	not_spec
+	tstl	ETEMP_HI(%a6)	|check words 3 and 2
+	bne	not_spec
+	tstl	ETEMP_LO(%a6)	|check words 1 and 0
+	bne	not_spec
+	tstl	ETEMP(%a6)	|test sign of the zero
+	bges	pos_zero
+	movel	#0x80000000,ETEMP(%a6) |write neg zero to etemp
+	clrl	ETEMP_HI(%a6)
+	clrl	ETEMP_LO(%a6)
+	bra	finish
+pos_zero:
+	clrl	ETEMP(%a6)
+	clrl	ETEMP_HI(%a6)
+	clrl	ETEMP_LO(%a6)
+	bra	finish
+
+not_spec:
+	fmovemx %fp0-%fp1,-(%a7)	|save fp0 - decbin returns in it
+	bsr	decbin
+	fmovex %fp0,ETEMP(%a6)	|put the unpacked sop in the fsave stack
+	fmovemx (%a7)+,%fp0-%fp1
+	fmovel	#0,%FPSR		|clr fpsr from decbin
+	bra	finish
+
+|
+| Special handling for packed move in:  Same results as all other
+| packed cases, but we must set the FPSR condition codes properly.
+|
+move_unpack:
+	movew	ETEMP(%a6),%d0	|get word with inf information
+	bfextu	%d0{#20:#12},%d1	|get exponent into d1
+	cmpiw	#0x0fff,%d1	|test for inf or NaN
+	bnes	mtry_zero	|if not equal, it is not special
+	bfextu	%d0{#17:#3},%d1	|get SE and y bits into d1
+	cmpiw	#7,%d1		|SE and y bits must be on for special
+	bnes	mtry_zero	|if not on, it is not special
+|input is of the special cases of inf and NaN
+	tstl	ETEMP_HI(%a6)	|check ms mantissa
+	bnes	mfix_nan		|if non-zero, it is a NaN
+	tstl	ETEMP_LO(%a6)	|check ls mantissa
+	bnes	mfix_nan		|if non-zero, it is a NaN
+|input is inf
+	orl	#inf_mask,USER_FPSR(%a6) |set I bit
+	tstl	ETEMP(%a6)	|check sign
+	bge	finish
+	orl	#neg_mask,USER_FPSR(%a6) |set N bit
+	bra	finish		|special already on stack
+mfix_nan:
+	orl	#nan_mask,USER_FPSR(%a6) |set NaN bit
+	moveb	#nan_tag,STAG(%a6)	|set stag to NaN
+	btstb	#signan_bit,ETEMP_HI(%a6) |test for snan
+	bnes	mn_snan
+	orl	#snaniop_mask,USER_FPSR(%a6) |set snan bit
+	btstb	#snan_bit,FPCR_ENABLE(%a6) |test for snan enabled
+	bnes	mn_snan
+	bsetb	#signan_bit,ETEMP_HI(%a6) |force snans to qnans
+mn_snan:
+	tstl	ETEMP(%a6)	|check for sign
+	bge	finish		|if clr, go on
+	orl	#neg_mask,USER_FPSR(%a6) |set N bit
+	bra	finish
+
+mtry_zero:
+	movew	ETEMP_EX+2(%a6),%d0 |get word 4
+	andiw	#0x000f,%d0	|clear all but last ni(y)bble
+	tstw	%d0		|check for zero.
+	bnes	mnot_spec
+	tstl	ETEMP_HI(%a6)	|check words 3 and 2
+	bnes	mnot_spec
+	tstl	ETEMP_LO(%a6)	|check words 1 and 0
+	bnes	mnot_spec
+	tstl	ETEMP(%a6)	|test sign of the zero
+	bges	mpos_zero
+	orl	#neg_mask+z_mask,USER_FPSR(%a6) |set N and Z
+	movel	#0x80000000,ETEMP(%a6) |write neg zero to etemp
+	clrl	ETEMP_HI(%a6)
+	clrl	ETEMP_LO(%a6)
+	bras	finish
+mpos_zero:
+	orl	#z_mask,USER_FPSR(%a6) |set Z
+	clrl	ETEMP(%a6)
+	clrl	ETEMP_HI(%a6)
+	clrl	ETEMP_LO(%a6)
+	bras	finish
+
+mnot_spec:
+	fmovemx %fp0-%fp1,-(%a7)	|save fp0 ,fp1 - decbin returns in fp0
+	bsr	decbin
+	fmovex %fp0,ETEMP(%a6)
+|				;put the unpacked sop in the fsave stack
+	fmovemx (%a7)+,%fp0-%fp1
+
+finish:
+	movew	CMDREG1B(%a6),%d0	|get the command word
+	andw	#0xfbff,%d0	|change the source specifier field to
+|				;extended (was packed).
+	movew	%d0,CMDREG1B(%a6)	|write command word back to fsave stack
+|				;we need to do this so the 040 will
+|				;re-execute the inst. without taking
+|				;another packed trap.
+
+fix_stag:
+|Converted result is now in etemp on fsave stack, now set the source
+|tag (stag)
+|	if (ete =$7fff) then INF or NAN
+|		if (etemp = $x.0----0) then
+|			stag = INF
+|		else
+|			stag = NAN
+|	else
+|		if (ete = $0000) then
+|			stag = ZERO
+|		else
+|			stag = NORM
+|
+| Note also that the etemp_15 bit (just right of the stag) must
+| be set accordingly.
+|
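+| The stag values written below encode the tag in bits 7:5 and
+| etemp_15 in bit 4: $00/$10 = norm, $30 = zero (etemp_15 always
+| set), $40 = inf, $60 = nan.
+|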
+	movew		ETEMP_EX(%a6),%d1
+	andiw		#0x7fff,%d1   |strip sign
+	cmpw		#0x7fff,%d1
+	bnes		z_or_nrm
+	movel		ETEMP_HI(%a6),%d1
+	bnes		is_nan
+	movel		ETEMP_LO(%a6),%d1
+	bnes		is_nan
+is_inf:
+	moveb		#0x40,STAG(%a6)
+	movel		#0x40,%d0
+	rts
+is_nan:
+	moveb		#0x60,STAG(%a6)
+	movel		#0x60,%d0
+	rts
+z_or_nrm:
+	tstw		%d1
+	bnes		is_nrm
+is_zro:
+| For a zero, set etemp_15
+	moveb		#0x30,STAG(%a6)
+	movel		#0x20,%d0
+	rts
+is_nrm:
+| For a norm, check if the exp <= $3fff; if so, set etemp_15
+	cmpiw		#0x3fff,%d1
+	bles		set_bit15
+	moveb		#0,STAG(%a6)
+	bras		end_is_nrm
+set_bit15:
+	moveb		#0x10,STAG(%a6)
+end_is_nrm:
+	movel		#0,%d0
+end_fix:
+	rts
+
+end_get:
+	rts
+	|end
diff --git a/arch/m68k/fpsp040/kernel_ex.S b/arch/m68k/fpsp040/kernel_ex.S
new file mode 100644
index 0000000..476b711
--- /dev/null
+++ b/arch/m68k/fpsp040/kernel_ex.S
@@ -0,0 +1,494 @@
+|
+|	kernel_ex.sa 3.3 12/19/90
+|
+| This file contains routines to force exception status in the
+| fpu for exceptional cases detected or reported within the
+| transcendental functions.  Typically, the t_xx routine will
+| set the appropriate bits in the USER_FPSR word on the stack.
+| The bits are tested in gen_except.sa to determine if an exceptional
+| situation needs to be created on return from the FPSP.
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+KERNEL_EX:    |idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section    8
+
+#include "fpsp.h"
+
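+|	mns_inf/pls_inf are the extended-precision infinities (exponent
+|	$7fff, mantissa zero), nan is the default non-signalling nan
+|	(mantissa all ones), and huge is the largest finite extended
+|	value (exponent $7ffe, mantissa all ones).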
+mns_inf:  .long 0xffff0000,0x00000000,0x00000000
+pls_inf:  .long 0x7fff0000,0x00000000,0x00000000
+nan:      .long 0x7fff0000,0xffffffff,0xffffffff
+huge:     .long 0x7ffe0000,0xffffffff,0xffffffff
+
+	|xref	  ovf_r_k
+	|xref	  unf_sub
+	|xref	  nrm_set
+
+	.global	  t_dz
+	.global      t_dz2
+	.global      t_operr
+	.global      t_unfl
+	.global      t_ovfl
+	.global      t_ovfl2
+	.global      t_inx2
+	.global	  t_frcinx
+	.global	  t_extdnrm
+	.global	  t_resdnrm
+	.global	  dst_nan
+	.global	  src_nan
+|
+|	DZ exception
+|
+|
+|	if dz trap disabled
+|		store properly signed inf (use sign of etemp) into fp0
+|		set FPSR exception status dz bit, condition code
+|		inf bit, and accrued dz bit
+|		return
+|		frestore the frame into the machine (done by unimp_hd)
+|
+|	else dz trap enabled
+|		set exception status bit & accrued bits in FPSR
+|		set flag to disable sto_res from corrupting fp register
+|		return
+|		frestore the frame into the machine (done by unimp_hd)
+|
+| t_dz2 is used by monadic functions such as flogn (from do_func).
+| t_dz is used by monadic functions such as satanh (from the
+| transcendental function).
+|
+t_dz2:
+	bsetb	#neg_bit,FPSR_CC(%a6)	|set neg bit in FPSR
+	fmovel	#0,%FPSR			|clr status bits (Z set)
+	btstb	#dz_bit,FPCR_ENABLE(%a6)	|test FPCR for dz exc enabled
+	bnes	dz_ena_end
+	bras	m_inf			|flogx always returns -inf
+t_dz:
+	fmovel	#0,%FPSR			|clr status bits (Z set)
+	btstb	#dz_bit,FPCR_ENABLE(%a6)	|test FPCR for dz exc enabled
+	bnes	dz_ena
+|
+|	dz disabled
+|
+	btstb	#sign_bit,ETEMP_EX(%a6)	|check sign for neg or pos
+	beqs	p_inf			|branch if pos sign
+
+m_inf:
+	fmovemx mns_inf,%fp0-%fp0		|load -inf
+	bsetb	#neg_bit,FPSR_CC(%a6)	|set neg bit in FPSR
+	bras	set_fpsr
+p_inf:
+	fmovemx pls_inf,%fp0-%fp0		|load +inf
+set_fpsr:
+	orl	#dzinf_mask,USER_FPSR(%a6) |set I,DZ,ADZ
+	rts
+|
+|	dz enabled
+|
+dz_ena:
+	btstb	#sign_bit,ETEMP_EX(%a6)	|check sign for neg or pos
+	beqs	dz_ena_end
+	bsetb	#neg_bit,FPSR_CC(%a6)	|set neg bit in FPSR
+dz_ena_end:
+	orl	#dzinf_mask,USER_FPSR(%a6) |set I,DZ,ADZ
+	st	STORE_FLG(%a6)
+	rts
+|
+|	OPERR exception
+|
+|	if (operr trap disabled)
+|		set FPSR exception status operr bit, condition code
+|		nan bit; Store default NAN into fp0
+|		frestore the frame into the machine (done by unimp_hd)
+|
+|	else (operr trap enabled)
+|		set FPSR exception status operr bit, accrued operr bit
+|		set flag to disable sto_res from corrupting fp register
+|		frestore the frame into the machine (done by unimp_hd)
+|
+t_operr:
+	orl	#opnan_mask,USER_FPSR(%a6) |set NaN, OPERR, AIOP
+
+	btstb	#operr_bit,FPCR_ENABLE(%a6) |test FPCR for operr enabled
+	bnes	op_ena
+
+	fmovemx nan,%fp0-%fp0		|load default nan
+	rts
+op_ena:
+	st	STORE_FLG(%a6)		|do not corrupt destination
+	rts
+
+|
+|	t_unfl --- UNFL exception
+|
+| This entry point is used by all routines requiring unfl, inex2,
+| aunfl, and ainex to be set on exit.
+|
+| On entry, a0 points to the exceptional operand.  The final exceptional
+| operand is built in FP_SCR1 and only the sign from the original operand
+| is used.
+|
+t_unfl:
+	clrl	FP_SCR1(%a6)		|set exceptional operand to zero
+	clrl	FP_SCR1+4(%a6)
+	clrl	FP_SCR1+8(%a6)
+	tstb	(%a0)			|extract sign from caller's exop
+	bpls	unfl_signok
+	bset	#sign_bit,FP_SCR1(%a6)
+unfl_signok:
+	leal	FP_SCR1(%a6),%a0
+	orl	#unfinx_mask,USER_FPSR(%a6)
+|					;set UNFL, INEX2, AUNFL, AINEX
+unfl_con:
+	btstb	#unfl_bit,FPCR_ENABLE(%a6)
+	beqs	unfl_dis
+
+unfl_ena:
+	bfclr	STAG(%a6){#5:#3}		|clear wbtm66,wbtm1,wbtm0
+	bsetb	#wbtemp15_bit,WB_BYTE(%a6) |set wbtemp15
+	bsetb	#sticky_bit,STICKY(%a6)	|set sticky bit
+
+	bclrb	#E1,E_BYTE(%a6)
+
+unfl_dis:
+	bfextu	FPCR_MODE(%a6){#0:#2},%d0	|get round precision
+
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)		|convert to internal ext format
+
+	bsr	unf_sub			|returns IEEE result at a0
+|					;and sets FPSR_CC accordingly
+
+	bfclr	LOCAL_SGN(%a0){#0:#8}	|convert back to IEEE ext format
+	beqs	unfl_fin
+
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+	bsetb	#sign_bit,FP_SCR1(%a6)	|set sign bit of exc operand
+
+unfl_fin:
+	fmovemx (%a0),%fp0-%fp0		|store result in fp0
+	rts
+
+
+|
+|	t_ovfl2 --- OVFL exception (without inex2 returned)
+|
+| This entry is used by scale to force catastrophic overflow.  The
+| ovfl, aovfl, and ainex bits are set, but not the inex2 bit.
+|
+t_ovfl2:
+	orl	#ovfl_inx_mask,USER_FPSR(%a6)
+	movel	ETEMP(%a6),FP_SCR1(%a6)
+	movel	ETEMP_HI(%a6),FP_SCR1+4(%a6)
+	movel	ETEMP_LO(%a6),FP_SCR1+8(%a6)
+|
+| Check for single or double round precision.  If single, check if
+| the lower 40 bits of ETEMP are zero; if not, set inex2.  If double,
+| check if the lower 11 bits are zero; if not, set inex2.
+|
+	moveb	FPCR_MODE(%a6),%d0
+	andib	#0xc0,%d0
+	beq	t_work		|if extended, finish ovfl processing
+	cmpib	#0x40,%d0		|test for single
+	bnes	t_dbl
+t_sgl:
+	tstb	ETEMP_LO(%a6)
+	bnes	t_setinx2
+	movel	ETEMP_HI(%a6),%d0
+	andil	#0xff,%d0		|look at only lower 8 bits
+	bnes	t_setinx2
+	bra	t_work
+t_dbl:
+	movel	ETEMP_LO(%a6),%d0
+	andil	#0x7ff,%d0	|look at only lower 11 bits
+	beq	t_work
+t_setinx2:
+	orl	#inex2_mask,USER_FPSR(%a6)
+	bras	t_work
+|
+|	t_ovfl --- OVFL exception
+|
+|** Note: the exc operand is returned in ETEMP.
+|
+t_ovfl:
+	orl	#ovfinx_mask,USER_FPSR(%a6)
+t_work:
+	btstb	#ovfl_bit,FPCR_ENABLE(%a6) |test FPCR for ovfl enabled
+	beqs	ovf_dis
+
+ovf_ena:
+	clrl	FP_SCR1(%a6)		|set exceptional operand
+	clrl	FP_SCR1+4(%a6)
+	clrl	FP_SCR1+8(%a6)
+
+	bfclr	STAG(%a6){#5:#3}		|clear wbtm66,wbtm1,wbtm0
+	bclrb	#wbtemp15_bit,WB_BYTE(%a6) |clear wbtemp15
+	bsetb	#sticky_bit,STICKY(%a6)	|set sticky bit
+
+	bclrb	#E1,E_BYTE(%a6)
+|					;fall through to disabled case
+
+| For disabled overflow call 'ovf_r_k'.  This routine loads the
+| correct result based on the rounding precision, destination
+| format, rounding mode and sign.
+|
+ovf_dis:
+	bsr	ovf_r_k			|returns unsigned ETEMP_EX
+|					;and sets FPSR_CC accordingly.
+	bfclr	ETEMP_SGN(%a6){#0:#8}	|fix sign
+	beqs	ovf_pos
+	bsetb	#sign_bit,ETEMP_EX(%a6)
+	bsetb	#sign_bit,FP_SCR1(%a6)	|set exceptional operand sign
+ovf_pos:
+	fmovemx ETEMP(%a6),%fp0-%fp0		|move the result to fp0
+	rts
+
+
+|
+|	INEX2 exception
+|
+| The inex2 and ainex bits are set.
+|
+t_inx2:
+	orl	#inx2a_mask,USER_FPSR(%a6) |set INEX2, AINEX
+	rts
+
+|
+|	Force Inex2
+|
+| This routine is called by the transcendental routines to force
+| the inex2 exception bits set in the FPSR.  If the underflow bit
+| is set, but the underflow trap was not taken, the aunfl bit in
+| the FPSR must be set.
+|
+t_frcinx:
+	orl	#inx2a_mask,USER_FPSR(%a6) |set INEX2, AINEX
+	btstb	#unfl_bit,FPSR_EXCEPT(%a6) |test for unfl bit set
+	beqs	no_uacc1		|if clear, do not set aunfl
+	bsetb	#aunfl_bit,FPSR_AEXCEPT(%a6)
+no_uacc1:
+	rts
+
+|
+|	DST_NAN
+|
+| Determine if the destination nan is signalling or non-signalling,
+| and set the FPSR bits accordingly.  See the MC68040 User's Manual
+| section 3.2.2.5 NOT-A-NUMBERS.
+|
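+| (signan_bit is the msb of the mantissa: set means a non-signalling
+| nan, clear means a signalling nan.)
+|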
+dst_nan:
+	btstb	#sign_bit,FPTEMP_EX(%a6) |test sign of nan
+	beqs	dst_pos			|if clr, it was positive
+	bsetb	#neg_bit,FPSR_CC(%a6)	|set N bit
+dst_pos:
+	btstb	#signan_bit,FPTEMP_HI(%a6) |check if signalling
+	beqs	dst_snan		|branch if signalling
+
+	fmovel	%d1,%fpcr			|restore user's rmode/prec
+	fmovex FPTEMP(%a6),%fp0		|return the non-signalling nan
+|
+| Check the source nan.  If it is signalling, snan will be reported.
+|
+	moveb	STAG(%a6),%d0
+	andib	#0xe0,%d0
+	cmpib	#0x60,%d0
+	bnes	no_snan
+	btstb	#signan_bit,ETEMP_HI(%a6) |check if signalling
+	bnes	no_snan
+	orl	#snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP
+no_snan:
+	rts
+
+dst_snan:
+	btstb	#snan_bit,FPCR_ENABLE(%a6) |check if trap enabled
+	beqs	dst_dis			|branch if disabled
+
+	orb	#nan_tag,DTAG(%a6)	|set up dtag for nan
+	st	STORE_FLG(%a6)		|do not store a result
+	orl	#snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP
+	rts
+
+dst_dis:
+	bsetb	#signan_bit,FPTEMP_HI(%a6) |set SNAN bit in sop
+	fmovel	%d1,%fpcr			|restore user's rmode/prec
+	fmovex FPTEMP(%a6),%fp0		|load non-sign. nan
+	orl	#snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP
+	rts
+
+|
+|	SRC_NAN
+|
+| Determine if the source nan is signalling or non-signalling,
+| and set the FPSR bits accordingly.  See the MC68040 User's Manual
+| section 3.2.2.5 NOT-A-NUMBERS.
+|
+src_nan:
+	btstb	#sign_bit,ETEMP_EX(%a6) |test sign of nan
+	beqs	src_pos			|if clr, it was positive
+	bsetb	#neg_bit,FPSR_CC(%a6)	|set N bit
+src_pos:
+	btstb	#signan_bit,ETEMP_HI(%a6) |check if signalling
+	beqs	src_snan		|branch if signalling
+	fmovel	%d1,%fpcr			|restore user's rmode/prec
+	fmovex ETEMP(%a6),%fp0		|return the non-signalling nan
+	rts
+
+src_snan:
+	btstb	#snan_bit,FPCR_ENABLE(%a6) |check if trap enabled
+	beqs	src_dis			|branch if disabled
+	bsetb	#signan_bit,ETEMP_HI(%a6) |set SNAN bit in sop
+	orb	#norm_tag,DTAG(%a6)	|set up dtag for norm
+	orb	#nan_tag,STAG(%a6)	|set up stag for nan
+	st	STORE_FLG(%a6)		|do not store a result
+	orl	#snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP
+	rts
+
+src_dis:
+	bsetb	#signan_bit,ETEMP_HI(%a6) |set SNAN bit in sop
+	fmovel	%d1,%fpcr			|restore user's rmode/prec
+	fmovex ETEMP(%a6),%fp0		|load non-sign. nan
+	orl	#snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP
+	rts
+
+|
+| For all functions that have a denormalized input and that f(x)=x,
+| this is the entry point
+|
+t_extdnrm:
+	orl	#unfinx_mask,USER_FPSR(%a6)
+|					;set UNFL, INEX2, AUNFL, AINEX
+	bras	xdnrm_con
+|
+| Entry point for scale with extended denorm.  The function does
+| not set inex2, aunfl, or ainex.
+|
+t_resdnrm:
+	orl	#unfl_mask,USER_FPSR(%a6)
+
+xdnrm_con:
+	btstb	#unfl_bit,FPCR_ENABLE(%a6)
+	beqs	xdnrm_dis
+
+|
+| If exceptions are enabled, the additional task of setting up WBTEMP
+| is needed so that when the underflow exception handler is entered,
+| the user perceives no difference between what the 040 provides vs.
+| what the FPSP provides.
+|
+xdnrm_ena:
+	movel	%a0,-(%a7)
+
+	movel	LOCAL_EX(%a0),FP_SCR1(%a6)
+	movel	LOCAL_HI(%a0),FP_SCR1+4(%a6)
+	movel	LOCAL_LO(%a0),FP_SCR1+8(%a6)
+
+	lea	FP_SCR1(%a6),%a0
+
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)		|convert to internal ext format
+	tstw	LOCAL_EX(%a0)		|check if input is denorm
+	beqs	xdnrm_dn		|if so, skip nrm_set
+	bsr	nrm_set			|normalize the result (exponent
+|					;will be negative
+xdnrm_dn:
+	bclrb	#sign_bit,LOCAL_EX(%a0)	|take off false sign
+	bfclr	LOCAL_SGN(%a0){#0:#8}	|change back to IEEE ext format
+	beqs	xdep
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+xdep:
+	bfclr	STAG(%a6){#5:#3}		|clear wbtm66,wbtm1,wbtm0
+	bsetb	#wbtemp15_bit,WB_BYTE(%a6) |set wbtemp15
+	bclrb	#sticky_bit,STICKY(%a6)	|clear sticky bit
+	bclrb	#E1,E_BYTE(%a6)
+	movel	(%a7)+,%a0
+xdnrm_dis:
+	bfextu	FPCR_MODE(%a6){#0:#2},%d0	|get round precision
+	bnes	not_ext			|if not round extended, store
+|					;IEEE defaults
+is_ext:
+	btstb	#sign_bit,LOCAL_EX(%a0)
+	beqs	xdnrm_store
+
+	bsetb	#neg_bit,FPSR_CC(%a6)	|set N bit in FPSR_CC
+
+	bras	xdnrm_store
+
+not_ext:
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)		|convert to internal ext format
+	bsr	unf_sub			|returns IEEE result pointed by
+|					;a0; sets FPSR_CC accordingly
+	bfclr	LOCAL_SGN(%a0){#0:#8}	|convert back to IEEE ext format
+	beqs	xdnrm_store
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+xdnrm_store:
+	fmovemx (%a0),%fp0-%fp0		|store result in fp0
+	rts
+
+|
+| This subroutine is used for dyadic operations that use an extended
+| denorm within the kernel. The approach used is to capture the frame,
+| fix/restore.
+|
+	.global	t_avoid_unsupp
+t_avoid_unsupp:
+	link	%a2,#-LOCAL_SIZE		|so that a2 fpsp.h negative
+|					;offsets may be used
+	fsave	-(%a7)
+	tstb	1(%a7)			|check if idle, exit if so
+	beq	idle_end
+	btstb	#E1,E_BYTE(%a2)		|check for an E1 exception if
+|					;enabled, there is an unsupp
+	beq	end_avun		|else, exit
+	btstb	#7,DTAG(%a2)		|check for denorm destination
+	beqs	src_den			|else, must be a source denorm
+|
+| handle destination denorm
+|
+	lea	FPTEMP(%a2),%a0
+	btstb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)		|convert to internal ext format
+	bclrb	#7,DTAG(%a2)		|set DTAG to norm
+	bsr	nrm_set			|normalize result, exponent
+|					;will become negative
+	bclrb	#sign_bit,LOCAL_EX(%a0)	|get rid of fake sign
+	bfclr	LOCAL_SGN(%a0){#0:#8}	|convert back to IEEE ext format
+	beqs	ck_src_den		|check if source is also denorm
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+ck_src_den:
+	btstb	#7,STAG(%a2)
+	beqs	end_avun
+src_den:
+	lea	ETEMP(%a2),%a0
+	btstb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)		|convert to internal ext format
+	bclrb	#7,STAG(%a2)		|set STAG to norm
+	bsr	nrm_set			|normalize result, exponent
+|					;will become negative
+	bclrb	#sign_bit,LOCAL_EX(%a0)	|get rid of fake sign
+	bfclr	LOCAL_SGN(%a0){#0:#8}	|convert back to IEEE ext format
+	beqs	den_com
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+den_com:
+	moveb	#0xfe,CU_SAVEPC(%a2)	|set continue frame
+	clrw	NMNEXC(%a2)		|clear NMNEXC
+	bclrb	#E1,E_BYTE(%a2)
+|	fmove.l	%FPSR,FPSR_SHADOW(%a2)
+|	bset.b	#SFLAG,E_BYTE(%a2)
+|	bset.b	#XFLAG,T_BYTE(%a2)
+end_avun:
+	frestore (%a7)+
+	unlk	%a2
+	rts
+idle_end:
+	addl	#4,%a7
+	unlk	%a2
+	rts
+	|end
diff --git a/arch/m68k/fpsp040/res_func.S b/arch/m68k/fpsp040/res_func.S
new file mode 100644
index 0000000..8f6b952
--- /dev/null
+++ b/arch/m68k/fpsp040/res_func.S
@@ -0,0 +1,2040 @@
+|
+|	res_func.sa 3.9 7/29/91
+|
+| Normalizes denormalized numbers if necessary and updates the
+| stack frame.  The function is then restored back into the
+| machine and the 040 completes the operation.  This routine
+| is only used by the unsupported data type/format handler.
+| (Exception vector 55).
+|
+| For packed move out (fmove.p fpm,<ea>) the operation is
+| completed here; data is packed and moved to user memory.
+| The stack is restored to the 040 only in the case of a
+| reportable exception in the conversion.
+|
+|
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+RES_FUNC:    |idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
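+|	sp_bnds: single-precision exponent bounds ($3f81 min norm, $407e
+|	max) and denorm threshold ($3f6a), biased for the extended format.
+|	dp_bnds: the same bounds for double ($3c01, $43fe, $3bcd).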
+sp_bnds:	.short	0x3f81,0x407e
+		.short	0x3f6a,0x0000
+dp_bnds:	.short	0x3c01,0x43fe
+		.short	0x3bcd,0x0000
+
+	|xref	mem_write
+	|xref	bindec
+	|xref	get_fline
+	|xref	round
+	|xref	denorm
+	|xref	dest_ext
+	|xref	dest_dbl
+	|xref	dest_sgl
+	|xref	unf_sub
+	|xref	nrm_set
+	|xref	dnrm_lp
+	|xref	ovf_res
+	|xref	reg_dest
+	|xref	t_ovfl
+	|xref	t_unfl
+
+	.global	res_func
+	.global	p_move
+
+res_func:
+	clrb	DNRM_FLG(%a6)
+	clrb	RES_FLG(%a6)
+	clrb	CU_ONLY(%a6)
+	tstb	DY_MO_FLG(%a6)
+	beqs	monadic
+dyadic:
+	btstb	#7,DTAG(%a6)	|if dop = norm=000, zero=001,
+|				;inf=010 or nan=011
+	beqs	monadic		|then branch
+|				;else denorm
+| HANDLE DESTINATION DENORM HERE
+|				;set dtag to norm
+|				;write the tag & fpte15 to the fstack
+	leal	FPTEMP(%a6),%a0
+
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)
+
+	bsr	nrm_set		|normalize number (exp will go negative)
+	bclrb	#sign_bit,LOCAL_EX(%a0) |get rid of false sign
+	bfclr	LOCAL_SGN(%a0){#0:#8}	|change back to IEEE ext format
+	beqs	dpos
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+dpos:
+	bfclr	DTAG(%a6){#0:#4}	|set tag to normalized, FPTE15 = 0
+	bsetb	#4,DTAG(%a6)	|set FPTE15
+	orb	#0x0f,DNRM_FLG(%a6)
+monadic:
+	leal	ETEMP(%a6),%a0
+	btstb	#direction_bit,CMDREG1B(%a6)	|check direction
+	bne	opclass3			|it is a mv out
+|
+| At this point, only opclass 0 and 2 possible
+|
+	btstb	#7,STAG(%a6)	|if sop = norm=000, zero=001,
+|				;inf=010 or nan=011
+	bne	mon_dnrm	|else denorm
+	tstb	DY_MO_FLG(%a6)	|all cases of dyadic instructions would
+	bne	normal		|require normalization of denorm
+
+| At this point:
+|	monadic instructions:	fabs  = $18  fneg   = $1a  ftst   = $3a
+|				fmove = $00  fsmove = $40  fdmove = $44
+|				fsqrt = $05* fssqrt = $41  fdsqrt = $45
+|				(*fsqrt reencoded to $05)
+|
+	movew	CMDREG1B(%a6),%d0	|get command register
+	andil	#0x7f,%d0			|strip to only command word
+|
+| At this point, fabs, fneg, fsmove, fdmove, ftst, fsqrt, fssqrt, and
+| fdsqrt are possible.
+| For cases fabs, fneg, fsmove, and fdmove goto spos (do not normalize)
+| For cases fsqrt, fssqrt, and fdsqrt goto nrm_src (do normalize)
+|
+	btstl	#0,%d0
+	bne	normal			|weed out fsqrt instructions
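+|				;(of the encodings above, only fsqrt,
+|				;fssqrt, and fdsqrt have bit 0 set)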
+|
+| cu_norm handles fmove in instructions with normalized inputs.
+| The routine round is used to correctly round the input for the
+| destination precision and mode.
+|
+cu_norm:
+	st	CU_ONLY(%a6)		|set cu-only inst flag
+	movew	CMDREG1B(%a6),%d0
+	andib	#0x3b,%d0		|isolate bits to select inst
+	tstb	%d0
+	beql	cu_nmove	|if zero, it is an fmove
+	cmpib	#0x18,%d0
+	beql	cu_nabs		|if $18, it is fabs
+	cmpib	#0x1a,%d0
+	beql	cu_nneg		|if $1a, it is fneg
+|
+| Inst is ftst.  Check the source operand and set the cc's accordingly.
+| No write is done, so simply rts.
+|
+cu_ntst:
+	movew	LOCAL_EX(%a0),%d0
+	bclrl	#15,%d0
+	sne	LOCAL_SGN(%a0)
+	beqs	cu_ntpo
+	orl	#neg_mask,USER_FPSR(%a6) |set N
+cu_ntpo:
+	cmpiw	#0x7fff,%d0	|test for inf/nan
+	bnes	cu_ntcz
+	tstl	LOCAL_HI(%a0)
+	bnes	cu_ntn
+	tstl	LOCAL_LO(%a0)
+	bnes	cu_ntn
+	orl	#inf_mask,USER_FPSR(%a6)
+	rts
+cu_ntn:
+	orl	#nan_mask,USER_FPSR(%a6)
+	movel	ETEMP_EX(%a6),FPTEMP_EX(%a6)	|set up fptemp sign for
+|						;snan handler
+
+	rts
+cu_ntcz:
+	tstl	LOCAL_HI(%a0)
+	bnel	cu_ntsx
+	tstl	LOCAL_LO(%a0)
+	bnel	cu_ntsx
+	orl	#z_mask,USER_FPSR(%a6)
+cu_ntsx:
+	rts
+|
+| Inst is fabs.  Execute the absolute value function on the input.
+| Branch to the fmove code.  If the operand is NaN, do nothing.
+|
+cu_nabs:
+	moveb	STAG(%a6),%d0
+	btstl	#5,%d0			|test for NaN or zero
+	bne	wr_etemp		|if either, simply write it
+	bclrb	#7,LOCAL_EX(%a0)		|do abs
+	bras	cu_nmove		|fmove code will finish
+|
+| Inst is fneg.  Execute the negate value function on the input.
+| Fall through to the fmove code.  If the operand is NaN, do nothing.
+|
+cu_nneg:
+	moveb	STAG(%a6),%d0
+	btstl	#5,%d0			|test for NaN or zero
+	bne	wr_etemp		|if either, simply write it
+	bchgb	#7,LOCAL_EX(%a0)		|do neg
+|
+| Inst is fmove.  This code also handles all result writes.
+| If bit 2 is set, round is forced to double.  If it is clear,
+| and bit 6 is set, round is forced to single.  If both are clear,
+| the round precision is found in the fpcr.  If the rounding precision
+| is double or single, round the result before the write.
+|
+cu_nmove:
+	moveb	STAG(%a6),%d0
+	andib	#0xe0,%d0			|isolate stag bits
+	bne	wr_etemp		|if not norm, simply write it
+	btstb	#2,CMDREG1B+1(%a6)	|check for rd
+	bne	cu_nmrd
+	btstb	#6,CMDREG1B+1(%a6)	|check for rs
+	bne	cu_nmrs
+|
+| The move or operation is not with forced precision.  Test for
+| nan or inf as the input; if so, simply write it to FPn.  Use the
+| FPCR_MODE byte to get rounding on norms and zeros.
+|
+cu_nmnr:
+	bfextu	FPCR_MODE(%a6){#0:#2},%d0
+	tstb	%d0			|check for extended
+	beq	cu_wrexn		|if so, just write result
+	cmpib	#1,%d0			|check for single
+	beq	cu_nmrs			|fall through to double
+|
+| The move is fdmove or round precision is double.
+|
+cu_nmrd:
+	movel	#2,%d0			|set up the size for denorm
+	movew	LOCAL_EX(%a0),%d1		|compare exponent to double threshold
+	andw	#0x7fff,%d1
+	cmpw	#0x3c01,%d1
+	bls	cu_nunfl
+	bfextu	FPCR_MODE(%a6){#2:#2},%d1	|get rmode
+	orl	#0x00020000,%d1		|or in rprec (double)
+	clrl	%d0			|clear g,r,s for round
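+|				;(round is called here with d1 holding
+|				;rprec in the upper word and rmode in
+|				;bits 1:0, and d0 holding g,r,s)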
+	bclrb	#sign_bit,LOCAL_EX(%a0)	|convert to internal format
+	sne	LOCAL_SGN(%a0)
+	bsrl	round
+	bfclr	LOCAL_SGN(%a0){#0:#8}
+	beqs	cu_nmrdc
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+cu_nmrdc:
+	movew	LOCAL_EX(%a0),%d1		|check for overflow
+	andw	#0x7fff,%d1
+	cmpw	#0x43ff,%d1
+	bge	cu_novfl		|take care of overflow case
+	bra	cu_wrexn
+|
+| The move is fsmove or round precision is single.
+|
+cu_nmrs:
+	movel	#1,%d0
+	movew	LOCAL_EX(%a0),%d1
+	andw	#0x7fff,%d1
+	cmpw	#0x3f81,%d1
+	bls	cu_nunfl
+	bfextu	FPCR_MODE(%a6){#2:#2},%d1
+	orl	#0x00010000,%d1
+	clrl	%d0
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)
+	bsrl	round
+	bfclr	LOCAL_SGN(%a0){#0:#8}
+	beqs	cu_nmrsc
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+cu_nmrsc:
+	movew	LOCAL_EX(%a0),%d1
+	andw	#0x7FFF,%d1
+	cmpw	#0x407f,%d1
+	blt	cu_wrexn
+|
+| The operand is above precision boundaries.  Use t_ovfl to
+| generate the correct value.
+|
+cu_novfl:
+	bsr	t_ovfl
+	bra	cu_wrexn
+|
+| The operand is below precision boundaries.  Use denorm to
+| generate the correct value.
+|
+cu_nunfl:
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)
+	bsr	denorm
+	bfclr	LOCAL_SGN(%a0){#0:#8}	|change back to IEEE ext format
+	beqs	cu_nucont
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+cu_nucont:
+	bfextu	FPCR_MODE(%a6){#2:#2},%d1
+	btstb	#2,CMDREG1B+1(%a6)	|check for rd
+	bne	inst_d
+	btstb	#6,CMDREG1B+1(%a6)	|check for rs
+	bne	inst_s
+	swap	%d1
+	moveb	FPCR_MODE(%a6),%d1
+	lsrb	#6,%d1
+	swap	%d1
+	bra	inst_sd
+inst_d:
+	orl	#0x00020000,%d1
+	bra	inst_sd
+inst_s:
+	orl	#0x00010000,%d1
+inst_sd:
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)
+	bsrl	round
+	bfclr	LOCAL_SGN(%a0){#0:#8}
+	beqs	cu_nuflp
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+cu_nuflp:
+	btstb	#inex2_bit,FPSR_EXCEPT(%a6)
+	beqs	cu_nuninx
+	orl	#aunfl_mask,USER_FPSR(%a6) |if the round was inex, set AUNFL
+cu_nuninx:
+	tstl	LOCAL_HI(%a0)		|test for zero
+	bnes	cu_nunzro
+	tstl	LOCAL_LO(%a0)
+	bnes	cu_nunzro
+|
+| The mantissa is zero from the denorm loop.  Check sign and rmode
+| to see if rounding should have occurred which would leave the lsb.
+|
+	movel	USER_FPCR(%a6),%d0
+	andil	#0x30,%d0		|isolate rmode
+	cmpil	#0x20,%d0
+	blts	cu_nzro
+	bnes	cu_nrp
+cu_nrm:
+	tstw	LOCAL_EX(%a0)	|if positive, set lsb
+	bges	cu_nzro
+	btstb	#7,FPCR_MODE(%a6) |check for double
+	beqs	cu_nincs
+	bras	cu_nincd
+cu_nrp:
+	tstw	LOCAL_EX(%a0)	|if positive, set lsb
+	blts	cu_nzro
+	btstb	#7,FPCR_MODE(%a6) |check for double
+	beqs	cu_nincs
+cu_nincd:
+	orl	#0x800,LOCAL_LO(%a0) |inc for double
+	bra	cu_nunzro
+cu_nincs:
+	orl	#0x100,LOCAL_HI(%a0) |inc for single
+	bra	cu_nunzro
+cu_nzro:
+	orl	#z_mask,USER_FPSR(%a6)
+	moveb	STAG(%a6),%d0
+	andib	#0xe0,%d0
+	cmpib	#0x40,%d0		|check if input was tagged zero
+	beqs	cu_numv
+cu_nunzro:
+	orl	#unfl_mask,USER_FPSR(%a6) |set unfl
+cu_numv:
+	movel	(%a0),ETEMP(%a6)
+	movel	4(%a0),ETEMP_HI(%a6)
+	movel	8(%a0),ETEMP_LO(%a6)
+|
+| Write the result to memory, setting the fpsr cc bits.  NaN and Inf
+| bypass cu_wrexn.
+|
+cu_wrexn:
+	tstw	LOCAL_EX(%a0)		|test for zero
+	beqs	cu_wrzero
+	cmpw	#0x8000,LOCAL_EX(%a0)	|test for zero
+	bnes	cu_wreon
+cu_wrzero:
+	orl	#z_mask,USER_FPSR(%a6)	|set Z bit
+cu_wreon:
+	tstw	LOCAL_EX(%a0)
+	bpl	wr_etemp
+	orl	#neg_mask,USER_FPSR(%a6)
+	bra	wr_etemp
+
+|
+| HANDLE SOURCE DENORM HERE
+|
+|				;clear denorm stag to norm
+|				;write the new tag & ete15 to the fstack
+mon_dnrm:
+|
+| At this point, check for the cases in which normalizing the
+| denorm produces incorrect results.
+|
+	tstb	DY_MO_FLG(%a6)	|all cases of dyadic instructions would
+	bnes	nrm_src		|require normalization of denorm
+
+| At this point:
+|	monadic instructions:	fabs  = $18  fneg   = $1a  ftst   = $3a
+|				fmove = $00  fsmove = $40  fdmove = $44
+|				fsqrt = $05* fssqrt = $41  fdsqrt = $45
+|				(*fsqrt reencoded to $05)
+|
+	movew	CMDREG1B(%a6),%d0	|get command register
+	andil	#0x7f,%d0			|strip to only command word
+|
+| At this point, fabs, fneg, fsmove, fdmove, ftst, fsqrt, fssqrt, and
+| fdsqrt are possible.
+| For cases fabs, fneg, fsmove, and fdmove goto spos (do not normalize)
+| For cases fsqrt, fssqrt, and fdsqrt goto nrm_src (do normalize)
+|
+	btstl	#0,%d0
+	bnes	nrm_src		|weed out fsqrt instructions
+	st	CU_ONLY(%a6)	|set cu-only inst flag
+	bra	cu_dnrm		|fmove, fabs, fneg, ftst
+|				;cases go to cu_dnrm
+nrm_src:
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)
+	bsr	nrm_set		|normalize number (exponent will go
+|				; negative)
+	bclrb	#sign_bit,LOCAL_EX(%a0) |get rid of false sign
+
+	bfclr	LOCAL_SGN(%a0){#0:#8}	|change back to IEEE ext format
+	beqs	spos
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+spos:
+	bfclr	STAG(%a6){#0:#4}	|set tag to normalized, FPTE15 = 0
+	bsetb	#4,STAG(%a6)	|set ETE15
+	orb	#0xf0,DNRM_FLG(%a6)
+normal:
+	tstb	DNRM_FLG(%a6)	|check if any of the ops were denorms
+	bne	ck_wrap		|if so, check if it is a potential
+|				;wrap-around case
+fix_stk:
+	moveb	#0xfe,CU_SAVEPC(%a6)
+	bclrb	#E1,E_BYTE(%a6)
+
+	clrw	NMNEXC(%a6)
+
+	st	RES_FLG(%a6)	|indicate that a restore is needed
+	rts
+
+|
+| cu_dnrm handles all cu-only instructions (fmove, fabs, fneg, and
+| ftst) completely in software without an frestore to the 040.
+|
+cu_dnrm:
+	st	CU_ONLY(%a6)
+	movew	CMDREG1B(%a6),%d0
+	andib	#0x3b,%d0		|isolate bits to select inst
+	tstb	%d0
+	beql	cu_dmove	|if zero, it is an fmove
+	cmpib	#0x18,%d0
+	beql	cu_dabs		|if $18, it is fabs
+	cmpib	#0x1a,%d0
+	beql	cu_dneg		|if $1a, it is fneg
+|
+| Inst is ftst.  Check the source operand and set the cc's accordingly.
+| No write is done, so simply rts.
+|
+cu_dtst:
+	movew	LOCAL_EX(%a0),%d0
+	bclrl	#15,%d0
+	sne	LOCAL_SGN(%a0)
+	beqs	cu_dtpo
+	orl	#neg_mask,USER_FPSR(%a6) |set N
+cu_dtpo:
+	cmpiw	#0x7fff,%d0	|test for inf/nan
+	bnes	cu_dtcz
+	tstl	LOCAL_HI(%a0)
+	bnes	cu_dtn
+	tstl	LOCAL_LO(%a0)
+	bnes	cu_dtn
+	orl	#inf_mask,USER_FPSR(%a6)
+	rts
+cu_dtn:
+	orl	#nan_mask,USER_FPSR(%a6)
+	movel	ETEMP_EX(%a6),FPTEMP_EX(%a6)	|set up fptemp sign for
+|						;snan handler
+	rts
+cu_dtcz:
+	tstl	LOCAL_HI(%a0)
+	bnel	cu_dtsx
+	tstl	LOCAL_LO(%a0)
+	bnel	cu_dtsx
+	orl	#z_mask,USER_FPSR(%a6)
+cu_dtsx:
+	rts
+|
+| Inst is fabs.  Execute the absolute value function on the input.
+| Branch to the fmove code.
+|
+cu_dabs:
+	bclrb	#7,LOCAL_EX(%a0)		|do abs
+	bras	cu_dmove		|fmove code will finish
+|
+| Inst is fneg.  Execute the negate value function on the input.
+| Fall through to the fmove code.
+|
+cu_dneg:
+	bchgb	#7,LOCAL_EX(%a0)		|do neg
+|
+| Inst is fmove.  This code also handles all result writes.
+| If bit 2 is set, round is forced to double.  If it is clear,
+| and bit 6 is set, round is forced to single.  If both are clear,
+| the round precision is found in the fpcr.  If the rounding precision
+| is double or single, the result is zero, and the mode is checked
+| to determine if the lsb of the result should be set.
+|
+cu_dmove:
+	btstb	#2,CMDREG1B+1(%a6)	|check for rd
+	bne	cu_dmrd
+	btstb	#6,CMDREG1B+1(%a6)	|check for rs
+	bne	cu_dmrs
+|
+| The move or operation is not with forced precision.  Use the
+| FPCR_MODE byte to get rounding.
+|
+cu_dmnr:
+	bfextu	FPCR_MODE(%a6){#0:#2},%d0
+	tstb	%d0			|check for extended
+	beq	cu_wrexd		|if so, just write result
+	cmpib	#1,%d0			|check for single
+	beq	cu_dmrs			|fall through to double
+|
+| The move is fdmove or round precision is double.  Result is zero.
+| Check rmode for rp or rm and set lsb accordingly.
+|
+cu_dmrd:
+	bfextu	FPCR_MODE(%a6){#2:#2},%d1	|get rmode
+	tstw	LOCAL_EX(%a0)		|check sign
+	blts	cu_dmdn
+	cmpib	#3,%d1			|check for rp
+	bne	cu_dpd			|load double pos zero
+	bra	cu_dpdr			|load double pos zero w/lsb
+cu_dmdn:
+	cmpib	#2,%d1			|check for rm
+	bne	cu_dnd			|load double neg zero
+	bra	cu_dndr			|load double neg zero w/lsb
+|
+| The move is fsmove or round precision is single.  Result is zero.
+| Check for rp or rm and set lsb accordingly.
+|
+cu_dmrs:
+	bfextu	FPCR_MODE(%a6){#2:#2},%d1	|get rmode
+	tstw	LOCAL_EX(%a0)		|check sign
+	blts	cu_dmsn
+	cmpib	#3,%d1			|check for rp
+	bne	cu_spd			|load single pos zero
+	bra	cu_spdr			|load single pos zero w/lsb
+cu_dmsn:
+	cmpib	#2,%d1			|check for rm
+	bne	cu_snd			|load single neg zero
+	bra	cu_sndr			|load single neg zero w/lsb
+|
+| The precision is extended, so the result in etemp is correct.
+| Simply set unfl (not inex2 or aunfl) and write the result to
+| the correct fp register.
+cu_wrexd:
+	orl	#unfl_mask,USER_FPSR(%a6)
+	tstw	LOCAL_EX(%a0)
+	beq	wr_etemp
+	orl	#neg_mask,USER_FPSR(%a6)
+	bra	wr_etemp
+|
+| These routines write +/- zero in double format.  The routines
+| cu_dpdr and cu_dndr set the double lsb.
+|
+cu_dpd:
+	movel	#0x3c010000,LOCAL_EX(%a0)	|force pos double zero
+	clrl	LOCAL_HI(%a0)
+	clrl	LOCAL_LO(%a0)
+	orl	#z_mask,USER_FPSR(%a6)
+	orl	#unfinx_mask,USER_FPSR(%a6)
+	bra	wr_etemp
+cu_dpdr:
+	movel	#0x3c010000,LOCAL_EX(%a0)	|force pos double zero
+	clrl	LOCAL_HI(%a0)
+	movel	#0x800,LOCAL_LO(%a0)	|with lsb set
+	orl	#unfinx_mask,USER_FPSR(%a6)
+	bra	wr_etemp
+cu_dnd:
+	movel	#0xbc010000,LOCAL_EX(%a0)	|force neg double zero
+	clrl	LOCAL_HI(%a0)
+	clrl	LOCAL_LO(%a0)
+	orl	#z_mask,USER_FPSR(%a6)
+	orl	#neg_mask,USER_FPSR(%a6)
+	orl	#unfinx_mask,USER_FPSR(%a6)
+	bra	wr_etemp
+cu_dndr:
+	movel	#0xbc010000,LOCAL_EX(%a0)	|force neg double zero
+	clrl	LOCAL_HI(%a0)
+	movel	#0x800,LOCAL_LO(%a0)	|with lsb set
+	orl	#neg_mask,USER_FPSR(%a6)
+	orl	#unfinx_mask,USER_FPSR(%a6)
+	bra	wr_etemp
+|
+| These routines write +/- zero in single format.  The routines
+| cu_spdr and cu_sndr set the single lsb.
+|
+cu_spd:
+	movel	#0x3f810000,LOCAL_EX(%a0)	|force pos single zero
+	clrl	LOCAL_HI(%a0)
+	clrl	LOCAL_LO(%a0)
+	orl	#z_mask,USER_FPSR(%a6)
+	orl	#unfinx_mask,USER_FPSR(%a6)
+	bra	wr_etemp
+cu_spdr:
+	movel	#0x3f810000,LOCAL_EX(%a0)	|force pos single zero
+	movel	#0x100,LOCAL_HI(%a0)	|with lsb set
+	clrl	LOCAL_LO(%a0)
+	orl	#unfinx_mask,USER_FPSR(%a6)
+	bra	wr_etemp
+cu_snd:
+	movel	#0xbf810000,LOCAL_EX(%a0)	|force neg single zero
+	clrl	LOCAL_HI(%a0)
+	clrl	LOCAL_LO(%a0)
+	orl	#z_mask,USER_FPSR(%a6)
+	orl	#neg_mask,USER_FPSR(%a6)
+	orl	#unfinx_mask,USER_FPSR(%a6)
+	bra	wr_etemp
+cu_sndr:
+	movel	#0xbf810000,LOCAL_EX(%a0)	|force neg single zero
+	movel	#0x100,LOCAL_HI(%a0)	|with lsb set
+	clrl	LOCAL_LO(%a0)
+	orl	#neg_mask,USER_FPSR(%a6)
+	orl	#unfinx_mask,USER_FPSR(%a6)
+	bra	wr_etemp
+
+|
+| This code checks for 16-bit overflow conditions on dyadic
+| operations which are not restorable into the floating-point
+| unit and must be completed in software.  Basically, this
+| condition exists with a very large norm and a denorm.  One
+| of the operands must be denormalized to enter this code.
+|
+| Flags used:
+|	DY_MO_FLG contains 0 for monadic op, $ff for dyadic
+|	DNRM_FLG contains $00 for neither op denormalized
+|	                  $0f for the destination op denormalized
+|	                  $f0 for the source op denormalized
+|	                  $ff for both ops denormalized
+|
+| The wrap-around condition occurs for add, sub, div, and cmp
+| when
+|
+|	abs(dest_exp - src_exp) >= $8000
+|
+| and for mul when
+|
+|	(dest_exp + src_exp) < $0
+|
+| we must process the operation here if this case is true.
+|
+| The rts following the frcfpn routine is the exit from res_func
+| for this condition.  The restore flag (RES_FLG) is left clear.
+| No frestore is done unless an exception is to be reported.
+|
+| For fadd:
+|	if(sign_of(dest) != sign_of(src))
+|		replace exponent of src with $3fff (keep sign)
+|		use fpu to perform dest+new_src (user's rmode and X)
+|		clr sticky
+|	else
+|		set sticky
+|	call round with user's precision and mode
+|	move result to fpn and wbtemp
+|
+| For fsub:
+|	if(sign_of(dest) == sign_of(src))
+|		replace exponent of src with $3fff (keep sign)
+|		use fpu to perform dest+new_src (user's rmode and X)
+|		clr sticky
+|	else
+|		set sticky
+|	call round with user's precision and mode
+|	move result to fpn and wbtemp
+|
+| For fdiv/fsgldiv:
+|	if(both operands are denorm)
+|		restore_to_fpu;
+|	if(dest is norm)
+|		force_ovf;
+|	else(dest is denorm)
+|		force_unf:
+|
+| For fcmp:
+|	if(dest is norm)
+|		N = sign_of(dest);
+|	else(dest is denorm)
+|		N = sign_of(src);
+|
+| For fmul:
+|	if(both operands are denorm)
+|		force_unf;
+|	if((dest_exp + src_exp) < 0)
+|		force_unf:
+|	else
+|		restore_to_fpu;
+|
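+| For example, a norm dest exp of $7f00 and a normalized-denorm src
+| exp of -$0040 give abs($7f00 - (-$0040)) = $7f40 < $8000, so the
+| frame is simply restored; a dest exp of $7fc0 or more would wrap.
+|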
+| local equates:
+	.set	addcode,0x22
+	.set	subcode,0x28
+	.set	mulcode,0x23
+	.set	divcode,0x20
+	.set	cmpcode,0x38
+ck_wrap:
+	| tstb	DY_MO_FLG(%a6)	;check for fsqrt
+	beq	fix_stk		|if zero, it is fsqrt
+	movew	CMDREG1B(%a6),%d0
+	andiw	#0x3b,%d0		|strip to command bits
+	cmpiw	#addcode,%d0
+	beq	wrap_add
+	cmpiw	#subcode,%d0
+	beq	wrap_sub
+	cmpiw	#mulcode,%d0
+	beq	wrap_mul
+	cmpiw	#cmpcode,%d0
+	beq	wrap_cmp
+|
+| Inst is fdiv.
+|
+wrap_div:
+	cmpb	#0xff,DNRM_FLG(%a6) |if both ops denorm,
+	beq	fix_stk		 |restore to fpu
+|
+| One of the ops is denormalized.  Test for wrap condition
+| and force the result.
+|
+	cmpb	#0x0f,DNRM_FLG(%a6) |check for dest denorm
+	bnes	div_srcd
+div_destd:
+	bsrl	ckinf_ns
+	bne	fix_stk
+	bfextu	ETEMP_EX(%a6){#1:#15},%d0	|get src exp (always pos)
+	bfexts	FPTEMP_EX(%a6){#1:#15},%d1	|get dest exp (always neg)
+	subl	%d1,%d0			|subtract dest from src
+	cmpl	#0x7fff,%d0
+	blt	fix_stk			|if less, not wrap case
+	clrb	WBTEMP_SGN(%a6)
+	movew	ETEMP_EX(%a6),%d0		|find the sign of the result
+	movew	FPTEMP_EX(%a6),%d1
+	eorw	%d1,%d0
+	andiw	#0x8000,%d0
+	beq	force_unf
+	st	WBTEMP_SGN(%a6)
+	bra	force_unf
+
+ckinf_ns:
+	moveb	STAG(%a6),%d0		|check source tag for inf or nan
+	bra	ck_in_com
+ckinf_nd:
+	moveb	DTAG(%a6),%d0		|check destination tag for inf or nan
+ck_in_com:
+	andib	#0x60,%d0			|isolate tag bits
+	cmpb	#0x40,%d0			|is it inf?
+	beq	nan_or_inf		|not wrap case
+	cmpb	#0x60,%d0			|is it nan?
+	beq	nan_or_inf		|yes, not wrap case
+	cmpb	#0x20,%d0			|is it a zero?
+	beq	nan_or_inf		|yes
+	clrl	%d0
+	rts				|then it is either a zero or norm,
+|					;check wrap case
+nan_or_inf:
+	moveql	#-1,%d0
+	rts
+
+
+
+div_srcd:
+	bsrl	ckinf_nd
+	bne	fix_stk
+	bfextu	FPTEMP_EX(%a6){#1:#15},%d0	|get dest exp (always pos)
+	bfexts	ETEMP_EX(%a6){#1:#15},%d1	|get src exp (always neg)
+	subl	%d1,%d0			|subtract src from dest
+	cmpl	#0x8000,%d0
+	blt	fix_stk			|if less, not wrap case
+	clrb	WBTEMP_SGN(%a6)
+	movew	ETEMP_EX(%a6),%d0		|find the sign of the result
+	movew	FPTEMP_EX(%a6),%d1
+	eorw	%d1,%d0
+	andiw	#0x8000,%d0
+	beqs	force_ovf
+	st	WBTEMP_SGN(%a6)
+|
+| This code handles the case of the instruction resulting in
+| an overflow condition.
+|
+force_ovf:
+	bclrb	#E1,E_BYTE(%a6)
+	orl	#ovfl_inx_mask,USER_FPSR(%a6)
+	clrw	NMNEXC(%a6)
+	leal	WBTEMP(%a6),%a0		|point a0 to memory location
+	movew	CMDREG1B(%a6),%d0
+	btstl	#6,%d0			|test for forced precision
+	beqs	frcovf_fpcr
+	btstl	#2,%d0			|check for double
+	bnes	frcovf_dbl
+	movel	#0x1,%d0			|inst is forced single
+	bras	frcovf_rnd
+frcovf_dbl:
+	movel	#0x2,%d0			|inst is forced double
+	bras	frcovf_rnd
+frcovf_fpcr:
+	bfextu	FPCR_MODE(%a6){#0:#2},%d0	|inst not forced - use fpcr prec
+frcovf_rnd:
+
+| The 881/882 does not set inex2 for the following case, so the
+| line is commented out to be compatible with 881/882
+|	tst.b	%d0
+|	beq.b	frcovf_x
+|	or.l	#inex2_mask,USER_FPSR(%a6) ;if prec is s or d, set inex2
+
+|frcovf_x:
+	bsrl	ovf_res			|get correct result based on
+|					;round precision/mode.  This
+|					;sets FPSR_CC correctly
+|					;returns in external format
+	bfclr	WBTEMP_SGN(%a6){#0:#8}
+	beq	frcfpn
+	bsetb	#sign_bit,WBTEMP_EX(%a6)
+	bra	frcfpn
+|
+| Inst is fadd.
+|
+wrap_add:
+	cmpb	#0xff,DNRM_FLG(%a6) |if both ops denorm,
+	beq	fix_stk		 |restore to fpu
+|
+| One of the ops is denormalized.  Test for wrap condition
+| and complete the instruction.
+|
+	cmpb	#0x0f,DNRM_FLG(%a6) |check for dest denorm
+	bnes	add_srcd
+add_destd:
+	bsrl	ckinf_ns
+	bne	fix_stk
+	bfextu	ETEMP_EX(%a6){#1:#15},%d0	|get src exp (always pos)
+	bfexts	FPTEMP_EX(%a6){#1:#15},%d1	|get dest exp (always neg)
+	subl	%d1,%d0			|subtract dest from src
+	cmpl	#0x8000,%d0
+	blt	fix_stk			|if less, not wrap case
+	bra	add_wrap
+add_srcd:
+	bsrl	ckinf_nd
+	bne	fix_stk
+	bfextu	FPTEMP_EX(%a6){#1:#15},%d0	|get dest exp (always pos)
+	bfexts	ETEMP_EX(%a6){#1:#15},%d1	|get src exp (always neg)
+	subl	%d1,%d0			|subtract src from dest
+	cmpl	#0x8000,%d0
+	blt	fix_stk			|if less, not wrap case
+|
+| Check the signs of the operands.  If they are unlike, the fpu
+| can be used to add the norm and 1.0 with the sign of the
+| denorm and it will correctly generate the result in extended
+| precision.  We can then call round with no sticky and the result
+| will be correct for the user's rounding mode and precision.  If
+| the signs are the same, we call round with the sticky bit set
+| and the result will be correct for the user's rounding mode and
+| precision.
+|
+add_wrap:
+	movew	ETEMP_EX(%a6),%d0
+	movew	FPTEMP_EX(%a6),%d1
+	eorw	%d1,%d0
+	andiw	#0x8000,%d0
+	beq	add_same
+|
+| The signs are unlike.
+|
+	cmpb	#0x0f,DNRM_FLG(%a6) |is dest the denorm?
+	bnes	add_u_srcd
+	movew	FPTEMP_EX(%a6),%d0
+	andiw	#0x8000,%d0
+	orw	#0x3fff,%d0	|force the exponent to +/- 1
+	movew	%d0,FPTEMP_EX(%a6) |in the denorm
+	movel	USER_FPCR(%a6),%d0
+	andil	#0x30,%d0
+	fmovel	%d0,%fpcr		|set up users rmode and X
+	fmovex	ETEMP(%a6),%fp0
+	faddx	FPTEMP(%a6),%fp0
+	leal	WBTEMP(%a6),%a0	|point a0 to wbtemp in frame
+	fmovel	%fpsr,%d1
+	orl	%d1,USER_FPSR(%a6) |capture cc's and inex from fadd
+	fmovex	%fp0,WBTEMP(%a6)	|write result to memory
+	lsrl	#4,%d0		|put rmode in lower 2 bits
+	movel	USER_FPCR(%a6),%d1
+	andil	#0xc0,%d1
+	lsrl	#6,%d1		|put precision in upper word
+	swap	%d1
+	orl	%d0,%d1		|set up for round call
+	clrl	%d0		|force sticky to zero
+	bclrb	#sign_bit,WBTEMP_EX(%a6)
+	sne	WBTEMP_SGN(%a6)
+	bsrl	round		|round result to users rmode & prec
+	bfclr	WBTEMP_SGN(%a6){#0:#8}	|convert back to IEEE ext format
+	beq	frcfpnr
+	bsetb	#sign_bit,WBTEMP_EX(%a6)
+	bra	frcfpnr
+add_u_srcd:
+	movew	ETEMP_EX(%a6),%d0
+	andiw	#0x8000,%d0
+	orw	#0x3fff,%d0	|force the exponent to +/- 1
+	movew	%d0,ETEMP_EX(%a6) |in the denorm
+	movel	USER_FPCR(%a6),%d0
+	andil	#0x30,%d0
+	fmovel	%d0,%fpcr		|set up users rmode and X
+	fmovex	ETEMP(%a6),%fp0
+	faddx	FPTEMP(%a6),%fp0
+	fmovel	%fpsr,%d1
+	orl	%d1,USER_FPSR(%a6) |capture cc's and inex from fadd
+	leal	WBTEMP(%a6),%a0	|point a0 to wbtemp in frame
+	fmovex	%fp0,WBTEMP(%a6)	|write result to memory
+	lsrl	#4,%d0		|put rmode in lower 2 bits
+	movel	USER_FPCR(%a6),%d1
+	andil	#0xc0,%d1
+	lsrl	#6,%d1		|put precision in upper word
+	swap	%d1
+	orl	%d0,%d1		|set up for round call
+	clrl	%d0		|force sticky to zero
+	bclrb	#sign_bit,WBTEMP_EX(%a6)
+	sne	WBTEMP_SGN(%a6)	|use internal format for round
+	bsrl	round		|round result to users rmode & prec
+	bfclr	WBTEMP_SGN(%a6){#0:#8}	|convert back to IEEE ext format
+	beq	frcfpnr
+	bsetb	#sign_bit,WBTEMP_EX(%a6)
+	bra	frcfpnr
+|
+| Signs are alike:
+|
+add_same:
+	cmpb	#0x0f,DNRM_FLG(%a6) |is dest the denorm?
+	bnes	add_s_srcd
+add_s_destd:
+	leal	ETEMP(%a6),%a0
+	movel	USER_FPCR(%a6),%d0
+	andil	#0x30,%d0
+	lsrl	#4,%d0		|put rmode in lower 2 bits
+	movel	USER_FPCR(%a6),%d1
+	andil	#0xc0,%d1
+	lsrl	#6,%d1		|put precision in upper word
+	swap	%d1
+	orl	%d0,%d1		|set up for round call
+	movel	#0x20000000,%d0	|set sticky for round
+	bclrb	#sign_bit,ETEMP_EX(%a6)
+	sne	ETEMP_SGN(%a6)
+	bsrl	round		|round result to users rmode & prec
+	bfclr	ETEMP_SGN(%a6){#0:#8}	|convert back to IEEE ext format
+	beqs	add_s_dclr
+	bsetb	#sign_bit,ETEMP_EX(%a6)
+add_s_dclr:
+	leal	WBTEMP(%a6),%a0
+	movel	ETEMP(%a6),(%a0)	|write result to wbtemp
+	movel	ETEMP_HI(%a6),4(%a0)
+	movel	ETEMP_LO(%a6),8(%a0)
+	tstw	ETEMP_EX(%a6)
+	bgt	add_ckovf
+	orl	#neg_mask,USER_FPSR(%a6)
+	bra	add_ckovf
+add_s_srcd:
+	leal	FPTEMP(%a6),%a0
+	movel	USER_FPCR(%a6),%d0
+	andil	#0x30,%d0
+	lsrl	#4,%d0		|put rmode in lower 2 bits
+	movel	USER_FPCR(%a6),%d1
+	andil	#0xc0,%d1
+	lsrl	#6,%d1		|put precision in upper word
+	swap	%d1
+	orl	%d0,%d1		|set up for round call
+	movel	#0x20000000,%d0	|set sticky for round
+	bclrb	#sign_bit,FPTEMP_EX(%a6)
+	sne	FPTEMP_SGN(%a6)
+	bsrl	round		|round result to users rmode & prec
+	bfclr	FPTEMP_SGN(%a6){#0:#8}	|convert back to IEEE ext format
+	beqs	add_s_sclr
+	bsetb	#sign_bit,FPTEMP_EX(%a6)
+add_s_sclr:
+	leal	WBTEMP(%a6),%a0
+	movel	FPTEMP(%a6),(%a0)	|write result to wbtemp
+	movel	FPTEMP_HI(%a6),4(%a0)
+	movel	FPTEMP_LO(%a6),8(%a0)
+	tstw	FPTEMP_EX(%a6)
+	bgt	add_ckovf
+	orl	#neg_mask,USER_FPSR(%a6)
+add_ckovf:
+	movew	WBTEMP_EX(%a6),%d0
+	andiw	#0x7fff,%d0
+	cmpiw	#0x7fff,%d0
+	bne	frcfpnr
+|
+| The result has overflowed to $7fff exponent.  Set I, ovfl,
+| and aovfl, and clr the mantissa (incorrectly set by the
+| round routine.)
+|
+	orl	#inf_mask+ovfl_inx_mask,USER_FPSR(%a6)
+	clrl	4(%a0)
+	bra	frcfpnr
+|
+| Inst is fsub.
+|
+wrap_sub:
+	cmpb	#0xff,DNRM_FLG(%a6) |if both ops denorm,
+	beq	fix_stk		 |restore to fpu
+|
+| One of the ops is denormalized.  Test for wrap condition
+| and complete the instruction.
+|
+	cmpb	#0x0f,DNRM_FLG(%a6) |check for dest denorm
+	bnes	sub_srcd
+sub_destd:
+	bsrl	ckinf_ns
+	bne	fix_stk
+	bfextu	ETEMP_EX(%a6){#1:#15},%d0	|get src exp (always pos)
+	bfexts	FPTEMP_EX(%a6){#1:#15},%d1	|get dest exp (always neg)
+	subl	%d1,%d0			|subtract dest from src
+	cmpl	#0x8000,%d0
+	blt	fix_stk			|if less, not wrap case
+	bra	sub_wrap
+sub_srcd:
+	bsrl	ckinf_nd
+	bne	fix_stk
+	bfextu	FPTEMP_EX(%a6){#1:#15},%d0	|get dest exp (always pos)
+	bfexts	ETEMP_EX(%a6){#1:#15},%d1	|get src exp (always neg)
+	subl	%d1,%d0			|subtract src from dest
+	cmpl	#0x8000,%d0
+	blt	fix_stk			|if less, not wrap case
+|
+| Check the signs of the operands.  If they are alike, the fpu
+| can be used to subtract from the norm 1.0 with the sign of the
+| denorm and it will correctly generate the result in extended
+| precision.  We can then call round with no sticky and the result
+| will be correct for the user's rounding mode and precision.  If
+| the signs are unlike, we call round with the sticky bit set
+| and the result will be correct for the user's rounding mode and
+| precision.
+|
+sub_wrap:
+	movew	ETEMP_EX(%a6),%d0
+	movew	FPTEMP_EX(%a6),%d1
+	eorw	%d1,%d0
+	andiw	#0x8000,%d0
+	bne	sub_diff
+|
+| The signs are alike.
+|
+	cmpb	#0x0f,DNRM_FLG(%a6) |is dest the denorm?
+	bnes	sub_u_srcd
+	movew	FPTEMP_EX(%a6),%d0
+	andiw	#0x8000,%d0
+	orw	#0x3fff,%d0	|force the exponent to +/- 1
+	movew	%d0,FPTEMP_EX(%a6) |in the denorm
+	movel	USER_FPCR(%a6),%d0
+	andil	#0x30,%d0
+	fmovel	%d0,%fpcr		|set up users rmode and X
+	fmovex	FPTEMP(%a6),%fp0
+	fsubx	ETEMP(%a6),%fp0
+	fmovel	%fpsr,%d1
+	orl	%d1,USER_FPSR(%a6) |capture cc's and inex from fsub
+	leal	WBTEMP(%a6),%a0	|point a0 to wbtemp in frame
+	fmovex	%fp0,WBTEMP(%a6)	|write result to memory
+	lsrl	#4,%d0		|put rmode in lower 2 bits
+	movel	USER_FPCR(%a6),%d1
+	andil	#0xc0,%d1
+	lsrl	#6,%d1		|put precision in upper word
+	swap	%d1
+	orl	%d0,%d1		|set up for round call
+	clrl	%d0		|force sticky to zero
+	bclrb	#sign_bit,WBTEMP_EX(%a6)
+	sne	WBTEMP_SGN(%a6)
+	bsrl	round		|round result to users rmode & prec
+	bfclr	WBTEMP_SGN(%a6){#0:#8}	|convert back to IEEE ext format
+	beq	frcfpnr
+	bsetb	#sign_bit,WBTEMP_EX(%a6)
+	bra	frcfpnr
+sub_u_srcd:
+	movew	ETEMP_EX(%a6),%d0
+	andiw	#0x8000,%d0
+	orw	#0x3fff,%d0	|force the exponent to +/- 1
+	movew	%d0,ETEMP_EX(%a6) |in the denorm
+	movel	USER_FPCR(%a6),%d0
+	andil	#0x30,%d0
+	fmovel	%d0,%fpcr		|set up users rmode and X
+	fmovex	FPTEMP(%a6),%fp0
+	fsubx	ETEMP(%a6),%fp0
+	fmovel	%fpsr,%d1
+	orl	%d1,USER_FPSR(%a6) |capture cc's and inex from fsub
+	leal	WBTEMP(%a6),%a0	|point a0 to wbtemp in frame
+	fmovex	%fp0,WBTEMP(%a6)	|write result to memory
+	lsrl	#4,%d0		|put rmode in lower 2 bits
+	movel	USER_FPCR(%a6),%d1
+	andil	#0xc0,%d1
+	lsrl	#6,%d1		|put precision in upper word
+	swap	%d1
+	orl	%d0,%d1		|set up for round call
+	clrl	%d0		|force sticky to zero
+	bclrb	#sign_bit,WBTEMP_EX(%a6)
+	sne	WBTEMP_SGN(%a6)
+	bsrl	round		|round result to users rmode & prec
+	bfclr	WBTEMP_SGN(%a6){#0:#8}	|convert back to IEEE ext format
+	beq	frcfpnr
+	bsetb	#sign_bit,WBTEMP_EX(%a6)
+	bra	frcfpnr
+|
+| Signs are unlike:
+|
+sub_diff:
+	cmpb	#0x0f,DNRM_FLG(%a6) |is dest the denorm?
+	bnes	sub_s_srcd
+sub_s_destd:
+	leal	ETEMP(%a6),%a0
+	movel	USER_FPCR(%a6),%d0
+	andil	#0x30,%d0
+	lsrl	#4,%d0		|put rmode in lower 2 bits
+	movel	USER_FPCR(%a6),%d1
+	andil	#0xc0,%d1
+	lsrl	#6,%d1		|put precision in upper word
+	swap	%d1
+	orl	%d0,%d1		|set up for round call
+	movel	#0x20000000,%d0	|set sticky for round
+|
+| Since the dest is the denorm, the sign is the opposite of the
+| norm sign.
+|
+	eoriw	#0x8000,ETEMP_EX(%a6)	|flip sign on result
+	tstw	ETEMP_EX(%a6)
+	bgts	sub_s_dwr
+	orl	#neg_mask,USER_FPSR(%a6)
+sub_s_dwr:
+	bclrb	#sign_bit,ETEMP_EX(%a6)
+	sne	ETEMP_SGN(%a6)
+	bsrl	round		|round result to users rmode & prec
+	bfclr	ETEMP_SGN(%a6){#0:#8}	|convert back to IEEE ext format
+	beqs	sub_s_dclr
+	bsetb	#sign_bit,ETEMP_EX(%a6)
+sub_s_dclr:
+	leal	WBTEMP(%a6),%a0
+	movel	ETEMP(%a6),(%a0)	|write result to wbtemp
+	movel	ETEMP_HI(%a6),4(%a0)
+	movel	ETEMP_LO(%a6),8(%a0)
+	bra	sub_ckovf
+sub_s_srcd:
+	leal	FPTEMP(%a6),%a0
+	movel	USER_FPCR(%a6),%d0
+	andil	#0x30,%d0
+	lsrl	#4,%d0		|put rmode in lower 2 bits
+	movel	USER_FPCR(%a6),%d1
+	andil	#0xc0,%d1
+	lsrl	#6,%d1		|put precision in upper word
+	swap	%d1
+	orl	%d0,%d1		|set up for round call
+	movel	#0x20000000,%d0	|set sticky for round
+	bclrb	#sign_bit,FPTEMP_EX(%a6)
+	sne	FPTEMP_SGN(%a6)
+	bsrl	round		|round result to users rmode & prec
+	bfclr	FPTEMP_SGN(%a6){#0:#8}	|convert back to IEEE ext format
+	beqs	sub_s_sclr
+	bsetb	#sign_bit,FPTEMP_EX(%a6)
+sub_s_sclr:
+	leal	WBTEMP(%a6),%a0
+	movel	FPTEMP(%a6),(%a0)	|write result to wbtemp
+	movel	FPTEMP_HI(%a6),4(%a0)
+	movel	FPTEMP_LO(%a6),8(%a0)
+	tstw	FPTEMP_EX(%a6)
+	bgt	sub_ckovf
+	orl	#neg_mask,USER_FPSR(%a6)
+sub_ckovf:
+	movew	WBTEMP_EX(%a6),%d0
+	andiw	#0x7fff,%d0
+	cmpiw	#0x7fff,%d0
+	bne	frcfpnr
+|
+| The result has overflowed to $7fff exponent.  Set I, ovfl,
+| and aovfl, and clr the mantissa (incorrectly set by the
+| round routine.)
+|
+	orl	#inf_mask+ovfl_inx_mask,USER_FPSR(%a6)
+	clrl	4(%a0)
+	bra	frcfpnr
+|
+| Inst is fcmp.
+|
+wrap_cmp:
+	cmpb	#0xff,DNRM_FLG(%a6) |if both ops denorm,
+	beq	fix_stk		 |restore to fpu
+|
+| One of the ops is denormalized.  Test for wrap condition
+| and complete the instruction.
+|
+	cmpb	#0x0f,DNRM_FLG(%a6) |check for dest denorm
+	bnes	cmp_srcd
+cmp_destd:
+	bsrl	ckinf_ns
+	bne	fix_stk
+	bfextu	ETEMP_EX(%a6){#1:#15},%d0	|get src exp (always pos)
+	bfexts	FPTEMP_EX(%a6){#1:#15},%d1	|get dest exp (always neg)
+	subl	%d1,%d0			|subtract dest from src
+	cmpl	#0x8000,%d0
+	blt	fix_stk			|if less, not wrap case
+	tstw	ETEMP_EX(%a6)		|set N to ~sign_of(src)
+	bge	cmp_setn
+	rts
+cmp_srcd:
+	bsrl	ckinf_nd
+	bne	fix_stk
+	bfextu	FPTEMP_EX(%a6){#1:#15},%d0	|get dest exp (always pos)
+	bfexts	ETEMP_EX(%a6){#1:#15},%d1	|get src exp (always neg)
+	subl	%d1,%d0			|subtract src from dest
+	cmpl	#0x8000,%d0
+	blt	fix_stk			|if less, not wrap case
+	tstw	FPTEMP_EX(%a6)		|set N to sign_of(dest)
+	blt	cmp_setn
+	rts
+cmp_setn:
+	orl	#neg_mask,USER_FPSR(%a6)
+	rts
+
+|
+| Inst is fmul.
+|
+wrap_mul:
+	cmpb	#0xff,DNRM_FLG(%a6) |if both ops denorm,
+	beq	force_unf	|force an underflow (really!)
+|
+| One of the ops is denormalized.  Test for wrap condition
+| and complete the instruction.
+|
+	cmpb	#0x0f,DNRM_FLG(%a6) |check for dest denorm
+	bnes	mul_srcd
+mul_destd:
+	bsrl	ckinf_ns
+	bne	fix_stk
+	bfextu	ETEMP_EX(%a6){#1:#15},%d0	|get src exp (always pos)
+	bfexts	FPTEMP_EX(%a6){#1:#15},%d1	|get dest exp (always neg)
+	addl	%d1,%d0			|add the dest and src exponents
+	bgt	fix_stk
+	bra	force_unf
+mul_srcd:
+	bsrl	ckinf_nd
+	bne	fix_stk
+	bfextu	FPTEMP_EX(%a6){#1:#15},%d0	|get dest exp (always pos)
+	bfexts	ETEMP_EX(%a6){#1:#15},%d1	|get src exp (always neg)
+	addl	%d1,%d0			|add the src and dest exponents
+	bgt	fix_stk
+
+|
+| This code handles the case of the instruction resulting in
+| an underflow condition.
+|
+force_unf:
+	bclrb	#E1,E_BYTE(%a6)
+	orl	#unfinx_mask,USER_FPSR(%a6)
+	clrw	NMNEXC(%a6)
+	clrb	WBTEMP_SGN(%a6)
+	movew	ETEMP_EX(%a6),%d0		|find the sign of the result
+	movew	FPTEMP_EX(%a6),%d1
+	eorw	%d1,%d0
+	andiw	#0x8000,%d0
+	beqs	frcunfcont
+	st	WBTEMP_SGN(%a6)
+frcunfcont:
+	lea	WBTEMP(%a6),%a0		|point a0 to memory location
+	movew	CMDREG1B(%a6),%d0
+	btstl	#6,%d0			|test for forced precision
+	beqs	frcunf_fpcr
+	btstl	#2,%d0			|check for double
+	bnes	frcunf_dbl
+	movel	#0x1,%d0			|inst is forced single
+	bras	frcunf_rnd
+frcunf_dbl:
+	movel	#0x2,%d0			|inst is forced double
+	bras	frcunf_rnd
+frcunf_fpcr:
+	bfextu	FPCR_MODE(%a6){#0:#2},%d0	|inst not forced - use fpcr prec
+frcunf_rnd:
+	bsrl	unf_sub			|get correct result based on
+|					;round precision/mode.  This
+|					;sets FPSR_CC correctly
+	bfclr	WBTEMP_SGN(%a6){#0:#8}	|convert back to IEEE ext format
+	beqs	frcfpn
+	bsetb	#sign_bit,WBTEMP_EX(%a6)
+	bra	frcfpn
+
+|
+| Write the result to the user's fpn.  All results must be HUGE to be
+| written; otherwise the results would have overflowed or underflowed.
+| If the rounding precision is single or double, the ovf_res routine
+| is needed to correctly supply the max value.
+|
+frcfpnr:
+	movew	CMDREG1B(%a6),%d0
+	btstl	#6,%d0			|test for forced precision
+	beqs	frcfpn_fpcr
+	btstl	#2,%d0			|check for double
+	bnes	frcfpn_dbl
+	movel	#0x1,%d0			|inst is forced single
+	bras	frcfpn_rnd
+frcfpn_dbl:
+	movel	#0x2,%d0			|inst is forced double
+	bras	frcfpn_rnd
+frcfpn_fpcr:
+	bfextu	FPCR_MODE(%a6){#0:#2},%d0	|inst not forced - use fpcr prec
+	tstb	%d0
+	beqs	frcfpn			|if extended, write what you got
+frcfpn_rnd:
+	bclrb	#sign_bit,WBTEMP_EX(%a6)
+	sne	WBTEMP_SGN(%a6)
+	bsrl	ovf_res			|get correct result based on
+|					;round precision/mode.  This
+|					;sets FPSR_CC correctly
+	bfclr	WBTEMP_SGN(%a6){#0:#8}	|convert back to IEEE ext format
+	beqs	frcfpn_clr
+	bsetb	#sign_bit,WBTEMP_EX(%a6)
+frcfpn_clr:
+	orl	#ovfinx_mask,USER_FPSR(%a6)
+|
+| Perform the write.
+|
+frcfpn:
+	bfextu	CMDREG1B(%a6){#6:#3},%d0	|extract fp destination register
+	cmpib	#3,%d0
+	bles	frc0123			|check if dest is fp0-fp3
+	movel	#7,%d1
+	subl	%d0,%d1
+	clrl	%d0
+	bsetl	%d1,%d0
+	fmovemx WBTEMP(%a6),%d0
+	rts
+frc0123:
+	cmpib	#0,%d0
+	beqs	frc0_dst
+	cmpib	#1,%d0
+	beqs	frc1_dst
+	cmpib	#2,%d0
+	beqs	frc2_dst
+frc3_dst:
+	movel	WBTEMP_EX(%a6),USER_FP3(%a6)
+	movel	WBTEMP_HI(%a6),USER_FP3+4(%a6)
+	movel	WBTEMP_LO(%a6),USER_FP3+8(%a6)
+	rts
+frc2_dst:
+	movel	WBTEMP_EX(%a6),USER_FP2(%a6)
+	movel	WBTEMP_HI(%a6),USER_FP2+4(%a6)
+	movel	WBTEMP_LO(%a6),USER_FP2+8(%a6)
+	rts
+frc1_dst:
+	movel	WBTEMP_EX(%a6),USER_FP1(%a6)
+	movel	WBTEMP_HI(%a6),USER_FP1+4(%a6)
+	movel	WBTEMP_LO(%a6),USER_FP1+8(%a6)
+	rts
+frc0_dst:
+	movel	WBTEMP_EX(%a6),USER_FP0(%a6)
+	movel	WBTEMP_HI(%a6),USER_FP0+4(%a6)
+	movel	WBTEMP_LO(%a6),USER_FP0+8(%a6)
+	rts
+
+|
+| Write etemp to fpn.
+| A check is made on enabled and signalled snan exceptions,
+| and the destination is not overwritten if this condition exists.
+| This code is designed to make fmoveins of unsupported data types
+| faster.
+|
+wr_etemp:
+	btstb	#snan_bit,FPSR_EXCEPT(%a6)	|if snan is set, and
+	beqs	fmoveinc		|enabled, force restore
+	btstb	#snan_bit,FPCR_ENABLE(%a6) |and don't overwrite
+	beqs	fmoveinc		|the dest
+	movel	ETEMP_EX(%a6),FPTEMP_EX(%a6)	|set up fptemp sign for
+|						;snan handler
+	tstb	ETEMP(%a6)		|check for negative
+	blts	snan_neg
+	rts
+snan_neg:
+	orl	#neg_bit,USER_FPSR(%a6)	|snan is negative; set N
+	rts
+fmoveinc:
+	clrw	NMNEXC(%a6)
+	bclrb	#E1,E_BYTE(%a6)
+	moveb	STAG(%a6),%d0		|check if stag is inf
+	andib	#0xe0,%d0
+	cmpib	#0x40,%d0
+	bnes	fminc_cnan
+	orl	#inf_mask,USER_FPSR(%a6) |if inf, nothing yet has set I
+	tstw	LOCAL_EX(%a0)		|check sign
+	bges	fminc_con
+	orl	#neg_mask,USER_FPSR(%a6)
+	bra	fminc_con
+fminc_cnan:
+	cmpib	#0x60,%d0			|check if stag is NaN
+	bnes	fminc_czero
+	orl	#nan_mask,USER_FPSR(%a6) |if nan, nothing yet has set NaN
+	movel	ETEMP_EX(%a6),FPTEMP_EX(%a6)	|set up fptemp sign for
+|						;snan handler
+	tstw	LOCAL_EX(%a0)		|check sign
+	bges	fminc_con
+	orl	#neg_mask,USER_FPSR(%a6)
+	bra	fminc_con
+fminc_czero:
+	cmpib	#0x20,%d0			|check if zero
+	bnes	fminc_con
+	orl	#z_mask,USER_FPSR(%a6)	|if zero, set Z
+	tstw	LOCAL_EX(%a0)		|check sign
+	bges	fminc_con
+	orl	#neg_mask,USER_FPSR(%a6)
+fminc_con:
+	bfextu	CMDREG1B(%a6){#6:#3},%d0	|extract fp destination register
+	cmpib	#3,%d0
+	bles	fp0123			|check if dest is fp0-fp3
+	movel	#7,%d1
+	subl	%d0,%d1
+	clrl	%d0
+	bsetl	%d1,%d0
+	fmovemx ETEMP(%a6),%d0
+	rts
+
+fp0123:
+	cmpib	#0,%d0
+	beqs	fp0_dst
+	cmpib	#1,%d0
+	beqs	fp1_dst
+	cmpib	#2,%d0
+	beqs	fp2_dst
+fp3_dst:
+	movel	ETEMP_EX(%a6),USER_FP3(%a6)
+	movel	ETEMP_HI(%a6),USER_FP3+4(%a6)
+	movel	ETEMP_LO(%a6),USER_FP3+8(%a6)
+	rts
+fp2_dst:
+	movel	ETEMP_EX(%a6),USER_FP2(%a6)
+	movel	ETEMP_HI(%a6),USER_FP2+4(%a6)
+	movel	ETEMP_LO(%a6),USER_FP2+8(%a6)
+	rts
+fp1_dst:
+	movel	ETEMP_EX(%a6),USER_FP1(%a6)
+	movel	ETEMP_HI(%a6),USER_FP1+4(%a6)
+	movel	ETEMP_LO(%a6),USER_FP1+8(%a6)
+	rts
+fp0_dst:
+	movel	ETEMP_EX(%a6),USER_FP0(%a6)
+	movel	ETEMP_HI(%a6),USER_FP0+4(%a6)
+	movel	ETEMP_LO(%a6),USER_FP0+8(%a6)
+	rts
+
+opclass3:
+	st	CU_ONLY(%a6)
+	movew	CMDREG1B(%a6),%d0	|check if packed moveout
+	andiw	#0x0c00,%d0	|isolate last 2 bits of size field
+	cmpiw	#0x0c00,%d0	|if size is 011 or 111, it is packed
+	beq	pack_out	|else it is norm or denorm
+	bra	mv_out
+
+
+|
+|	MOVE OUT
+|
+
+mv_tbl:
+	.long	li
+	.long	sgp
+	.long	xp
+	.long	mvout_end	|should never be taken
+	.long	wi
+	.long	dp
+	.long	bi
+	.long	mvout_end	|should never be taken
+mv_out:
+	bfextu	CMDREG1B(%a6){#3:#3},%d1	|put source specifier in d1
+	leal	mv_tbl,%a0
+	movel	%a0@(%d1:l:4),%a0
+	jmp	(%a0)
+
+|
+| This exit is for move-out to memory.  The aunfl bit is
+| set if the result is inex and unfl is signalled.
+|
+mvout_end:
+	btstb	#inex2_bit,FPSR_EXCEPT(%a6)
+	beqs	no_aufl
+	btstb	#unfl_bit,FPSR_EXCEPT(%a6)
+	beqs	no_aufl
+	bsetb	#aunfl_bit,FPSR_AEXCEPT(%a6)
+no_aufl:
+	clrw	NMNEXC(%a6)
+	bclrb	#E1,E_BYTE(%a6)
+	fmovel	#0,%FPSR			|clear any cc bits from res_func
+|
+| Return ETEMP to extended format from internal extended format so
+| that gen_except will have a correctly signed value for ovfl/unfl
+| handlers.
+|
+	bfclr	ETEMP_SGN(%a6){#0:#8}
+	beqs	mvout_con
+	bsetb	#sign_bit,ETEMP_EX(%a6)
+mvout_con:
+	rts
+|
+| This exit is for move-out to int register.  The aunfl bit is
+| not set in any case for this move.
+|
+mvouti_end:
+	clrw	NMNEXC(%a6)
+	bclrb	#E1,E_BYTE(%a6)
+	fmovel	#0,%FPSR			|clear any cc bits from res_func
+|
+| Return ETEMP to extended format from internal extended format so
+| that gen_except will have a correctly signed value for ovfl/unfl
+| handlers.
+|
+	bfclr	ETEMP_SGN(%a6){#0:#8}
+	beqs	mvouti_con
+	bsetb	#sign_bit,ETEMP_EX(%a6)
+mvouti_con:
+	rts
+|
+| li is used to handle a long integer source specifier
+|
+
+li:
+	moveql	#4,%d0		|set byte count
+
+	btstb	#7,STAG(%a6)	|check for extended denorm
+	bne	int_dnrm	|if so, branch
+
+	fmovemx ETEMP(%a6),%fp0-%fp0
+	fcmpd	#0x41dfffffffc00000,%fp0
+| 41dfffffffc00000 in dbl prec = 401d0000fffffffe00000000 in ext prec
+	fbge	lo_plrg
+	fcmpd	#0xc1e0000000000000,%fp0
+| c1e0000000000000 in dbl prec = c01e00008000000000000000 in ext prec
+	fble	lo_nlrg
+|
+| at this point, the answer is between the largest pos and neg values
+|
+	movel	USER_FPCR(%a6),%d1	|use user's rounding mode
+	andil	#0x30,%d1
+	fmovel	%d1,%fpcr
+	fmovel	%fp0,L_SCR1(%a6)	|let the 040 perform conversion
+	fmovel %fpsr,%d1
+	orl	%d1,USER_FPSR(%a6)	|capture inex2/ainex if set
+	bra	int_wrt
+
+
+lo_plrg:
+	movel	#0x7fffffff,L_SCR1(%a6)	|answer is largest positive int
+	fbeq	int_wrt			|exact answer
+	fcmpd	#0x41dfffffffe00000,%fp0
+| 41dfffffffe00000 in dbl prec = 401d0000ffffffff00000000 in ext prec
+	fbge	int_operr		|set operr
+	bra	int_inx			|set inexact
+
+lo_nlrg:
+	movel	#0x80000000,L_SCR1(%a6)
+	fbeq	int_wrt			|exact answer
+	fcmpd	#0xc1e0000000100000,%fp0
+| c1e0000000100000 in dbl prec = c01e00008000000080000000 in ext prec
+	fblt	int_operr		|set operr
+	bra	int_inx			|set inexact
+
+|
+| wi is used to handle a word integer source specifier
+|
+
+wi:
+	moveql	#2,%d0		|set byte count
+
+	btstb	#7,STAG(%a6)	|check for extended denorm
+	bne	int_dnrm	|branch if so
+
+	fmovemx ETEMP(%a6),%fp0-%fp0
+	fcmps	#0x46fffe00,%fp0
+| 46fffe00 in sgl prec = 400d0000fffe000000000000 in ext prec
+	fbge	wo_plrg
+	fcmps	#0xc7000000,%fp0
+| c7000000 in sgl prec = c00e00008000000000000000 in ext prec
+	fble	wo_nlrg
+
+|
+| at this point, the answer is between the largest pos and neg values
+|
+	movel	USER_FPCR(%a6),%d1	|use user's rounding mode
+	andil	#0x30,%d1
+	fmovel	%d1,%fpcr
+	fmovew	%fp0,L_SCR1(%a6)	|let the 040 perform conversion
+	fmovel %fpsr,%d1
+	orl	%d1,USER_FPSR(%a6)	|capture inex2/ainex if set
+	bra	int_wrt
+
+wo_plrg:
+	movew	#0x7fff,L_SCR1(%a6)	|answer is largest positive int
+	fbeq	int_wrt			|exact answer
+	fcmps	#0x46ffff00,%fp0
+| 46ffff00 in sgl prec = 400d0000ffff000000000000 in ext prec
+	fbge	int_operr		|set operr
+	bra	int_inx			|set inexact
+
+wo_nlrg:
+	movew	#0x8000,L_SCR1(%a6)
+	fbeq	int_wrt			|exact answer
+	fcmps	#0xc7000080,%fp0
+| c7000080 in sgl prec = c00e00008000800000000000 in ext prec
+	fblt	int_operr		|set operr
+	bra	int_inx			|set inexact
+
+|
+| bi is used to handle a byte integer source specifier
+|
+
+bi:
+	moveql	#1,%d0		|set byte count
+
+	btstb	#7,STAG(%a6)	|check for extended denorm
+	bne	int_dnrm	|branch if so
+
+	fmovemx ETEMP(%a6),%fp0-%fp0
+	fcmps	#0x42fe0000,%fp0
+| 42fe0000 in sgl prec = 40050000fe00000000000000 in ext prec
+	fbge	by_plrg
+	fcmps	#0xc3000000,%fp0
+| c3000000 in sgl prec = c00600008000000000000000 in ext prec
+	fble	by_nlrg
+
+|
+| at this point, the answer is between the largest pos and neg values
+|
+	movel	USER_FPCR(%a6),%d1	|use user's rounding mode
+	andil	#0x30,%d1
+	fmovel	%d1,%fpcr
+	fmoveb	%fp0,L_SCR1(%a6)	|let the 040 perform conversion
+	fmovel %fpsr,%d1
+	orl	%d1,USER_FPSR(%a6)	|capture inex2/ainex if set
+	bra	int_wrt
+
+by_plrg:
+	moveb	#0x7f,L_SCR1(%a6)		|answer is largest positive int
+	fbeq	int_wrt			|exact answer
+	fcmps	#0x42ff0000,%fp0
+| 42ff0000 in sgl prec = 40050000ff00000000000000 in ext prec
+	fbge	int_operr		|set operr
+	bra	int_inx			|set inexact
+
+by_nlrg:
+	moveb	#0x80,L_SCR1(%a6)
+	fbeq	int_wrt			|exact answer
+	fcmps	#0xc3008000,%fp0
+| c3008000 in sgl prec = c00600008080000000000000 in ext prec
+	fblt	int_operr		|set operr
+	bra	int_inx			|set inexact
+
+|
+| Common integer routines
+|
+| int_dnrm---account for a possible nonzero result when rounding up (RP) with
+| a positive operand or rounding down (RM) with a negative operand.  In the
+| first case (result = +1) the byte width of the result (stored in d0) must be
+| honored.  In the second case, -1 in L_SCR1(a6) covers all contingencies
+| (FMOVE.B/W/L out).
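+|
+| For illustration only (following the RP/RM behavior described above and the
+| code below): with RP rounding, a positive source and a byte destination
+| (d0 = 1), a +1 of byte width is built in L_SCR1; with RM rounding and a
+| negative source, the long -1 stored in L_SCR1 serves for any width.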
+
+int_dnrm:
+	movel	#0,L_SCR1(%a6)	| initialize result to 0
+	bfextu	FPCR_MODE(%a6){#2:#2},%d1	| d1 is the rounding mode
+	cmpb	#2,%d1
+	bmis	int_inx		| if RN or RZ, done
+	bnes	int_rp		| if RP, continue below
+	tstw	ETEMP(%a6)	| RM: store -1 in L_SCR1 if src is negative
+	bpls	int_inx		| otherwise result is 0
+	movel	#-1,L_SCR1(%a6)
+	bras	int_inx
+int_rp:
+	tstw	ETEMP(%a6)	| RP: store +1 of proper width in L_SCR1 if
+|				; source is greater than 0
+	bmis	int_inx		| otherwise, result is 0
+	lea	L_SCR1(%a6),%a1	| a1 is address of L_SCR1
+	addal	%d0,%a1		| offset by destination width -1
+	subal	#1,%a1
+	bsetb	#0,(%a1)		| set low bit at a1 address
+int_inx:
+	oril	#inx2a_mask,USER_FPSR(%a6)
+	bras	int_wrt
+int_operr:
+	fmovemx %fp0-%fp0,FPTEMP(%a6)	|FPTEMP must contain the extended
+|				;precision source that needs to be
+|				;converted to integer this is required
+|				;if the operr exception is enabled.
+|				;set operr/aiop (no inex2 on int ovfl)
+
+	oril	#opaop_mask,USER_FPSR(%a6)
+|				;fall through to perform int_wrt
+int_wrt:
+	movel	EXC_EA(%a6),%a1	|load destination address
+	tstl	%a1		|check to see if it is a dest register
+	beqs	wrt_dn		|write data register
+	lea	L_SCR1(%a6),%a0	|point to supervisor source address
+	bsrl	mem_write
+	bra	mvouti_end
+
+wrt_dn:
+	movel	%d0,-(%sp)	|d0 currently contains the size to write
+	bsrl	get_fline	|get_fline returns Dn in d0
+	andiw	#0x7,%d0		|isolate register
+	movel	(%sp)+,%d1	|get size
+	cmpil	#4,%d1		|most frequent case
+	beqs	sz_long
+	cmpil	#2,%d1
+	bnes	sz_con
+	orl	#8,%d0		|add 'word' size to register#
+	bras	sz_con
+sz_long:
+	orl	#0x10,%d0		|add 'long' size to register#
+sz_con:
+	movel	%d0,%d1		|reg_dest expects size:reg in d1
+	bsrl	reg_dest	|load proper data register
+	bra	mvouti_end
+xp:
+	lea	ETEMP(%a6),%a0
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)
+	btstb	#7,STAG(%a6)	|check for extended denorm
+	bne	xdnrm
+	clrl	%d0
+	bras	do_fp		|do normal case
+sgp:
+	lea	ETEMP(%a6),%a0
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)
+	btstb	#7,STAG(%a6)	|check for extended denorm
+	bne	sp_catas	|branch if so
+	movew	LOCAL_EX(%a0),%d0
+	lea	sp_bnds,%a1
+	cmpw	(%a1),%d0
+	blt	sp_under
+	cmpw	2(%a1),%d0
+	bgt	sp_over
+	movel	#1,%d0		|set destination format to single
+	bras	do_fp		|do normal case
+dp:
+	lea	ETEMP(%a6),%a0
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)
+
+	btstb	#7,STAG(%a6)	|check for extended denorm
+	bne	dp_catas	|branch if so
+
+	movew	LOCAL_EX(%a0),%d0
+	lea	dp_bnds,%a1
+
+	cmpw	(%a1),%d0
+	blt	dp_under
+	cmpw	2(%a1),%d0
+	bgt	dp_over
+
+	movel	#2,%d0		|set destination format to double
+|				;fall through to do_fp
+|
+do_fp:
+	bfextu	FPCR_MODE(%a6){#2:#2},%d1	|rnd mode in d1
+	swap	%d0			|rnd prec in upper word
+	addl	%d0,%d1			|d1 has PREC/MODE info
+
+	clrl	%d0			|clear g,r,s
+
+	bsrl	round			|round
+
+	movel	%a0,%a1
+	movel	EXC_EA(%a6),%a0
+
+	bfextu	CMDREG1B(%a6){#3:#3},%d1	|extract destination format
+|					;at this point only the dest
+|					;formats sgl, dbl, ext are
+|					;possible
+	cmpb	#2,%d1
+	bgts	ddbl			|double=5, extended=2, single=1
+	bnes	dsgl
+|					;fall through to dext
+dext:
+	bsrl	dest_ext
+	bra	mvout_end
+dsgl:
+	bsrl	dest_sgl
+	bra	mvout_end
+ddbl:
+	bsrl	dest_dbl
+	bra	mvout_end
+
+|
+| Handle possible denorm or catastrophic underflow cases here
+|
+xdnrm:
+	bsr	set_xop		|initialize WBTEMP
+	bsetb	#wbtemp15_bit,WB_BYTE(%a6) |set wbtemp15
+
+	movel	%a0,%a1
+	movel	EXC_EA(%a6),%a0	|a0 has the destination pointer
+	bsrl	dest_ext	|store to memory
+	bsetb	#unfl_bit,FPSR_EXCEPT(%a6)
+	bra	mvout_end
+
+sp_under:
+	bsetb	#etemp15_bit,STAG(%a6)
+
+	cmpw	4(%a1),%d0
+	blts	sp_catas	|catastrophic underflow case
+
+	movel	#1,%d0		|load in round precision
+	movel	#sgl_thresh,%d1	|load in single denorm threshold
+	bsrl	dpspdnrm	|expects d1 to have the proper
+|				;denorm threshold
+	bsrl	dest_sgl	|stores value to destination
+	bsetb	#unfl_bit,FPSR_EXCEPT(%a6)
+	bra	mvout_end	|exit
+
+dp_under:
+	bsetb	#etemp15_bit,STAG(%a6)
+
+	cmpw	4(%a1),%d0
+	blts	dp_catas	|catastrophic underflow case
+
+	movel	#dbl_thresh,%d1	|load in double precision threshold
+	movel	#2,%d0
+	bsrl	dpspdnrm	|expects d1 to have proper
+|				;denorm threshold
+|				;expects d0 to have round precision
+	bsrl	dest_dbl	|store value to destination
+	bsetb	#unfl_bit,FPSR_EXCEPT(%a6)
+	bra	mvout_end	|exit
+
+|
+| Handle catastrophic underflow cases here
+|
+sp_catas:
+| Temp fix for z bit set in unf_sub
+	movel	USER_FPSR(%a6),-(%a7)
+
+	movel	#1,%d0		|set round precision to sgl
+
+	bsrl	unf_sub		|a0 points to result
+
+	movel	(%a7)+,USER_FPSR(%a6)
+
+	movel	#1,%d0
+	subw	%d0,LOCAL_EX(%a0) |account for difference between
+|				;denorm/norm bias
+
+	movel	%a0,%a1		|a1 has the operand input
+	movel	EXC_EA(%a6),%a0	|a0 has the destination pointer
+
+	bsrl	dest_sgl	|store the result
+	oril	#unfinx_mask,USER_FPSR(%a6)
+	bra	mvout_end
+
+dp_catas:
+| Temp fix for z bit set in unf_sub
+	movel	USER_FPSR(%a6),-(%a7)
+
+	movel	#2,%d0		|set round precision to dbl
+	bsrl	unf_sub		|a0 points to result
+
+	movel	(%a7)+,USER_FPSR(%a6)
+
+	movel	#1,%d0
+	subw	%d0,LOCAL_EX(%a0) |account for difference between
+|				;denorm/norm bias
+
+	movel	%a0,%a1		|a1 has the operand input
+	movel	EXC_EA(%a6),%a0	|a0 has the destination pointer
+
+	bsrl	dest_dbl	|store the result
+	oril	#unfinx_mask,USER_FPSR(%a6)
+	bra	mvout_end
+
+|
+| Handle catastrophic overflow cases here
+|
+sp_over:
+| Temp fix for z bit set in ovf_res
+	movel	USER_FPSR(%a6),-(%a7)
+
+	movel	#1,%d0
+	leal	FP_SCR1(%a6),%a0	|use FP_SCR1 for creating result
+	movel	ETEMP_EX(%a6),(%a0)
+	movel	ETEMP_HI(%a6),4(%a0)
+	movel	ETEMP_LO(%a6),8(%a0)
+	bsrl	ovf_res
+
+	movel	(%a7)+,USER_FPSR(%a6)
+
+	movel	%a0,%a1
+	movel	EXC_EA(%a6),%a0
+	bsrl	dest_sgl
+	orl	#ovfinx_mask,USER_FPSR(%a6)
+	bra	mvout_end
+
+dp_over:
+| Temp fix for z bit set in ovf_res
+	movel	USER_FPSR(%a6),-(%a7)
+
+	movel	#2,%d0
+	leal	FP_SCR1(%a6),%a0	|use FP_SCR1 for creating result
+	movel	ETEMP_EX(%a6),(%a0)
+	movel	ETEMP_HI(%a6),4(%a0)
+	movel	ETEMP_LO(%a6),8(%a0)
+	bsrl	ovf_res
+
+	movel	(%a7)+,USER_FPSR(%a6)
+
+	movel	%a0,%a1
+	movel	EXC_EA(%a6),%a0
+	bsrl	dest_dbl
+	orl	#ovfinx_mask,USER_FPSR(%a6)
+	bra	mvout_end
+
+|
+|	DPSPDNRM
+|
+| This subroutine takes an extended normalized number and denormalizes
+| it to the given round precision. This subroutine also decrements
+| the input operand's exponent by 1 to account for the fact that
+| dest_sgl or dest_dbl expects a normalized number's bias.
+|
+| Input: a0  points to a normalized number in internal extended format
+|	 d0  is the round precision (=1 for sgl; =2 for dbl)
+|	 d1  is the single precision or double precision
+|	     denorm threshold
+|
+| Output: (In the format for dest_sgl or dest_dbl)
+|	 a0   points to the destination
+|	 a1   points to the operand
+|
+| Exceptions: Reports inexact 2 exception by setting USER_FPSR bits
+|
+dpspdnrm:
+	movel	%d0,-(%a7)	|save round precision
+	clrl	%d0		|clear initial g,r,s
+	bsrl	dnrm_lp		|careful with d0, it's needed by round
+
+	bfextu	FPCR_MODE(%a6){#2:#2},%d1 |get rounding mode
+	swap	%d1
+	movew	2(%a7),%d1	|set rounding precision
+	swap	%d1		|at this point d1 has PREC/MODE info
+	bsrl	round		|round result, sets the inex bit in
+|				;USER_FPSR if needed
+
+	movew	#1,%d0
+	subw	%d0,LOCAL_EX(%a0) |account for difference in denorm
+|				;vs norm bias
+
+	movel	%a0,%a1		|a1 has the operand input
+	movel	EXC_EA(%a6),%a0	|a0 has the destination pointer
+	addw	#4,%a7		|pop stack
+	rts
+|
+| SET_XOP initializes WBTEMP with the value pointed to by a0
+| input: a0 points to input operand in the internal extended format
+|
+set_xop:
+	movel	LOCAL_EX(%a0),WBTEMP_EX(%a6)
+	movel	LOCAL_HI(%a0),WBTEMP_HI(%a6)
+	movel	LOCAL_LO(%a0),WBTEMP_LO(%a6)
+	bfclr	WBTEMP_SGN(%a6){#0:#8}
+	beqs	sxop
+	bsetb	#sign_bit,WBTEMP_EX(%a6)
+sxop:
+	bfclr	STAG(%a6){#5:#4}	|clear wbtm66,wbtm1,wbtm0,sbit
+	rts
+|
+|	P_MOVE
+|
+p_movet:
+	.long	p_move
+	.long	p_movez
+	.long	p_movei
+	.long	p_moven
+	.long	p_move
+p_regd:
+	.long	p_dyd0
+	.long	p_dyd1
+	.long	p_dyd2
+	.long	p_dyd3
+	.long	p_dyd4
+	.long	p_dyd5
+	.long	p_dyd6
+	.long	p_dyd7
+
+pack_out:
+	leal	p_movet,%a0	|load jmp table address
+	movew	STAG(%a6),%d0	|get source tag
+	bfextu	%d0{#16:#3},%d0	|isolate source bits
+	movel	(%a0,%d0.w*4),%a0	|load a0 with routine label for tag
+	jmp	(%a0)		|go to the routine
+
+p_write:
+	movel	#0x0c,%d0	|get byte count
+	movel	EXC_EA(%a6),%a1	|get the destination address
+	bsr	mem_write	|write the user's destination
+	moveb	#0,CU_SAVEPC(%a6) |set the cu save pc to all 0's
+
+|
+| Also note that the dtag must be set to norm here - this is because
+| the 040 uses the dtag to execute the correct microcode.
+|
+        bfclr    DTAG(%a6){#0:#3}  |set dtag to norm
+
+	rts
+
+| Notes on handling of special case (zero, inf, and nan) inputs:
+|	1. Operr is not signalled if the k-factor is greater than 18.
+|	2. Per the manual, status bits are not set.
+|
+
+p_move:
+	movew	CMDREG1B(%a6),%d0
+	btstl	#kfact_bit,%d0	|test for dynamic k-factor
+	beqs	statick		|if clear, k-factor is static
+dynamick:
+	bfextu	%d0{#25:#3},%d0	|isolate register for dynamic k-factor
+	lea	p_regd,%a0
+	movel	%a0@(%d0:l:4),%a0
+	jmp	(%a0)
+statick:
+	andiw	#0x007f,%d0	|get k-factor
+	bfexts	%d0{#25:#7},%d0	|sign extend d0 for bindec
+	leal	ETEMP(%a6),%a0	|a0 will point to the packed decimal
+	bsrl	bindec		|perform the convert; data at a6
+	leal	FP_SCR1(%a6),%a0	|load a0 with result address
+	bral	p_write
+p_movez:
+	leal	ETEMP(%a6),%a0	|a0 will point to the packed decimal
+	clrw	2(%a0)		|clear lower word of exp
+	clrl	4(%a0)		|load second lword of ZERO
+	clrl	8(%a0)		|load third lword of ZERO
+	bra	p_write		|go write results
+p_movei:
+	fmovel	#0,%FPSR		|clear aiop
+	leal	ETEMP(%a6),%a0	|a0 will point to the packed decimal
+	clrw	2(%a0)		|clear lower word of exp
+	bra	p_write		|go write the result
+p_moven:
+	leal	ETEMP(%a6),%a0	|a0 will point to the packed decimal
+	clrw	2(%a0)		|clear lower word of exp
+	bra	p_write		|go write the result
+
+|
+| Routines to read the dynamic k-factor from Dn.
+|
+p_dyd0:
+	movel	USER_D0(%a6),%d0
+	bras	statick
+p_dyd1:
+	movel	USER_D1(%a6),%d0
+	bras	statick
+p_dyd2:
+	movel	%d2,%d0
+	bras	statick
+p_dyd3:
+	movel	%d3,%d0
+	bras	statick
+p_dyd4:
+	movel	%d4,%d0
+	bras	statick
+p_dyd5:
+	movel	%d5,%d0
+	bras	statick
+p_dyd6:
+	movel	%d6,%d0
+	bra	statick
+p_dyd7:
+	movel	%d7,%d0
+	bra	statick
+
+	|end
diff --git a/arch/m68k/fpsp040/round.S b/arch/m68k/fpsp040/round.S
new file mode 100644
index 0000000..00f9806
--- /dev/null
+++ b/arch/m68k/fpsp040/round.S
@@ -0,0 +1,649 @@
+|
+|	round.sa 3.4 7/29/91
+|
+|	handle rounding and normalization tasks
+|
+|
+|
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|ROUND	idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+|
+|	round --- round result according to precision/mode
+|
+|	a0 points to the input operand in the internal extended format
+|	d1(high word) contains rounding precision:
+|		ext = $0000xxxx
+|		sgl = $0001xxxx
+|		dbl = $0002xxxx
+|	d1(low word) contains rounding mode:
+|		RN  = $xxxx0000
+|		RZ  = $xxxx0001
+|		RM  = $xxxx0010
+|		RP  = $xxxx0011
+|	d0{31:29} contains the g,r,s bits (extended)
+|
+|	On return the value pointed to by a0 is correctly rounded,
+|	a0 is preserved and the g-r-s bits in d0 are cleared.
+|	The result is not typed - the tag field is invalid.  The
+|	result is still in the internal extended format.
+|
+|	The INEX bit of USER_FPSR will be set if the rounded result was
+|	inexact (i.e. if any of the g-r-s bits were set).
+|
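+|	For example (illustrative values built from the encodings above):
+|	d1 = $00010003 selects single precision with RP rounding, and
+|	d1 = $00020000 selects double precision with RN rounding.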
+
+	.global	round
+round:
+| If g=r=s=0 then result is exact and round is done, else set
+| the inex flag in status reg and continue.
+|
+	bsrs	ext_grs			|this subroutine looks at the
+|					:rounding precision and sets
+|					;the appropriate g-r-s bits.
+	tstl	%d0			|if grs are zero, go force
+	bne	rnd_cont		|lower bits to zero for size
+
+	swap	%d1			|set up d1.w for round prec.
+	bra	truncate
+
+rnd_cont:
+|
+| Use rounding mode as an index into a jump table for these modes.
+|
+	orl	#inx2a_mask,USER_FPSR(%a6) |set inex2/ainex
+	lea	mode_tab,%a1
+	movel	(%a1,%d1.w*4),%a1
+	jmp	(%a1)
+|
+| Jump table indexed by rounding mode in d1.w.  All following assumes
+| grs != 0.
+|
+mode_tab:
+	.long	rnd_near
+	.long	rnd_zero
+	.long	rnd_mnus
+	.long	rnd_plus
+|
+|	ROUND PLUS INFINITY
+|
+|	If sign of fp number = 0 (positive), then add 1 to l.
+|
+rnd_plus:
+	swap	%d1			|set up d1 for round prec.
+	tstb	LOCAL_SGN(%a0)		|check for sign
+	bmi	truncate		|if negative then truncate
+	movel	#0xffffffff,%d0		|force g,r,s to be all f's
+	lea	add_to_l,%a1
+	movel	(%a1,%d1.w*4),%a1
+	jmp	(%a1)
+|
+|	ROUND MINUS INFINITY
+|
+|	If sign of fp number = 1 (negative), then add 1 to l.
+|
+rnd_mnus:
+	swap	%d1			|set up d1 for round prec.
+	tstb	LOCAL_SGN(%a0)		|check for sign
+	bpl	truncate		|if positive then truncate
+	movel	#0xffffffff,%d0		|force g,r,s to be all f's
+	lea	add_to_l,%a1
+	movel	(%a1,%d1.w*4),%a1
+	jmp	(%a1)
+|
+|	ROUND ZERO
+|
+|	Always truncate.
+rnd_zero:
+	swap	%d1			|set up d1 for round prec.
+	bra	truncate
+|
+|
+|	ROUND NEAREST
+|
+|	If (g=1), then add 1 to l and if (r=s=0), then clear l
+|	Note that this will round to even in case of a tie.
+|
+rnd_near:
+	swap	%d1			|set up d1 for round prec.
+	asll	#1,%d0			|shift g-bit to c-bit
+	bcc	truncate		|if g=0 then truncate, else add 1 to l
+	lea	add_to_l,%a1
+	movel	(%a1,%d1.w*4),%a1
+	jmp	(%a1)
+
+|
+|	ext_grs --- extract guard, round and sticky bits
+|
+| Input:	d1 =		PREC:ROUND
+| Output:	d0{31:29}=	guard, round, sticky
+|
+| The ext_grs extract the guard/round/sticky bits according to the
+| selected rounding precision. It is called by the round subroutine
+| only.  All registers except d0 are kept intact. d0 becomes an
+| updated guard,round,sticky in d0{31:29}
+|
+| Notes: the ext_grs uses the round PREC, and therefore has to swap d1
+|	 prior to usage, and needs to restore d1 to original.
+|
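+| For illustration (using the d0{31:29} convention above): a set guard bit
+| with round and sticky clear is returned as d0 = $80000000, while a result
+| with only the sticky bit set is returned as d0 = $20000000.
+|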
+ext_grs:
+	swap	%d1			|have d1.w point to round precision
+	cmpiw	#0,%d1
+	bnes	sgl_or_dbl
+	bras	end_ext_grs
+
+sgl_or_dbl:
+	moveml	%d2/%d3,-(%a7)		|make some temp registers
+	cmpiw	#1,%d1
+	bnes	grs_dbl
+grs_sgl:
+	bfextu	LOCAL_HI(%a0){#24:#2},%d3	|sgl prec. g-r are 2 bits right
+	movel	#30,%d2			|of the sgl prec. limits
+	lsll	%d2,%d3			|shift g-r bits to MSB of d3
+	movel	LOCAL_HI(%a0),%d2		|get word 2 for s-bit test
+	andil	#0x0000003f,%d2		|s bit is the or of all other
+	bnes	st_stky			|bits to the right of g-r
+	tstl	LOCAL_LO(%a0)		|test lower mantissa
+	bnes	st_stky			|if any are set, set sticky
+	tstl	%d0			|test original g,r,s
+	bnes	st_stky			|if any are set, set sticky
+	bras	end_sd			|if words 3 and 4 are clr, exit
+grs_dbl:
+	bfextu	LOCAL_LO(%a0){#21:#2},%d3	|dbl-prec. g-r are 2 bits right
+	movel	#30,%d2			|of the dbl prec. limits
+	lsll	%d2,%d3			|shift g-r bits to the MSB of d3
+	movel	LOCAL_LO(%a0),%d2		|get lower mantissa  for s-bit test
+	andil	#0x000001ff,%d2		|s bit is the or-ing of all
+	bnes	st_stky			|other bits to the right of g-r
+	tstl	%d0			|test word original g,r,s
+	bnes	st_stky			|if any are set, set sticky
+	bras	end_sd			|if clear, exit
+st_stky:
+	bset	#rnd_stky_bit,%d3
+end_sd:
+	movel	%d3,%d0			|return grs to d0
+	moveml	(%a7)+,%d2/%d3		|restore scratch registers
+end_ext_grs:
+	swap	%d1			|restore d1 to original
+	rts
+
+|*******************  Local Equates
+	.set	ad_1_sgl,0x00000100	|  constant to add 1 to l-bit in sgl prec
+	.set	ad_1_dbl,0x00000800	|  constant to add 1 to l-bit in dbl prec
+
+
+|Jump table for adding 1 to the l-bit indexed by rnd prec
+
+add_to_l:
+	.long	add_ext
+	.long	add_sgl
+	.long	add_dbl
+	.long	add_dbl
+|
+|	ADD SINGLE
+|
+add_sgl:
+	addl	#ad_1_sgl,LOCAL_HI(%a0)
+	bccs	scc_clr			|no mantissa overflow
+	roxrw  LOCAL_HI(%a0)		|shift v-bit back in
+	roxrw  LOCAL_HI+2(%a0)		|shift v-bit back in
+	addw	#0x1,LOCAL_EX(%a0)	|and incr exponent
+scc_clr:
+	tstl	%d0			|test for rs = 0
+	bnes	sgl_done
+	andiw  #0xfe00,LOCAL_HI+2(%a0)	|clear the l-bit
+sgl_done:
+	andil	#0xffffff00,LOCAL_HI(%a0) |truncate bits beyond sgl limit
+	clrl	LOCAL_LO(%a0)		|clear d2
+	rts
+
+|
+|	ADD EXTENDED
+|
+add_ext:
+	addql  #1,LOCAL_LO(%a0)		|add 1 to l-bit
+	bccs	xcc_clr			|test for carry out
+	addql  #1,LOCAL_HI(%a0)		|propagate carry
+	bccs	xcc_clr
+	roxrw  LOCAL_HI(%a0)		|mant is 0 so restore v-bit
+	roxrw  LOCAL_HI+2(%a0)		|mant is 0 so restore v-bit
+	roxrw	LOCAL_LO(%a0)
+	roxrw	LOCAL_LO+2(%a0)
+	addw	#0x1,LOCAL_EX(%a0)	|and inc exp
+xcc_clr:
+	tstl	%d0			|test rs = 0
+	bnes	add_ext_done
+	andib	#0xfe,LOCAL_LO+3(%a0)	|clear the l bit
+add_ext_done:
+	rts
+|
+|	ADD DOUBLE
+|
+add_dbl:
+	addl	#ad_1_dbl,LOCAL_LO(%a0)
+	bccs	dcc_clr
+	addql	#1,LOCAL_HI(%a0)		|propagate carry
+	bccs	dcc_clr
+	roxrw	LOCAL_HI(%a0)		|mant is 0 so restore v-bit
+	roxrw	LOCAL_HI+2(%a0)		|mant is 0 so restore v-bit
+	roxrw	LOCAL_LO(%a0)
+	roxrw	LOCAL_LO+2(%a0)
+	addw	#0x1,LOCAL_EX(%a0)	|incr exponent
+dcc_clr:
+	tstl	%d0			|test for rs = 0
+	bnes	dbl_done
+	andiw	#0xf000,LOCAL_LO+2(%a0)	|clear the l-bit
+
+dbl_done:
+	andil	#0xfffff800,LOCAL_LO(%a0) |truncate bits beyond dbl limit
+	rts
+
+error:
+	rts
+|
+| Truncate all other bits
+|
+trunct:
+	.long	end_rnd
+	.long	sgl_done
+	.long	dbl_done
+	.long	dbl_done
+
+truncate:
+	lea	trunct,%a1
+	movel	(%a1,%d1.w*4),%a1
+	jmp	(%a1)
+
+end_rnd:
+	rts
+
+|
+|	NORMALIZE
+|
+| These routines (nrm_zero & nrm_set) normalize the unnorm.  This
+| is done by shifting the mantissa left while decrementing the
+| exponent.
+|
+| NRM_SET shifts and decrements until there is a 1 set in the integer
+| bit of the mantissa (msb in d1).
+|
+| NRM_ZERO shifts and decrements until there is a 1 set in the integer
+| bit of the mantissa (msb in d1) unless this would mean the exponent
+| would go less than 0.  In that case the number becomes a denorm - the
+| exponent (d0) is set to 0 and the mantissa (d1 & d2) is not
+| normalized.
+|
+| Note that both routines have been optimized (for the worst case) and
+| therefore do not have the easy to follow decrement/shift loop.
+|
+|	NRM_ZERO
+|
+|	Distance to first 1 bit in mantissa = X
+|	Distance to 0 from exponent = Y
+|	If X < Y
+|	Then
+|	  nrm_set
+|	Else
+|	  shift mantissa by Y
+|	  set exponent = 0
+|
+|input:
+|	FP_SCR1 = exponent, ms mantissa part, ls mantissa part
+|output:
+|	L_SCR1{4} = fpte15 or ete15 bit
+|
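+| For example (using the X/Y notation above): with exponent Y = 3 and the
+| first mantissa one-bit X = 5 positions down, X > Y, so the mantissa is
+| shifted by only 3 and the exponent is set to 0, leaving a denorm; with
+| X = 2 < Y = 3, nrm_set normalizes the number completely.
+|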
+	.global	nrm_zero
+nrm_zero:
+	movew	LOCAL_EX(%a0),%d0
+	cmpw   #64,%d0          |see if exp > 64
+	bmis	d0_less
+	bsr	nrm_set		|exp > 64 so exp won't exceed 0
+	rts
+d0_less:
+	moveml	%d2/%d3/%d5/%d6,-(%a7)
+	movel	LOCAL_HI(%a0),%d1
+	movel	LOCAL_LO(%a0),%d2
+
+	bfffo	%d1{#0:#32},%d3	|get the distance to the first 1
+|				;in ms mant
+	beqs	ms_clr		|branch if no bits were set
+	cmpw	%d3,%d0		|if X>Y
+	bmis	greater		|then exp will go past 0 (neg) if
+|				;it is just shifted
+	bsr	nrm_set		|else exp won't go past 0
+	moveml	(%a7)+,%d2/%d3/%d5/%d6
+	rts
+greater:
+	movel	%d2,%d6		|save ls mant in d6
+	lsll	%d0,%d2		|shift ls mant by count
+	lsll	%d0,%d1		|shift ms mant by count
+	movel	#32,%d5
+	subl	%d0,%d5		|make op a denorm by shifting bits
+	lsrl	%d5,%d6		|by the number in the exp, then
+|				;set exp = 0.
+	orl	%d6,%d1		|shift the ls mant bits into the ms mant
+	movel	#0,%d0		|same as if decremented exp to 0
+|				;while shifting
+	movew	%d0,LOCAL_EX(%a0)
+	movel	%d1,LOCAL_HI(%a0)
+	movel	%d2,LOCAL_LO(%a0)
+	moveml	(%a7)+,%d2/%d3/%d5/%d6
+	rts
+ms_clr:
+	bfffo	%d2{#0:#32},%d3	|check if any bits set in ls mant
+	beqs	all_clr		|branch if none set
+	addw	#32,%d3
+	cmpw	%d3,%d0		|if X>Y
+	bmis	greater		|then branch
+	bsr	nrm_set		|else exp won't go past 0
+	moveml	(%a7)+,%d2/%d3/%d5/%d6
+	rts
+all_clr:
+	movew	#0,LOCAL_EX(%a0)	|no mantissa bits set. Set exp = 0.
+	moveml	(%a7)+,%d2/%d3/%d5/%d6
+	rts
+|
+|	NRM_SET
+|
+	.global	nrm_set
+nrm_set:
+	movel	%d7,-(%a7)
+	bfffo	LOCAL_HI(%a0){#0:#32},%d7 |find first 1 in ms mant (count to d7)
+	beqs	lower		|branch if ms mant is all 0's
+
+	movel	%d6,-(%a7)
+
+	subw	%d7,LOCAL_EX(%a0)	|sub exponent by count
+	movel	LOCAL_HI(%a0),%d0	|d0 has ms mant
+	movel	LOCAL_LO(%a0),%d1 |d1 has ls mant
+
+	lsll	%d7,%d0		|shift first 1 to j bit position
+	movel	%d1,%d6		|copy ls mant into d6
+	lsll	%d7,%d6		|shift ls mant by count
+	movel	%d6,LOCAL_LO(%a0)	|store ls mant into memory
+	moveql	#32,%d6
+	subl	%d7,%d6		|continue shift
+	lsrl	%d6,%d1		|shift off all bits but those that will
+|				;be shifted into ms mant
+	orl	%d1,%d0		|shift the ls mant bits into the ms mant
+	movel	%d0,LOCAL_HI(%a0)	|store ms mant into memory
+	moveml	(%a7)+,%d7/%d6	|restore registers
+	rts
+
+|
+| We get here if ms mant was = 0, and we assume ls mant has bits
+| set (otherwise this would have been tagged a zero not a denorm).
+|
+lower:
+	movew	LOCAL_EX(%a0),%d0	|d0 has exponent
+	movel	LOCAL_LO(%a0),%d1	|d1 has ls mant
+	subw	#32,%d0		|account for ms mant being all zeros
+	bfffo	%d1{#0:#32},%d7	|find first 1 in ls mant (count to d7)
+	subw	%d7,%d0		|subtract shift count from exp
+	lsll	%d7,%d1		|shift first 1 to integer bit in ms mant
+	movew	%d0,LOCAL_EX(%a0)	|store exp
+	movel	%d1,LOCAL_HI(%a0)	|store ms mant
+	clrl	LOCAL_LO(%a0)	|clear ls mant
+	movel	(%a7)+,%d7
+	rts
+|
+|	denorm --- denormalize an intermediate result
+|
+|	Used by underflow.
+|
+| Input:
+|	a0	 points to the operand to be denormalized
+|		 (in the internal extended format)
+|
+|	d0:	 rounding precision
+| Output:
+|	a0	 points to the denormalized result
+|		 (in the internal extended format)
+|
+|	d0	is guard,round,sticky
+|
+| d0 comes into this routine with the rounding precision. It
+| is then loaded with the denormalized exponent threshold for the
+| rounding precision.
+|
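+| For example, d0 = 1 (single) selects the single-precision threshold below;
+| if that threshold exceeds the operand's exponent by more than 67 (mantissa
+| plus g,r,s bits), every mantissa bit would be shifted off, so chk_stky only
+| records a sticky bit (if any mantissa bit was set) and clears the mantissa.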
+
+	.global	denorm
+denorm:
+	btstb	#6,LOCAL_EX(%a0)	|check for exponents between $7fff-$4000
+	beqs	no_sgn_ext
+	bsetb	#7,LOCAL_EX(%a0)	|sign extend if it is so
+no_sgn_ext:
+
+	cmpib	#0,%d0		|if 0 then extended precision
+	bnes	not_ext		|else branch
+
+	clrl	%d1		|load d1 with ext threshold
+	clrl	%d0		|clear the sticky flag
+	bsr	dnrm_lp		|denormalize the number
+	tstb	%d1		|check for inex
+	beq	no_inex		|if clr, no inex
+	bras	dnrm_inex	|if set, set inex
+
+not_ext:
+	cmpil	#1,%d0		|if 1 then single precision
+	beqs	load_sgl	|else must be 2, double prec
+
+load_dbl:
+	movew	#dbl_thresh,%d1	|put copy of threshold in d1
+	movel	%d1,%d0		|copy d1 into d0
+	subw	LOCAL_EX(%a0),%d0	|diff = threshold - exp
+	cmpw	#67,%d0		|if diff > 67 (mant + grs bits)
+	bpls	chk_stky	|then branch (all bits would be
+|				; shifted off in denorm routine)
+	clrl	%d0		|else clear the sticky flag
+	bsr	dnrm_lp		|denormalize the number
+	tstb	%d1		|check flag
+	beqs	no_inex		|if clr, no inex
+	bras	dnrm_inex	|if set, set inex
+
+load_sgl:
+	movew	#sgl_thresh,%d1	|put copy of threshold in d1
+	movel	%d1,%d0		|copy d1 into d0
+	subw	LOCAL_EX(%a0),%d0	|diff = threshold - exp
+	cmpw	#67,%d0		|if diff > 67 (mant + grs bits)
+	bpls	chk_stky	|then branch (all bits would be
+|				; shifted off in denorm routine)
+	clrl	%d0		|else clear the sticky flag
+	bsr	dnrm_lp		|denormalize the number
+	tstb	%d1		|check flag
+	beqs	no_inex		|if clr, no inex
+	bras	dnrm_inex	|if set, set inex
+
+chk_stky:
+	tstl	LOCAL_HI(%a0)	|check for any bits set
+	bnes	set_stky
+	tstl	LOCAL_LO(%a0)	|check for any bits set
+	bnes	set_stky
+	bras	clr_mant
+set_stky:
+	orl	#inx2a_mask,USER_FPSR(%a6) |set inex2/ainex
+	movel	#0x20000000,%d0	|set sticky bit in return value
+clr_mant:
+	movew	%d1,LOCAL_EX(%a0)		|load exp with threshold
+	movel	#0,LOCAL_HI(%a0)	|set d1 = 0 (ms mantissa)
+	movel	#0,LOCAL_LO(%a0)		|set d2 = 0 (ls mantissa)
+	rts
+dnrm_inex:
+	orl	#inx2a_mask,USER_FPSR(%a6) |set inex2/ainex
+no_inex:
+	rts
+
+|
+|	dnrm_lp --- normalize exponent/mantissa to specified threshold
+|
+| Input:
+|	a0		points to the operand to be denormalized
+|	d0{31:29}	initial guard,round,sticky
+|	d1{15:0}	denormalization threshold
+| Output:
+|	a0		points to the denormalized operand
+|	d0{31:29}	final guard,round,sticky
+|	d1.b		inexact flag:  all ones means inexact result
+|
+| The LOCAL_LO and LOCAL_GRS parts of the value are copied to FP_SCR2
+| so that bfext can be used to extract the new low part of the mantissa.
+| Dnrm_lp can be called with a0 pointing to ETEMP or WBTEMP and there
+| is no LOCAL_GRS scratch word following it on the fsave frame.
+|
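+| For illustration (following the case selection below): a threshold 20
+| above the unsigned exponent takes case_1, one 40 above takes case_2
+| (the surviving high-mantissa bits land in LOCAL_LO), and one 64 or more
+| above takes case_3, where at most a guard/round bit and sticky survive.
+|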
+	.global	dnrm_lp
+dnrm_lp:
+	movel	%d2,-(%sp)		|save d2 for temp use
+	btstb	#E3,E_BYTE(%a6)		|test for type E3 exception
+	beqs	not_E3			|not type E3 exception
+	bfextu	WBTEMP_GRS(%a6){#6:#3},%d2	|extract guard, round, sticky bits
+	movel	#29,%d0
+	lsll	%d0,%d2			|shift g,r,s to their positions
+	movel	%d2,%d0
+not_E3:
+	movel	(%sp)+,%d2		|restore d2
+	movel	LOCAL_LO(%a0),FP_SCR2+LOCAL_LO(%a6)
+	movel	%d0,FP_SCR2+LOCAL_GRS(%a6)
+	movel	%d1,%d0			|copy the denorm threshold
+	subw	LOCAL_EX(%a0),%d1		|d1 = threshold - uns exponent
+	bles	no_lp			|d1 <= 0
+	cmpw	#32,%d1
+	blts	case_1			|0 < d1 < 32
+	cmpw	#64,%d1
+	blts	case_2			|32 <= d1 < 64
+	bra	case_3			|d1 >= 64
+|
+| No normalization necessary
+|
+no_lp:
+	clrb	%d1			|set no inex2 reported
+	movel	FP_SCR2+LOCAL_GRS(%a6),%d0	|restore original g,r,s
+	rts
+|
+| case (0<d1<32)
+|
+case_1:
+	movel	%d2,-(%sp)
+	movew	%d0,LOCAL_EX(%a0)		|exponent = denorm threshold
+	movel	#32,%d0
+	subw	%d1,%d0			|d0 = 32 - d1
+	bfextu	LOCAL_EX(%a0){%d0:#32},%d2
+	bfextu	%d2{%d1:%d0},%d2		|d2 = new LOCAL_HI
+	bfextu	LOCAL_HI(%a0){%d0:#32},%d1	|d1 = new LOCAL_LO
+	bfextu	FP_SCR2+LOCAL_LO(%a6){%d0:#32},%d0	|d0 = new G,R,S
+	movel	%d2,LOCAL_HI(%a0)		|store new LOCAL_HI
+	movel	%d1,LOCAL_LO(%a0)		|store new LOCAL_LO
+	clrb	%d1
+	bftst	%d0{#2:#30}
+	beqs	c1nstky
+	bsetl	#rnd_stky_bit,%d0
+	st	%d1
+c1nstky:
+	movel	FP_SCR2+LOCAL_GRS(%a6),%d2	|restore original g,r,s
+	andil	#0xe0000000,%d2		|clear all but G,R,S
+	tstl	%d2			|test if original G,R,S are clear
+	beqs	grs_clear
+	orl	#0x20000000,%d0		|set sticky bit in d0
+grs_clear:
+	andil	#0xe0000000,%d0		|clear all but G,R,S
+	movel	(%sp)+,%d2
+	rts
+|
+| case (32<=d1<64)
+|
+case_2:
+	movel	%d2,-(%sp)
+	movew	%d0,LOCAL_EX(%a0)		|unsigned exponent = threshold
+	subw	#32,%d1			|d1 now between 0 and 32
+	movel	#32,%d0
+	subw	%d1,%d0			|d0 = 32 - d1
+	bfextu	LOCAL_EX(%a0){%d0:#32},%d2
+	bfextu	%d2{%d1:%d0},%d2		|d2 = new LOCAL_LO
+	bfextu	LOCAL_HI(%a0){%d0:#32},%d1	|d1 = new G,R,S
+	bftst	%d1{#2:#30}
+	bnes	c2_sstky		|bra if sticky bit to be set
+	bftst	FP_SCR2+LOCAL_LO(%a6){%d0:#32}
+	bnes	c2_sstky		|bra if sticky bit to be set
+	movel	%d1,%d0
+	clrb	%d1
+	bras	end_c2
+c2_sstky:
+	movel	%d1,%d0
+	bsetl	#rnd_stky_bit,%d0
+	st	%d1
+end_c2:
+	clrl	LOCAL_HI(%a0)		|store LOCAL_HI = 0
+	movel	%d2,LOCAL_LO(%a0)		|store LOCAL_LO
+	movel	FP_SCR2+LOCAL_GRS(%a6),%d2	|restore original g,r,s
+	andil	#0xe0000000,%d2		|clear all but G,R,S
+	tstl	%d2			|test if original G,R,S are clear
+	beqs	clear_grs
+	orl	#0x20000000,%d0		|set sticky bit in d0
+clear_grs:
+	andil	#0xe0000000,%d0		|get rid of all but G,R,S
+	movel	(%sp)+,%d2
+	rts
+|
+| d1 >= 64 Force the exponent to be the denorm threshold with the
+| correct sign.
+|
+case_3:
+	movew	%d0,LOCAL_EX(%a0)
+	tstw	LOCAL_SGN(%a0)
+	bges	c3con
+c3neg:
+	orl	#0x80000000,LOCAL_EX(%a0)
+c3con:
+	cmpw	#64,%d1
+	beqs	sixty_four
+	cmpw	#65,%d1
+	beqs	sixty_five
+|
+| Shift value is out of range.  Set d1 for inex2 flag and
+| return a zero with the given threshold.
+|
+	clrl	LOCAL_HI(%a0)
+	clrl	LOCAL_LO(%a0)
+	movel	#0x20000000,%d0
+	st	%d1
+	rts
+
+sixty_four:
+	movel	LOCAL_HI(%a0),%d0
+	bfextu	%d0{#2:#30},%d1
+	andil	#0xc0000000,%d0
+	bras	c3com
+
+sixty_five:
+	movel	LOCAL_HI(%a0),%d0
+	bfextu	%d0{#1:#31},%d1
+	andil	#0x80000000,%d0
+	lsrl	#1,%d0			|shift high bit into R bit
+
+c3com:
+	tstl	%d1
+	bnes	c3ssticky
+	tstl	LOCAL_LO(%a0)
+	bnes	c3ssticky
+	tstb	FP_SCR2+LOCAL_GRS(%a6)
+	bnes	c3ssticky
+	clrb	%d1
+	bras	c3end
+
+c3ssticky:
+	bsetl	#rnd_stky_bit,%d0
+	st	%d1
+c3end:
+	clrl	LOCAL_HI(%a0)
+	clrl	LOCAL_LO(%a0)
+	rts
+
+	|end
diff --git a/arch/m68k/fpsp040/sacos.S b/arch/m68k/fpsp040/sacos.S
new file mode 100644
index 0000000..83b00ab
--- /dev/null
+++ b/arch/m68k/fpsp040/sacos.S
@@ -0,0 +1,115 @@
+|
+|	sacos.sa 3.3 12/19/90
+|
+|	Description: The entry point sAcos computes the inverse cosine of
+|		an input argument; sAcosd does the same except for denormalized
+|		input.
+|
+|	Input: Double-extended number X in location pointed to
+|		by address register a0.
+|
+|	Output: The value arccos(X) returned in floating-point register Fp0.
+|
+|	Accuracy and Monotonicity: The returned result is within 3 ulps in
+|		64 significant bits, i.e. within 0.5001 ulp to 53 bits if the
+|		result is subsequently rounded to double precision. The
+|		result is provably monotonic in double precision.
+|
+|	Speed: The program sACOS takes approximately 310 cycles.
+|
+|	Algorithm:
+|
+|	ACOS
+|	1. If |X| >= 1, go to 3.
+|
+|	2. (|X| < 1) Calculate acos(X) by
+|		z := (1-X) / (1+X)
+|		acos(X) = 2 * atan( sqrt(z) ).
+|		Exit.
+|
+|	3. If |X| > 1, go to 5.
+|
+|	4. (|X| = 1) If X > 0, return 0. Otherwise, return Pi. Exit.
+|
+|	5. (|X| > 1) Generate an invalid operation by 0 * infinity.
+|		Exit.
+|
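+|	As an illustrative check of step 2 (assuming exact arithmetic):
+|	for X = 1/2, z = (1 - 1/2)/(1 + 1/2) = 1/3, and
+|	acos(1/2) = 2 * atan( sqrt(1/3) ) = 2 * (Pi/6) = Pi/3.
+|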
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|SACOS	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+PI:	.long 0x40000000,0xC90FDAA2,0x2168C235,0x00000000
+PIBY2:	.long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+	|xref	t_operr
+	|xref	t_frcinx
+	|xref	satan
+
+	.global	sacosd
+sacosd:
+|--ACOS(X) = PI/2 FOR DENORMALIZED X
+	fmovel		%d1,%fpcr		| ...load user's rounding mode/precision
+	fmovex		PIBY2,%fp0
+	bra		t_frcinx
+
+	.global	sacos
+sacos:
+	fmovex		(%a0),%fp0	| ...LOAD INPUT
+
+	movel		(%a0),%d0		| ...pack exponent with upper 16 fraction
+	movew		4(%a0),%d0
+	andil		#0x7FFFFFFF,%d0
+	cmpil		#0x3FFF8000,%d0
+	bges		ACOSBIG
+
+|--THIS IS THE USUAL CASE, |X| < 1
+|--ACOS(X) = 2 * ATAN(	SQRT( (1-X)/(1+X) )	)
+
+	fmoves		#0x3F800000,%fp1
+	faddx		%fp0,%fp1		| ...1+X
+	fnegx		%fp0		| ... -X
+	fadds		#0x3F800000,%fp0	| ...1-X
+	fdivx		%fp1,%fp0		| ...(1-X)/(1+X)
+	fsqrtx		%fp0		| ...SQRT((1-X)/(1+X))
+	fmovemx	%fp0-%fp0,(%a0)	| ...overwrite input
+	movel		%d1,-(%sp)	|save original users fpcr
+	clrl		%d1
+	bsr		satan		| ...ATAN(SQRT([1-X]/[1+X]))
+	fmovel		(%sp)+,%fpcr	|restore users exceptions
+	faddx		%fp0,%fp0		| ...2 * ATAN( STUFF )
+	bra		t_frcinx
+
+ACOSBIG:
+	fabsx		%fp0
+	fcmps		#0x3F800000,%fp0
+	fbgt		t_operr		|cause an operr exception
+
+|--|X| = 1, ACOS(X) = 0 OR PI
+	movel		(%a0),%d0		| ...pack exponent with upper 16 fraction
+	movew		4(%a0),%d0
+	cmpl		#0,%d0		|D0 has original exponent+fraction
+	bgts		ACOSP1
+
+|--X = -1
+|Returns PI and inexact exception
+	fmovex		PI,%fp0
+	fmovel		%d1,%FPCR
+	fadds		#0x00800000,%fp0	|cause an inexact exception to be put
+|					;into the 040 - will not trap until next
+|					;fp inst.
+	bra		t_frcinx
+
+ACOSP1:
+	fmovel		%d1,%FPCR
+	fmoves		#0x00000000,%fp0
+	rts				|facos of +1 is exact
+
+	|end
diff --git a/arch/m68k/fpsp040/sasin.S b/arch/m68k/fpsp040/sasin.S
new file mode 100644
index 0000000..5647a60
--- /dev/null
+++ b/arch/m68k/fpsp040/sasin.S
@@ -0,0 +1,104 @@
+|
+|	sasin.sa 3.3 12/19/90
+|
+|	Description: The entry point sAsin computes the inverse sine of
+|		an input argument; sAsind does the same except for denormalized
+|		input.
+|
+|	Input: Double-extended number X in location pointed to
+|		by address register a0.
+|
+|	Output: The value arcsin(X) returned in floating-point register Fp0.
+|
+|	Accuracy and Monotonicity: The returned result is within 3 ulps in
+|		64 significant bits, i.e. within 0.5001 ulp to 53 bits if the
+|		result is subsequently rounded to double precision. The
+|		result is provably monotonic in double precision.
+|
+|	Speed: The program sASIN takes approximately 310 cycles.
+|
+|	Algorithm:
+|
+|	ASIN
+|	1. If |X| >= 1, go to 3.
+|
+|	2. (|X| < 1) Calculate asin(X) by
+|		z := sqrt( [1-X][1+X] )
+|		asin(X) = atan( x / z ).
+|		Exit.
+|
+|	3. If |X| > 1, go to 5.
+|
+|	4. (|X| = 1) sgn := sign(X), return asin(X) := sgn * Pi/2. Exit.
+|
+|	5. (|X| > 1) Generate an invalid operation by 0 * infinity.
+|		Exit.
+|
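+|	As an illustrative check of step 2 (assuming exact arithmetic):
+|	for X = 1/2, z = sqrt( (1 - 1/2)(1 + 1/2) ) = sqrt(3)/2, and
+|	asin(1/2) = atan( (1/2) / (sqrt(3)/2) ) = atan( 1/sqrt(3) ) = Pi/6.
+|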
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|SASIN	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+PIBY2:	.long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+	|xref	t_operr
+	|xref	t_frcinx
+	|xref	t_extdnrm
+	|xref	satan
+
+	.global	sasind
+sasind:
+|--ASIN(X) = X FOR DENORMALIZED X
+
+	bra		t_extdnrm
+
+	.global	sasin
+sasin:
+	fmovex		(%a0),%fp0	| ...LOAD INPUT
+
+	movel		(%a0),%d0
+	movew		4(%a0),%d0
+	andil		#0x7FFFFFFF,%d0
+	cmpil		#0x3FFF8000,%d0
+	bges		asinbig
+
+|--THIS IS THE USUAL CASE, |X| < 1
+|--ASIN(X) = ATAN( X / SQRT( (1-X)(1+X) ) )
+
+	fmoves		#0x3F800000,%fp1
+	fsubx		%fp0,%fp1		| ...1-X
+	fmovemx	%fp2-%fp2,-(%a7)
+	fmoves		#0x3F800000,%fp2
+	faddx		%fp0,%fp2		| ...1+X
+	fmulx		%fp2,%fp1		| ...(1+X)(1-X)
+	fmovemx	(%a7)+,%fp2-%fp2
+	fsqrtx		%fp1		| ...SQRT([1-X][1+X])
+	fdivx		%fp1,%fp0		| ...X/SQRT([1-X][1+X])
+	fmovemx	%fp0-%fp0,(%a0)
+	bsr		satan
+	bra		t_frcinx
+
+asinbig:
+	fabsx		%fp0	 | ...|X|
+	fcmps		#0x3F800000,%fp0
+	fbgt		t_operr		|cause an operr exception
+
+|--|X| = 1, ASIN(X) = +- PI/2.
+
+	fmovex		PIBY2,%fp0
+	movel		(%a0),%d0
+	andil		#0x80000000,%d0	| ...SIGN BIT OF X
+	oril		#0x3F800000,%d0	| ...+-1 IN SGL FORMAT
+	movel		%d0,-(%sp)	| ...push SIGN(X) IN SGL-FMT
+	fmovel		%d1,%FPCR
+	fmuls		(%sp)+,%fp0
+	bra		t_frcinx
+
+	|end
diff --git a/arch/m68k/fpsp040/satan.S b/arch/m68k/fpsp040/satan.S
new file mode 100644
index 0000000..20dae22
--- /dev/null
+++ b/arch/m68k/fpsp040/satan.S
@@ -0,0 +1,478 @@
+|
+|	satan.sa 3.3 12/19/90
+|
+|	The entry point satan computes the arctangent of an
+|	input value. satand does the same except the input value is a
+|	denormalized number.
+|
+|	Input: Double-extended value in memory location pointed to by address
+|		register a0.
+|
+|	Output:	Arctan(X) returned in floating-point register Fp0.
+|
+|	Accuracy and Monotonicity: The returned result is within 2 ulps in
+|		64 significant bits, i.e. within 0.5001 ulp to 53 bits if the
+|		result is subsequently rounded to double precision. The
+|		result is provably monotonic in double precision.
+|
+|	Speed: The program satan takes approximately 160 cycles for input
+|		argument X such that 1/16 < |X| < 16. For the other arguments,
+|		the program will run no worse than 10% slower.
+|
+|	Algorithm:
+|	Step 1. If |X| >= 16 or |X| < 1/16, go to Step 5.
+|
+|	Step 2. Let X = sgn * 2**k * 1.xxxxxxxx...x. Note that k = -4, -3,..., or 3.
+|		Define F = sgn * 2**k * 1.xxxx1, i.e. the first 5 significant bits
+|		of X with a bit-1 attached at the 6-th bit position. Define u
+|		to be u = (X-F) / (1 + X*F).
+|
+|	Step 3. Approximate arctan(u) by a polynomial poly.
+|
+|	Step 4. Return arctan(F) + poly, arctan(F) is fetched from a table of values
+|		calculated beforehand. Exit.
+|
+|	Step 5. If |X| >= 16, go to Step 7.
+|
+|	Step 6. Approximate arctan(X) by an odd polynomial in X. Exit.
+|
+|	Step 7. Define X' = -1/X. Approximate arctan(X') by an odd polynomial in X'.
+|		Arctan(X) = sign(X)*Pi/2 + arctan(X'). Exit.
+|
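+|	As an illustrative check of Step 7 (values rounded for display):
+|	for X = 4, X' = -1/4, and
+|	atan(4) = Pi/2 + atan(-1/4) ~= 1.5708 - 0.2450 ~= 1.3258.
+|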
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|satan	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+BOUNDS1:	.long 0x3FFB8000,0x4002FFFF
+
+ONE:	.long 0x3F800000
+
+	.long 0x00000000
+
+ATANA3:	.long 0xBFF6687E,0x314987D8
+ATANA2:	.long 0x4002AC69,0x34A26DB3
+
+ATANA1:	.long 0xBFC2476F,0x4E1DA28E
+ATANB6:	.long 0x3FB34444,0x7F876989
+
+ATANB5:	.long 0xBFB744EE,0x7FAF45DB
+ATANB4:	.long 0x3FBC71C6,0x46940220
+
+ATANB3:	.long 0xBFC24924,0x921872F9
+ATANB2:	.long 0x3FC99999,0x99998FA9
+
+ATANB1:	.long 0xBFD55555,0x55555555
+ATANC5:	.long 0xBFB70BF3,0x98539E6A
+
+ATANC4:	.long 0x3FBC7187,0x962D1D7D
+ATANC3:	.long 0xBFC24924,0x827107B8
+
+ATANC2:	.long 0x3FC99999,0x9996263E
+ATANC1:	.long 0xBFD55555,0x55555536
+
+PPIBY2:	.long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+NPIBY2:	.long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x00000000
+PTINY:	.long 0x00010000,0x80000000,0x00000000,0x00000000
+NTINY:	.long 0x80010000,0x80000000,0x00000000,0x00000000
+
+ATANTBL:
+	.long	0x3FFB0000,0x83D152C5,0x060B7A51,0x00000000
+	.long	0x3FFB0000,0x8BC85445,0x65498B8B,0x00000000
+	.long	0x3FFB0000,0x93BE4060,0x17626B0D,0x00000000
+	.long	0x3FFB0000,0x9BB3078D,0x35AEC202,0x00000000
+	.long	0x3FFB0000,0xA3A69A52,0x5DDCE7DE,0x00000000
+	.long	0x3FFB0000,0xAB98E943,0x62765619,0x00000000
+	.long	0x3FFB0000,0xB389E502,0xF9C59862,0x00000000
+	.long	0x3FFB0000,0xBB797E43,0x6B09E6FB,0x00000000
+	.long	0x3FFB0000,0xC367A5C7,0x39E5F446,0x00000000
+	.long	0x3FFB0000,0xCB544C61,0xCFF7D5C6,0x00000000
+	.long	0x3FFB0000,0xD33F62F8,0x2488533E,0x00000000
+	.long	0x3FFB0000,0xDB28DA81,0x62404C77,0x00000000
+	.long	0x3FFB0000,0xE310A407,0x8AD34F18,0x00000000
+	.long	0x3FFB0000,0xEAF6B0A8,0x188EE1EB,0x00000000
+	.long	0x3FFB0000,0xF2DAF194,0x9DBE79D5,0x00000000
+	.long	0x3FFB0000,0xFABD5813,0x61D47E3E,0x00000000
+	.long	0x3FFC0000,0x8346AC21,0x0959ECC4,0x00000000
+	.long	0x3FFC0000,0x8B232A08,0x304282D8,0x00000000
+	.long	0x3FFC0000,0x92FB70B8,0xD29AE2F9,0x00000000
+	.long	0x3FFC0000,0x9ACF476F,0x5CCD1CB4,0x00000000
+	.long	0x3FFC0000,0xA29E7630,0x4954F23F,0x00000000
+	.long	0x3FFC0000,0xAA68C5D0,0x8AB85230,0x00000000
+	.long	0x3FFC0000,0xB22DFFFD,0x9D539F83,0x00000000
+	.long	0x3FFC0000,0xB9EDEF45,0x3E900EA5,0x00000000
+	.long	0x3FFC0000,0xC1A85F1C,0xC75E3EA5,0x00000000
+	.long	0x3FFC0000,0xC95D1BE8,0x28138DE6,0x00000000
+	.long	0x3FFC0000,0xD10BF300,0x840D2DE4,0x00000000
+	.long	0x3FFC0000,0xD8B4B2BA,0x6BC05E7A,0x00000000
+	.long	0x3FFC0000,0xE0572A6B,0xB42335F6,0x00000000
+	.long	0x3FFC0000,0xE7F32A70,0xEA9CAA8F,0x00000000
+	.long	0x3FFC0000,0xEF888432,0x64ECEFAA,0x00000000
+	.long	0x3FFC0000,0xF7170A28,0xECC06666,0x00000000
+	.long	0x3FFD0000,0x812FD288,0x332DAD32,0x00000000
+	.long	0x3FFD0000,0x88A8D1B1,0x218E4D64,0x00000000
+	.long	0x3FFD0000,0x9012AB3F,0x23E4AEE8,0x00000000
+	.long	0x3FFD0000,0x976CC3D4,0x11E7F1B9,0x00000000
+	.long	0x3FFD0000,0x9EB68949,0x3889A227,0x00000000
+	.long	0x3FFD0000,0xA5EF72C3,0x4487361B,0x00000000
+	.long	0x3FFD0000,0xAD1700BA,0xF07A7227,0x00000000
+	.long	0x3FFD0000,0xB42CBCFA,0xFD37EFB7,0x00000000
+	.long	0x3FFD0000,0xBB303A94,0x0BA80F89,0x00000000
+	.long	0x3FFD0000,0xC22115C6,0xFCAEBBAF,0x00000000
+	.long	0x3FFD0000,0xC8FEF3E6,0x86331221,0x00000000
+	.long	0x3FFD0000,0xCFC98330,0xB4000C70,0x00000000
+	.long	0x3FFD0000,0xD6807AA1,0x102C5BF9,0x00000000
+	.long	0x3FFD0000,0xDD2399BC,0x31252AA3,0x00000000
+	.long	0x3FFD0000,0xE3B2A855,0x6B8FC517,0x00000000
+	.long	0x3FFD0000,0xEA2D764F,0x64315989,0x00000000
+	.long	0x3FFD0000,0xF3BF5BF8,0xBAD1A21D,0x00000000
+	.long	0x3FFE0000,0x801CE39E,0x0D205C9A,0x00000000
+	.long	0x3FFE0000,0x8630A2DA,0xDA1ED066,0x00000000
+	.long	0x3FFE0000,0x8C1AD445,0xF3E09B8C,0x00000000
+	.long	0x3FFE0000,0x91DB8F16,0x64F350E2,0x00000000
+	.long	0x3FFE0000,0x97731420,0x365E538C,0x00000000
+	.long	0x3FFE0000,0x9CE1C8E6,0xA0B8CDBA,0x00000000
+	.long	0x3FFE0000,0xA22832DB,0xCADAAE09,0x00000000
+	.long	0x3FFE0000,0xA746F2DD,0xB7602294,0x00000000
+	.long	0x3FFE0000,0xAC3EC0FB,0x997DD6A2,0x00000000
+	.long	0x3FFE0000,0xB110688A,0xEBDC6F6A,0x00000000
+	.long	0x3FFE0000,0xB5BCC490,0x59ECC4B0,0x00000000
+	.long	0x3FFE0000,0xBA44BC7D,0xD470782F,0x00000000
+	.long	0x3FFE0000,0xBEA94144,0xFD049AAC,0x00000000
+	.long	0x3FFE0000,0xC2EB4ABB,0x661628B6,0x00000000
+	.long	0x3FFE0000,0xC70BD54C,0xE602EE14,0x00000000
+	.long	0x3FFE0000,0xCD000549,0xADEC7159,0x00000000
+	.long	0x3FFE0000,0xD48457D2,0xD8EA4EA3,0x00000000
+	.long	0x3FFE0000,0xDB948DA7,0x12DECE3B,0x00000000
+	.long	0x3FFE0000,0xE23855F9,0x69E8096A,0x00000000
+	.long	0x3FFE0000,0xE8771129,0xC4353259,0x00000000
+	.long	0x3FFE0000,0xEE57C16E,0x0D379C0D,0x00000000
+	.long	0x3FFE0000,0xF3E10211,0xA87C3779,0x00000000
+	.long	0x3FFE0000,0xF919039D,0x758B8D41,0x00000000
+	.long	0x3FFE0000,0xFE058B8F,0x64935FB3,0x00000000
+	.long	0x3FFF0000,0x8155FB49,0x7B685D04,0x00000000
+	.long	0x3FFF0000,0x83889E35,0x49D108E1,0x00000000
+	.long	0x3FFF0000,0x859CFA76,0x511D724B,0x00000000
+	.long	0x3FFF0000,0x87952ECF,0xFF8131E7,0x00000000
+	.long	0x3FFF0000,0x89732FD1,0x9557641B,0x00000000
+	.long	0x3FFF0000,0x8B38CAD1,0x01932A35,0x00000000
+	.long	0x3FFF0000,0x8CE7A8D8,0x301EE6B5,0x00000000
+	.long	0x3FFF0000,0x8F46A39E,0x2EAE5281,0x00000000
+	.long	0x3FFF0000,0x922DA7D7,0x91888487,0x00000000
+	.long	0x3FFF0000,0x94D19FCB,0xDEDF5241,0x00000000
+	.long	0x3FFF0000,0x973AB944,0x19D2A08B,0x00000000
+	.long	0x3FFF0000,0x996FF00E,0x08E10B96,0x00000000
+	.long	0x3FFF0000,0x9B773F95,0x12321DA7,0x00000000
+	.long	0x3FFF0000,0x9D55CC32,0x0F935624,0x00000000
+	.long	0x3FFF0000,0x9F100575,0x006CC571,0x00000000
+	.long	0x3FFF0000,0xA0A9C290,0xD97CC06C,0x00000000
+	.long	0x3FFF0000,0xA22659EB,0xEBC0630A,0x00000000
+	.long	0x3FFF0000,0xA388B4AF,0xF6EF0EC9,0x00000000
+	.long	0x3FFF0000,0xA4D35F10,0x61D292C4,0x00000000
+	.long	0x3FFF0000,0xA60895DC,0xFBE3187E,0x00000000
+	.long	0x3FFF0000,0xA72A51DC,0x7367BEAC,0x00000000
+	.long	0x3FFF0000,0xA83A5153,0x0956168F,0x00000000
+	.long	0x3FFF0000,0xA93A2007,0x7539546E,0x00000000
+	.long	0x3FFF0000,0xAA9E7245,0x023B2605,0x00000000
+	.long	0x3FFF0000,0xAC4C84BA,0x6FE4D58F,0x00000000
+	.long	0x3FFF0000,0xADCE4A4A,0x606B9712,0x00000000
+	.long	0x3FFF0000,0xAF2A2DCD,0x8D263C9C,0x00000000
+	.long	0x3FFF0000,0xB0656F81,0xF22265C7,0x00000000
+	.long	0x3FFF0000,0xB1846515,0x0F71496A,0x00000000
+	.long	0x3FFF0000,0xB28AAA15,0x6F9ADA35,0x00000000
+	.long	0x3FFF0000,0xB37B44FF,0x3766B895,0x00000000
+	.long	0x3FFF0000,0xB458C3DC,0xE9630433,0x00000000
+	.long	0x3FFF0000,0xB525529D,0x562246BD,0x00000000
+	.long	0x3FFF0000,0xB5E2CCA9,0x5F9D88CC,0x00000000
+	.long	0x3FFF0000,0xB692CADA,0x7ACA1ADA,0x00000000
+	.long	0x3FFF0000,0xB736AEA7,0xA6925838,0x00000000
+	.long	0x3FFF0000,0xB7CFAB28,0x7E9F7B36,0x00000000
+	.long	0x3FFF0000,0xB85ECC66,0xCB219835,0x00000000
+	.long	0x3FFF0000,0xB8E4FD5A,0x20A593DA,0x00000000
+	.long	0x3FFF0000,0xB99F41F6,0x4AFF9BB5,0x00000000
+	.long	0x3FFF0000,0xBA7F1E17,0x842BBE7B,0x00000000
+	.long	0x3FFF0000,0xBB471285,0x7637E17D,0x00000000
+	.long	0x3FFF0000,0xBBFABE8A,0x4788DF6F,0x00000000
+	.long	0x3FFF0000,0xBC9D0FAD,0x2B689D79,0x00000000
+	.long	0x3FFF0000,0xBD306A39,0x471ECD86,0x00000000
+	.long	0x3FFF0000,0xBDB6C731,0x856AF18A,0x00000000
+	.long	0x3FFF0000,0xBE31CAC5,0x02E80D70,0x00000000
+	.long	0x3FFF0000,0xBEA2D55C,0xE33194E2,0x00000000
+	.long	0x3FFF0000,0xBF0B10B7,0xC03128F0,0x00000000
+	.long	0x3FFF0000,0xBF6B7A18,0xDACB778D,0x00000000
+	.long	0x3FFF0000,0xBFC4EA46,0x63FA18F6,0x00000000
+	.long	0x3FFF0000,0xC0181BDE,0x8B89A454,0x00000000
+	.long	0x3FFF0000,0xC065B066,0xCFBF6439,0x00000000
+	.long	0x3FFF0000,0xC0AE345F,0x56340AE6,0x00000000
+	.long	0x3FFF0000,0xC0F22291,0x9CB9E6A7,0x00000000
+
+	.set	X,FP_SCR1
+	.set	XDCARE,X+2
+	.set	XFRAC,X+4
+	.set	XFRACLO,X+8
+
+	.set	ATANF,FP_SCR2
+	.set	ATANFHI,ATANF+4
+	.set	ATANFLO,ATANF+8
+
+
+	| xref	t_frcinx
+	|xref	t_extdnrm
+
+	.global	satand
+satand:
+|--ENTRY POINT FOR ATAN(X) FOR DENORMALIZED ARGUMENT
+
+	bra		t_extdnrm
+
+	.global	satan
+satan:
+|--ENTRY POINT FOR ATAN(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+
+	fmovex		(%a0),%fp0	| ...LOAD INPUT
+
+	movel		(%a0),%d0
+	movew		4(%a0),%d0
+	fmovex		%fp0,X(%a6)
+	andil		#0x7FFFFFFF,%d0
+
+	cmpil		#0x3FFB8000,%d0		| ...|X| >= 1/16?
+	bges		ATANOK1
+	bra		ATANSM
+
+ATANOK1:
+	cmpil		#0x4002FFFF,%d0		| ...|X| < 16 ?
+	bles		ATANMAIN
+	bra		ATANBIG
+
+
+|--THE MOST LIKELY CASE, |X| IN [1/16, 16). WE USE TABLE TECHNIQUE
+|--THE IDEA IS ATAN(X) = ATAN(F) + ATAN( [X-F] / [1+XF] ).
+|--SO IF F IS CHOSEN TO BE CLOSE TO X AND ATAN(F) IS STORED IN
+|--A TABLE, ALL WE NEED IS TO APPROXIMATE ATAN(U) WHERE
+|--U = (X-F)/(1+XF) IS SMALL (REMEMBER F IS CLOSE TO X). IT IS
+|--TRUE THAT A DIVIDE IS NOW NEEDED, BUT THE APPROXIMATION FOR
+|--ATAN(U) IS A VERY SHORT POLYNOMIAL AND THE INDEXING TO
+|--FETCH F AND SAVING OF REGISTERS CAN ALL BE HIDDEN UNDER THE
+|--DIVIDE. IN THE END THIS METHOD IS MUCH FASTER THAN A TRADITIONAL
+|--ONE. NOTE ALSO THAT THE TRADITIONAL SCHEME THAT APPROXIMATES
+|--ATAN(X) DIRECTLY WILL NEED TO USE A RATIONAL APPROXIMATION
+|--(DIVISION NEEDED) ANYWAY BECAUSE A POLYNOMIAL APPROXIMATION
+|--WILL INVOLVE A VERY LONG POLYNOMIAL.
+
+|--NOW WE SEE X AS +-2^K * 1.BBBBBBB....B <- 1. + 63 BITS
+|--WE CHOOSE F TO BE +-2^K * 1.BBBB1
+|--THAT IS, IT MATCHES THE EXPONENT AND FIRST 5 BITS OF X, AND THE
+|--SIXTH BIT IS SET TO 1. SINCE K = -4, -3, ..., 3, THERE
+|--ARE ONLY 8 TIMES 16 = 2^7 = 128 |F|'S. SINCE ATAN(-|F|) IS
+|-- -ATAN(|F|), WE NEED TO STORE ONLY ATAN(|F|).
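+|--THE SUBSTITUTION ABOVE IS JUST THE IDENTITY
+|--	ATAN(X) - ATAN(F) = ATAN( (X-F)/(1+X*F) )   (VALID FOR X*F > -1).
+|--AS AN ILLUSTRATIVE CHECK, X = SQRT(3) AND F = 1 GIVE
+|--	PI/3 - PI/4 = PI/12 = ATAN( (SQRT(3)-1)/(1+SQRT(3)) ) = ATAN(2-SQRT(3)).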
+
+ATANMAIN:
+
+	movew		#0x0000,XDCARE(%a6)	| ...CLEAN UP X JUST IN CASE
+	andil		#0xF8000000,XFRAC(%a6)	| ...FIRST 5 BITS
+	oril		#0x04000000,XFRAC(%a6)	| ...SET 6-TH BIT TO 1
+	movel		#0x00000000,XFRACLO(%a6)	| ...LOCATION OF X IS NOW F
+
+	fmovex		%fp0,%fp1			| ...FP1 IS X
+	fmulx		X(%a6),%fp1		| ...FP1 IS X*F, NOTE THAT X*F > 0
+	fsubx		X(%a6),%fp0		| ...FP0 IS X-F
+	fadds		#0x3F800000,%fp1		| ...FP1 IS 1 + X*F
+	fdivx		%fp1,%fp0			| ...FP0 IS U = (X-F)/(1+X*F)
+
+|--WHILE THE DIVISION IS TAKING ITS TIME, WE FETCH ATAN(|F|)
+|--CREATE ATAN(F) AND STORE IT IN ATANF, AND
+|--SAVE REGISTERS FP2.
+
+	movel		%d2,-(%a7)	| ...SAVE d2 TEMPORARILY
+	movel		%d0,%d2		| ...THE EXPO AND 16 BITS OF X
+	andil		#0x00007800,%d0	| ...4 VARYING BITS OF F'S FRACTION
+	andil		#0x7FFF0000,%d2	| ...EXPONENT OF F
+	subil		#0x3FFB0000,%d2	| ...K+4
+	asrl		#1,%d2
+	addl		%d2,%d0		| ...THE 7 BITS IDENTIFYING F
+	asrl		#7,%d0		| ...INDEX INTO TBL OF ATAN(|F|)
+	lea		ATANTBL,%a1
+	addal		%d0,%a1		| ...ADDRESS OF ATAN(|F|)
+	movel		(%a1)+,ATANF(%a6)
+	movel		(%a1)+,ATANFHI(%a6)
+	movel		(%a1)+,ATANFLO(%a6)	| ...ATANF IS NOW ATAN(|F|)
+	movel		X(%a6),%d0		| ...LOAD SIGN AND EXPO. AGAIN
+	andil		#0x80000000,%d0	| ...SIGN(F)
+	orl		%d0,ATANF(%a6)	| ...ATANF IS NOW SIGN(F)*ATAN(|F|)
+	movel		(%a7)+,%d2	| ...RESTORE d2
+
+|--THAT'S ALL I HAVE TO DO FOR NOW,
+|--BUT ALAS, THE DIVIDE IS STILL CRANKING!
+
+|--U IN FP0, WE ARE NOW READY TO COMPUTE ATAN(U) AS
+|--U + A1*U*V*(A2 + V*(A3 + V)), V = U*U
+|--THE POLYNOMIAL MAY LOOK STRANGE, BUT IS NEVERTHELESS CORRECT.
+|--THE NATURAL FORM IS U + U*V*(A1 + V*(A2 + V*A3))
+|--WHAT WE HAVE HERE IS MERELY	A1 = A3, A2 = A1/A3, A3 = A2/A3.
+|--THE REASON FOR THIS REARRANGEMENT IS TO MAKE THE INDEPENDENT
+|--PARTS A1*U*V AND (A2 + ... STUFF) MORE LOAD-BALANCED
+
+
+	fmovex		%fp0,%fp1
+	fmulx		%fp1,%fp1
+	fmoved		ATANA3,%fp2
+	faddx		%fp1,%fp2		| ...A3+V
+	fmulx		%fp1,%fp2		| ...V*(A3+V)
+	fmulx		%fp0,%fp1		| ...U*V
+	faddd		ATANA2,%fp2	| ...A2+V*(A3+V)
+	fmuld		ATANA1,%fp1	| ...A1*U*V
+	fmulx		%fp2,%fp1		| ...A1*U*V*(A2+V*(A3+V))
+
+	faddx		%fp1,%fp0		| ...ATAN(U), FP1 RELEASED
+	fmovel		%d1,%FPCR		|restore users exceptions
+	faddx		ATANF(%a6),%fp0	| ...ATAN(X)
+	bra		t_frcinx
+
+ATANBORS:
+|--|X| IS IN d0 IN COMPACT FORM. FP1, d0 SAVED.
+|--FP0 IS X AND |X| <= 1/16 OR |X| >= 16.
+	cmpil		#0x3FFF8000,%d0
+	bgt		ATANBIG	| ...I.E. |X| >= 16
+
+ATANSM:
+|--|X| <= 1/16
+|--IF |X| < 2^(-40), RETURN X AS ANSWER. OTHERWISE, APPROXIMATE
+|--ATAN(X) BY X + X*Y*(B1+Y*(B2+Y*(B3+Y*(B4+Y*(B5+Y*B6)))))
+|--WHICH IS X + X*Y*( [B1+Z*(B3+Z*B5)] + [Y*(B2+Z*(B4+Z*B6))] )
+|--WHERE Y = X*X, AND Z = Y*Y.
+
+	cmpil		#0x3FD78000,%d0
+	blt		ATANTINY
+|--COMPUTE POLYNOMIAL
+	fmulx		%fp0,%fp0	| ...FP0 IS Y = X*X
+
+
+	movew		#0x0000,XDCARE(%a6)
+
+	fmovex		%fp0,%fp1
+	fmulx		%fp1,%fp1		| ...FP1 IS Z = Y*Y
+
+	fmoved		ATANB6,%fp2
+	fmoved		ATANB5,%fp3
+
+	fmulx		%fp1,%fp2		| ...Z*B6
+	fmulx		%fp1,%fp3		| ...Z*B5
+
+	faddd		ATANB4,%fp2	| ...B4+Z*B6
+	faddd		ATANB3,%fp3	| ...B3+Z*B5
+
+	fmulx		%fp1,%fp2		| ...Z*(B4+Z*B6)
+	fmulx		%fp3,%fp1		| ...Z*(B3+Z*B5)
+
+	faddd		ATANB2,%fp2	| ...B2+Z*(B4+Z*B6)
+	faddd		ATANB1,%fp1	| ...B1+Z*(B3+Z*B5)
+
+	fmulx		%fp0,%fp2		| ...Y*(B2+Z*(B4+Z*B6))
+	fmulx		X(%a6),%fp0		| ...X*Y
+
+	faddx		%fp2,%fp1		| ...[B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]
+
+
+	fmulx		%fp1,%fp0	| ...X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))])
+
+	fmovel		%d1,%FPCR		|restore users exceptions
+	faddx		X(%a6),%fp0
+
+	bra		t_frcinx
+
+ATANTINY:
+|--|X| < 2^(-40), ATAN(X) = X
+	movew		#0x0000,XDCARE(%a6)
+
+	fmovel		%d1,%FPCR		|restore users exceptions
+	fmovex		X(%a6),%fp0	|last inst - possible exception set
+
+	bra		t_frcinx
+
+ATANBIG:
+|--IF |X| > 2^(100), RETURN	SIGN(X)*(PI/2 - TINY). OTHERWISE,
+|--RETURN SIGN(X)*PI/2 + ATAN(-1/X).
+	cmpil		#0x40638000,%d0
+	bgt		ATANHUGE
+
+|--APPROXIMATE ATAN(-1/X) BY
+|--X'+X'*Y*(C1+Y*(C2+Y*(C3+Y*(C4+Y*C5)))), X' = -1/X, Y = X'*X'
+|--THIS CAN BE RE-WRITTEN AS
+|--X'+X'*Y*( [C1+Z*(C3+Z*C5)] + [Y*(C2+Z*C4)] ), Z = Y*Y.
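+|
+|--FOR ILLUSTRATION, THE |X| >= 16 PATH AS A DOUBLE-PRECISION C SKETCH;
+|--THE CUTOFF AND THE TINY CONSTANT ARE STAND-INS FOR THE 2^(100) TEST
+|--AND THE PTINY/NTINY VALUES USED BELOW:
+|
+|	#include <math.h>
+|
+|	static double atan_big_sketch(double x)
+|	{
+|		if (fabs(x) > 0x1p100)		/* huge: sign(x)*(pi/2 - tiny),  */
+|			return copysign(M_PI_2, x)	/* subtraction only marks   */
+|			       - copysign(1e-300, x);	/* the result inexact       */
+|		return copysign(M_PI_2, x) + atan(-1.0 / x);
+|	}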
+
+	fmoves		#0xBF800000,%fp1	| ...LOAD -1
+	fdivx		%fp0,%fp1		| ...FP1 IS -1/X
+
+
+|--DIVIDE IS STILL CRANKING
+
+	fmovex		%fp1,%fp0		| ...FP0 IS X'
+	fmulx		%fp0,%fp0		| ...FP0 IS Y = X'*X'
+	fmovex		%fp1,X(%a6)		| ...X IS REALLY X'
+
+	fmovex		%fp0,%fp1
+	fmulx		%fp1,%fp1		| ...FP1 IS Z = Y*Y
+
+	fmoved		ATANC5,%fp3
+	fmoved		ATANC4,%fp2
+
+	fmulx		%fp1,%fp3		| ...Z*C5
+	fmulx		%fp1,%fp2		| ...Z*C4
+
+	faddd		ATANC3,%fp3	| ...C3+Z*C5
+	faddd		ATANC2,%fp2	| ...C2+Z*C4
+
+	fmulx		%fp3,%fp1		| ...Z*(C3+Z*C5), FP3 RELEASED
+	fmulx		%fp0,%fp2		| ...Y*(C2+Z*C4)
+
+	faddd		ATANC1,%fp1	| ...C1+Z*(C3+Z*C5)
+	fmulx		X(%a6),%fp0		| ...X'*Y
+
+	faddx		%fp2,%fp1		| ...[Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)]
+
+
+	fmulx		%fp1,%fp0		| ...X'*Y*([C1+Z*(C3+Z*C5)]
+|					...	+[Y*(C2+Z*C4)])
+	faddx		X(%a6),%fp0
+
+	fmovel		%d1,%FPCR		|restore users exceptions
+
+	btstb		#7,(%a0)
+	beqs		pos_big
+
+neg_big:
+	faddx		NPIBY2,%fp0
+	bra		t_frcinx
+
+pos_big:
+	faddx		PPIBY2,%fp0
+	bra		t_frcinx
+
+ATANHUGE:
+|--RETURN SIGN(X)*(PIBY2 - TINY) = SIGN(X)*PIBY2 - SIGN(X)*TINY
+	btstb		#7,(%a0)
+	beqs		pos_huge
+
+neg_huge:
+	fmovex		NPIBY2,%fp0
+	fmovel		%d1,%fpcr
+	fsubx		NTINY,%fp0
+	bra		t_frcinx
+
+pos_huge:
+	fmovex		PPIBY2,%fp0
+	fmovel		%d1,%fpcr
+	fsubx		PTINY,%fp0
+	bra		t_frcinx
+
+	|end
diff --git a/arch/m68k/fpsp040/satanh.S b/arch/m68k/fpsp040/satanh.S
new file mode 100644
index 0000000..20f0781
--- /dev/null
+++ b/arch/m68k/fpsp040/satanh.S
@@ -0,0 +1,104 @@
+|
+|	satanh.sa 3.3 12/19/90
+|
+|	The entry point satanh computes the inverse
+|	hyperbolic tangent of
+|	an input argument; satanhd does the same except for denormalized
+|	input.
+|
+|	Input: Double-extended number X in location pointed to
+|		by address register a0.
+|
+|	Output: The value arctanh(X) returned in floating-point register Fp0.
+|
+|	Accuracy and Monotonicity: The returned result is within 3 ulps in
+|		64 significant bits, i.e. within 0.5001 ulp to 53 bits if the
+|		result is subsequently rounded to double precision. The
+|		result is provably monotonic in double precision.
+|
+|	Speed: The program satanh takes approximately 270 cycles.
+|
+|	Algorithm:
+|
+|	ATANH
+|	1. If |X| >= 1, go to 3.
+|
+|	2. (|X| < 1) Calculate atanh(X) by
+|		sgn := sign(X)
+|		y := |X|
+|		z := 2y/(1-y)
+|		atanh(X) := sgn * (1/2) * log1p(z)
+|		Exit.
+|
+|	3. If |X| > 1, go to 5.
+|
+|	4. (|X| = 1) Generate infinity with an appropriate sign and
+|		divide-by-zero by
+|		sgn := sign(X)
+|		atanh(X) := sgn / (+0).
+|		Exit.
+|
+|	5. (|X| > 1) Generate an invalid operation by 0 * infinity.
+|		Exit.
+|
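+|	The steps above amount to the following double-precision sketch
+|	(illustration only: the emulation below works in extended
+|	precision and raises the IEEE exceptions through the FPSP trap
+|	routines rather than through a host libm):
+|
+|		#include <math.h>
+|
+|		static double atanh_sketch(double x)
+|		{
+|			double y = fabs(x);
+|
+|			if (y < 1.0) {			/* step 2 */
+|				double z = 2.0 * y / (1.0 - y);
+|				return copysign(0.5 * log1p(z), x);
+|			}
+|			if (y == 1.0)			/* step 4: dz, +/- inf */
+|				return copysign(1.0, x) / 0.0;
+|			return 0.0 * INFINITY;		/* step 5: invalid, NaN */
+|		}
+|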
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|satanh	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+	|xref	t_dz
+	|xref	t_operr
+	|xref	t_frcinx
+	|xref	t_extdnrm
+	|xref	slognp1
+
+	.global	satanhd
+satanhd:
+|--ATANH(X) = X FOR DENORMALIZED X
+
+	bra		t_extdnrm
+
+	.global	satanh
+satanh:
+	movel		(%a0),%d0
+	movew		4(%a0),%d0
+	andil		#0x7FFFFFFF,%d0
+	cmpil		#0x3FFF8000,%d0
+	bges		ATANHBIG
+
+|--THIS IS THE USUAL CASE, |X| < 1
+|--Y = |X|, Z = 2Y/(1-Y), ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z).
+
+	fabsx		(%a0),%fp0	| ...Y = |X|
+	fmovex		%fp0,%fp1
+	fnegx		%fp1		| ...-Y
+	faddx		%fp0,%fp0		| ...2Y
+	fadds		#0x3F800000,%fp1	| ...1-Y
+	fdivx		%fp1,%fp0		| ...2Y/(1-Y)
+	movel		(%a0),%d0
+	andil		#0x80000000,%d0
+	oril		#0x3F000000,%d0	| ...SIGN(X)*HALF
+	movel		%d0,-(%sp)
+
+	fmovemx	%fp0-%fp0,(%a0)	| ...overwrite input
+	movel		%d1,-(%sp)
+	clrl		%d1
+	bsr		slognp1		| ...LOG1P(Z)
+	fmovel		(%sp)+,%fpcr
+	fmuls		(%sp)+,%fp0
+	bra		t_frcinx
+
+ATANHBIG:
+	fabsx		(%a0),%fp0	| ...|X|
+	fcmps		#0x3F800000,%fp0
+	fbgt		t_operr
+	bra		t_dz
+
+	|end
diff --git a/arch/m68k/fpsp040/scale.S b/arch/m68k/fpsp040/scale.S
new file mode 100644
index 0000000..5c9b805
--- /dev/null
+++ b/arch/m68k/fpsp040/scale.S
@@ -0,0 +1,371 @@
+|
+|	scale.sa 3.3 7/30/91
+|
+|	The entry point sSCALE computes the destination operand
+|	scaled by the source operand.  If the absolute value of
+|	the source operand is >= 2^14, an overflow or underflow
+|	is returned.
+|
+|	The entry point sscale is called from do_func to emulate
+|	the fscale unimplemented instruction.
+|
+|	Input: Double-extended destination operand in FPTEMP,
+|		double-extended source operand in ETEMP.
+|
+|	Output: The function returns scale(X,Y) to fp0.
+|
+|	Modifies: fp0.
+|
+|	Algorithm:
+|
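+|	In rough terms the operation being emulated is
+|	scale(X,Y) = X * 2**(int(Y)), with the source chopped to an
+|	integer (the code below forces round-to-zero for that
+|	conversion).  A double-precision sketch, illustration only:
+|
+|		#include <math.h>
+|
+|		static double fscale_sketch(double dst, double src)
+|		{
+|			/* assumes |src| < 2^14; larger sources take the
+|			   overflow/underflow paths handled below */
+|			return ldexp(dst, (int)trunc(src));
+|		}
+|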
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|SCALE    idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	|xref	t_ovfl2
+	|xref	t_unfl
+	|xref	round
+	|xref	t_resdnrm
+
+SRC_BNDS: .short	0x3fff,0x400c
+
+|
+| This entry point is used by the unimplemented instruction exception
+| handler.
+|
+|
+|
+|	FSCALE
+|
+	.global	sscale
+sscale:
+	fmovel		#0,%fpcr		|clr user enabled exc
+	clrl		%d1
+	movew		FPTEMP(%a6),%d1	|get dest exponent
+	smi		L_SCR1(%a6)	|use L_SCR1 to hold sign
+	andil		#0x7fff,%d1	|strip sign
+	movew		ETEMP(%a6),%d0	|check src bounds
+	andiw		#0x7fff,%d0	|clr sign bit
+	cmp2w		SRC_BNDS,%d0
+	bccs		src_in
+	cmpiw		#0x400c,%d0	|test for too large
+	bge		src_out
+|
+| The source input is below 1, so we check for denormalized numbers
+| and set unfl.
+|
+src_small:
+	moveb		DTAG(%a6),%d0
+	andib		#0xe0,%d0
+	tstb		%d0
+	beqs		no_denorm
+	st		STORE_FLG(%a6)	|dest already contains result
+	orl		#unfl_mask,USER_FPSR(%a6) |set UNFL
+den_done:
+	leal		FPTEMP(%a6),%a0
+	bra		t_resdnrm
+no_denorm:
+	fmovel		USER_FPCR(%a6),%FPCR
+	fmovex		FPTEMP(%a6),%fp0	|simply return dest
+	rts
+
+
+|
+| Source is within 2^14 range.  To perform the int operation,
+| move it to d0.
+|
+src_in:
+	fmovex		ETEMP(%a6),%fp0	|move in src for int
+	fmovel		#rz_mode,%fpcr	|force rz for src conversion
+	fmovel		%fp0,%d0		|int src to d0
+	fmovel		#0,%FPSR		|clr status from above
+	tstw		ETEMP(%a6)	|check src sign
+	blt		src_neg
+|
+| Source is positive.  Add the src to the dest exponent.
+| The result can be denormalized, if src = 0, or overflow,
+| if the result of the add sets a bit in the upper word.
+|
+src_pos:
+	tstw		%d1		|check for denorm
+	beq		dst_dnrm
+	addl		%d0,%d1		|add src to dest exp
+	beqs		denorm		|if zero, result is denorm
+	cmpil		#0x7fff,%d1	|test for overflow
+	bges		ovfl
+	tstb		L_SCR1(%a6)
+	beqs		spos_pos
+	orw		#0x8000,%d1
+spos_pos:
+	movew		%d1,FPTEMP(%a6)	|result in FPTEMP
+	fmovel		USER_FPCR(%a6),%FPCR
+	fmovex		FPTEMP(%a6),%fp0	|write result to fp0
+	rts
+ovfl:
+	tstb		L_SCR1(%a6)
+	beqs		sovl_pos
+	orw		#0x8000,%d1
+sovl_pos:
+	movew		FPTEMP(%a6),ETEMP(%a6)	|result in ETEMP
+	movel		FPTEMP_HI(%a6),ETEMP_HI(%a6)
+	movel		FPTEMP_LO(%a6),ETEMP_LO(%a6)
+	bra		t_ovfl2
+
+denorm:
+	tstb		L_SCR1(%a6)
+	beqs		den_pos
+	orw		#0x8000,%d1
+den_pos:
+	tstl		FPTEMP_HI(%a6)	|check j bit
+	blts		nden_exit	|if set, not denorm
+	movew		%d1,ETEMP(%a6)	|input expected in ETEMP
+	movel		FPTEMP_HI(%a6),ETEMP_HI(%a6)
+	movel		FPTEMP_LO(%a6),ETEMP_LO(%a6)
+	orl		#unfl_bit,USER_FPSR(%a6)	|set unfl
+	leal		ETEMP(%a6),%a0
+	bra		t_resdnrm
+nden_exit:
+	movew		%d1,FPTEMP(%a6)	|result in FPTEMP
+	fmovel		USER_FPCR(%a6),%FPCR
+	fmovex		FPTEMP(%a6),%fp0	|write result to fp0
+	rts
+
+|
+| Source is negative.  Add the src to the dest exponent.
+| (The result exponent will be reduced).  The result can be
+| denormalized.
+|
+src_neg:
+	addl		%d0,%d1		|add src to dest
+	beqs		denorm		|if zero, result is denorm
+	blts		fix_dnrm	|if negative, result is
+|					;needing denormalization
+	tstb		L_SCR1(%a6)
+	beqs		sneg_pos
+	orw		#0x8000,%d1
+sneg_pos:
+	movew		%d1,FPTEMP(%a6)	|result in FPTEMP
+	fmovel		USER_FPCR(%a6),%FPCR
+	fmovex		FPTEMP(%a6),%fp0	|write result to fp0
+	rts
+
+
+|
+| The result exponent is below denorm value.  Test for catastrophic
+| underflow and force zero if true.  If not, try to shift the
+| mantissa right until a zero exponent exists.
+|
+fix_dnrm:
+	cmpiw		#0xffc0,%d1	|lower bound for normalization
+	blt		fix_unfl	|if lower, catastrophic unfl
+	movew		%d1,%d0		|use d0 for exp
+	movel		%d2,-(%a7)	|free d2 for norm
+	movel		FPTEMP_HI(%a6),%d1
+	movel		FPTEMP_LO(%a6),%d2
+	clrl		L_SCR2(%a6)
+fix_loop:
+	addw		#1,%d0		|drive d0 to 0
+	lsrl		#1,%d1		|while shifting the
+	roxrl		#1,%d2		|mantissa to the right
+	bccs		no_carry
+	st		L_SCR2(%a6)	|use L_SCR2 to capture inex
+no_carry:
+	tstw		%d0		|it is finished when
+	blts		fix_loop	|d0 reaches zero
+	tstb		L_SCR2(%a6)
+	beqs		tst_zero
+	orl		#unfl_inx_mask,USER_FPSR(%a6)
+|					;set unfl, aunfl, ainex
+|
+| Test for zero. If zero, simply use fmove to return +/- zero
+| to the fpu.
+|
+tst_zero:
+	clrw		FPTEMP_EX(%a6)
+	tstb		L_SCR1(%a6)	|test for sign
+	beqs		tst_con
+	orw		#0x8000,FPTEMP_EX(%a6) |set sign bit
+tst_con:
+	movel		%d1,FPTEMP_HI(%a6)
+	movel		%d2,FPTEMP_LO(%a6)
+	movel		(%a7)+,%d2
+	tstl		%d1
+	bnes		not_zero
+	tstl		FPTEMP_LO(%a6)
+	bnes		not_zero
+|
+| Result is zero.  Check for rounding mode to set lsb.  If the
+| mode is rp, and the zero is positive, return smallest denorm.
+| If the mode is rm, and the zero is negative, return smallest
+| negative denorm.
+|
+	btstb		#5,FPCR_MODE(%a6) |test if rm or rp
+	beqs		no_dir
+	btstb		#4,FPCR_MODE(%a6) |check which one
+	beqs		zer_rm
+zer_rp:
+	tstb		L_SCR1(%a6)	|check sign
+	bnes		no_dir		|if set, neg op, no inc
+	movel		#1,FPTEMP_LO(%a6) |set lsb
+	bras		sm_dnrm
+zer_rm:
+	tstb		L_SCR1(%a6)	|check sign
+	beqs		no_dir		|if clr, neg op, no inc
+	movel		#1,FPTEMP_LO(%a6) |set lsb
+	orl		#neg_mask,USER_FPSR(%a6) |set N
+	bras		sm_dnrm
+no_dir:
+	fmovel		USER_FPCR(%a6),%FPCR
+	fmovex		FPTEMP(%a6),%fp0	|use fmove to set cc's
+	rts
+
+|
+| The rounding mode changed the zero to a smallest denorm. Call
+| t_resdnrm with exceptional operand in ETEMP.
+|
+sm_dnrm:
+	movel		FPTEMP_EX(%a6),ETEMP_EX(%a6)
+	movel		FPTEMP_HI(%a6),ETEMP_HI(%a6)
+	movel		FPTEMP_LO(%a6),ETEMP_LO(%a6)
+	leal		ETEMP(%a6),%a0
+	bra		t_resdnrm
+
+|
+| Result is still denormalized.
+|
+not_zero:
+	orl		#unfl_mask,USER_FPSR(%a6) |set unfl
+	tstb		L_SCR1(%a6)	|check for sign
+	beqs		fix_exit
+	orl		#neg_mask,USER_FPSR(%a6) |set N
+fix_exit:
+	bras		sm_dnrm
+
+
+|
+| The result has underflowed to zero. Return zero and set
+| unfl, aunfl, and ainex.
+|
+fix_unfl:
+	orl		#unfl_inx_mask,USER_FPSR(%a6)
+	btstb		#5,FPCR_MODE(%a6) |test if rm or rp
+	beqs		no_dir2
+	btstb		#4,FPCR_MODE(%a6) |check which one
+	beqs		zer_rm2
+zer_rp2:
+	tstb		L_SCR1(%a6)	|check sign
+	bnes		no_dir2		|if set, neg op, no inc
+	clrl		FPTEMP_EX(%a6)
+	clrl		FPTEMP_HI(%a6)
+	movel		#1,FPTEMP_LO(%a6) |set lsb
+	bras		sm_dnrm		|return smallest denorm
+zer_rm2:
+	tstb		L_SCR1(%a6)	|check sign
+	beqs		no_dir2		|if clr, neg op, no inc
+	movew		#0x8000,FPTEMP_EX(%a6)
+	clrl		FPTEMP_HI(%a6)
+	movel		#1,FPTEMP_LO(%a6) |set lsb
+	orl		#neg_mask,USER_FPSR(%a6) |set N
+	bra		sm_dnrm		|return smallest denorm
+
+no_dir2:
+	tstb		L_SCR1(%a6)
+	bges		pos_zero
+neg_zero:
+	clrl		FP_SCR1(%a6)	|clear the exceptional operand
+	clrl		FP_SCR1+4(%a6)	|for gen_except.
+	clrl		FP_SCR1+8(%a6)
+	fmoves		#0x80000000,%fp0
+	rts
+pos_zero:
+	clrl		FP_SCR1(%a6)	|clear the exceptional operand
+	clrl		FP_SCR1+4(%a6)	|for gen_except.
+	clrl		FP_SCR1+8(%a6)
+	fmoves		#0x00000000,%fp0
+	rts
+
+|
+| The destination is a denormalized number.  It must be handled
+| by first shifting the bits in the mantissa until it is normalized,
+| then adding the remainder of the source to the exponent.
+|
+dst_dnrm:
+	moveml		%d2/%d3,-(%a7)
+	movew		FPTEMP_EX(%a6),%d1
+	movel		FPTEMP_HI(%a6),%d2
+	movel		FPTEMP_LO(%a6),%d3
+dst_loop:
+	tstl		%d2		|test for normalized result
+	blts		dst_norm	|exit loop if so
+	tstl		%d0		|otherwise, test shift count
+	beqs		dst_fin		|if zero, shifting is done
+	subil		#1,%d0		|dec src
+	lsll		#1,%d3
+	roxll		#1,%d2
+	bras		dst_loop
+|
+| Destination became normalized.  Simply add the remaining
+| portion of the src to the exponent.
+|
+dst_norm:
+	addw		%d0,%d1		|dst is normalized; add src
+	tstb		L_SCR1(%a6)
+	beqs		dnrm_pos
+	orl		#0x8000,%d1
+dnrm_pos:
+	movemw		%d1,FPTEMP_EX(%a6)
+	moveml		%d2,FPTEMP_HI(%a6)
+	moveml		%d3,FPTEMP_LO(%a6)
+	fmovel		USER_FPCR(%a6),%FPCR
+	fmovex		FPTEMP(%a6),%fp0
+	moveml		(%a7)+,%d2/%d3
+	rts
+
+|
+| Destination remained denormalized.  Call t_excdnrm with
+| exceptional operand in ETEMP.
+|
+dst_fin:
+	tstb		L_SCR1(%a6)	|check for sign
+	beqs		dst_exit
+	orl		#neg_mask,USER_FPSR(%a6) |set N
+	orl		#0x8000,%d1
+dst_exit:
+	movemw		%d1,ETEMP_EX(%a6)
+	moveml		%d2,ETEMP_HI(%a6)
+	moveml		%d3,ETEMP_LO(%a6)
+	orl		#unfl_mask,USER_FPSR(%a6) |set unfl
+	moveml		(%a7)+,%d2/%d3
+	leal		ETEMP(%a6),%a0
+	bra		t_resdnrm
+
+|
+| Source is outside of 2^14 range.  Test the sign and branch
+| to the appropriate exception handler.
+|
+src_out:
+	tstb		L_SCR1(%a6)
+	beqs		scro_pos
+	orl		#0x8000,%d1
+scro_pos:
+	movel		FPTEMP_HI(%a6),ETEMP_HI(%a6)
+	movel		FPTEMP_LO(%a6),ETEMP_LO(%a6)
+	tstw		ETEMP(%a6)
+	blts		res_neg
+res_pos:
+	movew		%d1,ETEMP(%a6)	|result in ETEMP
+	bra		t_ovfl2
+res_neg:
+	movew		%d1,ETEMP(%a6)	|result in ETEMP
+	leal		ETEMP(%a6),%a0
+	bra		t_unfl
+	|end
diff --git a/arch/m68k/fpsp040/scosh.S b/arch/m68k/fpsp040/scosh.S
new file mode 100644
index 0000000..e81edbb
--- /dev/null
+++ b/arch/m68k/fpsp040/scosh.S
@@ -0,0 +1,132 @@
+|
+|	scosh.sa 3.1 12/10/90
+|
+|	The entry point sCosh computes the hyperbolic cosine of
+|	an input argument; sCoshd does the same except for denormalized
+|	input.
+|
+|	Input: Double-extended number X in location pointed to
+|		by address register a0.
+|
+|	Output: The value cosh(X) returned in floating-point register Fp0.
+|
+|	Accuracy and Monotonicity: The returned result is within 3 ulps in
+|		64 significant bits, i.e. within 0.5001 ulp to 53 bits if the
+|		result is subsequently rounded to double precision. The
+|		result is provably monotonic in double precision.
+|
+|	Speed: The program sCOSH takes approximately 250 cycles.
+|
+|	Algorithm:
+|
+|	COSH
+|	1. If |X| > 16380 log2, go to 3.
+|
+|	2. (|X| <= 16380 log2) Cosh(X) is obtained by the formulae
+|		y = |X|, z = exp(Y), and
+|		cosh(X) = (1/2)*( z + 1/z ).
+|		Exit.
+|
+|	3. (|X| > 16380 log2). If |X| > 16480 log2, go to 5.
+|
+|	4. (16380 log2 < |X| <= 16480 log2)
+|		cosh(X) = exp(|X|)/2.
+|		However, invoking exp(|X|) may cause premature overflow.
+|		Thus, we calculate cosh(X) as follows:
+|		Y	:= |X|
+|		Fact	:=	2**(16380)
+|		Y'	:= Y - 16381 log2
+|		cosh(X) := Fact * exp(Y').
+|		Exit.
+|
+|	5. (|X| > 16480 log2) cosh(X) must overflow. Return
+|		Huge*Huge to generate overflow and an infinity with
+|		the appropriate sign. Huge is the largest finite number in
+|		extended format. Exit.
+|
+|
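+|	The steps above correspond roughly to this double-precision
+|	sketch (illustration only; the thresholds are the double
+|	analogues of 16380 log2 and the scaling mirrors step 4):
+|
+|		#include <math.h>
+|
+|		static double cosh_sketch(double x)
+|		{
+|			const double ln2 = 0.6931471805599453;
+|			double y = fabs(x);
+|
+|			if (y <= 709.0) {	/* usual case, step 2 */
+|				double z = exp(y);
+|				return 0.5 * (z + 1.0 / z);
+|			}
+|			/* step 4: exp(y)/2 without premature overflow;
+|			   step 5 falls out as exp() overflowing to +inf */
+|			return ldexp(exp(y - 1023.0 * ln2), 1022);
+|		}
+|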
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|SCOSH	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+	|xref	t_ovfl
+	|xref	t_frcinx
+	|xref	setox
+
+T1:	.long 0x40C62D38,0xD3D64634 | ... 16381 LOG2 LEAD
+T2:	.long 0x3D6F90AE,0xB1E75CC7 | ... 16381 LOG2 TRAIL
+
+TWO16380: .long 0x7FFB0000,0x80000000,0x00000000,0x00000000
+
+	.global	scoshd
+scoshd:
+|--COSH(X) = 1 FOR DENORMALIZED X
+
+	fmoves		#0x3F800000,%fp0
+
+	fmovel		%d1,%FPCR
+	fadds		#0x00800000,%fp0
+	bra		t_frcinx
+
+	.global	scosh
+scosh:
+	fmovex		(%a0),%fp0	| ...LOAD INPUT
+
+	movel		(%a0),%d0
+	movew		4(%a0),%d0
+	andil		#0x7FFFFFFF,%d0
+	cmpil		#0x400CB167,%d0
+	bgts		COSHBIG
+
+|--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+|--COSH(X) = (1/2) * ( EXP(X) + 1/EXP(X) )
+
+	fabsx		%fp0		| ...|X|
+
+	movel		%d1,-(%sp)
+	clrl		%d1
+	fmovemx	%fp0-%fp0,(%a0)	|pass parameter to setox
+	bsr		setox		| ...FP0 IS EXP(|X|)
+	fmuls		#0x3F000000,%fp0	| ...(1/2)EXP(|X|)
+	movel		(%sp)+,%d1
+
+	fmoves		#0x3E800000,%fp1	| ...(1/4)
+	fdivx		%fp0,%fp1		| ...1/(2 EXP(|X|))
+
+	fmovel		%d1,%FPCR
+	faddx		%fp1,%fp0
+
+	bra		t_frcinx
+
+COSHBIG:
+	cmpil		#0x400CB2B3,%d0
+	bgts		COSHHUGE
+
+	fabsx		%fp0
+	fsubd		T1(%pc),%fp0		| ...(|X|-16381LOG2_LEAD)
+	fsubd		T2(%pc),%fp0		| ...|X| - 16381 LOG2, ACCURATE
+
+	movel		%d1,-(%sp)
+	clrl		%d1
+	fmovemx	%fp0-%fp0,(%a0)
+	bsr		setox
+	fmovel		(%sp)+,%fpcr
+
+	fmulx		TWO16380(%pc),%fp0
+	bra		t_frcinx
+
+COSHHUGE:
+	fmovel		#0,%fpsr		|clr N bit if set by source
+	bclrb		#7,(%a0)		|always return positive value
+	fmovemx	(%a0),%fp0-%fp0
+	bra		t_ovfl
+
+	|end
diff --git a/arch/m68k/fpsp040/setox.S b/arch/m68k/fpsp040/setox.S
new file mode 100644
index 0000000..0aa75f9
--- /dev/null
+++ b/arch/m68k/fpsp040/setox.S
@@ -0,0 +1,865 @@
+|
+|	setox.sa 3.1 12/10/90
+|
+|	The entry point setox computes the exponential of a value.
+|	setoxd does the same except the input value is a denormalized
+|	number.	setoxm1 computes exp(X)-1, and setoxm1d computes
+|	exp(X)-1 for denormalized X.
+|
+|	INPUT
+|	-----
+|	Double-extended value in memory location pointed to by address
+|	register a0.
+|
+|	OUTPUT
+|	------
+|	exp(X) or exp(X)-1 returned in floating-point register fp0.
+|
+|	ACCURACY and MONOTONICITY
+|	-------------------------
+|	The returned result is within 0.85 ulps in 64 significant bits, i.e.
+|	within 0.5001 ulp to 53 bits if the result is subsequently rounded
+|	to double precision. The result is provably monotonic in double
+|	precision.
+|
+|	SPEED
+|	-----
+|	Two timings are measured, both in the copy-back mode. The
+|	first one is measured when the function is invoked the first time
+|	(so the instructions and data are not in cache), and the
+|	second one is measured when the function is reinvoked at the same
+|	input argument.
+|
+|	The program setox takes approximately 210/190 cycles for input
+|	argument X whose magnitude is less than 16380 log2, which
+|	is the usual situation.	For the less common arguments,
+|	depending on their values, the program may run faster or slower --
+|	but no worse than 10% slower even in the extreme cases.
+|
+|	The program setoxm1 takes approximately ???/??? cycles for input
+|	argument X, 0.25 <= |X| < 70log2. For |X| < 0.25, it takes
+|	approximately ???/??? cycles. For the less common arguments,
+|	depending on their values, the program may run faster or slower --
+|	but no worse than 10% slower even in the extreme cases.
+|
+|	ALGORITHM and IMPLEMENTATION NOTES
+|	----------------------------------
+|
+|	setoxd
+|	------
+|	Step 1.	Set ans := 1.0
+|
+|	Step 2.	Return	ans := ans + sign(X)*2^(-126). Exit.
+|	Notes:	This will always generate one exception -- inexact.
+|
+|
+|	setox
+|	-----
+|
+|	Step 1.	Filter out extreme cases of input argument.
+|		1.1	If |X| >= 2^(-65), go to Step 1.3.
+|		1.2	Go to Step 7.
+|		1.3	If |X| < 16380 log(2), go to Step 2.
+|		1.4	Go to Step 8.
+|	Notes:	The usual case should take the branches 1.1 -> 1.3 -> 2.
+|		 To avoid the use of floating-point comparisons, a
+|		 compact representation of |X| is used. This format is a
+|		 32-bit integer, the upper (more significant) 16 bits are
+|		 the sign and biased exponent field of |X|; the lower 16
+|		 bits are the 16 most significant fraction (including the
+|		 explicit bit) bits of |X|. Consequently, the comparisons
+|		 in Steps 1.1 and 1.3 can be performed by integer comparison.
+|		 Note also that the constant 16380 log(2) used in Step 1.3
+|		 is also in the compact form. Thus taking the branch
+|		 to Step 2 guarantees |X| < 16380 log(2). There is no harm
+|		 in having a small number of cases where |X| is less than,
+|		 but close to, 16380 log(2) and the branch to Step 8 is
+|		 taken.
+|
+|	Step 2.	Calculate N = round-to-nearest-int( X * 64/log2 ).
+|		2.1	Set AdjFlag := 0 (indicates the branch 1.3 -> 2 was taken)
+|		2.2	N := round-to-nearest-integer( X * 64/log2 ).
+|		2.3	Calculate	J = N mod 64; so J = 0,1,2,..., or 63.
+|		2.4	Calculate	M = (N - J)/64; so N = 64M + J.
+|		2.5	Calculate the address of the stored value of 2^(J/64).
+|		2.6	Create the value Scale = 2^M.
+|	Notes:	The calculation in 2.2 is really performed by
+|
+|			Z := X * constant
+|			N := round-to-nearest-integer(Z)
+|
+|		 where
+|
+|			constant := single-precision( 64/log 2 ).
+|
+|		 Using a single-precision constant avoids memory access.
+|		 Another effect of using a single-precision "constant" is
+|		 that the calculated value Z is
+|
+|			Z = X*(64/log2)*(1+eps), |eps| <= 2^(-24).
+|
+|		 This error has to be considered later in Steps 3 and 4.
+|
+|	Step 3.	Calculate X - N*log2/64.
+|		3.1	R := X + N*L1, where L1 := single-precision(-log2/64).
+|		3.2	R := R + N*L2, L2 := extended-precision(-log2/64 - L1).
+|	Notes:	a) The way L1 and L2 are chosen ensures L1+L2 approximate
+|		 the value	-log2/64	to 88 bits of accuracy.
+|		 b) N*L1 is exact because N is no longer than 22 bits and
+|		 L1 is no longer than 24 bits.
+|		 c) The calculation X+N*L1 is also exact due to cancellation.
+|		 Thus, R is practically X+N(L1+L2) to full 64 bits.
+|		 d) It is important to estimate how large can |R| be after
+|		 Step 3.2.
+|
+|			N = rnd-to-int( X*64/log2 (1+eps) ), |eps|<=2^(-24)
+|			X*64/log2 (1+eps)	=	N + f,	|f| <= 0.5
+|			X*64/log2 - N	=	f - eps*X*64/log2
+|			X - N*log2/64	=	f*log2/64 - eps*X
+|
+|
+|		 Now |X| <= 16446 log2, thus
+|
+|			|X - N*log2/64| <= (0.5 + 16446/2^(18))*log2/64
+|					<= 0.57 log2/64.
+|		 This bound will be used in Step 4.
+|
+|	Step 4.	Approximate exp(R)-1 by a polynomial
+|			p = R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
+|	Notes:	a) In order to reduce memory access, the coefficients are
+|		 made as "short" as possible: A1 (which is 1/2), A4 and A5
+|		 are single precision; A2 and A3 are double precision.
+|		 b) Even with the restrictions above,
+|			|p - (exp(R)-1)| < 2^(-68.8) for all |R| <= 0.0062.
+|		 Note that 0.0062 is slightly bigger than 0.57 log2/64.
+|		 c) To fully utilize the pipeline, p is separated into
+|		 two independent pieces of roughly equal complexities
+|			p = [ R + R*S*(A2 + S*A4) ]	+
+|				[ S*(A1 + S*(A3 + S*A5)) ]
+|		 where S = R*R.
+|
+|	Step 5.	Compute 2^(J/64)*exp(R) = 2^(J/64)*(1+p) by
+|				ans := T + ( T*p + t)
+|		 where T and t are the stored values for 2^(J/64).
+|	Notes:	2^(J/64) is stored as T and t where T+t approximates
+|		 2^(J/64) to roughly 85 bits; T is in extended precision
+|		 and t is in single precision. Note also that T is rounded
+|		 to 62 bits so that the last two bits of T are zero. The
+|		 reason for such a special form is that T-1, T-2, and T-8
+|		 will all be exact --- a property that will give much
+|		 more accurate computation of the function EXPM1.
+|
+|	Step 6.	Reconstruction of exp(X)
+|			exp(X) = 2^M * 2^(J/64) * exp(R).
+|		6.1	If AdjFlag = 0, go to 6.3
+|		6.2	ans := ans * AdjScale
+|		6.3	Restore the user FPCR
+|		6.4	Return ans := ans * Scale. Exit.
+|	Notes:	If AdjFlag = 0, we have X = Mlog2 + Jlog2/64 + R,
+|		 |M| <= 16380, and Scale = 2^M. Moreover, exp(X) will
+|		 neither overflow nor underflow. If AdjFlag = 1, that
+|		 means that
+|			X = (M1+M)log2 + Jlog2/64 + R, |M1+M| >= 16380.
+|		 Hence, exp(X) may overflow or underflow or neither.
+|		 When that is the case, AdjScale = 2^(M1) where M1 is
+|		 approximately M. Thus 6.2 will never cause over/underflow.
+|		 Possible exception in 6.4 is overflow or underflow.
+|		 The inexact exception is not generated in 6.4. Although
+|		 one can argue that the inexact flag should always be
+|		 raised, simulating that exception costs more than the
+|		 flag is worth in practical use.
+|
+|	Step 7.	Return 1 + X.
+|		7.1	ans := X
+|		7.2	Restore user FPCR.
+|		7.3	Return ans := 1 + ans. Exit
+|	Notes:	For non-zero X, the inexact exception will always be
+|		 raised by 7.3. That is the only exception raised by 7.3.
+|		 Note also that we use the FMOVEM instruction to move X
+|		 in Step 7.1 to avoid unnecessary trapping. (Although
+|		 the FMOVEM may not seem relevant since X is normalized,
+|		 the precaution will be useful in the library version of
+|		 this code where the separate entry for denormalized inputs
+|		 will be done away with.)
+|
+|	Step 8.	Handle exp(X) where |X| >= 16380log2.
+|		8.1	If |X| > 16480 log2, go to Step 9.
+|		(mimic 2.2 - 2.6)
+|		8.2	N := round-to-integer( X * 64/log2 )
+|		8.3	Calculate J = N mod 64, J = 0,1,...,63
+|		8.4	K := (N-J)/64, M1 := truncate(K/2), M = K-M1, AdjFlag := 1.
+|		8.5	Calculate the address of the stored value 2^(J/64).
+|		8.6	Create the values Scale = 2^M, AdjScale = 2^M1.
+|		8.7	Go to Step 3.
+|	Notes:	Refer to notes for 2.2 - 2.6.
+|
+|	Step 9.	Handle exp(X), |X| > 16480 log2.
+|		9.1	If X < 0, go to 9.3
+|		9.2	ans := Huge, go to 9.4
+|		9.3	ans := Tiny.
+|		9.4	Restore user FPCR.
+|		9.5	Return ans := ans * ans. Exit.
+|	Notes:	Exp(X) will surely overflow or underflow, depending on
+|		 X's sign. "Huge" and "Tiny" are respectively large/tiny
+|		 extended-precision numbers whose square over/underflow
+|		 with an inexact result. Thus, 9.5 always raises the
+|		 inexact together with either overflow or underflow.
+|
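+|	For reference, Steps 2 - 6 above reduce in double precision to
+|	the sketch below (illustration only: exp2_j64[] stands in for
+|	EXPTBL, L1/L2 collapse to one constant, the polynomial degree
+|	suits double rather than extended precision, and the small and
+|	huge |X| cases of Steps 7 - 9 are omitted):
+|
+|		#include <math.h>
+|
+|		static double exp_sketch(double x, const double exp2_j64[64])
+|		{
+|			const double ln2 = 0.6931471805599453;
+|			int n, j, m;
+|			double r, p;
+|
+|			n = (int)lrint(x * (64.0 / ln2));	/* Step 2 */
+|			j = n & 63;				/* J = N mod 64 */
+|			m = (n - j) / 64;			/* M */
+|
+|			r = x - n * (ln2 / 64.0);		/* Step 3 */
+|			/* Step 4: exp(r)-1 by a short polynomial */
+|			p = r + r*r*(0.5 + r*(1.0/6 + r*(1.0/24 + r*(1.0/120))));
+|			/* Steps 5-6: 2^M * 2^(J/64) * (1 + p) */
+|			return ldexp(exp2_j64[j] * (1.0 + p), m);
+|		}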
+|
+|	setoxm1d
+|	--------
+|
+|	Step 1.	Set ans := 0
+|
+|	Step 2.	Return	ans := X + ans. Exit.
+|	Notes:	This will return X with the appropriate rounding
+|		 precision prescribed by the user FPCR.
+|
+|	setoxm1
+|	-------
+|
+|	Step 1.	Check |X|
+|		1.1	If |X| >= 1/4, go to Step 1.3.
+|		1.2	Go to Step 7.
+|		1.3	If |X| < 70 log(2), go to Step 2.
+|		1.4	Go to Step 10.
+|	Notes:	The usual case should take the branches 1.1 -> 1.3 -> 2.
+|		 However, it is conceivable |X| can be small very often
+|		 because EXPM1 is intended to evaluate exp(X)-1 accurately
+|		 when |X| is small. For further details on the comparisons,
+|		 see the notes on Step 1 of setox.
+|
+|	Step 2.	Calculate N = round-to-nearest-int( X * 64/log2 ).
+|		2.1	N := round-to-nearest-integer( X * 64/log2 ).
+|		2.2	Calculate	J = N mod 64; so J = 0,1,2,..., or 63.
+|		2.3	Calculate	M = (N - J)/64; so N = 64M + J.
+|		2.4	Calculate the address of the stored value of 2^(J/64).
+|		2.5	Create the values Sc = 2^M and OnebySc := -2^(-M).
+|	Notes:	See the notes on Step 2 of setox.
+|
+|	Step 3.	Calculate X - N*log2/64.
+|		3.1	R := X + N*L1, where L1 := single-precision(-log2/64).
+|		3.2	R := R + N*L2, L2 := extended-precision(-log2/64 - L1).
+|	Notes:	Applying the analysis of Step 3 of setox in this case
+|		 shows that |R| <= 0.0055 (note that |X| <= 70 log2 in
+|		 this case).
+|
+|	Step 4.	Approximate exp(R)-1 by a polynomial
+|			p = R+R*R*(A1+R*(A2+R*(A3+R*(A4+R*(A5+R*A6)))))
+|	Notes:	a) In order to reduce memory access, the coefficients are
+|		 made as "short" as possible: A1 (which is 1/2), A5 and A6
+|		 are single precision; A2, A3 and A4 are double precision.
+|		 b) Even with the restriction above,
+|			|p - (exp(R)-1)| <	|R| * 2^(-72.7)
+|		 for all |R| <= 0.0055.
+|		 c) To fully utilize the pipeline, p is separated into
+|		 two independent pieces of roughly equal complexity
+|			p = [ R*S*(A2 + S*(A4 + S*A6)) ]	+
+|				[ R + S*(A1 + S*(A3 + S*A5)) ]
+|		 where S = R*R.
+|
+|	Step 5.	Compute 2^(J/64)*p by
+|				p := T*p
+|		 where T and t are the stored values for 2^(J/64).
+|	Notes:	2^(J/64) is stored as T and t where T+t approximates
+|		 2^(J/64) to roughly 85 bits; T is in extended precision
+|		 and t is in single precision. Note also that T is rounded
+|		 to 62 bits so that the last two bits of T are zero. The
+|		 reason for such a special form is that T-1, T-2, and T-8
+|		 will all be exact --- a property that will be exploited
+|		 in Step 6 below. The total relative error in p is no
+|		 bigger than 2^(-67.7) compared to the final result.
+|
+|	Step 6.	Reconstruction of exp(X)-1
+|			exp(X)-1 = 2^M * ( 2^(J/64) + p - 2^(-M) ).
+|		6.1	If M <= 63, go to Step 6.3.
+|		6.2	ans := T + (p + (t + OnebySc)). Go to 6.6
+|		6.3	If M >= -3, go to 6.5.
+|		6.4	ans := (T + (p + t)) + OnebySc. Go to 6.6
+|		6.5	ans := (T + OnebySc) + (p + t).
+|		6.6	Restore user FPCR.
+|		6.7	Return ans := Sc * ans. Exit.
+|	Notes:	The various arrangements of the expressions give accurate
+|		 evaluations.
+|
+|	Step 7.	exp(X)-1 for |X| < 1/4.
+|		7.1	If |X| >= 2^(-65), go to Step 9.
+|		7.2	Go to Step 8.
+|
+|	Step 8.	Calculate exp(X)-1, |X| < 2^(-65).
+|		8.1	If |X| < 2^(-16312), goto 8.3
+|		8.2	Restore FPCR; return ans := X - 2^(-16382). Exit.
+|		8.3	X := X * 2^(140).
+|		8.4	Restore FPCR; ans := ans - 2^(-16382).
+|		 Return ans := ans*2^(140). Exit
+|	Notes:	The idea is to return "X - tiny" under the user
+|		 precision and rounding modes. To avoid unnecessary
+|		 inefficiency, we stay away from denormalized numbers the
+|		 best we can. For |X| >= 2^(-16312), the straightforward
+|		 8.2 generates the inexact exception as the case warrants.
+|
+|	Step 9.	Calculate exp(X)-1, |X| < 1/4, by a polynomial
+|			p = X + X*X*(B1 + X*(B2 + ... + X*B12))
+|	Notes:	a) In order to reduce memory access, the coefficients are
+|		 made as "short" as possible: B1 (which is 1/2), B9 to B12
+|		 are single precision; B3 to B8 are double precision; and
+|		 B2 is double extended.
+|		 b) Even with the restriction above,
+|			|p - (exp(X)-1)| < |X| 2^(-70.6)
+|		 for all |X| <= 0.251.
+|		 Note that 0.251 is slightly bigger than 1/4.
+|		 c) To fully preserve accuracy, the polynomial is computed
+|		 as	X + ( S*B1 +	Q ) where S = X*X and
+|			Q	=	X*S*(B2 + X*(B3 + ... + X*B12))
+|		 d) To fully utilize the pipeline, Q is separated into
+|		 two independent pieces of roughly equal complexity
+|			Q = [ X*S*(B2 + S*(B4 + ... + S*B12)) ] +
+|				[ S*S*(B3 + S*(B5 + ... + S*B11)) ]
+|
+|	Step 10.	Calculate exp(X)-1 for |X| >= 70 log 2.
+|		10.1 If X >= 70 log2, exp(X) - 1 = exp(X) for all practical
+|		 purposes. Therefore, go to Step 1 of setox.
+|		10.2 If X <= -70log2, exp(X) - 1 = -1 for all practical purposes.
+|		 ans := -1
+|		 Restore user FPCR
+|		 Return ans := ans + 2^(-126). Exit.
+|	Notes:	10.2 will always create an inexact and return -1 + tiny
+|		 in the user rounding precision and mode.
+|
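+|	The reconstruction of Step 6 above, in double precision and with
+|	the low part t dropped, looks like this (illustration only; t64,
+|	p and m are the T, p and M of the notes):
+|
+|		#include <math.h>
+|
+|		static double expm1_reconstruct(double t64, double p, int m)
+|		{
+|			double onebysc = -ldexp(1.0, -m);	/* -2^(-M) */
+|			double ans;
+|
+|			if (m > 63)			/* case 6.2 */
+|				ans = t64 + (p + onebysc);
+|			else if (m < -3)		/* case 6.4 */
+|				ans = (t64 + p) + onebysc;
+|			else				/* case 6.5 */
+|				ans = (t64 + onebysc) + p;
+|			return ldexp(ans, m);		/* 6.7: scale by 2^M */
+|		}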
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|setox	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+L2:	.long	0x3FDC0000,0x82E30865,0x4361C4C6,0x00000000
+
+EXPA3:	.long	0x3FA55555,0x55554431
+EXPA2:	.long	0x3FC55555,0x55554018
+
+HUGE:	.long	0x7FFE0000,0xFFFFFFFF,0xFFFFFFFF,0x00000000
+TINY:	.long	0x00010000,0xFFFFFFFF,0xFFFFFFFF,0x00000000
+
+EM1A4:	.long	0x3F811111,0x11174385
+EM1A3:	.long	0x3FA55555,0x55554F5A
+
+EM1A2:	.long	0x3FC55555,0x55555555,0x00000000,0x00000000
+
+EM1B8:	.long	0x3EC71DE3,0xA5774682
+EM1B7:	.long	0x3EFA01A0,0x19D7CB68
+
+EM1B6:	.long	0x3F2A01A0,0x1A019DF3
+EM1B5:	.long	0x3F56C16C,0x16C170E2
+
+EM1B4:	.long	0x3F811111,0x11111111
+EM1B3:	.long	0x3FA55555,0x55555555
+
+EM1B2:	.long	0x3FFC0000,0xAAAAAAAA,0xAAAAAAAB
+	.long	0x00000000
+
+TWO140:	.long	0x48B00000,0x00000000
+TWON140:	.long	0x37300000,0x00000000
+
+EXPTBL:
+	.long	0x3FFF0000,0x80000000,0x00000000,0x00000000
+	.long	0x3FFF0000,0x8164D1F3,0xBC030774,0x9F841A9B
+	.long	0x3FFF0000,0x82CD8698,0xAC2BA1D8,0x9FC1D5B9
+	.long	0x3FFF0000,0x843A28C3,0xACDE4048,0xA0728369
+	.long	0x3FFF0000,0x85AAC367,0xCC487B14,0x1FC5C95C
+	.long	0x3FFF0000,0x871F6196,0x9E8D1010,0x1EE85C9F
+	.long	0x3FFF0000,0x88980E80,0x92DA8528,0x9FA20729
+	.long	0x3FFF0000,0x8A14D575,0x496EFD9C,0xA07BF9AF
+	.long	0x3FFF0000,0x8B95C1E3,0xEA8BD6E8,0xA0020DCF
+	.long	0x3FFF0000,0x8D1ADF5B,0x7E5BA9E4,0x205A63DA
+	.long	0x3FFF0000,0x8EA4398B,0x45CD53C0,0x1EB70051
+	.long	0x3FFF0000,0x9031DC43,0x1466B1DC,0x1F6EB029
+	.long	0x3FFF0000,0x91C3D373,0xAB11C338,0xA0781494
+	.long	0x3FFF0000,0x935A2B2F,0x13E6E92C,0x9EB319B0
+	.long	0x3FFF0000,0x94F4EFA8,0xFEF70960,0x2017457D
+	.long	0x3FFF0000,0x96942D37,0x20185A00,0x1F11D537
+	.long	0x3FFF0000,0x9837F051,0x8DB8A970,0x9FB952DD
+	.long	0x3FFF0000,0x99E04593,0x20B7FA64,0x1FE43087
+	.long	0x3FFF0000,0x9B8D39B9,0xD54E5538,0x1FA2A818
+	.long	0x3FFF0000,0x9D3ED9A7,0x2CFFB750,0x1FDE494D
+	.long	0x3FFF0000,0x9EF53260,0x91A111AC,0x20504890
+	.long	0x3FFF0000,0xA0B0510F,0xB9714FC4,0xA073691C
+	.long	0x3FFF0000,0xA2704303,0x0C496818,0x1F9B7A05
+	.long	0x3FFF0000,0xA43515AE,0x09E680A0,0xA0797126
+	.long	0x3FFF0000,0xA5FED6A9,0xB15138EC,0xA071A140
+	.long	0x3FFF0000,0xA7CD93B4,0xE9653568,0x204F62DA
+	.long	0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x1F283C4A
+	.long	0x3FFF0000,0xAB7A39B5,0xA93ED338,0x9F9A7FDC
+	.long	0x3FFF0000,0xAD583EEA,0x42A14AC8,0xA05B3FAC
+	.long	0x3FFF0000,0xAF3B78AD,0x690A4374,0x1FDF2610
+	.long	0x3FFF0000,0xB123F581,0xD2AC2590,0x9F705F90
+	.long	0x3FFF0000,0xB311C412,0xA9112488,0x201F678A
+	.long	0x3FFF0000,0xB504F333,0xF9DE6484,0x1F32FB13
+	.long	0x3FFF0000,0xB6FD91E3,0x28D17790,0x20038B30
+	.long	0x3FFF0000,0xB8FBAF47,0x62FB9EE8,0x200DC3CC
+	.long	0x3FFF0000,0xBAFF5AB2,0x133E45FC,0x9F8B2AE6
+	.long	0x3FFF0000,0xBD08A39F,0x580C36C0,0xA02BBF70
+	.long	0x3FFF0000,0xBF1799B6,0x7A731084,0xA00BF518
+	.long	0x3FFF0000,0xC12C4CCA,0x66709458,0xA041DD41
+	.long	0x3FFF0000,0xC346CCDA,0x24976408,0x9FDF137B
+	.long	0x3FFF0000,0xC5672A11,0x5506DADC,0x201F1568
+	.long	0x3FFF0000,0xC78D74C8,0xABB9B15C,0x1FC13A2E
+	.long	0x3FFF0000,0xC9B9BD86,0x6E2F27A4,0xA03F8F03
+	.long	0x3FFF0000,0xCBEC14FE,0xF2727C5C,0x1FF4907D
+	.long	0x3FFF0000,0xCE248C15,0x1F8480E4,0x9E6E53E4
+	.long	0x3FFF0000,0xD06333DA,0xEF2B2594,0x1FD6D45C
+	.long	0x3FFF0000,0xD2A81D91,0xF12AE45C,0xA076EDB9
+	.long	0x3FFF0000,0xD4F35AAB,0xCFEDFA20,0x9FA6DE21
+	.long	0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x1EE69A2F
+	.long	0x3FFF0000,0xD99D15C2,0x78AFD7B4,0x207F439F
+	.long	0x3FFF0000,0xDBFBB797,0xDAF23754,0x201EC207
+	.long	0x3FFF0000,0xDE60F482,0x5E0E9124,0x9E8BE175
+	.long	0x3FFF0000,0xE0CCDEEC,0x2A94E110,0x20032C4B
+	.long	0x3FFF0000,0xE33F8972,0xBE8A5A50,0x2004DFF5
+	.long	0x3FFF0000,0xE5B906E7,0x7C8348A8,0x1E72F47A
+	.long	0x3FFF0000,0xE8396A50,0x3C4BDC68,0x1F722F22
+	.long	0x3FFF0000,0xEAC0C6E7,0xDD243930,0xA017E945
+	.long	0x3FFF0000,0xED4F301E,0xD9942B84,0x1F401A5B
+	.long	0x3FFF0000,0xEFE4B99B,0xDCDAF5CC,0x9FB9A9E3
+	.long	0x3FFF0000,0xF281773C,0x59FFB138,0x20744C05
+	.long	0x3FFF0000,0xF5257D15,0x2486CC2C,0x1F773A19
+	.long	0x3FFF0000,0xF7D0DF73,0x0AD13BB8,0x1FFE90D5
+	.long	0x3FFF0000,0xFA83B2DB,0x722A033C,0xA041ED22
+	.long	0x3FFF0000,0xFD3E0C0C,0xF486C174,0x1F853F3A
+
+	.set	ADJFLAG,L_SCR2
+	.set	SCALE,FP_SCR1
+	.set	ADJSCALE,FP_SCR2
+	.set	SC,FP_SCR3
+	.set	ONEBYSC,FP_SCR4
+
+	| xref	t_frcinx
+	|xref	t_extdnrm
+	|xref	t_unfl
+	|xref	t_ovfl
+
+	.global	setoxd
+setoxd:
+|--entry point for EXP(X), X is denormalized
+	movel		(%a0),%d0
+	andil		#0x80000000,%d0
+	oril		#0x00800000,%d0		| ...sign(X)*2^(-126)
+	movel		%d0,-(%sp)
+	fmoves		#0x3F800000,%fp0
+	fmovel		%d1,%fpcr
+	fadds		(%sp)+,%fp0
+	bra		t_frcinx
+
+	.global	setox
+setox:
+|--entry point for EXP(X), here X is finite, non-zero, and not NaN's
+
+|--Step 1.
+	movel		(%a0),%d0	 | ...load part of input X
+	andil		#0x7FFF0000,%d0	| ...biased expo. of X
+	cmpil		#0x3FBE0000,%d0	| ...2^(-65)
+	bges		EXPC1		| ...normal case
+	bra		EXPSM
+
+EXPC1:
+|--The case |X| >= 2^(-65)
+	movew		4(%a0),%d0	| ...expo. and partial sig. of |X|
+	cmpil		#0x400CB167,%d0	| ...16380 log2 trunc. 16 bits
+	blts		EXPMAIN	 | ...normal case
+	bra		EXPBIG
+
+EXPMAIN:
+|--Step 2.
+|--This is the normal branch:	2^(-65) <= |X| < 16380 log2.
+	fmovex		(%a0),%fp0	| ...load input from (a0)
+
+	fmovex		%fp0,%fp1
+	fmuls		#0x42B8AA3B,%fp0	| ...64/log2 * X
+	fmovemx	%fp2-%fp2/%fp3,-(%a7)		| ...save fp2
+	movel		#0,ADJFLAG(%a6)
+	fmovel		%fp0,%d0		| ...N = int( X * 64/log2 )
+	lea		EXPTBL,%a1
+	fmovel		%d0,%fp0		| ...convert to floating-format
+
+	movel		%d0,L_SCR1(%a6)	| ...save N temporarily
+	andil		#0x3F,%d0		| ...D0 is J = N mod 64
+	lsll		#4,%d0
+	addal		%d0,%a1		| ...address of 2^(J/64)
+	movel		L_SCR1(%a6),%d0
+	asrl		#6,%d0		| ...D0 is M
+	addiw		#0x3FFF,%d0	| ...biased expo. of 2^(M)
+	movew		L2,L_SCR1(%a6)	| ...prefetch L2, no need in CB
+
+EXPCONT1:
+|--Step 3.
+|--fp1,fp2 saved on the stack. fp0 is N, fp1 is X,
+|--a0 points to 2^(J/64), D0 is biased expo. of 2^(M)
+	fmovex		%fp0,%fp2
+	fmuls		#0xBC317218,%fp0	| ...N * L1, L1 = lead(-log2/64)
+	fmulx		L2,%fp2		| ...N * L2, L1+L2 = -log2/64
+	faddx		%fp1,%fp0		| ...X + N*L1
+	faddx		%fp2,%fp0		| ...fp0 is R, reduced arg.
+|	MOVE.W		#$3FA5,EXPA3	...load EXPA3 in cache
+
+|--Step 4.
+|--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+|-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
+|--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+|--[R+R*S*(A2+S*A4)] + [S*(A1+S*(A3+S*A5))]
+
+	fmovex		%fp0,%fp1
+	fmulx		%fp1,%fp1		| ...fp1 IS S = R*R
+
+	fmoves		#0x3AB60B70,%fp2	| ...fp2 IS A5
+|	MOVE.W		#0,2(%a1)	...load 2^(J/64) in cache
+
+	fmulx		%fp1,%fp2		| ...fp2 IS S*A5
+	fmovex		%fp1,%fp3
+	fmuls		#0x3C088895,%fp3	| ...fp3 IS S*A4
+
+	faddd		EXPA3,%fp2	| ...fp2 IS A3+S*A5
+	faddd		EXPA2,%fp3	| ...fp3 IS A2+S*A4
+
+	fmulx		%fp1,%fp2		| ...fp2 IS S*(A3+S*A5)
+	movew		%d0,SCALE(%a6)	| ...SCALE is 2^(M) in extended
+	clrw		SCALE+2(%a6)
+	movel		#0x80000000,SCALE+4(%a6)
+	clrl		SCALE+8(%a6)
+
+	fmulx		%fp1,%fp3		| ...fp3 IS S*(A2+S*A4)
+
+	fadds		#0x3F000000,%fp2	| ...fp2 IS A1+S*(A3+S*A5)
+	fmulx		%fp0,%fp3		| ...fp3 IS R*S*(A2+S*A4)
+
+	fmulx		%fp1,%fp2		| ...fp2 IS S*(A1+S*(A3+S*A5))
+	faddx		%fp3,%fp0		| ...fp0 IS R+R*S*(A2+S*A4),
+|					...fp3 released
+
+	fmovex		(%a1)+,%fp1	| ...fp1 is lead. pt. of 2^(J/64)
+	faddx		%fp2,%fp0		| ...fp0 is EXP(R) - 1
+|					...fp2 released
+
+|--Step 5
+|--final reconstruction process
+|--EXP(X) = 2^M * ( 2^(J/64) + 2^(J/64)*(EXP(R)-1) )
+
+	fmulx		%fp1,%fp0		| ...2^(J/64)*(Exp(R)-1)
+	fmovemx	(%a7)+,%fp2-%fp2/%fp3	| ...fp2 restored
+	fadds		(%a1),%fp0	| ...accurate 2^(J/64)
+
+	faddx		%fp1,%fp0		| ...2^(J/64) + 2^(J/64)*...
+	movel		ADJFLAG(%a6),%d0
+
+|--Step 6
+	tstl		%d0
+	beqs		NORMAL
+ADJUST:
+	fmulx		ADJSCALE(%a6),%fp0
+NORMAL:
+	fmovel		%d1,%FPCR		| ...restore user FPCR
+	fmulx		SCALE(%a6),%fp0	| ...multiply 2^(M)
+	bra		t_frcinx
+
+EXPSM:
+|--Step 7
+	fmovemx	(%a0),%fp0-%fp0	| ...in case X is denormalized
+	fmovel		%d1,%FPCR
+	fadds		#0x3F800000,%fp0	| ...1+X in user mode
+	bra		t_frcinx
+
+EXPBIG:
+|--Step 8
+	cmpil		#0x400CB27C,%d0	| ...16480 log2
+	bgts		EXP2BIG
+|--Steps 8.2 -- 8.6
+	fmovex		(%a0),%fp0	| ...load input from (a0)
+
+	fmovex		%fp0,%fp1
+	fmuls		#0x42B8AA3B,%fp0	| ...64/log2 * X
+	fmovemx	 %fp2-%fp2/%fp3,-(%a7)		| ...save fp2
+	movel		#1,ADJFLAG(%a6)
+	fmovel		%fp0,%d0		| ...N = int( X * 64/log2 )
+	lea		EXPTBL,%a1
+	fmovel		%d0,%fp0		| ...convert to floating-format
+	movel		%d0,L_SCR1(%a6)			| ...save N temporarily
+	andil		#0x3F,%d0		 | ...D0 is J = N mod 64
+	lsll		#4,%d0
+	addal		%d0,%a1			| ...address of 2^(J/64)
+	movel		L_SCR1(%a6),%d0
+	asrl		#6,%d0			| ...D0 is K
+	movel		%d0,L_SCR1(%a6)			| ...save K temporarily
+	asrl		#1,%d0			| ...D0 is M1
+	subl		%d0,L_SCR1(%a6)			| ...a1 is M
+	addiw		#0x3FFF,%d0		| ...biased expo. of 2^(M1)
+	movew		%d0,ADJSCALE(%a6)		| ...ADJSCALE := 2^(M1)
+	clrw		ADJSCALE+2(%a6)
+	movel		#0x80000000,ADJSCALE+4(%a6)
+	clrl		ADJSCALE+8(%a6)
+	movel		L_SCR1(%a6),%d0			| ...D0 is M
+	addiw		#0x3FFF,%d0		| ...biased expo. of 2^(M)
+	bra		EXPCONT1		| ...go back to Step 3
+
+EXP2BIG:
+|--Step 9
+	fmovel		%d1,%FPCR
+	movel		(%a0),%d0
+	bclrb		#sign_bit,(%a0)		| ...setox always returns positive
+	cmpil		#0,%d0
+	blt		t_unfl
+	bra		t_ovfl
+
+	.global	setoxm1d
+setoxm1d:
+|--entry point for EXPM1(X), here X is denormalized
+|--Step 0.
+	bra		t_extdnrm
+
+
+	.global	setoxm1
+setoxm1:
+|--entry point for EXPM1(X), here X is finite, non-zero, non-NaN
+
+|--Step 1.
+|--Step 1.1
+	movel		(%a0),%d0	 | ...load part of input X
+	andil		#0x7FFF0000,%d0	| ...biased expo. of X
+	cmpil		#0x3FFD0000,%d0	| ...1/4
+	bges		EM1CON1	 | ...|X| >= 1/4
+	bra		EM1SM
+
+EM1CON1:
+|--Step 1.3
+|--The case |X| >= 1/4
+	movew		4(%a0),%d0	| ...expo. and partial sig. of |X|
+	cmpil		#0x4004C215,%d0	| ...70log2 rounded up to 16 bits
+	bles		EM1MAIN	 | ...1/4 <= |X| <= 70log2
+	bra		EM1BIG
+
+EM1MAIN:
+|--Step 2.
+|--This is the case:	1/4 <= |X| <= 70 log2.
+	fmovex		(%a0),%fp0	| ...load input from (a0)
+
+	fmovex		%fp0,%fp1
+	fmuls		#0x42B8AA3B,%fp0	| ...64/log2 * X
+	fmovemx	%fp2-%fp2/%fp3,-(%a7)		| ...save fp2
+|	MOVE.W		#$3F81,EM1A4		...prefetch in CB mode
+	fmovel		%fp0,%d0		| ...N = int( X * 64/log2 )
+	lea		EXPTBL,%a1
+	fmovel		%d0,%fp0		| ...convert to floating-format
+
+	movel		%d0,L_SCR1(%a6)			| ...save N temporarily
+	andil		#0x3F,%d0		 | ...D0 is J = N mod 64
+	lsll		#4,%d0
+	addal		%d0,%a1			| ...address of 2^(J/64)
+	movel		L_SCR1(%a6),%d0
+	asrl		#6,%d0			| ...D0 is M
+	movel		%d0,L_SCR1(%a6)			| ...save a copy of M
+|	MOVE.W		#$3FDC,L2		...prefetch L2 in CB mode
+
+|--Step 3.
+|--fp1,fp2 saved on the stack. fp0 is N, fp1 is X,
+|--a0 points to 2^(J/64), D0 and a1 both contain M
+	fmovex		%fp0,%fp2
+	fmuls		#0xBC317218,%fp0	| ...N * L1, L1 = lead(-log2/64)
+	fmulx		L2,%fp2		| ...N * L2, L1+L2 = -log2/64
+	faddx		%fp1,%fp0	 | ...X + N*L1
+	faddx		%fp2,%fp0	 | ...fp0 is R, reduced arg.
+|	MOVE.W		#$3FC5,EM1A2		...load EM1A2 in cache
+	addiw		#0x3FFF,%d0		| ...D0 is biased expo. of 2^M
+
+|--Step 4.
+|--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+|-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*(A5 + R*A6)))))
+|--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+|--[R*S*(A2+S*(A4+S*A6))] + [R+S*(A1+S*(A3+S*A5))]
+
+	fmovex		%fp0,%fp1
+	fmulx		%fp1,%fp1		| ...fp1 IS S = R*R
+
+	fmoves		#0x3950097B,%fp2	| ...fp2 IS a6
+|	MOVE.W		#0,2(%a1)	...load 2^(J/64) in cache
+
+	fmulx		%fp1,%fp2		| ...fp2 IS S*A6
+	fmovex		%fp1,%fp3
+	fmuls		#0x3AB60B6A,%fp3	| ...fp3 IS S*A5
+
+	faddd		EM1A4,%fp2	| ...fp2 IS A4+S*A6
+	faddd		EM1A3,%fp3	| ...fp3 IS A3+S*A5
+	movew		%d0,SC(%a6)		| ...SC is 2^(M) in extended
+	clrw		SC+2(%a6)
+	movel		#0x80000000,SC+4(%a6)
+	clrl		SC+8(%a6)
+
+	fmulx		%fp1,%fp2		| ...fp2 IS S*(A4+S*A6)
+	movel		L_SCR1(%a6),%d0		| ...D0 is	M
+	negw		%d0		| ...D0 is -M
+	fmulx		%fp1,%fp3		| ...fp3 IS S*(A3+S*A5)
+	addiw		#0x3FFF,%d0	| ...biased expo. of 2^(-M)
+	faddd		EM1A2,%fp2	| ...fp2 IS A2+S*(A4+S*A6)
+	fadds		#0x3F000000,%fp3	| ...fp3 IS A1+S*(A3+S*A5)
+
+	fmulx		%fp1,%fp2		| ...fp2 IS S*(A2+S*(A4+S*A6))
+	oriw		#0x8000,%d0	| ...signed/expo. of -2^(-M)
+	movew		%d0,ONEBYSC(%a6)	| ...OnebySc is -2^(-M)
+	clrw		ONEBYSC+2(%a6)
+	movel		#0x80000000,ONEBYSC+4(%a6)
+	clrl		ONEBYSC+8(%a6)
+	fmulx		%fp3,%fp1		| ...fp1 IS S*(A1+S*(A3+S*A5))
+|					...fp3 released
+
+	fmulx		%fp0,%fp2		| ...fp2 IS R*S*(A2+S*(A4+S*A6))
+	faddx		%fp1,%fp0		| ...fp0 IS R+S*(A1+S*(A3+S*A5))
+|					...fp1 released
+
+	faddx		%fp2,%fp0		| ...fp0 IS EXP(R)-1
+|					...fp2 released
+	fmovemx	(%a7)+,%fp2-%fp2/%fp3	| ...fp2 restored
+
+|--Step 5
+|--Compute 2^(J/64)*p
+
+	fmulx		(%a1),%fp0	| ...2^(J/64)*(Exp(R)-1)
+
+|--Step 6
+|--Step 6.1
+	movel		L_SCR1(%a6),%d0		| ...retrieve M
+	cmpil		#63,%d0
+	bles		MLE63
+|--Step 6.2	M >= 64
+	fmoves		12(%a1),%fp1	| ...fp1 is t
+	faddx		ONEBYSC(%a6),%fp1	| ...fp1 is t+OnebySc
+	faddx		%fp1,%fp0		| ...p+(t+OnebySc), fp1 released
+	faddx		(%a1),%fp0	| ...T+(p+(t+OnebySc))
+	bras		EM1SCALE
+MLE63:
+|--Step 6.3	M <= 63
+	cmpil		#-3,%d0
+	bges		MGEN3
+MLTN3:
+|--Step 6.4	M <= -4
+	fadds		12(%a1),%fp0	| ...p+t
+	faddx		(%a1),%fp0	| ...T+(p+t)
+	faddx		ONEBYSC(%a6),%fp0	| ...OnebySc + (T+(p+t))
+	bras		EM1SCALE
+MGEN3:
+|--Step 6.5	-3 <= M <= 63
+	fmovex		(%a1)+,%fp1	| ...fp1 is T
+	fadds		(%a1),%fp0	| ...fp0 is p+t
+	faddx		ONEBYSC(%a6),%fp1	| ...fp1 is T+OnebySc
+	faddx		%fp1,%fp0		| ...(T+OnebySc)+(p+t)
+
+EM1SCALE:
+|--Step 6.6
+	fmovel		%d1,%FPCR
+	fmulx		SC(%a6),%fp0
+
+	bra		t_frcinx
+
+EM1SM:
+|--Step 7	|X| < 1/4.
+	cmpil		#0x3FBE0000,%d0	| ...2^(-65)
+	bges		EM1POLY
+
+EM1TINY:
+|--Step 8	|X| < 2^(-65)
+	cmpil		#0x00330000,%d0	| ...2^(-16312)
+	blts		EM12TINY
+|--Step 8.2
+	movel		#0x80010000,SC(%a6)	| ...SC is -2^(-16382)
+	movel		#0x80000000,SC+4(%a6)
+	clrl		SC+8(%a6)
+	fmovex		(%a0),%fp0
+	fmovel		%d1,%FPCR
+	faddx		SC(%a6),%fp0
+
+	bra		t_frcinx
+
+EM12TINY:
+|--Step 8.3
+	fmovex		(%a0),%fp0
+	fmuld		TWO140,%fp0
+	movel		#0x80010000,SC(%a6)
+	movel		#0x80000000,SC+4(%a6)
+	clrl		SC+8(%a6)
+	faddx		SC(%a6),%fp0
+	fmovel		%d1,%FPCR
+	fmuld		TWON140,%fp0
+
+	bra		t_frcinx
+
+EM1POLY:
+|--Step 9	exp(X)-1 by a simple polynomial
+	fmovex		(%a0),%fp0	| ...fp0 is X
+	fmulx		%fp0,%fp0		| ...fp0 is S := X*X
+	fmovemx	%fp2-%fp2/%fp3,-(%a7)	| ...save fp2
+	fmoves		#0x2F30CAA8,%fp1	| ...fp1 is B12
+	fmulx		%fp0,%fp1		| ...fp1 is S*B12
+	fmoves		#0x310F8290,%fp2	| ...fp2 is B11
+	fadds		#0x32D73220,%fp1	| ...fp1 is B10+S*B12
+
+	fmulx		%fp0,%fp2		| ...fp2 is S*B11
+	fmulx		%fp0,%fp1		| ...fp1 is S*(B10 + ...
+
+	fadds		#0x3493F281,%fp2	| ...fp2 is B9+S*...
+	faddd		EM1B8,%fp1	| ...fp1 is B8+S*...
+
+	fmulx		%fp0,%fp2		| ...fp2 is S*(B9+...
+	fmulx		%fp0,%fp1		| ...fp1 is S*(B8+...
+
+	faddd		EM1B7,%fp2	| ...fp2 is B7+S*...
+	faddd		EM1B6,%fp1	| ...fp1 is B6+S*...
+
+	fmulx		%fp0,%fp2		| ...fp2 is S*(B7+...
+	fmulx		%fp0,%fp1		| ...fp1 is S*(B6+...
+
+	faddd		EM1B5,%fp2	| ...fp2 is B5+S*...
+	faddd		EM1B4,%fp1	| ...fp1 is B4+S*...
+
+	fmulx		%fp0,%fp2		| ...fp2 is S*(B5+...
+	fmulx		%fp0,%fp1		| ...fp1 is S*(B4+...
+
+	faddd		EM1B3,%fp2	| ...fp2 is B3+S*...
+	faddx		EM1B2,%fp1	| ...fp1 is B2+S*...
+
+	fmulx		%fp0,%fp2		| ...fp2 is S*(B3+...
+	fmulx		%fp0,%fp1		| ...fp1 is S*(B2+...
+
+	fmulx		%fp0,%fp2		| ...fp2 is S*S*(B3+...)
+	fmulx		(%a0),%fp1	| ...fp1 is X*S*(B2...
+
+	fmuls		#0x3F000000,%fp0	| ...fp0 is S*B1
+	faddx		%fp2,%fp1		| ...fp1 is Q
+|					...fp2 released
+
+	fmovemx	(%a7)+,%fp2-%fp2/%fp3	| ...fp2 restored
+
+	faddx		%fp1,%fp0		| ...fp0 is S*B1+Q
+|					...fp1 released
+
+	fmovel		%d1,%FPCR
+	faddx		(%a0),%fp0
+
+	bra		t_frcinx
+
+EM1BIG:
+|--Step 10	|X| > 70 log2
+	movel		(%a0),%d0
+	cmpil		#0,%d0
+	bgt		EXPC1
+|--Step 10.2
+	fmoves		#0xBF800000,%fp0	| ...fp0 is -1
+	fmovel		%d1,%FPCR
+	fadds		#0x00800000,%fp0	| ...-1 + 2^(-126)
+
+	bra		t_frcinx
+
+	|end
diff --git a/arch/m68k/fpsp040/sgetem.S b/arch/m68k/fpsp040/sgetem.S
new file mode 100644
index 0000000..0fcbd04
--- /dev/null
+++ b/arch/m68k/fpsp040/sgetem.S
@@ -0,0 +1,141 @@
+|
+|	sgetem.sa 3.1 12/10/90
+|
+|	The entry point sGETEXP returns the exponent portion
+|	of the input argument.  The exponent bias is removed
+|	and the exponent value is returned as an extended
+|	precision number in fp0.  sGETEXPD handles denormalized
+|	numbers.
+|
+|	The entry point sGETMAN extracts the mantissa of the
+|	input argument.  The mantissa is converted to an
+|	extended precision number and returned in fp0.  The
+|	range of the result is [1.0, 2.0).
+|
+|
+|	Input:  Double-extended number X in the ETEMP space in
+|		the floating-point save stack.
+|
+|	Output:	The functions return exp(X) or man(X) in fp0.
+|
+|	Modified: fp0.
+|
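+|	In double-precision C the two operations reduce to a frexp()
+|	split (illustration only; frexp returns a fraction in [0.5,1.0),
+|	hence the adjustment by one, and nonzero finite input is assumed):
+|
+|		#include <math.h>
+|
+|		static void getexp_getman_sketch(double x, double *e, double *man)
+|		{
+|			int k;
+|			double f = frexp(x, &k);	/* x = f * 2^k */
+|
+|			*e   = (double)(k - 1);		/* sgetexp: unbiased exponent */
+|			*man = 2.0 * f;			/* sgetman: in [1.0,2.0), sign kept */
+|		}
+|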
+|
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|SGETEM	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section 8
+
+#include "fpsp.h"
+
+	|xref	nrm_set
+
+|
+| This entry point is used by the unimplemented instruction exception
+| handler.  It points a0 to the input operand.
+|
+|
+|
+|	SGETEXP
+|
+
+	.global	sgetexp
+sgetexp:
+	movew	LOCAL_EX(%a0),%d0	|get the exponent
+	bclrl	#15,%d0		|clear the sign bit
+	subw	#0x3fff,%d0	|subtract off the bias
+	fmovew  %d0,%fp0		|move the exp to fp0
+	rts
+
+	.global	sgetexpd
+sgetexpd:
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	bsr	nrm_set		|normalize (exp will go negative)
+	movew	LOCAL_EX(%a0),%d0	|load resulting exponent into d0
+	subw	#0x3fff,%d0	|subtract off the bias
+	fmovew	%d0,%fp0		|move the exp to fp0
+	rts
+|
+|
+| This entry point is used by the unimplemented instruction exception
+| handler.  It points a0 to the input operand.
+|
+|
+|
+|	SGETMAN
+|
+|
+| For normalized numbers, leave the mantissa alone, simply load
+| with an exponent of +/- $3fff.
+|
+	.global	sgetman
+sgetman:
+	movel	USER_FPCR(%a6),%d0
+	andil	#0xffffff00,%d0	|clear rounding precision and mode
+	fmovel	%d0,%fpcr		|this fpcr setting is used by the 882
+	movew	LOCAL_EX(%a0),%d0	|get the exp (really just want sign bit)
+	orw	#0x7fff,%d0	|clear old exp
+	bclrl	#14,%d0		|make it the new exp +-3fff
+	movew	%d0,LOCAL_EX(%a0)	|move the sign & exp back to fsave stack
+	fmovex	(%a0),%fp0	|put new value back in fp0
+	rts
+
+|
+| For denormalized numbers, shift the mantissa until the j-bit = 1,
+| then load the exponent with +/- $3fff.
+|
+	.global	sgetmand
+sgetmand:
+	movel	LOCAL_HI(%a0),%d0	|load ms mant in d0
+	movel	LOCAL_LO(%a0),%d1	|load ls mant in d1
+	bsr	shft		|shift mantissa bits till msbit is set
+	movel	%d0,LOCAL_HI(%a0)	|put ms mant back on stack
+	movel	%d1,LOCAL_LO(%a0)	|put ls mant back on stack
+	bras	sgetman
+
+|
+|	SHFT
+|
+|	Shifts the mantissa bits until msbit is set.
+|	input:
+|		ms mantissa part in d0
+|		ls mantissa part in d1
+|	output:
+|		shifted bits in d0 and d1
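+|
+|	An equivalent C sketch (illustration only), treating d0:d1 as one
+|	64-bit mantissa and shifting until the top bit is set:
+|
+|		#include <stdint.h>
+|
+|		static uint64_t shft_sketch(uint32_t hi, uint32_t lo)
+|		{
+|			uint64_t m = ((uint64_t)hi << 32) | lo;
+|
+|			if (m == 0)
+|				return 0;		/* nothing to shift */
+|			while (!(m & (1ULL << 63)))
+|				m <<= 1;		/* like the bfffo/lsll pair */
+|			return m;
+|		}
+|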
+shft:
+	tstl	%d0		|if any bits set in ms mant
+	bnes	upper		|then branch
+|				;else no bits set in ms mant
+	tstl	%d1		|test if any bits set in ls mant
+	bnes	cont		|if set then continue
+	bras	shft_end	|else return
+cont:
+	movel	%d3,-(%a7)	|save d3
+	exg	%d0,%d1		|shift ls mant to ms mant
+	bfffo	%d0{#0:#32},%d3	|find first 1 in ls mant to d0
+	lsll	%d3,%d0		|shift first 1 to integer bit in ms mant
+	movel	(%a7)+,%d3	|restore d3
+	bras	shft_end
+upper:
+
+	moveml	%d3/%d5/%d6,-(%a7)	|save registers
+	bfffo	%d0{#0:#32},%d3	|find first 1 in ls mant to d0
+	lsll	%d3,%d0		|shift ms mant until j-bit is set
+	movel	%d1,%d6		|save ls mant in d6
+	lsll	%d3,%d1		|shift ls mant by count
+	movel	#32,%d5
+	subl	%d3,%d5		|sub 32 from shift for ls mant
+	lsrl	%d5,%d6		|shift off all bits but those that will
+|				;be shifted into ms mant
+	orl	%d6,%d0		|shift the ls mant bits into the ms mant
+	moveml	(%a7)+,%d3/%d5/%d6	|restore registers
+shft_end:
+	rts
+
+	|end
diff --git a/arch/m68k/fpsp040/sint.S b/arch/m68k/fpsp040/sint.S
new file mode 100644
index 0000000..0f9bd28
--- /dev/null
+++ b/arch/m68k/fpsp040/sint.S
@@ -0,0 +1,247 @@
+|
+|	sint.sa 3.1 12/10/90
+|
+|	The entry point sINT computes the rounded integer
+|	equivalent of the input argument, sINTRZ computes
+|	the integer rounded to zero of the input argument.
+|
+|	Entry points sint and sintrz are called from do_func
+|	to emulate the fint and fintrz unimplemented instructions,
+|	respectively.  Entry point sintdo is used by bindec.
+|
+|	Input: (Entry points sint and sintrz) Double-extended
+|		number X in the ETEMP space in the floating-point
+|		save stack.
+|	       (Entry point sintdo) Double-extended number X in
+|		location pointed to by the address register a0.
+|	       (Entry point sintd) Double-extended denormalized
+|		number X in the ETEMP space in the floating-point
+|		save stack.
+|
+|	Output: The function returns int(X) or intrz(X) in fp0.
+|
+|	Modifies: fp0.
+|
+|	Algorithm: (sint and sintrz)
+|
+|	1. If exp(X) >= 63, return X.
+|	   If exp(X) < 0, return +/- 0 or +/- 1, according to
+|	   the rounding mode.
+|
+|	2. (X is in range) set rsc = 63 - exp(X). Unnormalize the
+|	   result to the exponent $403e.
+|
+|	3. Round the result in the mode given in USER_FPCR. For
+|	   sintrz, force round-to-zero mode.
+|
+|	4. Normalize the rounded result; store in fp0.
+|
+|	For the denormalized cases, force the correct result
+|	for the given sign and rounding mode.
+|
+|		        Sign(X)
+|		RMODE   +    -
+|		-----  --------
+|		 RN    +0   -0
+|		 RZ    +0   -0
+|		 RM    +0   -1
+|		 RP    +1   -0
+|
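+|	In double-precision C the two entry points behave like this
+|	(illustration only; rint() honours the current rounding mode the
+|	way sint honours the user FPCR, and raises inexact when fraction
+|	bits are discarded):
+|
+|		#include <math.h>
+|		#include <fenv.h>
+|
+|		static double fint_sketch(double x, int force_rz)
+|		{
+|			if (force_rz) {			/* fintrz */
+|				int old = fegetround();
+|				double r;
+|
+|				fesetround(FE_TOWARDZERO);
+|				r = rint(x);
+|				fesetround(old);
+|				return r;
+|			}
+|			return rint(x);			/* fint */
+|		}
+|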
+|
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|SINT    idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	|xref	dnrm_lp
+	|xref	nrm_set
+	|xref	round
+	|xref	t_inx2
+	|xref	ld_pone
+	|xref	ld_mone
+	|xref	ld_pzero
+	|xref	ld_mzero
+	|xref	snzrinx
+
+|
+|	FINT
+|
+	.global	sint
+sint:
+	bfextu	FPCR_MODE(%a6){#2:#2},%d1	|use user's mode for rounding
+|					;implicitly has extend precision
+|					;in upper word.
+	movel	%d1,L_SCR1(%a6)		|save mode bits
+	bras	sintexc
+
+|
+|	FINT with extended denorm inputs.
+|
+	.global	sintd
+sintd:
+	btstb	#5,FPCR_MODE(%a6)
+	beq	snzrinx		|if round nearest or round zero, +/- 0
+	btstb	#4,FPCR_MODE(%a6)
+	beqs	rnd_mns
+rnd_pls:
+	btstb	#sign_bit,LOCAL_EX(%a0)
+	bnes	sintmz
+	bsr	ld_pone		|if round plus inf and pos, answer is +1
+	bra	t_inx2
+rnd_mns:
+	btstb	#sign_bit,LOCAL_EX(%a0)
+	beqs	sintpz
+	bsr	ld_mone		|if round mns inf and neg, answer is -1
+	bra	t_inx2
+sintpz:
+	bsr	ld_pzero
+	bra	t_inx2
+sintmz:
+	bsr	ld_mzero
+	bra	t_inx2
+
+|
+|	FINTRZ
+|
+	.global	sintrz
+sintrz:
+	movel	#1,L_SCR1(%a6)		|use rz mode for rounding
+|					;implicitly has extend precision
+|					;in upper word.
+	bras	sintexc
+|
+|	SINTDO
+|
+|	Input:	a0 points to an IEEE extended format operand
+|	Output:	fp0 has the result
+|
+| Exceptions:
+|
+| If the subroutine results in an inexact operation, the inx2 and
+| ainx bits in the USER_FPSR are set.
+|
+|
+	.global	sintdo
+sintdo:
+	bfextu	FPCR_MODE(%a6){#2:#2},%d1	|use user's mode for rounding
+|					;implicitly has ext precision
+|					;in upper word.
+	movel	%d1,L_SCR1(%a6)		|save mode bits
+|
+| Real work of sint is in sintexc
+|
+sintexc:
+	bclrb	#sign_bit,LOCAL_EX(%a0)	|convert to internal extended
+|					;format
+	sne	LOCAL_SGN(%a0)
+	cmpw	#0x403e,LOCAL_EX(%a0)	|check if (unbiased) exp > 63
+	bgts	out_rnge			|branch if exp > 63
+	cmpw	#0x3ffd,LOCAL_EX(%a0)	|check if (unbiased) exp < 0
+	bgt	in_rnge			|if 63 >= exp > 0, do calc
+|
+| The input exponent is less than zero.  Restore sign, and check for directed
+| rounding modes.  L_SCR1 contains the rmode in the lower byte.
+|
+un_rnge:
+	btstb	#1,L_SCR1+3(%a6)		|check for rn and rz
+	beqs	un_rnrz
+	tstb	LOCAL_SGN(%a0)		|check for sign
+	bnes	un_rmrp_neg
+|
+| Sign is +.  If rp, load +1.0, if rm, load +0.0
+|
+	cmpib	#3,L_SCR1+3(%a6)		|check for rp
+	beqs	un_ldpone		|if rp, load +1.0
+	bsr	ld_pzero		|if rm, load +0.0
+	bra	t_inx2
+un_ldpone:
+	bsr	ld_pone
+	bra	t_inx2
+|
+| Sign is -.  If rm, load -1.0, if rp, load -0.0
+|
+un_rmrp_neg:
+	cmpib	#2,L_SCR1+3(%a6)		|check for rm
+	beqs	un_ldmone		|if rm, load -1.0
+	bsr	ld_mzero		|if rp, load -0.0
+	bra	t_inx2
+un_ldmone:
+	bsr	ld_mone
+	bra	t_inx2
+|
+| Rmode is rn or rz; return signed zero
+|
+un_rnrz:
+	tstb	LOCAL_SGN(%a0)		|check for sign
+	bnes	un_rnrz_neg
+	bsr	ld_pzero
+	bra	t_inx2
+un_rnrz_neg:
+	bsr	ld_mzero
+	bra	t_inx2
+
+|
+| Input is greater than 2^63.  All bits are significant.  Return
+| the input.
+|
+out_rnge:
+	bfclr	LOCAL_SGN(%a0){#0:#8}	|change back to IEEE ext format
+	beqs	intps
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+intps:
+	fmovel	%fpcr,-(%sp)
+	fmovel	#0,%fpcr
+	fmovex LOCAL_EX(%a0),%fp0	|if exp > 63
+|					;then return X to the user
+|					;there are no fraction bits
+	fmovel	(%sp)+,%fpcr
+	rts
+
+in_rnge:
+|					;shift off fraction bits
+	clrl	%d0			|clear d0 - initial g,r,s for
+|					;dnrm_lp
+	movel	#0x403e,%d1		|set threshold for dnrm_lp
+|					;assumes a0 points to operand
+	bsr	dnrm_lp
+|					;returns unnormalized number
+|					;pointed by a0
+|					;output d0 supplies g,r,s
+|					;used by round
+	movel	L_SCR1(%a6),%d1		|use selected rounding mode
+|
+|
+	bsr	round			|round the unnorm based on users
+|					;input	a0 ptr to ext X
+|					;	d0 g,r,s bits
+|					;	d1 PREC/MODE info
+|					;output a0 ptr to rounded result
+|					;inexact flag set in USER_FPSR
+|					;if initial grs set
+|
+| normalize the rounded result and store value in fp0
+|
+	bsr	nrm_set			|normalize the unnorm
+|					;Input: a0 points to operand to
+|					;be normalized
+|					;Output: a0 points to normalized
+|					;result
+	bfclr	LOCAL_SGN(%a0){#0:#8}
+	beqs	nrmrndp
+	bsetb	#sign_bit,LOCAL_EX(%a0)	|return to IEEE extended format
+nrmrndp:
+	fmovel	%fpcr,-(%sp)
+	fmovel	#0,%fpcr
+	fmovex LOCAL_EX(%a0),%fp0	|move result to fp0
+	fmovel	(%sp)+,%fpcr
+	rts
+
+	|end
diff --git a/arch/m68k/fpsp040/skeleton.S b/arch/m68k/fpsp040/skeleton.S
new file mode 100644
index 0000000..dbc1255
--- /dev/null
+++ b/arch/m68k/fpsp040/skeleton.S
@@ -0,0 +1,516 @@
+|
+|	skeleton.sa 3.2 4/26/91
+|
+|	This file contains code that is system dependent and will
+|	need to be modified to install the FPSP.
+|
+|	Each entry point for exception 'xxxx' begins with a 'jmp fpsp_xxxx'.
+|	Put any target system specific handling that must be done immediately
+|	before the jump instruction.  If there is no handling necessary, then
+|	the 'fpsp_xxxx' handler entry point should be placed in the exception
+|	table so that the 'jmp' can be eliminated. If the FPSP determines that the
+|	exception is one that must be reported then there will be a
+|	return from the package by a 'jmp real_xxxx'.  At that point
+|	the machine state will be identical to the state before
+|	the FPSP was entered.  In particular, whatever condition
+|	that caused the exception will still be pending when the FPSP
+|	package returns.  Thus, there will be system specific code
+|	to handle the exception.
+|
+|	If the exception was completely handled by the package, then
+|	the return will be via a 'jmp fpsp_done'.  Unless there is
+|	OS specific work to be done (such as handling a context switch or
+|	interrupt) the user program can be resumed via 'rte'.
+|
+|	In the following skeleton code, some typical 'real_xxxx' handling
+|	code is shown.  This code may need to be moved to an appropriate
+|	place in the target system, or rewritten.
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|
+|	Modified for Linux-1.3.x by Jes Sorensen (jds@kom.auc.dk)
+|
+
+#include <linux/linkage.h>
+#include <asm/entry.h>
+#include <asm/offsets.h>
+
+|SKELETON	idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section 15
+|
+|	The following counters are used for standalone testing
+|
+
+	|section 8
+
+#include "fpsp.h"
+
+	|xref	b1238_fix
+
+|
+|	Divide by Zero exception
+|
+|	All dz exceptions are 'real', hence no fpsp_dz entry point.
+|
+	.global	dz
+	.global	real_dz
+dz:
+real_dz:
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%sp)
+	bclrb		#E1,E_BYTE(%a6)
+	frestore	(%sp)+
+	unlk		%a6
+
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@-		| stack frame pointer argument
+	bsrl	trap_c
+	addql	#4,%sp
+	bral	ret_from_exception
+
+|
+|	Inexact exception
+|
+|	All inexact exceptions are real, but the 'real' handler
+|	will probably want to clear the pending exception.
+|	The provided code will clear the E3 exception (if pending),
+|	otherwise clear the E1 exception.  The frestore is not really
+|	necessary for E1 exceptions.
+|
+| Code following the 'inex' label is to handle bug #1232.  In this
+| bug, if an E1 snan, ovfl, or unfl occurred, and the process was
+| swapped out before taking the exception, the exception taken on
+| return was inex, rather than the correct exception.  The snan, ovfl,
+| or unfl exception to be taken must not have been enabled.  The
+| fix is to check for E1, and the existence of one of snan, ovfl,
+| or unfl bits set in the fpsr.  If any of these are set, branch
+| to the appropriate handler for the exception in the fpsr.  Note
+| that this fix is only for d43b parts, and is skipped if the
+| version number is not $40.
+|
+|
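+|
+| A compact C sketch of the work-around logic described above (the mask
+| values shown are the usual 68040 FPSR exception-status bits; the real
+| code tests the symbols from fpsp.h against the saved frame instead):
+|
+|	#define SNAN_MASK	(1u << 14)
+|	#define OVFL_MASK	(1u << 12)
+|	#define UNFL_MASK	(1u << 11)
+|
+|	enum handler { H_SNAN, H_OVFL, H_UNFL, H_INEX };
+|
+|	/* Only d43b (version $40) parts with E1 pending get rerouted. */
+|	enum handler pick_inex_handler(unsigned version, int e1_set,
+|				       unsigned fpsr)
+|	{
+|		if (version == 0x40 && e1_set) {
+|			if (fpsr & SNAN_MASK) return H_SNAN;
+|			if (fpsr & OVFL_MASK) return H_OVFL;
+|			if (fpsr & UNFL_MASK) return H_UNFL;
+|		}
+|		return H_INEX;		/* the normal inexact path */
+|	}
+|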
+	.global	real_inex
+	.global	inex
+inex:
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%sp)
+	cmpib		#VER_40,(%sp)		|test version number
+	bnes		not_fmt40
+	fmovel		%fpsr,-(%sp)
+	btstb		#E1,E_BYTE(%a6)		|test for E1 set
+	beqs		not_b1232
+	btstb		#snan_bit,2(%sp) |test for snan
+	beq		inex_ckofl
+	addl		#4,%sp
+	frestore	(%sp)+
+	unlk		%a6
+	bra		snan
+inex_ckofl:
+	btstb		#ovfl_bit,2(%sp) |test for ovfl
+	beq		inex_ckufl
+	addl		#4,%sp
+	frestore	(%sp)+
+	unlk		%a6
+	bra		ovfl
+inex_ckufl:
+	btstb		#unfl_bit,2(%sp) |test for unfl
+	beq		not_b1232
+	addl		#4,%sp
+	frestore	(%sp)+
+	unlk		%a6
+	bra		unfl
+
+|
+| We do not have the bug 1232 case.  Clean up the stack and call
+| real_inex.
+|
+not_b1232:
+	addl		#4,%sp
+	frestore	(%sp)+
+	unlk		%a6
+
+real_inex:
+
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%sp)
+not_fmt40:
+	bclrb		#E3,E_BYTE(%a6)		|clear and test E3 flag
+	beqs		inex_cke1
+|
+| Clear dirty bit on dest register in the frame before branching
+| to b1238_fix.
+|
+	moveml		%d0/%d1,USER_DA(%a6)
+	bfextu		CMDREG1B(%a6){#6:#3},%d0		|get dest reg no
+	bclrb		%d0,FPR_DIRTY_BITS(%a6)	|clr dest dirty bit
+	bsrl		b1238_fix		|test for bug1238 case
+	moveml		USER_DA(%a6),%d0/%d1
+	bras		inex_done
+inex_cke1:
+	bclrb		#E1,E_BYTE(%a6)
+inex_done:
+	frestore	(%sp)+
+	unlk		%a6
+
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@-		| stack frame pointer argument
+	bsrl	trap_c
+	addql	#4,%sp
+	bral	ret_from_exception
+
+|
+|	Overflow exception
+|
+	|xref	fpsp_ovfl
+	.global	real_ovfl
+	.global	ovfl
+ovfl:
+	jmp	fpsp_ovfl
+real_ovfl:
+
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%sp)
+	bclrb		#E3,E_BYTE(%a6)		|clear and test E3 flag
+	bnes		ovfl_done
+	bclrb		#E1,E_BYTE(%a6)
+ovfl_done:
+	frestore	(%sp)+
+	unlk		%a6
+
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@-		| stack frame pointer argument
+	bsrl	trap_c
+	addql	#4,%sp
+	bral	ret_from_exception
+
+|
+|	Underflow exception
+|
+	|xref	fpsp_unfl
+	.global	real_unfl
+	.global	unfl
+unfl:
+	jmp	fpsp_unfl
+real_unfl:
+
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%sp)
+	bclrb		#E3,E_BYTE(%a6)		|clear and test E3 flag
+	bnes		unfl_done
+	bclrb		#E1,E_BYTE(%a6)
+unfl_done:
+	frestore	(%sp)+
+	unlk		%a6
+
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@-		| stack frame pointer argument
+	bsrl	trap_c
+	addql	#4,%sp
+	bral	ret_from_exception
+
+|
+|	Signalling NAN exception
+|
+	|xref	fpsp_snan
+	.global	real_snan
+	.global	snan
+snan:
+	jmp	fpsp_snan
+real_snan:
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%sp)
+	bclrb		#E1,E_BYTE(%a6)	|snan is always an E1 exception
+	frestore	(%sp)+
+	unlk		%a6
+
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@-		| stack frame pointer argument
+	bsrl	trap_c
+	addql	#4,%sp
+	bral	ret_from_exception
+
+|
+|	Operand Error exception
+|
+	|xref	fpsp_operr
+	.global	real_operr
+	.global	operr
+operr:
+	jmp	fpsp_operr
+real_operr:
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%sp)
+	bclrb		#E1,E_BYTE(%a6)	|operr is always an E1 exception
+	frestore	(%sp)+
+	unlk		%a6
+
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@-		| stack frame pointer argument
+	bsrl	trap_c
+	addql	#4,%sp
+	bral	ret_from_exception
+
+
+|
+|	BSUN exception
+|
+|	This sample handler simply clears the nan bit in the FPSR.
+|
+	|xref	fpsp_bsun
+	.global	real_bsun
+	.global	bsun
+bsun:
+	jmp	fpsp_bsun
+real_bsun:
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%sp)
+	bclrb		#E1,E_BYTE(%a6)	|bsun is always an E1 exception
+	fmovel		%FPSR,-(%sp)
+	bclrb		#nan_bit,(%sp)
+	fmovel		(%sp)+,%FPSR
+	frestore	(%sp)+
+	unlk		%a6
+
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@-		| stack frame pointer argument
+	bsrl	trap_c
+	addql	#4,%sp
+	bral	ret_from_exception
+
+|
+|	F-line exception
+|
+|	A 'real' F-line exception is one that the FPSP isn't supposed to
+|	handle. E.g. an instruction with a co-processor ID that is not 1.
+|
+|
+	|xref	fpsp_fline
+	.global	real_fline
+	.global	fline
+fline:
+	jmp	fpsp_fline
+real_fline:
+
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@-		| stack frame pointer argument
+	bsrl	trap_c
+	addql	#4,%sp
+	bral	ret_from_exception
+
+|
+|	Unsupported data type exception
+|
+	|xref	fpsp_unsupp
+	.global	real_unsupp
+	.global	unsupp
+unsupp:
+	jmp	fpsp_unsupp
+real_unsupp:
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%sp)
+	bclrb		#E1,E_BYTE(%a6)	|unsupp is always an E1 exception
+	frestore	(%sp)+
+	unlk		%a6
+
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@-		| stack frame pointer argument
+	bsrl	trap_c
+	addql	#4,%sp
+	bral	ret_from_exception
+
+|
+|	Trace exception
+|
+	.global	real_trace
+real_trace:
+	|
+	bral	trap
+
+|
+|	fpsp_fmt_error --- exit point for frame format error
+|
+|	The fpu stack frame does not match the frames existing
+|	or planned at the time of this writing.  The fpsp is
+|	unable to handle frame sizes not in the following
+|	version:size pairs:
+|
+|	{4060, 4160} - busy frame
+|	{4028, 4130} - unimp frame
+|	{4000, 4100} - idle frame
+|
+|	This entry point simply holds an f-line illegal value.
+|	Replace this with a call to your kernel panic code or
+|	code to handle future revisions of the fpu.
+|
+	.global	fpsp_fmt_error
+fpsp_fmt_error:
+
+	.long	0xf27f0000	|f-line illegal
+
+|
+|	fpsp_done --- FPSP exit point
+|
+|	The exception has been handled by the package and we are ready
+|	to return to user mode, but there may be OS specific code
+|	to execute before we do.  If there is, do it now.
+|
+|
+
+	.global	fpsp_done
+fpsp_done:
+	btst	#0x5,%sp@		| supervisor bit set in saved SR?
+	beq	.Lnotkern
+	rte
+.Lnotkern:
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	tstb	%curptr@(TASK_NEEDRESCHED)
+	jne	ret_from_exception	| deliver signals,
+					| reschedule etc..
+	RESTORE_ALL
+
+|
+|	mem_write --- write to user or supervisor address space
+|
+| Writes to memory while in supervisor mode.  copyout accomplishes
+| this via a 'moves' instruction.  copyout is a UNIX SVR3 (and later) function.
+| If you don't have copyout, use the local copy of the function below.
+|
+|	a0 - supervisor source address
+|	a1 - user destination address
+|	d0 - number of bytes to write (maximum count is 12)
+|
+| The supervisor source address is guaranteed to point into the supervisor
+| stack.  The result is that a UNIX
+| process is allowed to sleep as a consequence of a page fault during
+| copyout.  The probability of a page fault is exceedingly small because
+| the 68040 always reads the destination address and thus the page
+| faults should have already been handled.
+|
+| If the EXC_SR shows that the exception was from supervisor space,
+| then just do a dumb (and slow) memory move.  In a UNIX environment
+| there shouldn't be any supervisor mode floating point exceptions.
+|
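+|
+| A rough C sketch of the selection described above (illustrative names
+| only; the real user-space path must use "moves" so the store goes out
+| with the user DFC, which plain C cannot express):
+|
+|	static void copyout_sketch(const unsigned char *src,
+|				   unsigned char *dst, unsigned long n)
+|	{
+|		while (n--)
+|			*dst++ = *src++;	/* "moves" in the real code */
+|	}
+|
+|	/* At most 12 bytes per call; src always points into the
+|	 * supervisor stack. */
+|	void mem_write_sketch(const unsigned char *src, unsigned char *dst,
+|			      unsigned long n, int exc_from_supervisor)
+|	{
+|		if (exc_from_supervisor) {
+|			while (n--)
+|				*dst++ = *src++;  /* dumb supervisor copy */
+|		} else {
+|			copyout_sketch(src, dst, n);
+|		}
+|	}
+|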
+	.global	mem_write
+mem_write:
+	btstb	#5,EXC_SR(%a6)	|check for supervisor state
+	beqs	user_write
+super_write:
+	moveb	(%a0)+,(%a1)+
+	subql	#1,%d0
+	bnes	super_write
+	rts
+user_write:
+	movel	%d1,-(%sp)	|preserve d1 just in case
+	movel	%d0,-(%sp)
+	movel	%a1,-(%sp)
+	movel	%a0,-(%sp)
+	jsr		copyout
+	addw	#12,%sp
+	movel	(%sp)+,%d1
+	rts
+|
+|	mem_read --- read from user or supervisor address space
+|
+| Reads from memory while in supervisor mode.  copyin accomplishes
+| this via a 'moves' instruction.  copyin is a UNIX SVR3 (and later) function.
+| If you don't have copyin, use the local copy of the function below.
+|
+| The FPSP calls mem_read to read the original F-line instruction in order
+| to extract the data register number when the 'Dn' addressing mode is
+| used.
+|
+|Input:
+|	a0 - user source address
+|	a1 - supervisor destination address
+|	d0 - number of bytes to read (maximum count is 12)
+|
+| Like mem_write, mem_read always reads with a supervisor
+| destination address on the supervisor stack.  Also like mem_write,
+| the EXC_SR is checked and a simple memory copy is done if reading
+| from supervisor space is indicated.
+|
+	.global	mem_read
+mem_read:
+	btstb	#5,EXC_SR(%a6)	|check for supervisor state
+	beqs	user_read
+super_read:
+	moveb	(%a0)+,(%a1)+
+	subql	#1,%d0
+	bnes	super_read
+	rts
+user_read:
+	movel	%d1,-(%sp)	|preserve d1 just in case
+	movel	%d0,-(%sp)
+	movel	%a1,-(%sp)
+	movel	%a0,-(%sp)
+	jsr	copyin
+	addw	#12,%sp
+	movel	(%sp)+,%d1
+	rts
+
+|
+| Use these routines if your kernel doesn't have copyout/copyin equivalents.
+| Assumes that D0/D1/A0/A1 are scratch registers.  The original routines
+| overwrite DFC (copyout) and SFC (copyin); here the movec instructions
+| are commented out because the function codes are already set up.
+|
+copyout:
+	movel	4(%sp),%a0	| source
+	movel	8(%sp),%a1	| destination
+	movel	12(%sp),%d0	| count
+	subl	#1,%d0		| dec count by 1 for dbra
+	movel	#1,%d1
+
+|	DFC is already set
+|	movec	%d1,%DFC		| set dfc for user data space
+moreout:
+	moveb	(%a0)+,%d1	| fetch supervisor byte
+out_ea:
+	movesb	%d1,(%a1)+	| write user byte
+	dbf	%d0,moreout
+	rts
+
+copyin:
+	movel	4(%sp),%a0	| source
+	movel	8(%sp),%a1	| destination
+	movel	12(%sp),%d0	| count
+	subl	#1,%d0		| dec count by 1 for dbra
+	movel	#1,%d1
+|	SFC is already set
+|	movec	%d1,%SFC		| set sfc for user space
+morein:
+in_ea:
+	movesb	(%a0)+,%d1	| fetch user byte
+	moveb	%d1,(%a1)+	| write supervisor byte
+	dbf	%d0,morein
+	rts
+
+	.section .fixup,#alloc,#execinstr
+	.even
+1:
+	jbra	fpsp040_die
+
+	.section __ex_table,#alloc
+	.align	4
+
+	.long	in_ea,1b
+	.long	out_ea,1b
+
+	|end
diff --git a/arch/m68k/fpsp040/slog2.S b/arch/m68k/fpsp040/slog2.S
new file mode 100644
index 0000000..517fa45
--- /dev/null
+++ b/arch/m68k/fpsp040/slog2.S
@@ -0,0 +1,188 @@
+|
+|	slog2.sa 3.1 12/10/90
+|
+|       The entry point slog10 computes the base-10
+|	logarithm of an input argument X.
+|	slog10d does the same except the input value is a
+|	denormalized number.
+|	slog2 and slog2d are the base-2 analogues.
+|
+|       INPUT:	Double-extended value in memory location pointed to
+|		by address register a0.
+|
+|       OUTPUT: log_10(X) or log_2(X) returned in floating-point
+|		register fp0.
+|
+|       ACCURACY and MONOTONICITY: The returned result is within 1.7
+|		ulps in 64 significant bits, i.e. within 0.5003 ulp
+|		to 53 bits if the result is subsequently rounded
+|		to double precision. The result is provably monotonic
+|		in double precision.
+|
+|       SPEED:	Two timings are measured, both in the copy-back mode.
+|		The first one is measured when the function is invoked
+|		the first time (so the instructions and data are not
+|		in cache), and the second one is measured when the
+|		function is reinvoked at the same input argument.
+|
+|       ALGORITHM and IMPLEMENTATION NOTES:
+|
+|       slog10d:
+|
+|       Step 0.   If X < 0, create a NaN and raise the invalid operation
+|                 flag. Otherwise, save FPCR in D1; set FPCR to default.
+|       Notes:    Default means round-to-nearest mode, no floating-point
+|                 traps, and precision control = double extended.
+|
+|       Step 1.   Call slognd to obtain Y = log(X), the natural log of X.
+|       Notes:    Even if X is denormalized, log(X) is always normalized.
+|
+|       Step 2.   Compute log_10(X) = log(X) * (1/log(10)).
+|            2.1  Restore the user FPCR
+|            2.2  Return ans := Y * INV_L10.
+|
+|
+|       slog10:
+|
+|       Step 0.   If X < 0, create a NaN and raise the invalid operation
+|                 flag. Otherwise, save FPCR in D1; set FPCR to default.
+|       Notes:    Default means round-to-nearest mode, no floating-point
+|                 traps, and precision control = double extended.
+|
+|       Step 1.   Call slogn to obtain Y = log(X), the natural log of X.
+|
+|       Step 2.   Compute log_10(X) = log(X) * (1/log(10)).
+|            2.1  Restore the user FPCR
+|            2.2  Return ans := Y * INV_L10.
+|
+|
+|       slog2d:
+|
+|       Step 0.   If X < 0, create a NaN and raise the invalid operation
+|                 flag. Otherwise, save FPCR in D1; set FPCR to default.
+|       Notes:    Default means round-to-nearest mode, no floating-point
+|                 traps, and precision control = double extended.
+|
+|       Step 1.   Call slognd to obtain Y = log(X), the natural log of X.
+|       Notes:    Even if X is denormalized, log(X) is always normalized.
+|
+|       Step 2.   Compute log_2(X) = log(X) * (1/log(2)).
+|            2.1  Restore the user FPCR
+|            2.2  Return ans := Y * INV_L2.
+|
+|
+|       slog2:
+|
+|       Step 0.   If X < 0, create a NaN and raise the invalid operation
+|                 flag. Otherwise, save FPCR in D1; set FPCR to default.
+|       Notes:    Default means round-to-nearest mode, no floating-point
+|                 traps, and precision control = double extended.
+|
+|       Step 1.   If X is not an integer power of two, i.e., X != 2^k,
+|                 go to Step 3.
+|
+|       Step 2.   Return k.
+|            2.1  Get integer k, X = 2^k.
+|            2.2  Restore the user FPCR.
+|            2.3  Return ans := convert-to-double-extended(k).
+|
+|       Step 3.   Call slogn to obtain Y = log(X), the natural log of X.
+|
+|       Step 4.   Compute log_2(X) = log(X) * (1/log(2)).
+|            4.1  Restore the user FPCR
+|            4.2  Return ans := Y * INV_L2.
+|
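+|
+|       A small C sketch of the steps above for x > 0 (double precision
+|       only; the real code multiplies by the stored extended constants
+|       INV_L10 = 1/ln(10) and INV_L2 = 1/ln(2) rather than computing
+|       them):
+|
+|	#include <math.h>
+|
+|	double slog10_sketch(double x)
+|	{
+|		return log(x) * (1.0 / log(10.0));	/* Steps 1-2 */
+|	}
+|
+|	double slog2_sketch(double x)
+|	{
+|		int k;
+|		double m = frexp(x, &k);	/* x = m * 2^k, 0.5 <= m < 1 */
+|		if (m == 0.5)			/* x is exactly 2^(k-1):  */
+|			return (double)(k - 1);	/* return the exponent    */
+|		return log(x) * (1.0 / log(2.0));	/* Steps 3-4 */
+|	}
+|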
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|SLOG2    idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+	|xref	t_frcinx
+	|xref	t_operr
+	|xref	slogn
+	|xref	slognd
+
+INV_L10:  .long 0x3FFD0000,0xDE5BD8A9,0x37287195,0x00000000
+
+INV_L2:   .long 0x3FFF0000,0xB8AA3B29,0x5C17F0BC,0x00000000
+
+	.global	slog10d
+slog10d:
+|--entry point for Log10(X), X is denormalized
+	movel		(%a0),%d0
+	blt		invalid
+	movel		%d1,-(%sp)
+	clrl		%d1
+	bsr		slognd			| ...log(X), X denorm.
+	fmovel		(%sp)+,%fpcr
+	fmulx		INV_L10,%fp0
+	bra		t_frcinx
+
+	.global	slog10
+slog10:
+|--entry point for Log10(X), X is normalized
+
+	movel		(%a0),%d0
+	blt		invalid
+	movel		%d1,-(%sp)
+	clrl		%d1
+	bsr		slogn			| ...log(X), X normal.
+	fmovel		(%sp)+,%fpcr
+	fmulx		INV_L10,%fp0
+	bra		t_frcinx
+
+
+	.global	slog2d
+slog2d:
+|--entry point for Log2(X), X is denormalized
+
+	movel		(%a0),%d0
+	blt		invalid
+	movel		%d1,-(%sp)
+	clrl		%d1
+	bsr		slognd			| ...log(X), X denorm.
+	fmovel		(%sp)+,%fpcr
+	fmulx		INV_L2,%fp0
+	bra		t_frcinx
+
+	.global	slog2
+slog2:
+|--entry point for Log2(X), X is normalized
+	movel		(%a0),%d0
+	blt		invalid
+
+	movel		8(%a0),%d0
+	bnes		continue		| ...X is not 2^k
+
+	movel		4(%a0),%d0
+	andl		#0x7FFFFFFF,%d0
+	tstl		%d0
+	bnes		continue
+
+|--X = 2^k.
+	movew		(%a0),%d0
+	andl		#0x00007FFF,%d0
+	subl		#0x3FFF,%d0
+	fmovel		%d1,%fpcr
+	fmovel		%d0,%fp0
+	bra		t_frcinx
+
+continue:
+	movel		%d1,-(%sp)
+	clrl		%d1
+	bsr		slogn			| ...log(X), X normal.
+	fmovel		(%sp)+,%fpcr
+	fmulx		INV_L2,%fp0
+	bra		t_frcinx
+
+invalid:
+	bra		t_operr
+
+	|end
diff --git a/arch/m68k/fpsp040/slogn.S b/arch/m68k/fpsp040/slogn.S
new file mode 100644
index 0000000..2aaa072
--- /dev/null
+++ b/arch/m68k/fpsp040/slogn.S
@@ -0,0 +1,592 @@
+|
+|	slogn.sa 3.1 12/10/90
+|
+|	slogn computes the natural logarithm of an
+|	input value. slognd does the same except the input value is a
+|	denormalized number. slognp1 computes log(1+X), and slognp1d
+|	computes log(1+X) for denormalized X.
+|
+|	Input: Double-extended value in memory location pointed to by address
+|		register a0.
+|
+|	Output:	log(X) or log(1+X) returned in floating-point register Fp0.
+|
+|	Accuracy and Monotonicity: The returned result is within 2 ulps in
+|		64 significant bits, i.e. within 0.5001 ulp to 53 bits if the
+|		result is subsequently rounded to double precision. The
+|		result is provably monotonic in double precision.
+|
+|	Speed: The program slogn takes approximately 190 cycles for input
+|		argument X such that |X-1| >= 1/16, which is the usual
+|		situation. For those arguments, slognp1 takes approximately
+|		 210 cycles. For the less common arguments, the program will
+|		 run no worse than 10% slower.
+|
+|	Algorithm:
+|	LOGN:
+|	Step 1. If |X-1| < 1/16, approximate log(X) by an odd polynomial in
+|		u, where u = 2(X-1)/(X+1). Otherwise, move on to Step 2.
+|
+|	Step 2. X = 2**k * Y where 1 <= Y < 2. Define F to be the first seven
+|		significant bits of Y plus 2**(-7), i.e. F = 1.xxxxxx1 in base
+|		2 where the six "x" match those of Y. Note that |Y-F| <= 2**(-7).
+|
+|	Step 3. Define u = (Y-F)/F. Approximate log(1+u) by a polynomial in u,
+|		log(1+u) = poly.
+|
+|	Step 4. Reconstruct log(X) = log( 2**k * Y ) = k*log(2) + log(F) + log(1+u)
+|		by k*log(2) + (log(F) + poly). The values of log(F) are calculated
+|		beforehand and stored in the program.
+|
+|	lognp1:
+|	Step 1: If |X| < 1/16, approximate log(1+X) by an odd polynomial in
+|		u where u = 2X/(2+X). Otherwise, move on to Step 2.
+|
+|	Step 2: Let 1+X = 2**k * Y, where 1 <= Y < 2. Define F as done in Step 2
+|		of the algorithm for LOGN and compute log(1+X) as
+|		k*log(2) + log(F) + poly where poly approximates log(1+u),
+|		u = (Y-F)/F.
+|
+|	Implementation Notes:
+|	Note 1. There are 64 different possible values for F, thus 64 log(F)'s
+|		need to be tabulated. Moreover, the values of 1/F are also
+|		tabulated so that the division in (Y-F)/F can be performed by a
+|		multiplication.
+|
+|	Note 2. In Step 2 of lognp1, in order to preserve accuracy, the value
+|		Y-F has to be calculated carefully when 1/2 <= X < 3/2.
+|
+|	Note 3. To fully exploit the pipeline, polynomials are usually separated
+|		into two parts evaluated independently before being added up.
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|slogn	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+BOUNDS1:  .long 0x3FFEF07D,0x3FFF8841
+BOUNDS2:  .long 0x3FFE8000,0x3FFFC000
+
+LOGOF2:	.long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+one:	.long 0x3F800000
+zero:	.long 0x00000000
+infty:	.long 0x7F800000
+negone:	.long 0xBF800000
+
+LOGA6:	.long 0x3FC2499A,0xB5E4040B
+LOGA5:	.long 0xBFC555B5,0x848CB7DB
+
+LOGA4:	.long 0x3FC99999,0x987D8730
+LOGA3:	.long 0xBFCFFFFF,0xFF6F7E97
+
+LOGA2:	.long 0x3FD55555,0x555555a4
+LOGA1:	.long 0xBFE00000,0x00000008
+
+LOGB5:	.long 0x3F175496,0xADD7DAD6
+LOGB4:	.long 0x3F3C71C2,0xFE80C7E0
+
+LOGB3:	.long 0x3F624924,0x928BCCFF
+LOGB2:	.long 0x3F899999,0x999995EC
+
+LOGB1:	.long 0x3FB55555,0x55555555
+TWO:	.long 0x40000000,0x00000000
+
+LTHOLD:	.long 0x3f990000,0x80000000,0x00000000,0x00000000
+
+LOGTBL:
+	.long  0x3FFE0000,0xFE03F80F,0xE03F80FE,0x00000000
+	.long  0x3FF70000,0xFF015358,0x833C47E2,0x00000000
+	.long  0x3FFE0000,0xFA232CF2,0x52138AC0,0x00000000
+	.long  0x3FF90000,0xBDC8D83E,0xAD88D549,0x00000000
+	.long  0x3FFE0000,0xF6603D98,0x0F6603DA,0x00000000
+	.long  0x3FFA0000,0x9CF43DCF,0xF5EAFD48,0x00000000
+	.long  0x3FFE0000,0xF2B9D648,0x0F2B9D65,0x00000000
+	.long  0x3FFA0000,0xDA16EB88,0xCB8DF614,0x00000000
+	.long  0x3FFE0000,0xEF2EB71F,0xC4345238,0x00000000
+	.long  0x3FFB0000,0x8B29B775,0x1BD70743,0x00000000
+	.long  0x3FFE0000,0xEBBDB2A5,0xC1619C8C,0x00000000
+	.long  0x3FFB0000,0xA8D839F8,0x30C1FB49,0x00000000
+	.long  0x3FFE0000,0xE865AC7B,0x7603A197,0x00000000
+	.long  0x3FFB0000,0xC61A2EB1,0x8CD907AD,0x00000000
+	.long  0x3FFE0000,0xE525982A,0xF70C880E,0x00000000
+	.long  0x3FFB0000,0xE2F2A47A,0xDE3A18AF,0x00000000
+	.long  0x3FFE0000,0xE1FC780E,0x1FC780E2,0x00000000
+	.long  0x3FFB0000,0xFF64898E,0xDF55D551,0x00000000
+	.long  0x3FFE0000,0xDEE95C4C,0xA037BA57,0x00000000
+	.long  0x3FFC0000,0x8DB956A9,0x7B3D0148,0x00000000
+	.long  0x3FFE0000,0xDBEB61EE,0xD19C5958,0x00000000
+	.long  0x3FFC0000,0x9B8FE100,0xF47BA1DE,0x00000000
+	.long  0x3FFE0000,0xD901B203,0x6406C80E,0x00000000
+	.long  0x3FFC0000,0xA9372F1D,0x0DA1BD17,0x00000000
+	.long  0x3FFE0000,0xD62B80D6,0x2B80D62C,0x00000000
+	.long  0x3FFC0000,0xB6B07F38,0xCE90E46B,0x00000000
+	.long  0x3FFE0000,0xD3680D36,0x80D3680D,0x00000000
+	.long  0x3FFC0000,0xC3FD0329,0x06488481,0x00000000
+	.long  0x3FFE0000,0xD0B69FCB,0xD2580D0B,0x00000000
+	.long  0x3FFC0000,0xD11DE0FF,0x15AB18CA,0x00000000
+	.long  0x3FFE0000,0xCE168A77,0x25080CE1,0x00000000
+	.long  0x3FFC0000,0xDE1433A1,0x6C66B150,0x00000000
+	.long  0x3FFE0000,0xCB8727C0,0x65C393E0,0x00000000
+	.long  0x3FFC0000,0xEAE10B5A,0x7DDC8ADD,0x00000000
+	.long  0x3FFE0000,0xC907DA4E,0x871146AD,0x00000000
+	.long  0x3FFC0000,0xF7856E5E,0xE2C9B291,0x00000000
+	.long  0x3FFE0000,0xC6980C69,0x80C6980C,0x00000000
+	.long  0x3FFD0000,0x82012CA5,0xA68206D7,0x00000000
+	.long  0x3FFE0000,0xC4372F85,0x5D824CA6,0x00000000
+	.long  0x3FFD0000,0x882C5FCD,0x7256A8C5,0x00000000
+	.long  0x3FFE0000,0xC1E4BBD5,0x95F6E947,0x00000000
+	.long  0x3FFD0000,0x8E44C60B,0x4CCFD7DE,0x00000000
+	.long  0x3FFE0000,0xBFA02FE8,0x0BFA02FF,0x00000000
+	.long  0x3FFD0000,0x944AD09E,0xF4351AF6,0x00000000
+	.long  0x3FFE0000,0xBD691047,0x07661AA3,0x00000000
+	.long  0x3FFD0000,0x9A3EECD4,0xC3EAA6B2,0x00000000
+	.long  0x3FFE0000,0xBB3EE721,0xA54D880C,0x00000000
+	.long  0x3FFD0000,0xA0218434,0x353F1DE8,0x00000000
+	.long  0x3FFE0000,0xB92143FA,0x36F5E02E,0x00000000
+	.long  0x3FFD0000,0xA5F2FCAB,0xBBC506DA,0x00000000
+	.long  0x3FFE0000,0xB70FBB5A,0x19BE3659,0x00000000
+	.long  0x3FFD0000,0xABB3B8BA,0x2AD362A5,0x00000000
+	.long  0x3FFE0000,0xB509E68A,0x9B94821F,0x00000000
+	.long  0x3FFD0000,0xB1641795,0xCE3CA97B,0x00000000
+	.long  0x3FFE0000,0xB30F6352,0x8917C80B,0x00000000
+	.long  0x3FFD0000,0xB7047551,0x5D0F1C61,0x00000000
+	.long  0x3FFE0000,0xB11FD3B8,0x0B11FD3C,0x00000000
+	.long  0x3FFD0000,0xBC952AFE,0xEA3D13E1,0x00000000
+	.long  0x3FFE0000,0xAF3ADDC6,0x80AF3ADE,0x00000000
+	.long  0x3FFD0000,0xC2168ED0,0xF458BA4A,0x00000000
+	.long  0x3FFE0000,0xAD602B58,0x0AD602B6,0x00000000
+	.long  0x3FFD0000,0xC788F439,0xB3163BF1,0x00000000
+	.long  0x3FFE0000,0xAB8F69E2,0x8359CD11,0x00000000
+	.long  0x3FFD0000,0xCCECAC08,0xBF04565D,0x00000000
+	.long  0x3FFE0000,0xA9C84A47,0xA07F5638,0x00000000
+	.long  0x3FFD0000,0xD2420487,0x2DD85160,0x00000000
+	.long  0x3FFE0000,0xA80A80A8,0x0A80A80B,0x00000000
+	.long  0x3FFD0000,0xD7894992,0x3BC3588A,0x00000000
+	.long  0x3FFE0000,0xA655C439,0x2D7B73A8,0x00000000
+	.long  0x3FFD0000,0xDCC2C4B4,0x9887DACC,0x00000000
+	.long  0x3FFE0000,0xA4A9CF1D,0x96833751,0x00000000
+	.long  0x3FFD0000,0xE1EEBD3E,0x6D6A6B9E,0x00000000
+	.long  0x3FFE0000,0xA3065E3F,0xAE7CD0E0,0x00000000
+	.long  0x3FFD0000,0xE70D785C,0x2F9F5BDC,0x00000000
+	.long  0x3FFE0000,0xA16B312E,0xA8FC377D,0x00000000
+	.long  0x3FFD0000,0xEC1F392C,0x5179F283,0x00000000
+	.long  0x3FFE0000,0x9FD809FD,0x809FD80A,0x00000000
+	.long  0x3FFD0000,0xF12440D3,0xE36130E6,0x00000000
+	.long  0x3FFE0000,0x9E4CAD23,0xDD5F3A20,0x00000000
+	.long  0x3FFD0000,0xF61CCE92,0x346600BB,0x00000000
+	.long  0x3FFE0000,0x9CC8E160,0xC3FB19B9,0x00000000
+	.long  0x3FFD0000,0xFB091FD3,0x8145630A,0x00000000
+	.long  0x3FFE0000,0x9B4C6F9E,0xF03A3CAA,0x00000000
+	.long  0x3FFD0000,0xFFE97042,0xBFA4C2AD,0x00000000
+	.long  0x3FFE0000,0x99D722DA,0xBDE58F06,0x00000000
+	.long  0x3FFE0000,0x825EFCED,0x49369330,0x00000000
+	.long  0x3FFE0000,0x9868C809,0x868C8098,0x00000000
+	.long  0x3FFE0000,0x84C37A7A,0xB9A905C9,0x00000000
+	.long  0x3FFE0000,0x97012E02,0x5C04B809,0x00000000
+	.long  0x3FFE0000,0x87224C2E,0x8E645FB7,0x00000000
+	.long  0x3FFE0000,0x95A02568,0x095A0257,0x00000000
+	.long  0x3FFE0000,0x897B8CAC,0x9F7DE298,0x00000000
+	.long  0x3FFE0000,0x94458094,0x45809446,0x00000000
+	.long  0x3FFE0000,0x8BCF55DE,0xC4CD05FE,0x00000000
+	.long  0x3FFE0000,0x92F11384,0x0497889C,0x00000000
+	.long  0x3FFE0000,0x8E1DC0FB,0x89E125E5,0x00000000
+	.long  0x3FFE0000,0x91A2B3C4,0xD5E6F809,0x00000000
+	.long  0x3FFE0000,0x9066E68C,0x955B6C9B,0x00000000
+	.long  0x3FFE0000,0x905A3863,0x3E06C43B,0x00000000
+	.long  0x3FFE0000,0x92AADE74,0xC7BE59E0,0x00000000
+	.long  0x3FFE0000,0x8F1779D9,0xFDC3A219,0x00000000
+	.long  0x3FFE0000,0x94E9BFF6,0x15845643,0x00000000
+	.long  0x3FFE0000,0x8DDA5202,0x37694809,0x00000000
+	.long  0x3FFE0000,0x9723A1B7,0x20134203,0x00000000
+	.long  0x3FFE0000,0x8CA29C04,0x6514E023,0x00000000
+	.long  0x3FFE0000,0x995899C8,0x90EB8990,0x00000000
+	.long  0x3FFE0000,0x8B70344A,0x139BC75A,0x00000000
+	.long  0x3FFE0000,0x9B88BDAA,0x3A3DAE2F,0x00000000
+	.long  0x3FFE0000,0x8A42F870,0x5669DB46,0x00000000
+	.long  0x3FFE0000,0x9DB4224F,0xFFE1157C,0x00000000
+	.long  0x3FFE0000,0x891AC73A,0xE9819B50,0x00000000
+	.long  0x3FFE0000,0x9FDADC26,0x8B7A12DA,0x00000000
+	.long  0x3FFE0000,0x87F78087,0xF78087F8,0x00000000
+	.long  0x3FFE0000,0xA1FCFF17,0xCE733BD4,0x00000000
+	.long  0x3FFE0000,0x86D90544,0x7A34ACC6,0x00000000
+	.long  0x3FFE0000,0xA41A9E8F,0x5446FB9F,0x00000000
+	.long  0x3FFE0000,0x85BF3761,0x2CEE3C9B,0x00000000
+	.long  0x3FFE0000,0xA633CD7E,0x6771CD8B,0x00000000
+	.long  0x3FFE0000,0x84A9F9C8,0x084A9F9D,0x00000000
+	.long  0x3FFE0000,0xA8489E60,0x0B435A5E,0x00000000
+	.long  0x3FFE0000,0x83993052,0x3FBE3368,0x00000000
+	.long  0x3FFE0000,0xAA59233C,0xCCA4BD49,0x00000000
+	.long  0x3FFE0000,0x828CBFBE,0xB9A020A3,0x00000000
+	.long  0x3FFE0000,0xAC656DAE,0x6BCC4985,0x00000000
+	.long  0x3FFE0000,0x81848DA8,0xFAF0D277,0x00000000
+	.long  0x3FFE0000,0xAE6D8EE3,0x60BB2468,0x00000000
+	.long  0x3FFE0000,0x80808080,0x80808081,0x00000000
+	.long  0x3FFE0000,0xB07197A2,0x3C46C654,0x00000000
+
+	.set	ADJK,L_SCR1
+
+	.set	X,FP_SCR1
+	.set	XDCARE,X+2
+	.set	XFRAC,X+4
+
+	.set	F,FP_SCR2
+	.set	FFRAC,F+4
+
+	.set	KLOG2,FP_SCR3
+
+	.set	SAVEU,FP_SCR4
+
+	| xref	t_frcinx
+	|xref	t_extdnrm
+	|xref	t_operr
+	|xref	t_dz
+
+	.global	slognd
+slognd:
+|--ENTRY POINT FOR LOG(X) FOR DENORMALIZED INPUT
+
+	movel		#-100,ADJK(%a6)	| ...INPUT = 2^(ADJK) * FP0
+
+|----normalize the input value by left shifting k bits (k to be determined
+|----below), adjusting exponent and storing -k to  ADJK
+|----the value TWOTO100 is no longer needed.
+|----Note that this code assumes the denormalized input is NON-ZERO.
+
+     moveml	%d2-%d7,-(%a7)		| ...save some registers
+     movel	#0x00000000,%d3		| ...D3 is exponent of smallest norm. #
+     movel	4(%a0),%d4
+     movel	8(%a0),%d5		| ...(D4,D5) is (Hi_X,Lo_X)
+     clrl	%d2			| ...D2 used for holding K
+
+     tstl	%d4
+     bnes	HiX_not0
+
+HiX_0:
+     movel	%d5,%d4
+     clrl	%d5
+     movel	#32,%d2
+     clrl	%d6
+     bfffo      %d4{#0:#32},%d6
+     lsll      %d6,%d4
+     addl	%d6,%d2			| ...(D3,D4,D5) is normalized
+
+     movel	%d3,X(%a6)
+     movel	%d4,XFRAC(%a6)
+     movel	%d5,XFRAC+4(%a6)
+     negl	%d2
+     movel	%d2,ADJK(%a6)
+     fmovex	X(%a6),%fp0
+     moveml	(%a7)+,%d2-%d7		| ...restore registers
+     lea	X(%a6),%a0
+     bras	LOGBGN			| ...begin regular log(X)
+
+
+HiX_not0:
+     clrl	%d6
+     bfffo	%d4{#0:#32},%d6		| ...find first 1
+     movel	%d6,%d2			| ...get k
+     lsll	%d6,%d4
+     movel	%d5,%d7			| ...a copy of D5
+     lsll	%d6,%d5
+     negl	%d6
+     addil	#32,%d6
+     lsrl	%d6,%d7
+     orl	%d7,%d4			| ...(D3,D4,D5) normalized
+
+     movel	%d3,X(%a6)
+     movel	%d4,XFRAC(%a6)
+     movel	%d5,XFRAC+4(%a6)
+     negl	%d2
+     movel	%d2,ADJK(%a6)
+     fmovex	X(%a6),%fp0
+     moveml	(%a7)+,%d2-%d7		| ...restore registers
+     lea	X(%a6),%a0
+     bras	LOGBGN			| ...begin regular log(X)
+
+
+	.global	slogn
+slogn:
+|--ENTRY POINT FOR LOG(X) FOR X FINITE, NON-ZERO, NOT NAN'S
+
+	fmovex		(%a0),%fp0	| ...LOAD INPUT
+	movel		#0x00000000,ADJK(%a6)
+
+LOGBGN:
+|--FPCR SAVED AND CLEARED, INPUT IS 2^(ADJK)*FP0, FP0 CONTAINS
+|--A FINITE, NON-ZERO, NORMALIZED NUMBER.
+
+	movel	(%a0),%d0
+	movew	4(%a0),%d0
+
+	movel	(%a0),X(%a6)
+	movel	4(%a0),X+4(%a6)
+	movel	8(%a0),X+8(%a6)
+
+	cmpil	#0,%d0		| ...CHECK IF X IS NEGATIVE
+	blt	LOGNEG		| ...LOG OF NEGATIVE ARGUMENT IS INVALID
+	cmp2l	BOUNDS1,%d0	| ...X IS POSITIVE, CHECK IF X IS NEAR 1
+	bcc	LOGNEAR1	| ...BOUNDS IS ROUGHLY [15/16, 17/16]
+
+LOGMAIN:
+|--THIS SHOULD BE THE USUAL CASE, X NOT VERY CLOSE TO 1
+
+|--X = 2^(K) * Y, 1 <= Y < 2. THUS, Y = 1.XXXXXXXX....XX IN BINARY.
+|--WE DEFINE F = 1.XXXXXX1, I.E. FIRST 7 BITS OF Y AND ATTACH A 1.
+|--THE IDEA IS THAT LOG(X) = K*LOG2 + LOG(Y)
+|--			 = K*LOG2 + LOG(F) + LOG(1 + (Y-F)/F).
+|--NOTE THAT U = (Y-F)/F IS VERY SMALL AND THUS APPROXIMATING
+|--LOG(1+U) CAN BE VERY EFFICIENT.
+|--ALSO NOTE THAT THE VALUE 1/F IS STORED IN A TABLE SO THAT NO
+|--DIVISION IS NEEDED TO CALCULATE (Y-F)/F.
+
+|--GET K, Y, F, AND ADDRESS OF 1/F.
+	asrl	#8,%d0
+	asrl	#8,%d0		| ...SHIFTED 16 BITS, BIASED EXPO. OF X
+	subil	#0x3FFF,%d0	| ...THIS IS K
+	addl	ADJK(%a6),%d0	| ...ADJUST K, ORIGINAL INPUT MAY BE  DENORM.
+	lea	LOGTBL,%a0	| ...BASE ADDRESS OF 1/F AND LOG(F)
+	fmovel	%d0,%fp1		| ...CONVERT K TO FLOATING-POINT FORMAT
+
+|--WHILE THE CONVERSION IS GOING ON, WE GET F AND ADDRESS OF 1/F
+	movel	#0x3FFF0000,X(%a6)	| ...X IS NOW Y, I.E. 2^(-K)*X
+	movel	XFRAC(%a6),FFRAC(%a6)
+	andil	#0xFE000000,FFRAC(%a6) | ...FIRST 7 BITS OF Y
+	oril	#0x01000000,FFRAC(%a6) | ...GET F: ATTACH A 1 AT THE EIGHTH BIT
+	movel	FFRAC(%a6),%d0	| ...READY TO GET ADDRESS OF 1/F
+	andil	#0x7E000000,%d0
+	asrl	#8,%d0
+	asrl	#8,%d0
+	asrl	#4,%d0		| ...SHIFTED 20, D0 IS THE DISPLACEMENT
+	addal	%d0,%a0		| ...A0 IS THE ADDRESS FOR 1/F
+
+	fmovex	X(%a6),%fp0
+	movel	#0x3fff0000,F(%a6)
+	clrl	F+8(%a6)
+	fsubx	F(%a6),%fp0		| ...Y-F
+	fmovemx %fp2-%fp2/%fp3,-(%sp)	| ...SAVE FP2 WHILE FP0 IS NOT READY
+|--SUMMARY: FP0 IS Y-F, A0 IS ADDRESS OF 1/F, FP1 IS K
+|--REGISTERS SAVED: FPCR, FP1, FP2
+
+LP1CONT1:
+|--A RE-ENTRY POINT FOR LOGNP1
+	fmulx	(%a0),%fp0	| ...FP0 IS U = (Y-F)/F
+	fmulx	LOGOF2,%fp1	| ...GET K*LOG2 WHILE FP0 IS NOT READY
+	fmovex	%fp0,%fp2
+	fmulx	%fp2,%fp2		| ...FP2 IS V=U*U
+	fmovex	%fp1,KLOG2(%a6)	| ...PUT K*LOG2 IN MEMORY, FREE FP1
+
+|--LOG(1+U) IS APPROXIMATED BY
+|--U + V*(A1+U*(A2+U*(A3+U*(A4+U*(A5+U*A6))))) WHICH IS
+|--[U + V*(A1+V*(A3+V*A5))]  +  [U*V*(A2+V*(A4+V*A6))]
+
+	fmovex	%fp2,%fp3
+	fmovex	%fp2,%fp1
+
+	fmuld	LOGA6,%fp1	| ...V*A6
+	fmuld	LOGA5,%fp2	| ...V*A5
+
+	faddd	LOGA4,%fp1	| ...A4+V*A6
+	faddd	LOGA3,%fp2	| ...A3+V*A5
+
+	fmulx	%fp3,%fp1		| ...V*(A4+V*A6)
+	fmulx	%fp3,%fp2		| ...V*(A3+V*A5)
+
+	faddd	LOGA2,%fp1	| ...A2+V*(A4+V*A6)
+	faddd	LOGA1,%fp2	| ...A1+V*(A3+V*A5)
+
+	fmulx	%fp3,%fp1		| ...V*(A2+V*(A4+V*A6))
+	addal	#16,%a0		| ...ADDRESS OF LOG(F)
+	fmulx	%fp3,%fp2		| ...V*(A1+V*(A3+V*A5)), FP3 RELEASED
+
+	fmulx	%fp0,%fp1		| ...U*V*(A2+V*(A4+V*A6))
+	faddx	%fp2,%fp0		| ...U+V*(A1+V*(A3+V*A5)), FP2 RELEASED
+
+	faddx	(%a0),%fp1	| ...LOG(F)+U*V*(A2+V*(A4+V*A6))
+	fmovemx  (%sp)+,%fp2-%fp2/%fp3	| ...RESTORE FP2
+	faddx	%fp1,%fp0		| ...FP0 IS LOG(F) + LOG(1+U)
+
+	fmovel	%d1,%fpcr
+	faddx	KLOG2(%a6),%fp0	| ...FINAL ADD
+	bra	t_frcinx
+
+
+LOGNEAR1:
+|--REGISTERS SAVED: FPCR, FP1. FP0 CONTAINS THE INPUT.
+	fmovex	%fp0,%fp1
+	fsubs	one,%fp1		| ...FP1 IS X-1
+	fadds	one,%fp0		| ...FP0 IS X+1
+	faddx	%fp1,%fp1		| ...FP1 IS 2(X-1)
+|--LOG(X) = LOG(1+U/2)-LOG(1-U/2) WHICH IS AN ODD POLYNOMIAL
+|--IN U, U = 2(X-1)/(X+1) = FP1/FP0
+
+LP1CONT2:
+|--THIS IS A RE-ENTRY POINT FOR LOGNP1
+	fdivx	%fp0,%fp1		| ...FP1 IS U
+	fmovemx %fp2-%fp2/%fp3,-(%sp)	 | ...SAVE FP2
+|--REGISTERS SAVED ARE NOW FPCR,FP1,FP2,FP3
+|--LET V=U*U, W=V*V, CALCULATE
+|--U + U*V*(B1 + V*(B2 + V*(B3 + V*(B4 + V*B5)))) BY
+|--U + U*V*(  [B1 + W*(B3 + W*B5)]  +  [V*(B2 + W*B4)]  )
+	fmovex	%fp1,%fp0
+	fmulx	%fp0,%fp0	| ...FP0 IS V
+	fmovex	%fp1,SAVEU(%a6) | ...STORE U IN MEMORY, FREE FP1
+	fmovex	%fp0,%fp1
+	fmulx	%fp1,%fp1	| ...FP1 IS W
+
+	fmoved	LOGB5,%fp3
+	fmoved	LOGB4,%fp2
+
+	fmulx	%fp1,%fp3	| ...W*B5
+	fmulx	%fp1,%fp2	| ...W*B4
+
+	faddd	LOGB3,%fp3 | ...B3+W*B5
+	faddd	LOGB2,%fp2 | ...B2+W*B4
+
+	fmulx	%fp3,%fp1	| ...W*(B3+W*B5), FP3 RELEASED
+
+	fmulx	%fp0,%fp2	| ...V*(B2+W*B4)
+
+	faddd	LOGB1,%fp1 | ...B1+W*(B3+W*B5)
+	fmulx	SAVEU(%a6),%fp0 | ...FP0 IS U*V
+
+	faddx	%fp2,%fp1	| ...B1+W*(B3+W*B5) + V*(B2+W*B4), FP2 RELEASED
+	fmovemx (%sp)+,%fp2-%fp2/%fp3 | ...FP2 RESTORED
+
+	fmulx	%fp1,%fp0	| ...U*V*( [B1+W*(B3+W*B5)] + [V*(B2+W*B4)] )
+
+	fmovel	%d1,%fpcr
+	faddx	SAVEU(%a6),%fp0
+	bra	t_frcinx
+	rts
+
+LOGNEG:
+|--REGISTERS SAVED FPCR. LOG(-VE) IS INVALID
+	bra	t_operr
+
+	.global	slognp1d
+slognp1d:
+|--ENTRY POINT FOR LOG(1+Z) FOR DENORMALIZED INPUT
+| Simply return the denorm
+
+	bra	t_extdnrm
+
+	.global	slognp1
+slognp1:
+|--ENTRY POINT FOR LOG(1+X) FOR X FINITE, NON-ZERO, NOT NAN'S
+
+	fmovex	(%a0),%fp0	| ...LOAD INPUT
+	fabsx	%fp0		|test magnitude
+	fcmpx	LTHOLD,%fp0	|compare with min threshold
+	fbgt	LP1REAL		|if greater, continue
+	fmovel	#0,%fpsr		|clr N flag from compare
+	fmovel	%d1,%fpcr
+	fmovex	(%a0),%fp0	|return signed argument
+	bra	t_frcinx
+
+LP1REAL:
+	fmovex	(%a0),%fp0	| ...LOAD INPUT
+	movel	#0x00000000,ADJK(%a6)
+	fmovex	%fp0,%fp1	| ...FP1 IS INPUT Z
+	fadds	one,%fp0	| ...X := ROUND(1+Z)
+	fmovex	%fp0,X(%a6)
+	movew	XFRAC(%a6),XDCARE(%a6)
+	movel	X(%a6),%d0
+	cmpil	#0,%d0
+	ble	LP1NEG0	| ...LOG OF ZERO OR -VE
+	cmp2l	BOUNDS2,%d0
+	bcs	LOGMAIN	| ...BOUNDS2 IS [1/2,3/2]
+|--IF 1+Z > 3/2 OR 1+Z < 1/2, THEN X, WHICH IS ROUNDING 1+Z,
+|--CONTAINS AT LEAST 63 BITS OF INFORMATION OF Z. IN THAT CASE,
+|--SIMPLY INVOKE LOG(X) FOR LOG(1+Z).
+
+LP1NEAR1:
+|--NEXT SEE IF EXP(-1/16) < X < EXP(1/16)
+	cmp2l	BOUNDS1,%d0
+	bcss	LP1CARE
+
+LP1ONE16:
+|--EXP(-1/16) < X < EXP(1/16). LOG(1+Z) = LOG(1+U/2) - LOG(1-U/2)
+|--WHERE U = 2Z/(2+Z) = 2Z/(1+X).
+	faddx	%fp1,%fp1	| ...FP1 IS 2Z
+	fadds	one,%fp0	| ...FP0 IS 1+X
+|--U = FP1/FP0
+	bra	LP1CONT2
+
+LP1CARE:
+|--HERE WE USE THE USUAL TABLE DRIVEN APPROACH. CARE HAS TO BE
+|--TAKEN BECAUSE 1+Z CAN HAVE 67 BITS OF INFORMATION AND WE MUST
+|--PRESERVE ALL THE INFORMATION. BECAUSE 1+Z IS IN [1/2,3/2],
+|--THERE ARE ONLY TWO CASES.
+|--CASE 1: 1+Z < 1, THEN K = -1 AND Y-F = (2-F) + 2Z
+|--CASE 2: 1+Z > 1, THEN K = 0  AND Y-F = (1-F) + Z
+|--ON RETURNING TO LP1CONT1, WE MUST HAVE K IN FP1, ADDRESS OF
+|--(1/F) IN A0, Y-F IN FP0, AND FP2 SAVED.
+
+	movel	XFRAC(%a6),FFRAC(%a6)
+	andil	#0xFE000000,FFRAC(%a6)
+	oril	#0x01000000,FFRAC(%a6)	| ...F OBTAINED
+	cmpil	#0x3FFF8000,%d0	| ...SEE IF 1+Z > 1
+	bges	KISZERO
+
+KISNEG1:
+	fmoves	TWO,%fp0
+	movel	#0x3fff0000,F(%a6)
+	clrl	F+8(%a6)
+	fsubx	F(%a6),%fp0	| ...2-F
+	movel	FFRAC(%a6),%d0
+	andil	#0x7E000000,%d0
+	asrl	#8,%d0
+	asrl	#8,%d0
+	asrl	#4,%d0		| ...D0 CONTAINS DISPLACEMENT FOR 1/F
+	faddx	%fp1,%fp1		| ...GET 2Z
+	fmovemx %fp2-%fp2/%fp3,-(%sp)	| ...SAVE FP2
+	faddx	%fp1,%fp0		| ...FP0 IS Y-F = (2-F)+2Z
+	lea	LOGTBL,%a0	| ...A0 IS ADDRESS OF 1/F
+	addal	%d0,%a0
+	fmoves	negone,%fp1	| ...FP1 IS K = -1
+	bra	LP1CONT1
+
+KISZERO:
+	fmoves	one,%fp0
+	movel	#0x3fff0000,F(%a6)
+	clrl	F+8(%a6)
+	fsubx	F(%a6),%fp0		| ...1-F
+	movel	FFRAC(%a6),%d0
+	andil	#0x7E000000,%d0
+	asrl	#8,%d0
+	asrl	#8,%d0
+	asrl	#4,%d0
+	faddx	%fp1,%fp0		| ...FP0 IS Y-F
+	fmovemx %fp2-%fp2/%fp3,-(%sp)	| ...FP2 SAVED
+	lea	LOGTBL,%a0
+	addal	%d0,%a0		| ...A0 IS ADDRESS OF 1/F
+	fmoves	zero,%fp1	| ...FP1 IS K = 0
+	bra	LP1CONT1
+
+LP1NEG0:
+|--FPCR SAVED. D0 IS X IN COMPACT FORM.
+	cmpil	#0,%d0
+	blts	LP1NEG
+LP1ZERO:
+	fmoves	negone,%fp0
+
+	fmovel	%d1,%fpcr
+	bra t_dz
+
+LP1NEG:
+	fmoves	zero,%fp0
+
+	fmovel	%d1,%fpcr
+	bra	t_operr
+
+	|end
diff --git a/arch/m68k/fpsp040/smovecr.S b/arch/m68k/fpsp040/smovecr.S
new file mode 100644
index 0000000..a0127fa
--- /dev/null
+++ b/arch/m68k/fpsp040/smovecr.S
@@ -0,0 +1,162 @@
+|
+|	smovecr.sa 3.1 12/10/90
+|
+|	The entry point sMOVECR returns the constant at the
+|	offset given in the instruction field.
+|
+|	Input: An offset in the instruction word.
+|
+|	Output:	The constant rounded to the user's rounding
+|		mode unchecked for overflow.
+|
+|	Modified: fp0.
+|
+|
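+|	A rough C sketch of the offset decode performed below (strings
+|	stand in for the ROM tables; offsets outside the defined ranges
+|	read as zero):
+|
+|	const char *movecr_source_sketch(unsigned offset)
+|	{
+|		if (offset == 0x00)
+|			return "pi table (PIRN/PIRZRM/PIRP)";
+|		if (offset >= 0x0b && offset <= 0x0e)
+|			return "small-constant table (SMALRN/...)";
+|		if (offset >= 0x30 && offset <= 0x3f)
+|			return "big-constant table (BIGRN/...)";
+|		return "zero";
+|	}
+|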
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|SMOVECR	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section 8
+
+#include "fpsp.h"
+
+	|xref	nrm_set
+	|xref	round
+	|xref	PIRN
+	|xref	PIRZRM
+	|xref	PIRP
+	|xref	SMALRN
+	|xref	SMALRZRM
+	|xref	SMALRP
+	|xref	BIGRN
+	|xref	BIGRZRM
+	|xref	BIGRP
+
+FZERO:	.long	00000000
+|
+|	FMOVECR
+|
+	.global	smovcr
+smovcr:
+	bfextu	CMDREG1B(%a6){#9:#7},%d0 |get offset
+	bfextu	USER_FPCR(%a6){#26:#2},%d1 |get rmode
+|
+| check range of offset
+|
+	tstb	%d0		|if zero, offset is to pi
+	beqs	PI_TBL		|it is pi
+	cmpib	#0x0a,%d0		|check range $01 - $0a
+	bles	Z_VAL		|if in this range, return zero
+	cmpib	#0x0e,%d0		|check range $0b - $0e
+	bles	SM_TBL		|valid constants in this range
+	cmpib	#0x2f,%d0		|check range $10 - $2f
+	bles	Z_VAL		|if in this range, return zero
+	cmpib	#0x3f,%d0		|check range $30 - $3f
+	ble	BG_TBL		|valid constants in this range
+Z_VAL:
+	fmoves	FZERO,%fp0
+	rts
+PI_TBL:
+	tstb	%d1		|offset is zero, check for rmode
+	beqs	PI_RN		|if zero, rn mode
+	cmpib	#0x3,%d1		|check for rp
+	beqs	PI_RP		|if 3, rp mode
+PI_RZRM:
+	leal	PIRZRM,%a0	|rmode is rz or rm, load PIRZRM in a0
+	bra	set_finx
+PI_RN:
+	leal	PIRN,%a0		|rmode is rn, load PIRN in a0
+	bra	set_finx
+PI_RP:
+	leal	PIRP,%a0		|rmode is rp, load PIRP in a0
+	bra	set_finx
+SM_TBL:
+	subil	#0xb,%d0		|make offset in 0 - 3 range
+	tstb	%d1		|check for rmode
+	beqs	SM_RN		|if zero, rn mode
+	cmpib	#0x3,%d1		|check for rp
+	beqs	SM_RP		|if 3, rp mode
+SM_RZRM:
+	leal	SMALRZRM,%a0	|rmode is rz or rm, load SMRZRM in a0
+	cmpib	#0x2,%d0		|check if result is inex
+	ble	set_finx	|if 0 - 2, it is inexact
+	bra	no_finx		|if 3, it is exact
+SM_RN:
+	leal	SMALRN,%a0	|rmode is rn, load SMRN in a0
+	cmpib	#0x2,%d0		|check if result is inex
+	ble	set_finx	|if 0 - 2, it is inexact
+	bra	no_finx		|if 3, it is exact
+SM_RP:
+	leal	SMALRP,%a0	|rmode is rp, load SMRP in a0
+	cmpib	#0x2,%d0		|check if result is inex
+	ble	set_finx	|if 0 - 2, it is inexact
+	bra	no_finx		|if 3, it is exact
+BG_TBL:
+	subil	#0x30,%d0		|make offset in 0 - f range
+	tstb	%d1		|check for rmode
+	beqs	BG_RN		|if zero, rn mode
+	cmpib	#0x3,%d1		|check for rp
+	beqs	BG_RP		|if 3, rp mode
+BG_RZRM:
+	leal	BIGRZRM,%a0	|rmode is rz or rm, load BGRZRM in a0
+	cmpib	#0x1,%d0		|check if result is inex
+	ble	set_finx	|if 0 - 1, it is inexact
+	cmpib	#0x7,%d0		|second check
+	ble	no_finx		|if 0 - 7, it is exact
+	bra	set_finx	|if 8 - f, it is inexact
+BG_RN:
+	leal	BIGRN,%a0	|rmode is rn, load BGRN in a0
+	cmpib	#0x1,%d0		|check if result is inex
+	ble	set_finx	|if 0 - 1, it is inexact
+	cmpib	#0x7,%d0		|second check
+	ble	no_finx		|if 0 - 7, it is exact
+	bra	set_finx	|if 8 - f, it is inexact
+BG_RP:
+	leal	BIGRP,%a0	|rmode is rp, load BIGRP in a0
+	cmpib	#0x1,%d0		|check if result is inex
+	ble	set_finx	|if 0 - 1, it is inexact
+	cmpib	#0x7,%d0		|second check
+	ble	no_finx		|if 0 - 7, it is exact
+|	bra	set_finx	;if 8 - f, it is inexact
+set_finx:
+	orl	#inx2a_mask,USER_FPSR(%a6) |set inex2/ainex
+no_finx:
+	mulul	#12,%d0			|use offset to point into tables
+	movel	%d1,L_SCR1(%a6)		|load mode for round call
+	bfextu	USER_FPCR(%a6){#24:#2},%d1	|get precision
+	tstl	%d1			|check if extended precision
+|
+| Precision is extended
+|
+	bnes	not_ext			|branch if precision is not extended
+	fmovemx (%a0,%d0),%fp0-%fp0		|return result in fp0
+	rts
+|
+| Precision is single or double
+|
+not_ext:
+	swap	%d1			|rnd prec in upper word of d1
+	addl	L_SCR1(%a6),%d1		|merge rmode in low word of d1
+	movel	(%a0,%d0),FP_SCR1(%a6)	|load first word to temp storage
+	movel	4(%a0,%d0),FP_SCR1+4(%a6)	|load second word
+	movel	8(%a0,%d0),FP_SCR1+8(%a6)	|load third word
+	clrl	%d0			|clear g,r,s
+	lea	FP_SCR1(%a6),%a0
+	btstb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)		|convert to internal ext. format
+
+	bsr	round			|go round the mantissa
+
+	bfclr	LOCAL_SGN(%a0){#0:#8}	|convert back to IEEE ext format
+	beqs	fin_fcr
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+fin_fcr:
+	fmovemx (%a0),%fp0-%fp0
+	rts
+
+	|end
diff --git a/arch/m68k/fpsp040/srem_mod.S b/arch/m68k/fpsp040/srem_mod.S
new file mode 100644
index 0000000..8c8d7f5
--- /dev/null
+++ b/arch/m68k/fpsp040/srem_mod.S
@@ -0,0 +1,422 @@
+|
+|	srem_mod.sa 3.1 12/10/90
+|
+|      The entry point sMOD computes the floating point MOD of the
+|      input values X and Y. The entry point sREM computes the floating
+|      point (IEEE) REM of the input values X and Y.
+|
+|      INPUT
+|      -----
+|      Double-extended value Y is pointed to by address in register
+|      A0. Double-extended value X is located in -12(A0). The values
+|      of X and Y are both nonzero and finite, although either or both
+|      of them can be denormalized. The special cases of zeros, NaNs,
+|      and infinities are handled elsewhere.
+|
+|      OUTPUT
+|      ------
+|      FREM(X,Y) or FMOD(X,Y), depending on entry point.
+|
+|       ALGORITHM
+|       ---------
+|
+|       Step 1.  Save and strip signs of X and Y: signX := sign(X),
+|                signY := sign(Y), X := |X|, Y := |Y|,
+|                signQ := signX EOR signY. Record whether MOD or REM
+|                is requested.
+|
+|       Step 2.  Set L := expo(X)-expo(Y), k := 0, Q := 0.
+|                If (L < 0) then
+|                   R := X, go to Step 4.
+|                else
+|                   R := 2^(-L)X, j := L.
+|                endif
+|
+|       Step 3.  Perform MOD(X,Y)
+|            3.1 If R = Y, go to Step 9.
+|            3.2 If R > Y, then { R := R - Y, Q := Q + 1}
+|            3.3 If j = 0, go to Step 4.
+|            3.4 k := k + 1, j := j - 1, Q := 2Q, R := 2R. Go to
+|                Step 3.1.
+|
+|       Step 4.  At this point, R = X - QY = MOD(X,Y). Set
+|                Last_Subtract := false (used in Step 7 below). If
+|                MOD is requested, go to Step 6.
+|
+|       Step 5.  R = MOD(X,Y), but REM(X,Y) is requested.
+|            5.1 If R < Y/2, then R = MOD(X,Y) = REM(X,Y). Go to
+|                Step 6.
+|            5.2 If R > Y/2, then { set Last_Subtract := true,
+|                Q := Q + 1, Y := signY*Y }. Go to Step 6.
+|            5.3 This is the tricky case of R = Y/2. If Q is odd,
+|                then { Q := Q + 1, signX := -signX }.
+|
+|       Step 6.  R := signX*R.
+|
+|       Step 7.  If Last_Subtract = true, R := R - Y.
+|
+|       Step 8.  Return signQ, last 7 bits of Q, and R as required.
+|
+|       Step 9.  At this point, R = 2^(-j)*X - Q Y = Y. Thus,
+|                X = 2^(j)*(Q+1)Y. set Q := 2^(j)*(Q+1),
+|                R := 0. Return signQ, last 7 bits of Q, and R.
+|
+|
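+|       A rough C sketch of the algorithm above for nonzero, finite x
+|       and y, using doubles in place of the packed extended values
+|       (the real code also deposits the sign and low seven bits of Q
+|       in the FPSR quotient byte):
+|
+|	#include <math.h>
+|
+|	double rem_mod_sketch(double x, double y, int want_rem,
+|			      unsigned *q_bits)
+|	{
+|		double ax = fabs(x), ay = fabs(y);
+|		int ex, ey;
+|		(void)frexp(ax, &ex);
+|		(void)frexp(ay, &ey);
+|		long j = ex - ey;		/* L = expo(X) - expo(Y) */
+|		unsigned long q = 0;
+|		double r = (j < 0) ? ax : ldexp(ax, (int)-j);
+|		if (j >= 0) {
+|			for (;;) {		/* Step 3 */
+|				if (r >= ay) { r -= ay; q++; }
+|				if (j == 0) break;
+|				j--; q <<= 1; r *= 2.0;
+|			}
+|		}
+|		if (want_rem &&			/* Steps 5.2 and 5.3 */
+|		    (r > ay / 2 || (r == ay / 2 && (q & 1)))) {
+|			r -= ay;
+|			q++;
+|		}
+|		*q_bits = (unsigned)(q & 0x7f);
+|		return copysign(1.0, x) * r;	/* Step 6 */
+|	}
+|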
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+SREM_MOD:    |idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section    8
+
+#include "fpsp.h"
+
+	.set	Mod_Flag,L_SCR3
+	.set	SignY,FP_SCR3+4
+	.set	SignX,FP_SCR3+8
+	.set	SignQ,FP_SCR3+12
+	.set	Sc_Flag,FP_SCR4
+
+	.set	Y,FP_SCR1
+	.set	Y_Hi,Y+4
+	.set	Y_Lo,Y+8
+
+	.set	R,FP_SCR2
+	.set	R_Hi,R+4
+	.set	R_Lo,R+8
+
+
+Scale:     .long	0x00010000,0x80000000,0x00000000,0x00000000
+
+	|xref	t_avoid_unsupp
+
+        .global        smod
+smod:
+
+   movel               #0,Mod_Flag(%a6)
+   bras                Mod_Rem
+
+        .global        srem
+srem:
+
+   movel               #1,Mod_Flag(%a6)
+
+Mod_Rem:
+|..Save sign of X and Y
+   moveml              %d2-%d7,-(%a7)     | ...save data registers
+   movew               (%a0),%d3
+   movew               %d3,SignY(%a6)
+   andil               #0x00007FFF,%d3   | ...Y := |Y|
+
+|
+   movel               4(%a0),%d4
+   movel               8(%a0),%d5        | ...(D3,D4,D5) is |Y|
+
+   tstl                %d3
+   bnes                Y_Normal
+
+   movel               #0x00003FFE,%d3	| ...$3FFD + 1
+   tstl                %d4
+   bnes                HiY_not0
+
+HiY_0:
+   movel               %d5,%d4
+   clrl                %d5
+   subil               #32,%d3
+   clrl                %d6
+   bfffo                %d4{#0:#32},%d6
+   lsll                %d6,%d4
+   subl                %d6,%d3           | ...(D3,D4,D5) is normalized
+|                                       ...with bias $7FFD
+   bras                Chk_X
+
+HiY_not0:
+   clrl                %d6
+   bfffo                %d4{#0:#32},%d6
+   subl                %d6,%d3
+   lsll                %d6,%d4
+   movel               %d5,%d7           | ...a copy of D5
+   lsll                %d6,%d5
+   negl                %d6
+   addil               #32,%d6
+   lsrl                %d6,%d7
+   orl                 %d7,%d4           | ...(D3,D4,D5) normalized
+|                                       ...with bias $7FFD
+   bras                Chk_X
+
+Y_Normal:
+   addil               #0x00003FFE,%d3   | ...(D3,D4,D5) normalized
+|                                       ...with bias $7FFD
+
+Chk_X:
+   movew               -12(%a0),%d0
+   movew               %d0,SignX(%a6)
+   movew               SignY(%a6),%d1
+   eorl                %d0,%d1
+   andil               #0x00008000,%d1
+   movew               %d1,SignQ(%a6)	| ...sign(Q) obtained
+   andil               #0x00007FFF,%d0
+   movel               -8(%a0),%d1
+   movel               -4(%a0),%d2       | ...(D0,D1,D2) is |X|
+   tstl                %d0
+   bnes                X_Normal
+   movel               #0x00003FFE,%d0
+   tstl                %d1
+   bnes                HiX_not0
+
+HiX_0:
+   movel               %d2,%d1
+   clrl                %d2
+   subil               #32,%d0
+   clrl                %d6
+   bfffo                %d1{#0:#32},%d6
+   lsll                %d6,%d1
+   subl                %d6,%d0           | ...(D0,D1,D2) is normalized
+|                                       ...with bias $7FFD
+   bras                Init
+
+HiX_not0:
+   clrl                %d6
+   bfffo                %d1{#0:#32},%d6
+   subl                %d6,%d0
+   lsll                %d6,%d1
+   movel               %d2,%d7           | ...a copy of D2
+   lsll                %d6,%d2
+   negl                %d6
+   addil               #32,%d6
+   lsrl                %d6,%d7
+   orl                 %d7,%d1           | ...(D0,D1,D2) normalized
+|                                       ...with bias $7FFD
+   bras                Init
+
+X_Normal:
+   addil               #0x00003FFE,%d0   | ...(D0,D1,D2) normalized
+|                                       ...with bias $7FFD
+
+Init:
+|
+   movel               %d3,L_SCR1(%a6)   | ...save biased expo(Y)
+   movel		%d0,L_SCR2(%a6)	|save d0
+   subl                %d3,%d0           | ...L := expo(X)-expo(Y)
+|   Move.L               D0,L            ...D0 is j
+   clrl                %d6              | ...D6 := carry <- 0
+   clrl                %d3              | ...D3 is Q
+   moveal              #0,%a1           | ...A1 is k; j+k=L, Q=0
+
+|..(Carry,D1,D2) is R
+   tstl                %d0
+   bges                Mod_Loop
+
+|..expo(X) < expo(Y). Thus X = mod(X,Y)
+|
+   movel		L_SCR2(%a6),%d0	|restore d0
+   bra                Get_Mod
+
+|..At this point  R = 2^(-L)X; Q = 0; k = 0; and  k+j = L
+
+
+Mod_Loop:
+   tstl                %d6              | ...test carry bit
+   bgts                R_GT_Y
+
+|..At this point carry = 0, R = (D1,D2), Y = (D4,D5)
+   cmpl                %d4,%d1           | ...compare hi(R) and hi(Y)
+   bnes                R_NE_Y
+   cmpl                %d5,%d2           | ...compare lo(R) and lo(Y)
+   bnes                R_NE_Y
+
+|..At this point, R = Y
+   bra                Rem_is_0
+
+R_NE_Y:
+|..use the borrow of the previous compare
+   bcss                R_LT_Y          | ...borrow is set iff R < Y
+
+R_GT_Y:
+|..If Carry is set, then Y < (Carry,D1,D2) < 2Y. Otherwise, Carry = 0
+|..and Y < (D1,D2) < 2Y. Either way, perform R - Y
+   subl                %d5,%d2           | ...lo(R) - lo(Y)
+   subxl               %d4,%d1           | ...hi(R) - hi(Y)
+   clrl                %d6              | ...clear carry
+   addql               #1,%d3           | ...Q := Q + 1
+
+R_LT_Y:
+|..At this point, Carry=0, R < Y. R = 2^(k-L)X - QY; k+j = L; j >= 0.
+   tstl                %d0              | ...see if j = 0.
+   beqs                PostLoop
+
+   addl                %d3,%d3           | ...Q := 2Q
+   addl                %d2,%d2           | ...lo(R) = 2lo(R)
+   roxll               #1,%d1           | ...hi(R) = 2hi(R) + carry
+   scs                  %d6              | ...set Carry if 2(R) overflows
+   addql               #1,%a1           | ...k := k+1
+   subql               #1,%d0           | ...j := j - 1
+|..At this point, R=(Carry,D1,D2) = 2^(k-L)X - QY, j+k=L, j >= 0, R < 2Y.
+
+   bras                Mod_Loop
+
+PostLoop:
+|..k = L, j = 0, Carry = 0, R = (D1,D2) = X - QY, R < Y.
+
+|..normalize R.
+   movel               L_SCR1(%a6),%d0           | ...new biased expo of R
+   tstl                %d1
+   bnes                HiR_not0
+
+HiR_0:
+   movel               %d2,%d1
+   clrl                %d2
+   subil               #32,%d0
+   clrl                %d6
+   bfffo                %d1{#0:#32},%d6
+   lsll                %d6,%d1
+   subl                %d6,%d0           | ...(D0,D1,D2) is normalized
+|                                       ...with bias $7FFD
+   bras                Get_Mod
+
+HiR_not0:
+   clrl                %d6
+   bfffo                %d1{#0:#32},%d6
+   bmis                Get_Mod         | ...already normalized
+   subl                %d6,%d0
+   lsll                %d6,%d1
+   movel               %d2,%d7           | ...a copy of D2
+   lsll                %d6,%d2
+   negl                %d6
+   addil               #32,%d6
+   lsrl                %d6,%d7
+   orl                 %d7,%d1           | ...(D0,D1,D2) normalized
+
+|
+Get_Mod:
+   cmpil		#0x000041FE,%d0
+   bges		No_Scale
+Do_Scale:
+   movew		%d0,R(%a6)
+   clrw		R+2(%a6)
+   movel		%d1,R_Hi(%a6)
+   movel		%d2,R_Lo(%a6)
+   movel		L_SCR1(%a6),%d6
+   movew		%d6,Y(%a6)
+   clrw		Y+2(%a6)
+   movel		%d4,Y_Hi(%a6)
+   movel		%d5,Y_Lo(%a6)
+   fmovex		R(%a6),%fp0		| ...no exception
+   movel		#1,Sc_Flag(%a6)
+   bras		ModOrRem
+No_Scale:
+   movel		%d1,R_Hi(%a6)
+   movel		%d2,R_Lo(%a6)
+   subil		#0x3FFE,%d0
+   movew		%d0,R(%a6)
+   clrw		R+2(%a6)
+   movel		L_SCR1(%a6),%d6
+   subil		#0x3FFE,%d6
+   movel		%d6,L_SCR1(%a6)
+   fmovex		R(%a6),%fp0
+   movew		%d6,Y(%a6)
+   movel		%d4,Y_Hi(%a6)
+   movel		%d5,Y_Lo(%a6)
+   movel		#0,Sc_Flag(%a6)
+
+|
+
+
+ModOrRem:
+   movel               Mod_Flag(%a6),%d6
+   beqs                Fix_Sign
+
+   movel               L_SCR1(%a6),%d6           | ...new biased expo(Y)
+   subql               #1,%d6           | ...biased expo(Y/2)
+   cmpl                %d6,%d0
+   blts                Fix_Sign
+   bgts                Last_Sub
+
+   cmpl                %d4,%d1
+   bnes                Not_EQ
+   cmpl                %d5,%d2
+   bnes                Not_EQ
+   bra                Tie_Case
+
+Not_EQ:
+   bcss                Fix_Sign
+
+Last_Sub:
+|
+   fsubx		Y(%a6),%fp0		| ...no exceptions
+   addql               #1,%d3           | ...Q := Q + 1
+
+|
+
+Fix_Sign:
+|..Get sign of X
+   movew               SignX(%a6),%d6
+   bges		Get_Q
+   fnegx		%fp0
+
+|..Get Q
+|
+Get_Q:
+   clrl		%d6
+   movew               SignQ(%a6),%d6        | ...D6 is sign(Q)
+   movel               #8,%d7
+   lsrl                %d7,%d6
+   andil               #0x0000007F,%d3   | ...7 bits of Q
+   orl                 %d6,%d3           | ...sign and bits of Q
+   swap                 %d3
+   fmovel              %fpsr,%d6
+   andil               #0xFF00FFFF,%d6
+   orl                 %d3,%d6
+   fmovel              %d6,%fpsr         | ...put Q in fpsr
+
+|
+Restore:
+   moveml              (%a7)+,%d2-%d7
+   fmovel              USER_FPCR(%a6),%fpcr
+   movel               Sc_Flag(%a6),%d0
+   beqs                Finish
+   fmulx		Scale(%pc),%fp0	| ...may cause underflow
+   bra			t_avoid_unsupp	|check for denorm as a
+|					;result of the scaling
+
+Finish:
+	fmovex		%fp0,%fp0		|capture exceptions & round
+	rts
+
+Rem_is_0:
+|..R = 2^(-j)X - Q Y = Y, thus R = 0 and quotient = 2^j (Q+1)
+   addql               #1,%d3
+   cmpil               #8,%d0           | ...D0 is j
+   bges                Q_Big
+
+   lsll                %d0,%d3
+   bras                Set_R_0
+
+Q_Big:
+   clrl                %d3
+
+Set_R_0:
+   fmoves		#0x00000000,%fp0
+   movel		#0,Sc_Flag(%a6)
+   bra                Fix_Sign
+
+Tie_Case:
+|..Check parity of Q
+   movel               %d3,%d6
+   andil               #0x00000001,%d6
+   tstl                %d6
+   beq                Fix_Sign	| ...Q is even
+
+|..Q is odd, Q := Q + 1, signX := -signX
+   addql               #1,%d3
+   movew               SignX(%a6),%d6
+   eoril               #0x00008000,%d6
+   movew               %d6,SignX(%a6)
+   bra                Fix_Sign
+
+   |end
diff --git a/arch/m68k/fpsp040/ssin.S b/arch/m68k/fpsp040/ssin.S
new file mode 100644
index 0000000..043c91c
--- /dev/null
+++ b/arch/m68k/fpsp040/ssin.S
@@ -0,0 +1,746 @@
+|
+|	ssin.sa 3.3 7/29/91
+|
+|	The entry point sSIN computes the sine of an input argument,
+|	sCOS computes the cosine, and sSINCOS computes both. The
+|	corresponding entry points with a "d" suffix compute the same
+|	function values for denormalized inputs.
+|
+|	Input: Double-extended number X in location pointed to
+|		by address register a0.
+|
+|	Output: The function value sin(X) or cos(X) returned in Fp0 if SIN or
+|		COS is requested. Otherwise, for SINCOS, sin(X) is returned
+|		in Fp0, and cos(X) is returned in Fp1.
+|
+|	Modifies: Fp0 for SIN or COS; both Fp0 and Fp1 for SINCOS.
+|
+|	Accuracy and Monotonicity: The returned result is within 1 ulp in
+|		64 significant bits, i.e. within 0.5001 ulp to 53 bits if the
+|		result is subsequently rounded to double precision. The
+|		result is provably monotonic in double precision.
+|
+|	Speed: The programs sSIN and sCOS take approximately 150 cycles for
+|		input argument X such that |X| < 15Pi, which is the usual
+|		situation. The speed for sSINCOS is approximately 190 cycles.
+|
+|	Algorithm:
+|
+|	SIN and COS:
+|	1. If SIN is invoked, set AdjN := 0; otherwise, set AdjN := 1.
+|
+|	2. If |X| >= 15Pi or |X| < 2**(-40), go to 7.
+|
+|	3. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let
+|		k = N mod 4, so in particular, k = 0,1,2,or 3. Overwrite
+|		k by k := k + AdjN.
+|
+|	4. If k is even, go to 6.
+|
+|	5. (k is odd) Set j := (k-1)/2, sgn := (-1)**j. Return sgn*cos(r)
+|		where cos(r) is approximated by an even polynomial in r,
+|		1 + r*r*(B1+s*(B2+ ... + s*B8)),	s = r*r.
+|		Exit.
+|
+|	6. (k is even) Set j := k/2, sgn := (-1)**j. Return sgn*sin(r)
+|		where sin(r) is approximated by an odd polynomial in r
+|		r + r*s*(A1+s*(A2+ ... + s*A7)),	s = r*r.
+|		Exit.
+|
+|	7. If |X| > 1, go to 9.
+|
+|	8. (|X|<2**(-40)) If SIN is invoked, return X; otherwise return 1.
+|
+|	9. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back to 3.
+|
+|	SINCOS:
+|	1. If |X| >= 15Pi or |X| < 2**(-40), go to 6.
+|
+|	2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let
+|		k = N mod 4, so in particular, k = 0,1,2,or 3.
+|
+|	3. If k is even, go to 5.
+|
+|	4. (k is odd) Set j1 := (k-1)/2, j2 := j1 (EOR) (k mod 2), i.e.
+|		j1 exclusive or with the l.s.b. of k.
+|		sgn1 := (-1)**j1, sgn2 := (-1)**j2.
+|		SIN(X) = sgn1 * cos(r) and COS(X) = sgn2*sin(r) where
+|		sin(r) and cos(r) are computed as odd and even polynomials
+|		in r, respectively. Exit
+|
+|	5. (k is even) Set j1 := k/2, sgn1 := (-1)**j1.
+|		SIN(X) = sgn1 * sin(r) and COS(X) = sgn1*cos(r) where
+|		sin(r) and cos(r) are computed as odd and even polynomials
+|		in r, respectively. Exit
+|
+|	6. If |X| > 1, go to 8.
+|
+|	7. (|X|<2**(-40)) SIN(X) = X and COS(X) = 1. Exit.
+|
+|	8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back to 2.
+|
+
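
As a rough C model of steps 3-6 above (and of the ADJN dispatch used by ssin/scos below): the argument is reduced to r = X - N*(Pi/2) and the quadrant k = (N + AdjN) mod 4 selects the polynomial and the sign. The sketch leans on libm for the reduction and for sin/cos of the reduced argument, so it only illustrates the control flow, not the FPSP's table-based reduction or its accuracy:

    #include <math.h>

    /* Model of steps 3-6: reduce X to r = X - N*(pi/2), |r| <= pi/4, then
     * dispatch on k = (N + AdjN) mod 4.  adjn is 0 for SIN and 1 for COS,
     * matching the ADJN flag set by ssin/scos.  The real code evaluates
     * sin(r) and cos(r) with the SINA1..A7 and COSB1..B8 polynomials and
     * reads N*(pi/2) from PITBL in two pieces.
     */
    static double fpsp_sincos_model(double x, int adjn)
    {
        double n = nearbyint(x * (2.0 / M_PI));   /* N = round(X * 2/pi)     */
        double r = x - n * (M_PI / 2.0);          /* reduced argument        */
        long   k = ((long)n + adjn) & 3;          /* quadrant, 0..3          */

        switch (k) {
        case 0:  return  sin(r);                  /* k even, j = 0, sgn = +1 */
        case 1:  return  cos(r);                  /* k odd,  j = 0, sgn = +1 */
        case 2:  return -sin(r);                  /* k even, j = 1, sgn = -1 */
        default: return -cos(r);                  /* k odd,  j = 1, sgn = -1 */
        }
    }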
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|SSIN	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+BOUNDS1:	.long 0x3FD78000,0x4004BC7E
+TWOBYPI:	.long 0x3FE45F30,0x6DC9C883
+
+SINA7:	.long 0xBD6AAA77,0xCCC994F5
+SINA6:	.long 0x3DE61209,0x7AAE8DA1
+
+SINA5:	.long 0xBE5AE645,0x2A118AE4
+SINA4:	.long 0x3EC71DE3,0xA5341531
+
+SINA3:	.long 0xBF2A01A0,0x1A018B59,0x00000000,0x00000000
+
+SINA2:	.long 0x3FF80000,0x88888888,0x888859AF,0x00000000
+
+SINA1:	.long 0xBFFC0000,0xAAAAAAAA,0xAAAAAA99,0x00000000
+
+COSB8:	.long 0x3D2AC4D0,0xD6011EE3
+COSB7:	.long 0xBDA9396F,0x9F45AC19
+
+COSB6:	.long 0x3E21EED9,0x0612C972
+COSB5:	.long 0xBE927E4F,0xB79D9FCF
+
+COSB4:	.long 0x3EFA01A0,0x1A01D423,0x00000000,0x00000000
+
+COSB3:	.long 0xBFF50000,0xB60B60B6,0x0B61D438,0x00000000
+
+COSB2:	.long 0x3FFA0000,0xAAAAAAAA,0xAAAAAB5E
+COSB1:	.long 0xBF000000
+
+INVTWOPI: .long 0x3FFC0000,0xA2F9836E,0x4E44152A
+
+TWOPI1:	.long 0x40010000,0xC90FDAA2,0x00000000,0x00000000
+TWOPI2:	.long 0x3FDF0000,0x85A308D4,0x00000000,0x00000000
+
+	|xref	PITBL
+
+	.set	INARG,FP_SCR4
+
+	.set	X,FP_SCR5
+	.set	XDCARE,X+2
+	.set	XFRAC,X+4
+
+	.set	RPRIME,FP_SCR1
+	.set	SPRIME,FP_SCR2
+
+	.set	POSNEG1,L_SCR1
+	.set	TWOTO63,L_SCR1
+
+	.set	ENDFLAG,L_SCR2
+	.set	N,L_SCR2
+
+	.set	ADJN,L_SCR3
+
+	| xref	t_frcinx
+	|xref	t_extdnrm
+	|xref	sto_cos
+
+	.global	ssind
+ssind:
+|--SIN(X) = X FOR DENORMALIZED X
+	bra		t_extdnrm
+
+	.global	scosd
+scosd:
+|--COS(X) = 1 FOR DENORMALIZED X
+
+	fmoves		#0x3F800000,%fp0
+|
+|	9D25B Fix: Sometimes the previous fmove.s sets fpsr bits
+|
+	fmovel		#0,%fpsr
+|
+	bra		t_frcinx
+
+	.global	ssin
+ssin:
+|--SET ADJN TO 0
+	movel		#0,ADJN(%a6)
+	bras		SINBGN
+
+	.global	scos
+scos:
+|--SET ADJN TO 1
+	movel		#1,ADJN(%a6)
+
+SINBGN:
+|--SAVE FPCR, FP1. CHECK IF |X| IS TOO SMALL OR LARGE
+
+	fmovex		(%a0),%fp0	| ...LOAD INPUT
+
+	movel		(%a0),%d0
+	movew		4(%a0),%d0
+	fmovex		%fp0,X(%a6)
+	andil		#0x7FFFFFFF,%d0		| ...COMPACTIFY X
+
+	cmpil		#0x3FD78000,%d0		| ...|X| >= 2**(-40)?
+	bges		SOK1
+	bra		SINSM
+
+SOK1:
+	cmpil		#0x4004BC7E,%d0		| ...|X| < 15 PI?
+	blts		SINMAIN
+	bra		REDUCEX
+
+SINMAIN:
+|--THIS IS THE USUAL CASE, |X| <= 15 PI.
+|--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+	fmovex		%fp0,%fp1
+	fmuld		TWOBYPI,%fp1	| ...X*2/PI
+
+|--HIDE THE NEXT THREE INSTRUCTIONS
+	lea		PITBL+0x200,%a1 | ...TABLE OF N*PI/2, N = -32,...,32
+
+
+|--FP1 IS NOW READY
+	fmovel		%fp1,N(%a6)		| ...CONVERT TO INTEGER
+
+	movel		N(%a6),%d0
+	asll		#4,%d0
+	addal		%d0,%a1	| ...A1 IS THE ADDRESS OF N*PIBY2
+|				...WHICH IS IN TWO PIECES Y1 & Y2
+
+	fsubx		(%a1)+,%fp0	| ...X-Y1
+|--HIDE THE NEXT ONE
+	fsubs		(%a1),%fp0	| ...FP0 IS R = (X-Y1)-Y2
+
+SINCONT:
+|--continuation from REDUCEX
+
+|--GET N+ADJN AND SEE IF SIN(R) OR COS(R) IS NEEDED
+	movel		N(%a6),%d0
+	addl		ADJN(%a6),%d0	| ...SEE IF D0 IS ODD OR EVEN
+	rorl		#1,%d0	| ...D0 WAS ODD IFF D0 IS NEGATIVE
+	cmpil		#0,%d0
+	blt		COSPOLY
+
+SINPOLY:
+|--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
+|--THEN WE RETURN	SGN*SIN(R). SGN*SIN(R) IS COMPUTED BY
+|--R' + R'*S*(A1 + S(A2 + S(A3 + S(A4 + ... + SA7)))), WHERE
+|--R' = SGN*R, S=R*R. THIS CAN BE REWRITTEN AS
+|--R' + R'*S*( [A1+T(A3+T(A5+TA7))] + [S(A2+T(A4+TA6))])
+|--WHERE T=S*S.
+|--NOTE THAT A3 THROUGH A7 ARE STORED IN DOUBLE PRECISION
+|--WHILE A1 AND A2 ARE IN DOUBLE-EXTENDED FORMAT.
+	fmovex		%fp0,X(%a6)	| ...X IS R
+	fmulx		%fp0,%fp0	| ...FP0 IS S
+|---HIDE THE NEXT TWO WHILE WAITING FOR FP0
+	fmoved		SINA7,%fp3
+	fmoved		SINA6,%fp2
+|--FP0 IS NOW READY
+	fmovex		%fp0,%fp1
+	fmulx		%fp1,%fp1	| ...FP1 IS T
+|--HIDE THE NEXT TWO WHILE WAITING FOR FP1
+
+	rorl		#1,%d0
+	andil		#0x80000000,%d0
+|				...LEAST SIG. BIT OF D0 IN SIGN POSITION
+	eorl		%d0,X(%a6)	| ...X IS NOW R'= SGN*R
+
+	fmulx		%fp1,%fp3	| ...TA7
+	fmulx		%fp1,%fp2	| ...TA6
+
+	faddd		SINA5,%fp3 | ...A5+TA7
+	faddd		SINA4,%fp2 | ...A4+TA6
+
+	fmulx		%fp1,%fp3	| ...T(A5+TA7)
+	fmulx		%fp1,%fp2	| ...T(A4+TA6)
+
+	faddd		SINA3,%fp3 | ...A3+T(A5+TA7)
+	faddx		SINA2,%fp2 | ...A2+T(A4+TA6)
+
+	fmulx		%fp3,%fp1	| ...T(A3+T(A5+TA7))
+
+	fmulx		%fp0,%fp2	| ...S(A2+T(A4+TA6))
+	faddx		SINA1,%fp1 | ...A1+T(A3+T(A5+TA7))
+	fmulx		X(%a6),%fp0	| ...R'*S
+
+	faddx		%fp2,%fp1	| ...[A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]
+|--FP3 RELEASED, RESTORE NOW AND TAKE SOME ADVANTAGE OF HIDING
+|--FP2 RELEASED, RESTORE NOW AND TAKE FULL ADVANTAGE OF HIDING
+
+
+	fmulx		%fp1,%fp0		| ...SIN(R')-R'
+|--FP1 RELEASED.
+
+	fmovel		%d1,%FPCR		|restore users exceptions
+	faddx		X(%a6),%fp0		|last inst - possible exception set
+	bra		t_frcinx
+
+
+COSPOLY:
+|--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
+|--THEN WE RETURN	SGN*COS(R). SGN*COS(R) IS COMPUTED BY
+|--SGN + S'*(B1 + S(B2 + S(B3 + S(B4 + ... + SB8)))), WHERE
+|--S=R*R AND S'=SGN*S. THIS CAN BE REWRITTEN AS
+|--SGN + S'*([B1+T(B3+T(B5+TB7))] + [S(B2+T(B4+T(B6+TB8)))])
+|--WHERE T=S*S.
+|--NOTE THAT B4 THROUGH B8 ARE STORED IN DOUBLE PRECISION
+|--WHILE B2 AND B3 ARE IN DOUBLE-EXTENDED FORMAT, B1 IS -1/2
+|--AND IS THEREFORE STORED AS SINGLE PRECISION.
+
+	fmulx		%fp0,%fp0	| ...FP0 IS S
+|---HIDE THE NEXT TWO WHILE WAITING FOR FP0
+	fmoved		COSB8,%fp2
+	fmoved		COSB7,%fp3
+|--FP0 IS NOW READY
+	fmovex		%fp0,%fp1
+	fmulx		%fp1,%fp1	| ...FP1 IS T
+|--HIDE THE NEXT TWO WHILE WAITING FOR FP1
+	fmovex		%fp0,X(%a6)	| ...X IS S
+	rorl		#1,%d0
+	andil		#0x80000000,%d0
+|			...LEAST SIG. BIT OF D0 IN SIGN POSITION
+
+	fmulx		%fp1,%fp2	| ...TB8
+|--HIDE THE NEXT TWO WHILE WAITING FOR THE XU
+	eorl		%d0,X(%a6)	| ...X IS NOW S'= SGN*S
+	andil		#0x80000000,%d0
+
+	fmulx		%fp1,%fp3	| ...TB7
+|--HIDE THE NEXT TWO WHILE WAITING FOR THE XU
+	oril		#0x3F800000,%d0	| ...D0 IS SGN IN SINGLE
+	movel		%d0,POSNEG1(%a6)
+
+	faddd		COSB6,%fp2 | ...B6+TB8
+	faddd		COSB5,%fp3 | ...B5+TB7
+
+	fmulx		%fp1,%fp2	| ...T(B6+TB8)
+	fmulx		%fp1,%fp3	| ...T(B5+TB7)
+
+	faddd		COSB4,%fp2 | ...B4+T(B6+TB8)
+	faddx		COSB3,%fp3 | ...B3+T(B5+TB7)
+
+	fmulx		%fp1,%fp2	| ...T(B4+T(B6+TB8))
+	fmulx		%fp3,%fp1	| ...T(B3+T(B5+TB7))
+
+	faddx		COSB2,%fp2 | ...B2+T(B4+T(B6+TB8))
+	fadds		COSB1,%fp1 | ...B1+T(B3+T(B5+TB7))
+
+	fmulx		%fp2,%fp0	| ...S(B2+T(B4+T(B6+TB8)))
+|--FP3 RELEASED, RESTORE NOW AND TAKE SOME ADVANTAGE OF HIDING
+|--FP2 RELEASED.
+
+
+	faddx		%fp1,%fp0
+|--FP1 RELEASED
+
+	fmulx		X(%a6),%fp0
+
+	fmovel		%d1,%FPCR		|restore users exceptions
+	fadds		POSNEG1(%a6),%fp0	|last inst - possible exception set
+	bra		t_frcinx
+
+
+SINBORS:
+|--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+|--IF |X| < 2**(-40), RETURN X OR 1.
+	cmpil		#0x3FFF8000,%d0
+	bgts		REDUCEX
+
+
+SINSM:
+	movel		ADJN(%a6),%d0
+	cmpil		#0,%d0
+	bgts		COSTINY
+
+SINTINY:
+	movew		#0x0000,XDCARE(%a6)	| ...JUST IN CASE
+	fmovel		%d1,%FPCR		|restore users exceptions
+	fmovex		X(%a6),%fp0		|last inst - possible exception set
+	bra		t_frcinx
+
+
+COSTINY:
+	fmoves		#0x3F800000,%fp0
+
+	fmovel		%d1,%FPCR		|restore users exceptions
+	fsubs		#0x00800000,%fp0	|last inst - possible exception set
+	bra		t_frcinx
+
+
+REDUCEX:
+|--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+|--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+|--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
+
+	fmovemx	%fp2-%fp5,-(%a7)	| ...save FP2 through FP5
+	movel		%d2,-(%a7)
+        fmoves         #0x00000000,%fp1
+|--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
+|--there is a danger of unwanted overflow in first LOOP iteration.  In this
+|--case, reduce argument by one remainder step to make subsequent reduction
+|--safe.
+	cmpil	#0x7ffeffff,%d0		|is argument dangerously large?
+	bnes	LOOP
+	movel	#0x7ffe0000,FP_SCR2(%a6)	|yes
+|					;create 2**16383*PI/2
+	movel	#0xc90fdaa2,FP_SCR2+4(%a6)
+	clrl	FP_SCR2+8(%a6)
+	ftstx	%fp0			|test sign of argument
+	movel	#0x7fdc0000,FP_SCR3(%a6)	|create low half of 2**16383*
+|					;PI/2 at FP_SCR3
+	movel	#0x85a308d3,FP_SCR3+4(%a6)
+	clrl   FP_SCR3+8(%a6)
+	fblt	red_neg
+	orw	#0x8000,FP_SCR2(%a6)	|positive arg
+	orw	#0x8000,FP_SCR3(%a6)
+red_neg:
+	faddx  FP_SCR2(%a6),%fp0		|high part of reduction is exact
+	fmovex  %fp0,%fp1		|save high result in fp1
+	faddx  FP_SCR3(%a6),%fp0		|low part of reduction
+	fsubx  %fp0,%fp1			|determine low component of result
+	faddx  FP_SCR3(%a6),%fp1		|fp0/fp1 are reduced argument.
+
+|--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+|--integer quotient will be stored in N
+|--Intermediate remainder is 66-bit long; (R,r) in (FP0,FP1)
+
+LOOP:
+	fmovex		%fp0,INARG(%a6)	| ...+-2**K * F, 1 <= F < 2
+	movew		INARG(%a6),%d0
+        movel          %d0,%a1		| ...save a copy of D0
+	andil		#0x00007FFF,%d0
+	subil		#0x00003FFF,%d0	| ...D0 IS K
+	cmpil		#28,%d0
+	bles		LASTLOOP
+CONTLOOP:
+	subil		#27,%d0	 | ...D0 IS L := K-27
+	movel		#0,ENDFLAG(%a6)
+	bras		WORK
+LASTLOOP:
+	clrl		%d0		| ...D0 IS L := 0
+	movel		#1,ENDFLAG(%a6)
+
+WORK:
+|--FIND THE REMAINDER OF (R,r) W.R.T.	2**L * (PI/2). L IS SO CHOSEN
+|--THAT	INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+|--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+|--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+	movel		#0x00003FFE,%d2	| ...BIASED EXPO OF 2/PI
+	subl		%d0,%d2		| ...BIASED EXPO OF 2**(-L)*(2/PI)
+
+	movel		#0xA2F9836E,FP_SCR1+4(%a6)
+	movel		#0x4E44152A,FP_SCR1+8(%a6)
+	movew		%d2,FP_SCR1(%a6)	| ...FP_SCR1 is 2**(-L)*(2/PI)
+
+	fmovex		%fp0,%fp2
+	fmulx		FP_SCR1(%a6),%fp2
+|--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+|--FLOATING POINT FORMAT, THE TWO FMOVE'S	FMOVE.L FP <--> N
+|--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+|--(SIGN(INARG)*2**63	+	FP2) - SIGN(INARG)*2**63 WILL GIVE
+|--US THE DESIRED VALUE IN FLOATING POINT.
+
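
The add-and-subtract-2**63 trick described in the comment above is the usual way to round a value to an integer without leaving the FPU: adding a power of two large enough to push the fraction bits off the end of the significand forces them to be rounded away, and subtracting it back leaves the rounded integer. A scaled-down C illustration using 2**52 for IEEE double (it assumes default round-to-nearest and no value-changing optimizations such as -ffast-math):

    #include <math.h>
    #include <stdio.h>

    /* Round-to-integer by biasing with a large power of two.  The FPSP uses
     * sign(INARG)*2**63 because it works on a 64-bit extended significand;
     * for double the equivalent constant is 2**52.  Valid for |x| well below
     * the bias, which is all this code path needs.
     */
    static double round_via_bias(double x)
    {
        const double bias = 0x1.0p52;               /* 2**52                  */
        double sbias = (x < 0.0) ? -bias : bias;    /* sign-matched bias      */
        return (x + sbias) - sbias;                 /* rounds in current mode */
    }

    int main(void)
    {
        printf("%g\n", round_via_bias(2.7));        /* prints 3 */
        return 0;
    }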
+|--HIDE SIX CYCLES OF INSTRUCTION
+        movel		%a1,%d2
+        swap		%d2
+	andil		#0x80000000,%d2
+	oril		#0x5F000000,%d2	| ...D2 IS SIGN(INARG)*2**63 IN SGL
+	movel		%d2,TWOTO63(%a6)
+
+	movel		%d0,%d2
+	addil		#0x00003FFF,%d2	| ...BIASED EXPO OF 2**L * (PI/2)
+
+|--FP2 IS READY
+	fadds		TWOTO63(%a6),%fp2	| ...THE FRACTIONAL PART OF FP1 IS ROUNDED
+
+|--HIDE 4 CYCLES OF INSTRUCTION; creating 2**(L)*Piby2_1  and  2**(L)*Piby2_2
+        movew		%d2,FP_SCR2(%a6)
+	clrw           FP_SCR2+2(%a6)
+	movel		#0xC90FDAA2,FP_SCR2+4(%a6)
+	clrl		FP_SCR2+8(%a6)		| ...FP_SCR2 is  2**(L) * Piby2_1
+
+|--FP2 IS READY
+	fsubs		TWOTO63(%a6),%fp2		| ...FP2 is N
+
+	addil		#0x00003FDD,%d0
+        movew		%d0,FP_SCR3(%a6)
+	clrw           FP_SCR3+2(%a6)
+	movel		#0x85A308D3,FP_SCR3+4(%a6)
+	clrl		FP_SCR3+8(%a6)		| ...FP_SCR3 is 2**(L) * Piby2_2
+
+	movel		ENDFLAG(%a6),%d0
+
+|--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+|--P2 = 2**(L) * Piby2_2
+	fmovex		%fp2,%fp4
+	fmulx		FP_SCR2(%a6),%fp4		| ...W = N*P1
+	fmovex		%fp2,%fp5
+	fmulx		FP_SCR3(%a6),%fp5		| ...w = N*P2
+	fmovex		%fp4,%fp3
+|--we want P+p = W+w  but  |p| <= half ulp of P
+|--Then, we need to compute  A := R-P   and  a := r-p
+	faddx		%fp5,%fp3			| ...FP3 is P
+	fsubx		%fp3,%fp4			| ...W-P
+
+	fsubx		%fp3,%fp0			| ...FP0 is A := R - P
+        faddx		%fp5,%fp4			| ...FP4 is p = (W-P)+w
+
+	fmovex		%fp0,%fp3			| ...FP3 A
+	fsubx		%fp4,%fp1			| ...FP1 is a := r - p
+
+|--Now we need to normalize (A,a) to  "new (R,r)" where R+r = A+a but
+|--|r| <= half ulp of R.
+	faddx		%fp1,%fp0			| ...FP0 is R := A+a
+|--No need to calculate r if this is the last loop
+	cmpil		#0,%d0
+	bgt		RESTORE
+
+|--Need to calculate r
+	fsubx		%fp0,%fp3			| ...A-R
+	faddx		%fp3,%fp1			| ...FP1 is r := (A-R)+a
+	bra		LOOP
+
+RESTORE:
+        fmovel		%fp2,N(%a6)
+	movel		(%a7)+,%d2
+	fmovemx	(%a7)+,%fp2-%fp5
+
+
+	movel		ADJN(%a6),%d0
+	cmpil		#4,%d0
+
+	blt		SINCONT
+	bras		SCCONT
+
+	.global	ssincosd
+ssincosd:
+|--SIN AND COS OF X FOR DENORMALIZED X
+
+	fmoves		#0x3F800000,%fp1
+	bsr		sto_cos		|store cosine result
+	bra		t_extdnrm
+
+	.global	ssincos
+ssincos:
+|--SET ADJN TO 4
+	movel		#4,ADJN(%a6)
+
+	fmovex		(%a0),%fp0	| ...LOAD INPUT
+
+	movel		(%a0),%d0
+	movew		4(%a0),%d0
+	fmovex		%fp0,X(%a6)
+	andil		#0x7FFFFFFF,%d0		| ...COMPACTIFY X
+
+	cmpil		#0x3FD78000,%d0		| ...|X| >= 2**(-40)?
+	bges		SCOK1
+	bra		SCSM
+
+SCOK1:
+	cmpil		#0x4004BC7E,%d0		| ...|X| < 15 PI?
+	blts		SCMAIN
+	bra		REDUCEX
+
+
+SCMAIN:
+|--THIS IS THE USUAL CASE, |X| <= 15 PI.
+|--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+	fmovex		%fp0,%fp1
+	fmuld		TWOBYPI,%fp1	| ...X*2/PI
+
+|--HIDE THE NEXT THREE INSTRUCTIONS
+	lea		PITBL+0x200,%a1 | ...TABLE OF N*PI/2, N = -32,...,32
+
+
+|--FP1 IS NOW READY
+	fmovel		%fp1,N(%a6)		| ...CONVERT TO INTEGER
+
+	movel		N(%a6),%d0
+	asll		#4,%d0
+	addal		%d0,%a1		| ...ADDRESS OF N*PIBY2, IN Y1, Y2
+
+	fsubx		(%a1)+,%fp0	| ...X-Y1
+        fsubs		(%a1),%fp0	| ...FP0 IS R = (X-Y1)-Y2
+
+SCCONT:
+|--continuation point from REDUCEX
+
+|--HIDE THE NEXT TWO
+	movel		N(%a6),%d0
+	rorl		#1,%d0
+
+	cmpil		#0,%d0		| ...D0 < 0 IFF N IS ODD
+	bge		NEVEN
+
+NODD:
+|--REGISTERS SAVED SO FAR: D0, A0, FP2.
+
+	fmovex		%fp0,RPRIME(%a6)
+	fmulx		%fp0,%fp0	 | ...FP0 IS S = R*R
+	fmoved		SINA7,%fp1	| ...A7
+	fmoved		COSB8,%fp2	| ...B8
+	fmulx		%fp0,%fp1	 | ...SA7
+	movel		%d2,-(%a7)
+	movel		%d0,%d2
+	fmulx		%fp0,%fp2	 | ...SB8
+	rorl		#1,%d2
+	andil		#0x80000000,%d2
+
+	faddd		SINA6,%fp1	| ...A6+SA7
+	eorl		%d0,%d2
+	andil		#0x80000000,%d2
+	faddd		COSB7,%fp2	| ...B7+SB8
+
+	fmulx		%fp0,%fp1	 | ...S(A6+SA7)
+	eorl		%d2,RPRIME(%a6)
+	movel		(%a7)+,%d2
+	fmulx		%fp0,%fp2	 | ...S(B7+SB8)
+	rorl		#1,%d0
+	andil		#0x80000000,%d0
+
+	faddd		SINA5,%fp1	| ...A5+S(A6+SA7)
+	movel		#0x3F800000,POSNEG1(%a6)
+	eorl		%d0,POSNEG1(%a6)
+	faddd		COSB6,%fp2	| ...B6+S(B7+SB8)
+
+	fmulx		%fp0,%fp1	 | ...S(A5+S(A6+SA7))
+	fmulx		%fp0,%fp2	 | ...S(B6+S(B7+SB8))
+	fmovex		%fp0,SPRIME(%a6)
+
+	faddd		SINA4,%fp1	| ...A4+S(A5+S(A6+SA7))
+	eorl		%d0,SPRIME(%a6)
+	faddd		COSB5,%fp2	| ...B5+S(B6+S(B7+SB8))
+
+	fmulx		%fp0,%fp1	 | ...S(A4+...)
+	fmulx		%fp0,%fp2	 | ...S(B5+...)
+
+	faddd		SINA3,%fp1	| ...A3+S(A4+...)
+	faddd		COSB4,%fp2	| ...B4+S(B5+...)
+
+	fmulx		%fp0,%fp1	 | ...S(A3+...)
+	fmulx		%fp0,%fp2	 | ...S(B4+...)
+
+	faddx		SINA2,%fp1	| ...A2+S(A3+...)
+	faddx		COSB3,%fp2	| ...B3+S(B4+...)
+
+	fmulx		%fp0,%fp1	 | ...S(A2+...)
+	fmulx		%fp0,%fp2	 | ...S(B3+...)
+
+	faddx		SINA1,%fp1	| ...A1+S(A2+...)
+	faddx		COSB2,%fp2	| ...B2+S(B3+...)
+
+	fmulx		%fp0,%fp1	 | ...S(A1+...)
+	fmulx		%fp2,%fp0	 | ...S(B2+...)
+
+
+
+	fmulx		RPRIME(%a6),%fp1	| ...R'S(A1+...)
+	fadds		COSB1,%fp0	| ...B1+S(B2...)
+	fmulx		SPRIME(%a6),%fp0	| ...S'(B1+S(B2+...))
+
+	movel		%d1,-(%sp)	|save users mode & precision
+	andil		#0xff,%d1		|mask off all exceptions
+	fmovel		%d1,%FPCR
+	faddx		RPRIME(%a6),%fp1	| ...COS(X)
+	bsr		sto_cos		|store cosine result
+	fmovel		(%sp)+,%FPCR	|restore users exceptions
+	fadds		POSNEG1(%a6),%fp0	| ...SIN(X)
+
+	bra		t_frcinx
+
+
+NEVEN:
+|--REGISTERS SAVED SO FAR: FP2.
+
+	fmovex		%fp0,RPRIME(%a6)
+	fmulx		%fp0,%fp0	 | ...FP0 IS S = R*R
+	fmoved		COSB8,%fp1			| ...B8
+	fmoved		SINA7,%fp2			| ...A7
+	fmulx		%fp0,%fp1	 | ...SB8
+	fmovex		%fp0,SPRIME(%a6)
+	fmulx		%fp0,%fp2	 | ...SA7
+	rorl		#1,%d0
+	andil		#0x80000000,%d0
+	faddd		COSB7,%fp1	| ...B7+SB8
+	faddd		SINA6,%fp2	| ...A6+SA7
+	eorl		%d0,RPRIME(%a6)
+	eorl		%d0,SPRIME(%a6)
+	fmulx		%fp0,%fp1	 | ...S(B7+SB8)
+	oril		#0x3F800000,%d0
+	movel		%d0,POSNEG1(%a6)
+	fmulx		%fp0,%fp2	 | ...S(A6+SA7)
+
+	faddd		COSB6,%fp1	| ...B6+S(B7+SB8)
+	faddd		SINA5,%fp2	| ...A5+S(A6+SA7)
+
+	fmulx		%fp0,%fp1	 | ...S(B6+S(B7+SB8))
+	fmulx		%fp0,%fp2	 | ...S(A5+S(A6+SA7))
+
+	faddd		COSB5,%fp1	| ...B5+S(B6+S(B7+SB8))
+	faddd		SINA4,%fp2	| ...A4+S(A5+S(A6+SA7))
+
+	fmulx		%fp0,%fp1	 | ...S(B5+...)
+	fmulx		%fp0,%fp2	 | ...S(A4+...)
+
+	faddd		COSB4,%fp1	| ...B4+S(B5+...)
+	faddd		SINA3,%fp2	| ...A3+S(A4+...)
+
+	fmulx		%fp0,%fp1	 | ...S(B4+...)
+	fmulx		%fp0,%fp2	 | ...S(A3+...)
+
+	faddx		COSB3,%fp1	| ...B3+S(B4+...)
+	faddx		SINA2,%fp2	| ...A2+S(A3+...)
+
+	fmulx		%fp0,%fp1	 | ...S(B3+...)
+	fmulx		%fp0,%fp2	 | ...S(A2+...)
+
+	faddx		COSB2,%fp1	| ...B2+S(B3+...)
+	faddx		SINA1,%fp2	| ...A1+S(A2+...)
+
+	fmulx		%fp0,%fp1	 | ...S(B2+...)
+	fmulx		%fp2,%fp0	 | ...S(A1+...)
+
+
+
+	fadds		COSB1,%fp1	| ...B1+S(B2...)
+	fmulx		RPRIME(%a6),%fp0	| ...R'S(A1+...)
+	fmulx		SPRIME(%a6),%fp1	| ...S'(B1+S(B2+...))
+
+	movel		%d1,-(%sp)	|save users mode & precision
+	andil		#0xff,%d1		|mask off all exceptions
+	fmovel		%d1,%FPCR
+	fadds		POSNEG1(%a6),%fp1	| ...COS(X)
+	bsr		sto_cos		|store cosine result
+	fmovel		(%sp)+,%FPCR	|restore users exceptions
+	faddx		RPRIME(%a6),%fp0	| ...SIN(X)
+
+	bra		t_frcinx
+
+SCBORS:
+	cmpil		#0x3FFF8000,%d0
+	bgt		REDUCEX
+
+
+SCSM:
+	movew		#0x0000,XDCARE(%a6)
+	fmoves		#0x3F800000,%fp1
+
+	movel		%d1,-(%sp)	|save users mode & precision
+	andil		#0xff,%d1		|mask off all exceptions
+	fmovel		%d1,%FPCR
+	fsubs		#0x00800000,%fp1
+	bsr		sto_cos		|store cosine result
+	fmovel		(%sp)+,%FPCR	|restore users exceptions
+	fmovex		X(%a6),%fp0
+	bra		t_frcinx
+
+	|end
diff --git a/arch/m68k/fpsp040/ssinh.S b/arch/m68k/fpsp040/ssinh.S
new file mode 100644
index 0000000..c8b3308
--- /dev/null
+++ b/arch/m68k/fpsp040/ssinh.S
@@ -0,0 +1,135 @@
+|
+|	ssinh.sa 3.1 12/10/90
+|
+|       The entry point sSinh computes the hyperbolic sine of
+|       an input argument; sSinhd does the same except for denormalized
+|       input.
+|
+|       Input: Double-extended number X in location pointed to
+|		by address register a0.
+|
+|       Output: The value sinh(X) returned in floating-point register Fp0.
+|
+|       Accuracy and Monotonicity: The returned result is within 3 ulps in
+|               64 significant bits, i.e. within 0.5001 ulp to 53 bits if the
+|               result is subsequently rounded to double precision. The
+|               result is provably monotonic in double precision.
+|
+|       Speed: The program sSINH takes approximately 280 cycles.
+|
+|       Algorithm:
+|
+|       SINH
+|       1. If |X| > 16380 log2, go to 3.
+|
+|       2. (|X| <= 16380 log2) Sinh(X) is obtained by the formulae
+|               y = |X|, sgn = sign(X), and z = expm1(Y),
+|               sinh(X) = sgn*(1/2)*( z + z/(1+z) ).
+|          Exit.
+|
+|       3. If |X| > 16480 log2, go to 5.
+|
+|       4. (16380 log2 < |X| <= 16480 log2)
+|               sinh(X) = sign(X) * exp(|X|)/2.
+|          However, invoking exp(|X|) may cause premature overflow.
+|          Thus, we calculate sinh(X) as follows:
+|             Y       := |X|
+|             sgn     := sign(X)
+|             sgnFact := sgn * 2**(16380)
+|             Y'      := Y - 16381 log2
+|             sinh(X) := sgnFact * exp(Y').
+|          Exit.
+|
+|       5. (|X| > 16480 log2) sinh(X) must overflow. Return
+|          sign(X)*Huge*Huge to generate overflow and an infinity with
+|          the appropriate sign. Huge is the largest finite number in
+|          extended format. Exit.
+|
+
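
A minimal C sketch of step 2 above, computing sinh through expm1 so that accuracy is preserved for small arguments (the overflow-threshold cases 3-5 are left out; this illustrates the identity, not the FPSP's exception handling):

    #include <math.h>

    /* Step 2: for moderate |X|, with z = expm1(|X|),
     *   sinh(X) = sign(X) * (1/2) * ( z + z/(1+z) ),
     * which is algebraically (exp(|X|) - exp(-|X|))/2 but avoids the
     * cancellation that exp(|X|) - 1 would suffer for tiny |X|.
     */
    static double sinh_via_expm1(double x)
    {
        double z = expm1(fabs(x));
        double s = 0.5 * (z + z / (1.0 + z));
        return (x < 0.0) ? -s : s;
    }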
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|SSINH	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+T1:	.long 0x40C62D38,0xD3D64634 | ... 16381 LOG2 LEAD
+T2:	.long 0x3D6F90AE,0xB1E75CC7 | ... 16381 LOG2 TRAIL
+
+	|xref	t_frcinx
+	|xref	t_ovfl
+	|xref	t_extdnrm
+	|xref	setox
+	|xref	setoxm1
+
+	.global	ssinhd
+ssinhd:
+|--SINH(X) = X FOR DENORMALIZED X
+
+	bra	t_extdnrm
+
+	.global	ssinh
+ssinh:
+	fmovex	(%a0),%fp0	| ...LOAD INPUT
+
+	movel	(%a0),%d0
+	movew	4(%a0),%d0
+	movel	%d0,%a1		| save a copy of original (compacted) operand
+	andl	#0x7FFFFFFF,%d0
+	cmpl	#0x400CB167,%d0
+	bgts	SINHBIG
+
+|--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+|--Y = |X|, Z = EXPM1(Y), SINH(X) = SIGN(X)*(1/2)*( Z + Z/(1+Z) )
+
+	fabsx	%fp0		| ...Y = |X|
+
+	moveml	%a1/%d1,-(%sp)
+	fmovemx %fp0-%fp0,(%a0)
+	clrl	%d1
+	bsr	setoxm1		| ...FP0 IS Z = EXPM1(Y)
+	fmovel	#0,%fpcr
+	moveml	(%sp)+,%a1/%d1
+
+	fmovex	%fp0,%fp1
+	fadds	#0x3F800000,%fp1	| ...1+Z
+	fmovex	%fp0,-(%sp)
+	fdivx	%fp1,%fp0		| ...Z/(1+Z)
+	movel	%a1,%d0
+	andl	#0x80000000,%d0
+	orl	#0x3F000000,%d0
+	faddx	(%sp)+,%fp0
+	movel	%d0,-(%sp)
+
+	fmovel	%d1,%fpcr
+	fmuls	(%sp)+,%fp0	|last fp inst - possible exceptions set
+
+	bra	t_frcinx
+
+SINHBIG:
+	cmpl	#0x400CB2B3,%d0
+	bgt	t_ovfl
+	fabsx	%fp0
+	fsubd	T1(%pc),%fp0	| ...(|X|-16381LOG2_LEAD)
+	movel	#0,-(%sp)
+	movel	#0x80000000,-(%sp)
+	movel	%a1,%d0
+	andl	#0x80000000,%d0
+	orl	#0x7FFB0000,%d0
+	movel	%d0,-(%sp)	| ...EXTENDED FMT
+	fsubd	T2(%pc),%fp0	| ...|X| - 16381 LOG2, ACCURATE
+
+	movel	%d1,-(%sp)
+	clrl	%d1
+	fmovemx %fp0-%fp0,(%a0)
+	bsr	setox
+	fmovel	(%sp)+,%fpcr
+
+	fmulx	(%sp)+,%fp0	|possible exception
+	bra	t_frcinx
+
+	|end
diff --git a/arch/m68k/fpsp040/stan.S b/arch/m68k/fpsp040/stan.S
new file mode 100644
index 0000000..b5c2a19
--- /dev/null
+++ b/arch/m68k/fpsp040/stan.S
@@ -0,0 +1,455 @@
+|
+|	stan.sa 3.3 7/29/91
+|
+|	The entry point stan computes the tangent of
+|	an input argument;
+|	stand does the same except for denormalized input.
+|
+|	Input: Double-extended number X in location pointed to
+|		by address register a0.
+|
+|	Output: The value tan(X) returned in floating-point register Fp0.
+|
+|	Accuracy and Monotonicity: The returned result is within 3 ulps in
+|		64 significant bits, i.e. within 0.5001 ulp to 53 bits if the
+|		result is subsequently rounded to double precision. The
+|		result is provably monotonic in double precision.
+|
+|	Speed: The program sTAN takes approximately 170 cycles for
+|		input argument X such that |X| < 15Pi, which is the usual
+|		situation.
+|
+|	Algorithm:
+|
+|	1. If |X| >= 15Pi or |X| < 2**(-40), go to 6.
+|
+|	2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let
+|		k = N mod 2, so in particular, k = 0 or 1.
+|
+|	3. If k is odd, go to 5.
+|
+|	4. (k is even) Tan(X) = tan(r) and tan(r) is approximated by a
+|		rational function U/V where
+|		U = r + r*s*(P1 + s*(P2 + s*P3)), and
+|		V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))),  s = r*r.
+|		Exit.
+|
+|	5. (k is odd) Tan(X) = -cot(r). Since tan(r) is approximated by a
+|		rational function U/V where
+|		U = r + r*s*(P1 + s*(P2 + s*P3)), and
+|		V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r,
+|		-Cot(r) = -V/U. Exit.
+|
+|	6. If |X| > 1, go to 8.
+|
+|	7. (|X|<2**(-40)) Tan(X) = X. Exit.
+|
+|	8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back to 2.
+|
+
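
A short C model of steps 2-5 above; it uses libm's tan of the reduced argument in place of the U/V rational approximation, so it only shows the reduction and the even/odd-N dispatch to tan(r) or -cot(r):

    #include <math.h>

    /* Steps 2-5: reduce X to r = X - N*(pi/2), |r| <= pi/4, then
     *   tan(X) = tan(r)     if N is even,
     *   tan(X) = -cot(r)    if N is odd (the code forms -V/U).
     * The real code builds U and V from the TANP1..P3 and TANQ1..Q4
     * coefficients and divides once at the end.
     */
    static double tan_model(double x)
    {
        double n = nearbyint(x * (2.0 / M_PI));   /* N = round(X * 2/pi) */
        double r = x - n * (M_PI / 2.0);          /* reduced argument    */

        return ((long)n & 1) ? -1.0 / tan(r) : tan(r);
    }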
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|STAN	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+BOUNDS1:	.long 0x3FD78000,0x4004BC7E
+TWOBYPI:	.long 0x3FE45F30,0x6DC9C883
+
+TANQ4:	.long 0x3EA0B759,0xF50F8688
+TANP3:	.long 0xBEF2BAA5,0xA8924F04
+
+TANQ3:	.long 0xBF346F59,0xB39BA65F,0x00000000,0x00000000
+
+TANP2:	.long 0x3FF60000,0xE073D3FC,0x199C4A00,0x00000000
+
+TANQ2:	.long 0x3FF90000,0xD23CD684,0x15D95FA1,0x00000000
+
+TANP1:	.long 0xBFFC0000,0x8895A6C5,0xFB423BCA,0x00000000
+
+TANQ1:	.long 0xBFFD0000,0xEEF57E0D,0xA84BC8CE,0x00000000
+
+INVTWOPI: .long 0x3FFC0000,0xA2F9836E,0x4E44152A,0x00000000
+
+TWOPI1:	.long 0x40010000,0xC90FDAA2,0x00000000,0x00000000
+TWOPI2:	.long 0x3FDF0000,0x85A308D4,0x00000000,0x00000000
+
+|--N*PI/2, -32 <= N <= 32, IN A LEADING TERM IN EXT. AND TRAILING
+|--TERM IN SGL. NOTE THAT PI IS 64-BIT LONG, THUS N*PI/2 IS AT
+|--MOST 69 BITS LONG.
+	.global	PITBL
+PITBL:
+  .long  0xC0040000,0xC90FDAA2,0x2168C235,0x21800000
+  .long  0xC0040000,0xC2C75BCD,0x105D7C23,0xA0D00000
+  .long  0xC0040000,0xBC7EDCF7,0xFF523611,0xA1E80000
+  .long  0xC0040000,0xB6365E22,0xEE46F000,0x21480000
+  .long  0xC0040000,0xAFEDDF4D,0xDD3BA9EE,0xA1200000
+  .long  0xC0040000,0xA9A56078,0xCC3063DD,0x21FC0000
+  .long  0xC0040000,0xA35CE1A3,0xBB251DCB,0x21100000
+  .long  0xC0040000,0x9D1462CE,0xAA19D7B9,0xA1580000
+  .long  0xC0040000,0x96CBE3F9,0x990E91A8,0x21E00000
+  .long  0xC0040000,0x90836524,0x88034B96,0x20B00000
+  .long  0xC0040000,0x8A3AE64F,0x76F80584,0xA1880000
+  .long  0xC0040000,0x83F2677A,0x65ECBF73,0x21C40000
+  .long  0xC0030000,0xFB53D14A,0xA9C2F2C2,0x20000000
+  .long  0xC0030000,0xEEC2D3A0,0x87AC669F,0x21380000
+  .long  0xC0030000,0xE231D5F6,0x6595DA7B,0xA1300000
+  .long  0xC0030000,0xD5A0D84C,0x437F4E58,0x9FC00000
+  .long  0xC0030000,0xC90FDAA2,0x2168C235,0x21000000
+  .long  0xC0030000,0xBC7EDCF7,0xFF523611,0xA1680000
+  .long  0xC0030000,0xAFEDDF4D,0xDD3BA9EE,0xA0A00000
+  .long  0xC0030000,0xA35CE1A3,0xBB251DCB,0x20900000
+  .long  0xC0030000,0x96CBE3F9,0x990E91A8,0x21600000
+  .long  0xC0030000,0x8A3AE64F,0x76F80584,0xA1080000
+  .long  0xC0020000,0xFB53D14A,0xA9C2F2C2,0x1F800000
+  .long  0xC0020000,0xE231D5F6,0x6595DA7B,0xA0B00000
+  .long  0xC0020000,0xC90FDAA2,0x2168C235,0x20800000
+  .long  0xC0020000,0xAFEDDF4D,0xDD3BA9EE,0xA0200000
+  .long  0xC0020000,0x96CBE3F9,0x990E91A8,0x20E00000
+  .long  0xC0010000,0xFB53D14A,0xA9C2F2C2,0x1F000000
+  .long  0xC0010000,0xC90FDAA2,0x2168C235,0x20000000
+  .long  0xC0010000,0x96CBE3F9,0x990E91A8,0x20600000
+  .long  0xC0000000,0xC90FDAA2,0x2168C235,0x1F800000
+  .long  0xBFFF0000,0xC90FDAA2,0x2168C235,0x1F000000
+  .long  0x00000000,0x00000000,0x00000000,0x00000000
+  .long  0x3FFF0000,0xC90FDAA2,0x2168C235,0x9F000000
+  .long  0x40000000,0xC90FDAA2,0x2168C235,0x9F800000
+  .long  0x40010000,0x96CBE3F9,0x990E91A8,0xA0600000
+  .long  0x40010000,0xC90FDAA2,0x2168C235,0xA0000000
+  .long  0x40010000,0xFB53D14A,0xA9C2F2C2,0x9F000000
+  .long  0x40020000,0x96CBE3F9,0x990E91A8,0xA0E00000
+  .long  0x40020000,0xAFEDDF4D,0xDD3BA9EE,0x20200000
+  .long  0x40020000,0xC90FDAA2,0x2168C235,0xA0800000
+  .long  0x40020000,0xE231D5F6,0x6595DA7B,0x20B00000
+  .long  0x40020000,0xFB53D14A,0xA9C2F2C2,0x9F800000
+  .long  0x40030000,0x8A3AE64F,0x76F80584,0x21080000
+  .long  0x40030000,0x96CBE3F9,0x990E91A8,0xA1600000
+  .long  0x40030000,0xA35CE1A3,0xBB251DCB,0xA0900000
+  .long  0x40030000,0xAFEDDF4D,0xDD3BA9EE,0x20A00000
+  .long  0x40030000,0xBC7EDCF7,0xFF523611,0x21680000
+  .long  0x40030000,0xC90FDAA2,0x2168C235,0xA1000000
+  .long  0x40030000,0xD5A0D84C,0x437F4E58,0x1FC00000
+  .long  0x40030000,0xE231D5F6,0x6595DA7B,0x21300000
+  .long  0x40030000,0xEEC2D3A0,0x87AC669F,0xA1380000
+  .long  0x40030000,0xFB53D14A,0xA9C2F2C2,0xA0000000
+  .long  0x40040000,0x83F2677A,0x65ECBF73,0xA1C40000
+  .long  0x40040000,0x8A3AE64F,0x76F80584,0x21880000
+  .long  0x40040000,0x90836524,0x88034B96,0xA0B00000
+  .long  0x40040000,0x96CBE3F9,0x990E91A8,0xA1E00000
+  .long  0x40040000,0x9D1462CE,0xAA19D7B9,0x21580000
+  .long  0x40040000,0xA35CE1A3,0xBB251DCB,0xA1100000
+  .long  0x40040000,0xA9A56078,0xCC3063DD,0xA1FC0000
+  .long  0x40040000,0xAFEDDF4D,0xDD3BA9EE,0x21200000
+  .long  0x40040000,0xB6365E22,0xEE46F000,0xA1480000
+  .long  0x40040000,0xBC7EDCF7,0xFF523611,0x21E80000
+  .long  0x40040000,0xC2C75BCD,0x105D7C23,0x20D00000
+  .long  0x40040000,0xC90FDAA2,0x2168C235,0xA1800000
+
+	.set	INARG,FP_SCR4
+
+	.set	TWOTO63,L_SCR1
+	.set	ENDFLAG,L_SCR2
+	.set	N,L_SCR3
+
+	| xref	t_frcinx
+	|xref	t_extdnrm
+
+	.global	stand
+stand:
+|--TAN(X) = X FOR DENORMALIZED X
+
+	bra		t_extdnrm
+
+	.global	stan
+stan:
+	fmovex		(%a0),%fp0	| ...LOAD INPUT
+
+	movel		(%a0),%d0
+	movew		4(%a0),%d0
+	andil		#0x7FFFFFFF,%d0
+
+	cmpil		#0x3FD78000,%d0		| ...|X| >= 2**(-40)?
+	bges		TANOK1
+	bra		TANSM
+TANOK1:
+	cmpil		#0x4004BC7E,%d0		| ...|X| < 15 PI?
+	blts		TANMAIN
+	bra		REDUCEX
+
+
+TANMAIN:
+|--THIS IS THE USUAL CASE, |X| <= 15 PI.
+|--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+	fmovex		%fp0,%fp1
+	fmuld		TWOBYPI,%fp1	| ...X*2/PI
+
+|--HIDE THE NEXT TWO INSTRUCTIONS
+	leal		PITBL+0x200,%a1 | ...TABLE OF N*PI/2, N = -32,...,32
+
+|--FP1 IS NOW READY
+	fmovel		%fp1,%d0		| ...CONVERT TO INTEGER
+
+	asll		#4,%d0
+	addal		%d0,%a1		| ...ADDRESS N*PIBY2 IN Y1, Y2
+
+	fsubx		(%a1)+,%fp0	| ...X-Y1
+|--HIDE THE NEXT ONE
+
+	fsubs		(%a1),%fp0	| ...FP0 IS R = (X-Y1)-Y2
+
+	rorl		#5,%d0
+	andil		#0x80000000,%d0	| ...D0 WAS ODD IFF D0 < 0
+
+TANCONT:
+
+	cmpil		#0,%d0
+	blt		NODD
+
+	fmovex		%fp0,%fp1
+	fmulx		%fp1,%fp1		| ...S = R*R
+
+	fmoved		TANQ4,%fp3
+	fmoved		TANP3,%fp2
+
+	fmulx		%fp1,%fp3		| ...SQ4
+	fmulx		%fp1,%fp2		| ...SP3
+
+	faddd		TANQ3,%fp3	| ...Q3+SQ4
+	faddx		TANP2,%fp2	| ...P2+SP3
+
+	fmulx		%fp1,%fp3		| ...S(Q3+SQ4)
+	fmulx		%fp1,%fp2		| ...S(P2+SP3)
+
+	faddx		TANQ2,%fp3	| ...Q2+S(Q3+SQ4)
+	faddx		TANP1,%fp2	| ...P1+S(P2+SP3)
+
+	fmulx		%fp1,%fp3		| ...S(Q2+S(Q3+SQ4))
+	fmulx		%fp1,%fp2		| ...S(P1+S(P2+SP3))
+
+	faddx		TANQ1,%fp3	| ...Q1+S(Q2+S(Q3+SQ4))
+	fmulx		%fp0,%fp2		| ...RS(P1+S(P2+SP3))
+
+	fmulx		%fp3,%fp1		| ...S(Q1+S(Q2+S(Q3+SQ4)))
+
+
+	faddx		%fp2,%fp0		| ...R+RS(P1+S(P2+SP3))
+
+
+	fadds		#0x3F800000,%fp1	| ...1+S(Q1+...)
+
+	fmovel		%d1,%fpcr		|restore users exceptions
+	fdivx		%fp1,%fp0		|last inst - possible exception set
+
+	bra		t_frcinx
+
+NODD:
+	fmovex		%fp0,%fp1
+	fmulx		%fp0,%fp0		| ...S = R*R
+
+	fmoved		TANQ4,%fp3
+	fmoved		TANP3,%fp2
+
+	fmulx		%fp0,%fp3		| ...SQ4
+	fmulx		%fp0,%fp2		| ...SP3
+
+	faddd		TANQ3,%fp3	| ...Q3+SQ4
+	faddx		TANP2,%fp2	| ...P2+SP3
+
+	fmulx		%fp0,%fp3		| ...S(Q3+SQ4)
+	fmulx		%fp0,%fp2		| ...S(P2+SP3)
+
+	faddx		TANQ2,%fp3	| ...Q2+S(Q3+SQ4)
+	faddx		TANP1,%fp2	| ...P1+S(P2+SP3)
+
+	fmulx		%fp0,%fp3		| ...S(Q2+S(Q3+SQ4))
+	fmulx		%fp0,%fp2		| ...S(P1+S(P2+SP3))
+
+	faddx		TANQ1,%fp3	| ...Q1+S(Q2+S(Q3+SQ4))
+	fmulx		%fp1,%fp2		| ...RS(P1+S(P2+SP3))
+
+	fmulx		%fp3,%fp0		| ...S(Q1+S(Q2+S(Q3+SQ4)))
+
+
+	faddx		%fp2,%fp1		| ...R+RS(P1+S(P2+SP3))
+	fadds		#0x3F800000,%fp0	| ...1+S(Q1+...)
+
+
+	fmovex		%fp1,-(%sp)
+	eoril		#0x80000000,(%sp)
+
+	fmovel		%d1,%fpcr		|restore users exceptions
+	fdivx		(%sp)+,%fp0	|last inst - possible exception set
+
+	bra		t_frcinx
+
+TANBORS:
+|--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+|--IF |X| < 2**(-40), RETURN X OR 1.
+	cmpil		#0x3FFF8000,%d0
+	bgts		REDUCEX
+
+TANSM:
+
+	fmovex		%fp0,-(%sp)
+	fmovel		%d1,%fpcr		 |restore users exceptions
+	fmovex		(%sp)+,%fp0	|last inst - possible exception set
+
+	bra		t_frcinx
+
+
+REDUCEX:
+|--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+|--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+|--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
+
+	fmovemx	%fp2-%fp5,-(%a7)	| ...save FP2 through FP5
+	movel		%d2,-(%a7)
+        fmoves         #0x00000000,%fp1
+
+|--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
+|--there is a danger of unwanted overflow in first LOOP iteration.  In this
+|--case, reduce argument by one remainder step to make subsequent reduction
+|--safe.
+	cmpil	#0x7ffeffff,%d0		|is argument dangerously large?
+	bnes	LOOP
+	movel	#0x7ffe0000,FP_SCR2(%a6)	|yes
+|					;create 2**16383*PI/2
+	movel	#0xc90fdaa2,FP_SCR2+4(%a6)
+	clrl	FP_SCR2+8(%a6)
+	ftstx	%fp0			|test sign of argument
+	movel	#0x7fdc0000,FP_SCR3(%a6)	|create low half of 2**16383*
+|					;PI/2 at FP_SCR3
+	movel	#0x85a308d3,FP_SCR3+4(%a6)
+	clrl   FP_SCR3+8(%a6)
+	fblt	red_neg
+	orw	#0x8000,FP_SCR2(%a6)	|positive arg
+	orw	#0x8000,FP_SCR3(%a6)
+red_neg:
+	faddx  FP_SCR2(%a6),%fp0		|high part of reduction is exact
+	fmovex  %fp0,%fp1		|save high result in fp1
+	faddx  FP_SCR3(%a6),%fp0		|low part of reduction
+	fsubx  %fp0,%fp1			|determine low component of result
+	faddx  FP_SCR3(%a6),%fp1		|fp0/fp1 are reduced argument.
+
+|--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+|--integer quotient will be stored in N
+|--Intermediate remainder is 66-bit long; (R,r) in (FP0,FP1)
+
+LOOP:
+	fmovex		%fp0,INARG(%a6)	| ...+-2**K * F, 1 <= F < 2
+	movew		INARG(%a6),%d0
+        movel          %d0,%a1		| ...save a copy of D0
+	andil		#0x00007FFF,%d0
+	subil		#0x00003FFF,%d0	| ...D0 IS K
+	cmpil		#28,%d0
+	bles		LASTLOOP
+CONTLOOP:
+	subil		#27,%d0	 | ...D0 IS L := K-27
+	movel		#0,ENDFLAG(%a6)
+	bras		WORK
+LASTLOOP:
+	clrl		%d0		| ...D0 IS L := 0
+	movel		#1,ENDFLAG(%a6)
+
+WORK:
+|--FIND THE REMAINDER OF (R,r) W.R.T.	2**L * (PI/2). L IS SO CHOSEN
+|--THAT	INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+|--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+|--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+	movel		#0x00003FFE,%d2	| ...BIASED EXPO OF 2/PI
+	subl		%d0,%d2		| ...BIASED EXPO OF 2**(-L)*(2/PI)
+
+	movel		#0xA2F9836E,FP_SCR1+4(%a6)
+	movel		#0x4E44152A,FP_SCR1+8(%a6)
+	movew		%d2,FP_SCR1(%a6)	| ...FP_SCR1 is 2**(-L)*(2/PI)
+
+	fmovex		%fp0,%fp2
+	fmulx		FP_SCR1(%a6),%fp2
+|--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+|--FLOATING POINT FORMAT, THE TWO FMOVE'S	FMOVE.L FP <--> N
+|--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+|--(SIGN(INARG)*2**63	+	FP2) - SIGN(INARG)*2**63 WILL GIVE
+|--US THE DESIRED VALUE IN FLOATING POINT.
+
+|--HIDE SIX CYCLES OF INSTRUCTION
+        movel		%a1,%d2
+        swap		%d2
+	andil		#0x80000000,%d2
+	oril		#0x5F000000,%d2	| ...D2 IS SIGN(INARG)*2**63 IN SGL
+	movel		%d2,TWOTO63(%a6)
+
+	movel		%d0,%d2
+	addil		#0x00003FFF,%d2	| ...BIASED EXPO OF 2**L * (PI/2)
+
+|--FP2 IS READY
+	fadds		TWOTO63(%a6),%fp2	| ...THE FRACTIONAL PART OF FP1 IS ROUNDED
+
+|--HIDE 4 CYCLES OF INSTRUCTION; creating 2**(L)*Piby2_1  and  2**(L)*Piby2_2
+        movew		%d2,FP_SCR2(%a6)
+	clrw           FP_SCR2+2(%a6)
+	movel		#0xC90FDAA2,FP_SCR2+4(%a6)
+	clrl		FP_SCR2+8(%a6)		| ...FP_SCR2 is  2**(L) * Piby2_1
+
+|--FP2 IS READY
+	fsubs		TWOTO63(%a6),%fp2		| ...FP2 is N
+
+	addil		#0x00003FDD,%d0
+        movew		%d0,FP_SCR3(%a6)
+	clrw           FP_SCR3+2(%a6)
+	movel		#0x85A308D3,FP_SCR3+4(%a6)
+	clrl		FP_SCR3+8(%a6)		| ...FP_SCR3 is 2**(L) * Piby2_2
+
+	movel		ENDFLAG(%a6),%d0
+
+|--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+|--P2 = 2**(L) * Piby2_2
+	fmovex		%fp2,%fp4
+	fmulx		FP_SCR2(%a6),%fp4		| ...W = N*P1
+	fmovex		%fp2,%fp5
+	fmulx		FP_SCR3(%a6),%fp5		| ...w = N*P2
+	fmovex		%fp4,%fp3
+|--we want P+p = W+w  but  |p| <= half ulp of P
+|--Then, we need to compute  A := R-P   and  a := r-p
+	faddx		%fp5,%fp3			| ...FP3 is P
+	fsubx		%fp3,%fp4			| ...W-P
+
+	fsubx		%fp3,%fp0			| ...FP0 is A := R - P
+        faddx		%fp5,%fp4			| ...FP4 is p = (W-P)+w
+
+	fmovex		%fp0,%fp3			| ...FP3 A
+	fsubx		%fp4,%fp1			| ...FP1 is a := r - p
+
+|--Now we need to normalize (A,a) to  "new (R,r)" where R+r = A+a but
+|--|r| <= half ulp of R.
+	faddx		%fp1,%fp0			| ...FP0 is R := A+a
+|--No need to calculate r if this is the last loop
+	cmpil		#0,%d0
+	bgt		RESTORE
+
+|--Need to calculate r
+	fsubx		%fp0,%fp3			| ...A-R
+	faddx		%fp3,%fp1			| ...FP1 is r := (A-R)+a
+	bra		LOOP
+
+RESTORE:
+        fmovel		%fp2,N(%a6)
+	movel		(%a7)+,%d2
+	fmovemx	(%a7)+,%fp2-%fp5
+
+
+	movel		N(%a6),%d0
+        rorl		#1,%d0
+
+
+	bra		TANCONT
+
+	|end
diff --git a/arch/m68k/fpsp040/stanh.S b/arch/m68k/fpsp040/stanh.S
new file mode 100644
index 0000000..33b0098
--- /dev/null
+++ b/arch/m68k/fpsp040/stanh.S
@@ -0,0 +1,185 @@
+|
+|	stanh.sa 3.1 12/10/90
+|
+|	The entry point sTanh computes the hyperbolic tangent of
+|	an input argument; sTanhd does the same except for denormalized
+|	input.
+|
+|	Input: Double-extended number X in location pointed to
+|		by address register a0.
+|
+|	Output: The value tanh(X) returned in floating-point register Fp0.
+|
+|	Accuracy and Monotonicity: The returned result is within 3 ulps in
+|		64 significant bits, i.e. within 0.5001 ulp to 53 bits if the
+|		result is subsequently rounded to double precision. The
+|		result is provably monotonic in double precision.
+|
+|	Speed: The program stanh takes approximately 270 cycles.
+|
+|	Algorithm:
+|
+|	TANH
+|	1. If |X| >= (5/2) log2 or |X| <= 2**(-40), go to 3.
+|
+|	2. (2**(-40) < |X| < (5/2) log2) Calculate tanh(X) by
+|		sgn := sign(X), y := 2|X|, z := expm1(Y), and
+|		tanh(X) = sgn*( z/(2+z) ).
+|		Exit.
+|
+|	3. (|X| <= 2**(-40) or |X| >= (5/2) log2). If |X| < 1,
+|		go to 7.
+|
+|	4. (|X| >= (5/2) log2) If |X| >= 50 log2, go to 6.
+|
+|	5. ((5/2) log2 <= |X| < 50 log2) Calculate tanh(X) by
+|		sgn := sign(X), y := 2|X|, z := exp(Y),
+|		tanh(X) = sgn - [ sgn*2/(1+z) ].
+|		Exit.
+|
+|	6. (|X| >= 50 log2) Tanh(X) = +-1 (round to nearest). Thus, we
+|		calculate Tanh(X) by
+|		sgn := sign(X), Tiny := 2**(-126),
+|		tanh(X) := sgn - sgn*Tiny.
+|		Exit.
+|
+|	7. (|X| < 2**(-40)). Tanh(X) = X.	Exit.
+|
+
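
A C model of cases 2, 5 and 6 above (the tiny-argument case 7 simply falls into the first branch, where z/(z+2) effectively returns X; the thresholds are written in terms of log 2 and rounding-mode/exception handling is omitted):

    #include <math.h>

    /* With sgn = sign(X) and y = 2|X|:
     *   |X| < (5/2) log2:            tanh(X) = sgn * expm1(y)/(expm1(y)+2)
     *   (5/2) log2 <= |X| < 50 log2: tanh(X) = sgn - sgn*2/(exp(y)+1)
     *   |X| >= 50 log2:              tanh(X) = sgn - sgn*2**(-126), which
     *                                rounds to +-1 and raises inexact.
     */
    static double tanh_model(double x)
    {
        double sgn = (x < 0.0) ? -1.0 : 1.0;
        double ax  = fabs(x);
        double y   = 2.0 * ax;

        if (ax < 2.5 * M_LN2) {                 /* usual case              */
            double z = expm1(y);
            return sgn * (z / (z + 2.0));
        }
        if (ax < 50.0 * M_LN2)                  /* large but representable */
            return sgn - sgn * 2.0 / (exp(y) + 1.0);
        return sgn - sgn * 0x1.0p-126;          /* saturates to +-1        */
    }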
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|STANH	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	.set	X,FP_SCR5
+	.set	XDCARE,X+2
+	.set	XFRAC,X+4
+
+	.set	SGN,L_SCR3
+
+	.set	V,FP_SCR6
+
+BOUNDS1:	.long 0x3FD78000,0x3FFFDDCE | ... 2^(-40), (5/2)LOG2
+
+	|xref	t_frcinx
+	|xref	t_extdnrm
+	|xref	setox
+	|xref	setoxm1
+
+	.global	stanhd
+stanhd:
+|--TANH(X) = X FOR DENORMALIZED X
+
+	bra		t_extdnrm
+
+	.global	stanh
+stanh:
+	fmovex		(%a0),%fp0	| ...LOAD INPUT
+
+	fmovex		%fp0,X(%a6)
+	movel		(%a0),%d0
+	movew		4(%a0),%d0
+	movel		%d0,X(%a6)
+	andl		#0x7FFFFFFF,%d0
+	cmp2l		BOUNDS1(%pc),%d0	| ...2**(-40) < |X| < (5/2)LOG2 ?
+	bcss		TANHBORS
+
+|--THIS IS THE USUAL CASE
+|--Y = 2|X|, Z = EXPM1(Y), TANH(X) = SIGN(X) * Z / (Z+2).
+
+	movel		X(%a6),%d0
+	movel		%d0,SGN(%a6)
+	andl		#0x7FFF0000,%d0
+	addl		#0x00010000,%d0	| ...EXPONENT OF 2|X|
+	movel		%d0,X(%a6)
+	andl		#0x80000000,SGN(%a6)
+	fmovex		X(%a6),%fp0		| ...FP0 IS Y = 2|X|
+
+	movel		%d1,-(%a7)
+	clrl		%d1
+	fmovemx	%fp0-%fp0,(%a0)
+	bsr		setoxm1		| ...FP0 IS Z = EXPM1(Y)
+	movel		(%a7)+,%d1
+
+	fmovex		%fp0,%fp1
+	fadds		#0x40000000,%fp1	| ...Z+2
+	movel		SGN(%a6),%d0
+	fmovex		%fp1,V(%a6)
+	eorl		%d0,V(%a6)
+
+	fmovel		%d1,%FPCR		|restore users exceptions
+	fdivx		V(%a6),%fp0
+	bra		t_frcinx
+
+TANHBORS:
+	cmpl		#0x3FFF8000,%d0
+	blt		TANHSM
+
+	cmpl		#0x40048AA1,%d0
+	bgt		TANHHUGE
+
+|-- (5/2) LOG2 < |X| < 50 LOG2,
+|--TANH(X) = 1 - (2/[EXP(2X)+1]). LET Y = 2|X|, SGN = SIGN(X),
+|--TANH(X) = SGN -	SGN*2/[EXP(Y)+1].
+
+	movel		X(%a6),%d0
+	movel		%d0,SGN(%a6)
+	andl		#0x7FFF0000,%d0
+	addl		#0x00010000,%d0	| ...EXPO OF 2|X|
+	movel		%d0,X(%a6)		| ...Y = 2|X|
+	andl		#0x80000000,SGN(%a6)
+	movel		SGN(%a6),%d0
+	fmovex		X(%a6),%fp0		| ...Y = 2|X|
+
+	movel		%d1,-(%a7)
+	clrl		%d1
+	fmovemx	%fp0-%fp0,(%a0)
+	bsr		setox		| ...FP0 IS EXP(Y)
+	movel		(%a7)+,%d1
+	movel		SGN(%a6),%d0
+	fadds		#0x3F800000,%fp0	| ...EXP(Y)+1
+
+	eorl		#0xC0000000,%d0	| ...-SIGN(X)*2
+	fmoves		%d0,%fp1		| ...-SIGN(X)*2 IN SGL FMT
+	fdivx		%fp0,%fp1		| ...-SIGN(X)2 / [EXP(Y)+1 ]
+
+	movel		SGN(%a6),%d0
+	orl		#0x3F800000,%d0	| ...SGN
+	fmoves		%d0,%fp0		| ...SGN IN SGL FMT
+
+	fmovel		%d1,%FPCR		|restore users exceptions
+	faddx		%fp1,%fp0
+
+	bra		t_frcinx
+
+TANHSM:
+	movew		#0x0000,XDCARE(%a6)
+
+	fmovel		%d1,%FPCR		|restore users exceptions
+	fmovex		X(%a6),%fp0		|last inst - possible exception set
+
+	bra		t_frcinx
+
+TANHHUGE:
+|---RETURN SGN(X) - SGN(X)EPS
+	movel		X(%a6),%d0
+	andl		#0x80000000,%d0
+	orl		#0x3F800000,%d0
+	fmoves		%d0,%fp0
+	andl		#0x80000000,%d0
+	eorl		#0x80800000,%d0	| ...-SIGN(X)*EPS
+
+	fmovel		%d1,%FPCR		|restore users exceptions
+	fadds		%d0,%fp0
+
+	bra		t_frcinx
+
+	|end
diff --git a/arch/m68k/fpsp040/sto_res.S b/arch/m68k/fpsp040/sto_res.S
new file mode 100644
index 0000000..0cdca3b
--- /dev/null
+++ b/arch/m68k/fpsp040/sto_res.S
@@ -0,0 +1,98 @@
+|
+|	sto_res.sa 3.1 12/10/90
+|
+|	Takes the result and puts it where the user expects it.
+|	Library functions return the result in fp0.	If fp0 is not the
+|	user's destination register then fp0 is moved to the
+|	correct floating-point destination register.  fp0 and fp1
+|	are then restored to their original contents.
+|
+|	Input:	result in fp0,fp1
+|
+|		d2 & a0 should be kept unmodified
+|
+|	Output:	moves the result to the true destination reg or mem
+|
+|	Modifies: destination floating point register
+|
+
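
The only subtle point in this routine is how a result destined for fp2-fp7 is stored: the code builds a dynamic fmovem register mask in which bit (7 - n) selects fpn (see the "d1 = 7 - (dest. reg. no.)" sequence below). A one-line C sketch of that mask computation, with an illustrative function name:

    /* Dynamic fmovem mask used by sto_res/sto_cos for destinations fp2..fp7:
     * register fpn is selected by bit (7 - n) of the mask, so fp5 maps to
     * bit 2, i.e. a mask of 0x04.
     */
    static unsigned int fpn_to_fmovem_mask(unsigned int dest_reg)
    {
        return 1u << (7u - dest_reg);
    }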
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+STO_RES:	|idnt	2,1 | Motorola 040 Floating Point Software Package
+
+
+	|section	8
+
+#include "fpsp.h"
+
+	.global	sto_cos
+sto_cos:
+	bfextu		CMDREG1B(%a6){#13:#3},%d0	|extract cos destination
+	cmpib		#3,%d0		|check for fp0/fp1 cases
+	bles		c_fp0123
+	fmovemx	%fp1-%fp1,-(%a7)
+	moveql		#7,%d1
+	subl		%d0,%d1		|d1 = 7- (dest. reg. no.)
+	clrl		%d0
+	bsetl		%d1,%d0		|d0 is dynamic register mask
+	fmovemx	(%a7)+,%d0
+	rts
+c_fp0123:
+	cmpib		#0,%d0
+	beqs		c_is_fp0
+	cmpib		#1,%d0
+	beqs		c_is_fp1
+	cmpib		#2,%d0
+	beqs		c_is_fp2
+c_is_fp3:
+	fmovemx	%fp1-%fp1,USER_FP3(%a6)
+	rts
+c_is_fp2:
+	fmovemx	%fp1-%fp1,USER_FP2(%a6)
+	rts
+c_is_fp1:
+	fmovemx	%fp1-%fp1,USER_FP1(%a6)
+	rts
+c_is_fp0:
+	fmovemx	%fp1-%fp1,USER_FP0(%a6)
+	rts
+
+
+	.global	sto_res
+sto_res:
+	bfextu		CMDREG1B(%a6){#6:#3},%d0	|extract destination register
+	cmpib		#3,%d0		|check for fp0/fp1 cases
+	bles		fp0123
+	fmovemx	%fp0-%fp0,-(%a7)
+	moveql		#7,%d1
+	subl		%d0,%d1		|d1 = 7- (dest. reg. no.)
+	clrl		%d0
+	bsetl		%d1,%d0		|d0 is dynamic register mask
+	fmovemx	(%a7)+,%d0
+	rts
+fp0123:
+	cmpib		#0,%d0
+	beqs		is_fp0
+	cmpib		#1,%d0
+	beqs		is_fp1
+	cmpib		#2,%d0
+	beqs		is_fp2
+is_fp3:
+	fmovemx	%fp0-%fp0,USER_FP3(%a6)
+	rts
+is_fp2:
+	fmovemx	%fp0-%fp0,USER_FP2(%a6)
+	rts
+is_fp1:
+	fmovemx	%fp0-%fp0,USER_FP1(%a6)
+	rts
+is_fp0:
+	fmovemx	%fp0-%fp0,USER_FP0(%a6)
+	rts
+
+	|end
diff --git a/arch/m68k/fpsp040/stwotox.S b/arch/m68k/fpsp040/stwotox.S
new file mode 100644
index 0000000..4e3c140
--- /dev/null
+++ b/arch/m68k/fpsp040/stwotox.S
@@ -0,0 +1,427 @@
+|
+|	stwotox.sa 3.1 12/10/90
+|
+|	stwotox  --- 2**X
+|	stwotoxd --- 2**X for denormalized X
+|	stentox  --- 10**X
+|	stentoxd --- 10**X for denormalized X
+|
+|	Input: Double-extended number X in location pointed to
+|		by address register a0.
+|
+|	Output: The function values are returned in Fp0.
+|
+|	Accuracy and Monotonicity: The returned result is within 2 ulps in
+|		64 significant bits, i.e. within 0.5001 ulp to 53 bits if the
+|		result is subsequently rounded to double precision. The
+|		result is provably monotonic in double precision.
+|
+|	Speed: The program stwotox takes approximately 190 cycles and the
+|		program stentox takes approximately 200 cycles.
+|
+|	Algorithm:
+|
+|	twotox
+|	1. If |X| > 16480, go to ExpBig.
+|
+|	2. If |X| < 2**(-70), go to ExpSm.
+|
+|	3. Decompose X as X = N/64 + r where |r| <= 1/128. Furthermore
+|		decompose N as
+|		 N = 64(M + M') + j,  j = 0,1,2,...,63.
+|
+|	4. Overwrite r := r * log2. Then
+|		2**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).
+|		Go to expr to compute that expression.
+|
+|	tentox
+|	1. If |X| > 16480*log_10(2) (base 10 log of 2), go to ExpBig.
+|
+|	2. If |X| < 2**(-70), go to ExpSm.
+|
+|	3. Set y := X*log_2(10)*64 (base 2 log of 10). Set
+|		N := round-to-int(y). Decompose N as
+|		 N = 64(M + M') + j,  j = 0,1,2,...,63.
+|
+|	4. Define r as
+|		r := ((X - N*L1)-N*L2) * L10
+|		where L1, L2 are the leading and trailing parts of log_10(2)/64
+|		and L10 is the natural log of 10. Then
+|		10**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).
+|		Go to expr to compute that expression.
+|
+|	expr
+|	1. Fetch 2**(j/64) from table as Fact1 and Fact2.
+|
+|	2. Overwrite Fact1 and Fact2 by
+|		Fact1 := 2**(M) * Fact1
+|		Fact2 := 2**(M) * Fact2
+|		Thus Fact1 + Fact2 = 2**(M) * 2**(j/64).
+|
+|	3. Calculate P where 1 + P approximates exp(r):
+|		P = r + r*r*(A1+r*(A2+...+r*A5)).
+|
+|	4. Let AdjFact := 2**(M'). Return
+|		AdjFact * ( Fact1 + ((Fact1*P) + Fact2) ).
+|		Exit.
+|
+|	ExpBig
+|	1. Generate overflow by Huge * Huge if X > 0; otherwise, generate
+|		underflow by Tiny * Tiny.
+|
+|	ExpSm
+|	1. Return 1 + X.
+|
+
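
A C sketch of the twotox decomposition in steps 3-4 above. It computes 2**(j/64) and the 2**M, 2**M' scalings directly (via exp2 and ldexp) instead of reading EXPTBL, and uses libm's exp for the exp(r) kernel rather than the EXPA1-EXPA5 polynomial, so it shows only the N = 64(M+M') + j bookkeeping:

    #include <math.h>

    /* twotox, steps 3-4: write X = N/64 + r with |r| <= 1/128 and split
     * N = 64*(M + M') + j, 0 <= j <= 63.  Then
     *   2**X = 2**M' * 2**M * 2**(j/64) * exp(r*log2).
     * Splitting the exponent into M and M' keeps each scaling step within
     * the representable exponent range, as in the assembly.
     */
    static double twotox_model(double x)
    {
        double n  = nearbyint(64.0 * x);           /* N = round(64*X)   */
        double r  = (x - n / 64.0) * M_LN2;        /* r := r * log2     */
        long   ni = (long)n;
        long   j  = ni & 63;                       /* j = N mod 64      */
        long   l  = (ni - j) / 64;                 /* L, so N = 64L + j */
        long   m  = l / 2;                         /* M                 */
        long   mp = l - m;                         /* M'                */

        double fact = exp2((double)j / 64.0);      /* 2**(j/64)         */
        return ldexp(ldexp(fact * exp(r), (int)m), (int)mp);
    }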
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|STWOTOX	idnt	2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+BOUNDS1:	.long 0x3FB98000,0x400D80C0 | ... 2^(-70),16480
+BOUNDS2:	.long 0x3FB98000,0x400B9B07 | ... 2^(-70),16480 LOG2/LOG10
+
+L2TEN64:	.long 0x406A934F,0x0979A371 | ... 64LOG10/LOG2
+L10TWO1:	.long 0x3F734413,0x509F8000 | ... LOG2/64LOG10
+
+L10TWO2:	.long 0xBFCD0000,0xC0219DC1,0xDA994FD2,0x00000000
+
+LOG10:	.long 0x40000000,0x935D8DDD,0xAAA8AC17,0x00000000
+
+LOG2:	.long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+EXPA5:	.long 0x3F56C16D,0x6F7BD0B2
+EXPA4:	.long 0x3F811112,0x302C712C
+EXPA3:	.long 0x3FA55555,0x55554CC1
+EXPA2:	.long 0x3FC55555,0x55554A54
+EXPA1:	.long 0x3FE00000,0x00000000,0x00000000,0x00000000
+
+HUGE:	.long 0x7FFE0000,0xFFFFFFFF,0xFFFFFFFF,0x00000000
+TINY:	.long 0x00010000,0xFFFFFFFF,0xFFFFFFFF,0x00000000
+
+EXPTBL:
+	.long  0x3FFF0000,0x80000000,0x00000000,0x3F738000
+	.long  0x3FFF0000,0x8164D1F3,0xBC030773,0x3FBEF7CA
+	.long  0x3FFF0000,0x82CD8698,0xAC2BA1D7,0x3FBDF8A9
+	.long  0x3FFF0000,0x843A28C3,0xACDE4046,0x3FBCD7C9
+	.long  0x3FFF0000,0x85AAC367,0xCC487B15,0xBFBDE8DA
+	.long  0x3FFF0000,0x871F6196,0x9E8D1010,0x3FBDE85C
+	.long  0x3FFF0000,0x88980E80,0x92DA8527,0x3FBEBBF1
+	.long  0x3FFF0000,0x8A14D575,0x496EFD9A,0x3FBB80CA
+	.long  0x3FFF0000,0x8B95C1E3,0xEA8BD6E7,0xBFBA8373
+	.long  0x3FFF0000,0x8D1ADF5B,0x7E5BA9E6,0xBFBE9670
+	.long  0x3FFF0000,0x8EA4398B,0x45CD53C0,0x3FBDB700
+	.long  0x3FFF0000,0x9031DC43,0x1466B1DC,0x3FBEEEB0
+	.long  0x3FFF0000,0x91C3D373,0xAB11C336,0x3FBBFD6D
+	.long  0x3FFF0000,0x935A2B2F,0x13E6E92C,0xBFBDB319
+	.long  0x3FFF0000,0x94F4EFA8,0xFEF70961,0x3FBDBA2B
+	.long  0x3FFF0000,0x96942D37,0x20185A00,0x3FBE91D5
+	.long  0x3FFF0000,0x9837F051,0x8DB8A96F,0x3FBE8D5A
+	.long  0x3FFF0000,0x99E04593,0x20B7FA65,0xBFBCDE7B
+	.long  0x3FFF0000,0x9B8D39B9,0xD54E5539,0xBFBEBAAF
+	.long  0x3FFF0000,0x9D3ED9A7,0x2CFFB751,0xBFBD86DA
+	.long  0x3FFF0000,0x9EF53260,0x91A111AE,0xBFBEBEDD
+	.long  0x3FFF0000,0xA0B0510F,0xB9714FC2,0x3FBCC96E
+	.long  0x3FFF0000,0xA2704303,0x0C496819,0xBFBEC90B
+	.long  0x3FFF0000,0xA43515AE,0x09E6809E,0x3FBBD1DB
+	.long  0x3FFF0000,0xA5FED6A9,0xB15138EA,0x3FBCE5EB
+	.long  0x3FFF0000,0xA7CD93B4,0xE965356A,0xBFBEC274
+	.long  0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x3FBEA83C
+	.long  0x3FFF0000,0xAB7A39B5,0xA93ED337,0x3FBECB00
+	.long  0x3FFF0000,0xAD583EEA,0x42A14AC6,0x3FBE9301
+	.long  0x3FFF0000,0xAF3B78AD,0x690A4375,0xBFBD8367
+	.long  0x3FFF0000,0xB123F581,0xD2AC2590,0xBFBEF05F
+	.long  0x3FFF0000,0xB311C412,0xA9112489,0x3FBDFB3C
+	.long  0x3FFF0000,0xB504F333,0xF9DE6484,0x3FBEB2FB
+	.long  0x3FFF0000,0xB6FD91E3,0x28D17791,0x3FBAE2CB
+	.long  0x3FFF0000,0xB8FBAF47,0x62FB9EE9,0x3FBCDC3C
+	.long  0x3FFF0000,0xBAFF5AB2,0x133E45FB,0x3FBEE9AA
+	.long  0x3FFF0000,0xBD08A39F,0x580C36BF,0xBFBEAEFD
+	.long  0x3FFF0000,0xBF1799B6,0x7A731083,0xBFBCBF51
+	.long  0x3FFF0000,0xC12C4CCA,0x66709456,0x3FBEF88A
+	.long  0x3FFF0000,0xC346CCDA,0x24976407,0x3FBD83B2
+	.long  0x3FFF0000,0xC5672A11,0x5506DADD,0x3FBDF8AB
+	.long  0x3FFF0000,0xC78D74C8,0xABB9B15D,0xBFBDFB17
+	.long  0x3FFF0000,0xC9B9BD86,0x6E2F27A3,0xBFBEFE3C
+	.long  0x3FFF0000,0xCBEC14FE,0xF2727C5D,0xBFBBB6F8
+	.long  0x3FFF0000,0xCE248C15,0x1F8480E4,0xBFBCEE53
+	.long  0x3FFF0000,0xD06333DA,0xEF2B2595,0xBFBDA4AE
+	.long  0x3FFF0000,0xD2A81D91,0xF12AE45A,0x3FBC9124
+	.long  0x3FFF0000,0xD4F35AAB,0xCFEDFA1F,0x3FBEB243
+	.long  0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x3FBDE69A
+	.long  0x3FFF0000,0xD99D15C2,0x78AFD7B6,0xBFB8BC61
+	.long  0x3FFF0000,0xDBFBB797,0xDAF23755,0x3FBDF610
+	.long  0x3FFF0000,0xDE60F482,0x5E0E9124,0xBFBD8BE1
+	.long  0x3FFF0000,0xE0CCDEEC,0x2A94E111,0x3FBACB12
+	.long  0x3FFF0000,0xE33F8972,0xBE8A5A51,0x3FBB9BFE
+	.long  0x3FFF0000,0xE5B906E7,0x7C8348A8,0x3FBCF2F4
+	.long  0x3FFF0000,0xE8396A50,0x3C4BDC68,0x3FBEF22F
+	.long  0x3FFF0000,0xEAC0C6E7,0xDD24392F,0xBFBDBF4A
+	.long  0x3FFF0000,0xED4F301E,0xD9942B84,0x3FBEC01A
+	.long  0x3FFF0000,0xEFE4B99B,0xDCDAF5CB,0x3FBE8CAC
+	.long  0x3FFF0000,0xF281773C,0x59FFB13A,0xBFBCBB3F
+	.long  0x3FFF0000,0xF5257D15,0x2486CC2C,0x3FBEF73A
+	.long  0x3FFF0000,0xF7D0DF73,0x0AD13BB9,0xBFB8B795
+	.long  0x3FFF0000,0xFA83B2DB,0x722A033A,0x3FBEF84B
+	.long  0x3FFF0000,0xFD3E0C0C,0xF486C175,0xBFBEF581
+
+	.set	N,L_SCR1
+
+	.set	X,FP_SCR1
+	.set	XDCARE,X+2
+	.set	XFRAC,X+4
+
+	.set	ADJFACT,FP_SCR2
+
+	.set	FACT1,FP_SCR3
+	.set	FACT1HI,FACT1+4
+	.set	FACT1LOW,FACT1+8
+
+	.set	FACT2,FP_SCR4
+	.set	FACT2HI,FACT2+4
+	.set	FACT2LOW,FACT2+8
+
+	| xref	t_unfl
+	|xref	t_ovfl
+	|xref	t_frcinx
+
+	.global	stwotoxd
+stwotoxd:
+|--ENTRY POINT FOR 2**(X) FOR DENORMALIZED ARGUMENT
+
+	fmovel		%d1,%fpcr		| ...set user's rounding mode/precision
+	fmoves		#0x3F800000,%fp0  | ...RETURN 1 + X
+	movel		(%a0),%d0
+	orl		#0x00800001,%d0
+	fadds		%d0,%fp0
+	bra		t_frcinx
+
+	.global	stwotox
+stwotox:
+|--ENTRY POINT FOR 2**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+	fmovemx	(%a0),%fp0-%fp0	| ...LOAD INPUT, do not set cc's
+
+	movel		(%a0),%d0
+	movew		4(%a0),%d0
+	fmovex		%fp0,X(%a6)
+	andil		#0x7FFFFFFF,%d0
+
+	cmpil		#0x3FB98000,%d0		| ...|X| >= 2**(-70)?
+	bges		TWOOK1
+	bra		EXPBORS
+
+TWOOK1:
+	cmpil		#0x400D80C0,%d0		| ...|X| > 16480?
+	bles		TWOMAIN
+	bra		EXPBORS
+
+
+TWOMAIN:
+|--USUAL CASE, 2^(-70) <= |X| <= 16480
+
+	fmovex		%fp0,%fp1
+	fmuls		#0x42800000,%fp1  | ...64 * X
+
+	fmovel		%fp1,N(%a6)		| ...N = ROUND-TO-INT(64 X)
+	movel		%d2,-(%sp)
+	lea		EXPTBL,%a1	| ...LOAD ADDRESS OF TABLE OF 2^(J/64)
+	fmovel		N(%a6),%fp1		| ...N --> FLOATING FMT
+	movel		N(%a6),%d0
+	movel		%d0,%d2
+	andil		#0x3F,%d0		| ...D0 IS J
+	asll		#4,%d0		| ...DISPLACEMENT FOR 2^(J/64)
+	addal		%d0,%a1		| ...ADDRESS FOR 2^(J/64)
+	asrl		#6,%d2		| ...d2 IS L, N = 64L + J
+	movel		%d2,%d0
+	asrl		#1,%d0		| ...D0 IS M
+	subl		%d0,%d2		| ...d2 IS M', N = 64(M+M') + J
+	addil		#0x3FFF,%d2
+	movew		%d2,ADJFACT(%a6)	| ...ADJFACT IS 2^(M')
+	movel		(%sp)+,%d2
+|--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+|--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+|--ADJFACT = 2^(M').
+|--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
+
+	fmuls		#0x3C800000,%fp1  | ...(1/64)*N
+	movel		(%a1)+,FACT1(%a6)
+	movel		(%a1)+,FACT1HI(%a6)
+	movel		(%a1)+,FACT1LOW(%a6)
+	movew		(%a1)+,FACT2(%a6)
+	clrw		FACT2+2(%a6)
+
+	fsubx		%fp1,%fp0		| ...X - (1/64)*INT(64 X)
+
+	movew		(%a1)+,FACT2HI(%a6)
+	clrw		FACT2HI+2(%a6)
+	clrl		FACT2LOW(%a6)
+	addw		%d0,FACT1(%a6)
+
+	fmulx		LOG2,%fp0	| ...FP0 IS R
+	addw		%d0,FACT2(%a6)
+
+	bra		expr
+
+EXPBORS:
+|--FPCR, D0 SAVED
+	cmpil		#0x3FFF8000,%d0
+	bgts		EXPBIG
+
+EXPSM:
+|--|X| IS SMALL, RETURN 1 + X
+
+	fmovel		%d1,%FPCR		|restore users exceptions
+	fadds		#0x3F800000,%fp0  | ...RETURN 1 + X
+
+	bra		t_frcinx
+
+EXPBIG:
+|--|X| IS LARGE, GENERATE OVERFLOW IF X > 0; ELSE GENERATE UNDERFLOW
+|--REGISTERS SAVED SO FAR ARE FPCR AND D0
+	movel		X(%a6),%d0
+	cmpil		#0,%d0
+	blts		EXPNEG
+
+	bclrb		#7,(%a0)		|t_ovfl expects positive value
+	bra		t_ovfl
+
+EXPNEG:
+	bclrb		#7,(%a0)		|t_unfl expects positive value
+	bra		t_unfl
+
+	.global	stentoxd
+stentoxd:
+|--ENTRY POINT FOR 10**(X) FOR DENORMALIZED ARGUMENT
+
+	fmovel		%d1,%fpcr		| ...set user's rounding mode/precision
+	fmoves		#0x3F800000,%fp0  | ...RETURN 1 + X
+	movel		(%a0),%d0
+	orl		#0x00800001,%d0
+	fadds		%d0,%fp0
+	bra		t_frcinx
+
+	.global	stentox
+stentox:
+|--ENTRY POINT FOR 10**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+	fmovemx	(%a0),%fp0-%fp0	| ...LOAD INPUT, do not set cc's
+
+	movel		(%a0),%d0
+	movew		4(%a0),%d0
+	fmovex		%fp0,X(%a6)
+	andil		#0x7FFFFFFF,%d0
+
+	cmpil		#0x3FB98000,%d0		| ...|X| >= 2**(-70)?
+	bges		TENOK1
+	bra		EXPBORS
+
+TENOK1:
+	cmpil		#0x400B9B07,%d0		| ...|X| <= 16480*log2/log10 ?
+	bles		TENMAIN
+	bra		EXPBORS
+
+TENMAIN:
+|--USUAL CASE, 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10
+
+	fmovex		%fp0,%fp1
+	fmuld		L2TEN64,%fp1	| ...X*64*LOG10/LOG2
+
+	fmovel		%fp1,N(%a6)		| ...N=INT(X*64*LOG10/LOG2)
+	movel		%d2,-(%sp)
+	lea		EXPTBL,%a1	| ...LOAD ADDRESS OF TABLE OF 2^(J/64)
+	fmovel		N(%a6),%fp1		| ...N --> FLOATING FMT
+	movel		N(%a6),%d0
+	movel		%d0,%d2
+	andil		#0x3F,%d0		| ...D0 IS J
+	asll		#4,%d0		| ...DISPLACEMENT FOR 2^(J/64)
+	addal		%d0,%a1		| ...ADDRESS FOR 2^(J/64)
+	asrl		#6,%d2		| ...d2 IS L, N = 64L + J
+	movel		%d2,%d0
+	asrl		#1,%d0		| ...D0 IS M
+	subl		%d0,%d2		| ...d2 IS M', N = 64(M+M') + J
+	addil		#0x3FFF,%d2
+	movew		%d2,ADJFACT(%a6)	| ...ADJFACT IS 2^(M')
+	movel		(%sp)+,%d2
+
+|--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+|--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+|--ADJFACT = 2^(M').
+|--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
+
+	fmovex		%fp1,%fp2
+
+	fmuld		L10TWO1,%fp1	| ...N*(LOG2/64LOG10)_LEAD
+	movel		(%a1)+,FACT1(%a6)
+
+	fmulx		L10TWO2,%fp2	| ...N*(LOG2/64LOG10)_TRAIL
+
+	movel		(%a1)+,FACT1HI(%a6)
+	movel		(%a1)+,FACT1LOW(%a6)
+	fsubx		%fp1,%fp0		| ...X - N L_LEAD
+	movew		(%a1)+,FACT2(%a6)
+
+	fsubx		%fp2,%fp0		| ...X - N L_TRAIL
+
+	clrw		FACT2+2(%a6)
+	movew		(%a1)+,FACT2HI(%a6)
+	clrw		FACT2HI+2(%a6)
+	clrl		FACT2LOW(%a6)
+
+	fmulx		LOG10,%fp0	| ...FP0 IS R
+
+	addw		%d0,FACT1(%a6)
+	addw		%d0,FACT2(%a6)
+
+expr:
+|--FPCR, FP2, FP3 ARE SAVED IN ORDER AS SHOWN.
+|--ADJFACT CONTAINS 2**(M'), FACT1 + FACT2 = 2**(M) * 2**(J/64).
+|--FP0 IS R. THE FOLLOWING CODE COMPUTES
+|--	2**(M'+M) * 2**(J/64) * EXP(R)
+
+	fmovex		%fp0,%fp1
+	fmulx		%fp1,%fp1		| ...FP1 IS S = R*R
+
+	fmoved		EXPA5,%fp2	| ...FP2 IS A5
+	fmoved		EXPA4,%fp3	| ...FP3 IS A4
+
+	fmulx		%fp1,%fp2		| ...FP2 IS S*A5
+	fmulx		%fp1,%fp3		| ...FP3 IS S*A4
+
+	faddd		EXPA3,%fp2	| ...FP2 IS A3+S*A5
+	faddd		EXPA2,%fp3	| ...FP3 IS A2+S*A4
+
+	fmulx		%fp1,%fp2		| ...FP2 IS S*(A3+S*A5)
+	fmulx		%fp1,%fp3		| ...FP3 IS S*(A2+S*A4)
+
+	faddd		EXPA1,%fp2	| ...FP2 IS A1+S*(A3+S*A5)
+	fmulx		%fp0,%fp3		| ...FP3 IS R*S*(A2+S*A4)
+
+	fmulx		%fp1,%fp2		| ...FP2 IS S*(A1+S*(A3+S*A5))
+	faddx		%fp3,%fp0		| ...FP0 IS R+R*S*(A2+S*A4)
+
+	faddx		%fp2,%fp0		| ...FP0 IS EXP(R) - 1
+
+
+|--FINAL RECONSTRUCTION PROCESS
+|--EXP(X) = 2^M*2^(J/64) + 2^M*2^(J/64)*(EXP(R)-1)  -  (1 OR 0)
+
+	fmulx		FACT1(%a6),%fp0
+	faddx		FACT2(%a6),%fp0
+	faddx		FACT1(%a6),%fp0
+
+	fmovel		%d1,%FPCR		|restore user's exceptions
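+|--ADJFACT RECEIVED ITS BIASED EXPONENT (0x3FFF+M') ABOVE; THE NEXT
+|--THREE INSTRUCTIONS SET ITS MANTISSA TO 1.0, SO ADJFACT = 2^(M').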
+	clrw		ADJFACT+2(%a6)
+	movel		#0x80000000,ADJFACT+4(%a6)
+	clrl		ADJFACT+8(%a6)
+	fmulx		ADJFACT(%a6),%fp0	| ...FINAL ADJUSTMENT
+
+	bra		t_frcinx
+
+	|end
diff --git a/arch/m68k/fpsp040/tbldo.S b/arch/m68k/fpsp040/tbldo.S
new file mode 100644
index 0000000..fe60cf4
--- /dev/null
+++ b/arch/m68k/fpsp040/tbldo.S
@@ -0,0 +1,554 @@
+|
+|	tbldo.sa 3.1 12/10/90
+|
+| Modified:
+|	8/16/90	chinds	The table was constructed to use only one level
+|			of indirection in do_func for monadic
+|			functions.  Dyadic functions require two
+|			levels, and the tables are still contained
+|			in do_func.  The table is arranged for
+|			index with a 10-bit index, with the first
+|			7 bits the opcode, and the remaining 3
+|			the stag.  For dyadic functions, all
+|			valid addresses are to the generic entry
+|			point.
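+|
+|	Illustrative index computation: with a 7-bit opcode, a 3-bit
+|	stag, and one long per entry, the byte offset into the table
+|	is ((opcode<<3)|stag)*4.  For example, fint of a denormalized
+|	operand (opcode $01, stag 4) selects the sintd entry below.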
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|TBLDO	idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+	|xref	ld_pinf,ld_pone,ld_ppi2
+	|xref	t_dz2,t_operr
+	|xref	serror,sone,szero,sinf,snzrinx
+	|xref	sopr_inf,spi_2,src_nan,szr_inf
+
+	|xref	smovcr
+	|xref	pmod,prem,pscale
+	|xref	satanh,satanhd
+	|xref	sacos,sacosd,sasin,sasind,satan,satand
+	|xref	setox,setoxd,setoxm1,setoxm1d,setoxm1i
+	|xref	sgetexp,sgetexpd,sgetman,sgetmand
+	|xref	sint,sintd,sintrz
+	|xref	ssincos,ssincosd,ssincosi,ssincosnan,ssincosz
+	|xref	scos,scosd,ssin,ssind,stan,stand
+	|xref	scosh,scoshd,ssinh,ssinhd,stanh,stanhd
+	|xref	sslog10,sslog2,sslogn,sslognp1
+	|xref	sslog10d,sslog2d,sslognd,slognp1d
+	|xref	stentox,stentoxd,stwotox,stwotoxd
+
+|	instruction		;opcode-stag Notes
+	.global	tblpre
+tblpre:
+	.long	smovcr		|$00-0 fmovecr all
+	.long	smovcr		|$00-1 fmovecr all
+	.long	smovcr		|$00-2 fmovecr all
+	.long	smovcr		|$00-3 fmovecr all
+	.long	smovcr		|$00-4 fmovecr all
+	.long	smovcr		|$00-5 fmovecr all
+	.long	smovcr		|$00-6 fmovecr all
+	.long	smovcr		|$00-7 fmovecr all
+
+	.long	sint		|$01-0 fint norm
+	.long	szero		|$01-1 fint zero
+	.long	sinf		|$01-2 fint inf
+	.long	src_nan		|$01-3 fint nan
+	.long	sintd		|$01-4 fint denorm inx
+	.long	serror		|$01-5 fint ERROR
+	.long	serror		|$01-6 fint ERROR
+	.long	serror		|$01-7 fint ERROR
+
+	.long	ssinh		|$02-0 fsinh norm
+	.long	szero		|$02-1 fsinh zero
+	.long	sinf		|$02-2 fsinh inf
+	.long	src_nan		|$02-3 fsinh nan
+	.long	ssinhd		|$02-4 fsinh denorm
+	.long	serror		|$02-5 fsinh ERROR
+	.long	serror		|$02-6 fsinh ERROR
+	.long	serror		|$02-7 fsinh ERROR
+
+	.long	sintrz		|$03-0 fintrz norm
+	.long	szero		|$03-1 fintrz zero
+	.long	sinf		|$03-2 fintrz inf
+	.long	src_nan		|$03-3 fintrz nan
+	.long	snzrinx		|$03-4 fintrz denorm inx
+	.long	serror		|$03-5 fintrz ERROR
+	.long	serror		|$03-6 fintrz ERROR
+	.long	serror		|$03-7 fintrz ERROR
+
+	.long	serror		|$04-0 ERROR - illegal extension
+	.long	serror		|$04-1 ERROR - illegal extension
+	.long	serror		|$04-2 ERROR - illegal extension
+	.long	serror		|$04-3 ERROR - illegal extension
+	.long	serror		|$04-4 ERROR - illegal extension
+	.long	serror		|$04-5 ERROR - illegal extension
+	.long	serror		|$04-6 ERROR - illegal extension
+	.long	serror		|$04-7 ERROR - illegal extension
+
+	.long	serror		|$05-0 ERROR - illegal extension
+	.long	serror		|$05-1 ERROR - illegal extension
+	.long	serror		|$05-2 ERROR - illegal extension
+	.long	serror		|$05-3 ERROR - illegal extension
+	.long	serror		|$05-4 ERROR - illegal extension
+	.long	serror		|$05-5 ERROR - illegal extension
+	.long	serror		|$05-6 ERROR - illegal extension
+	.long	serror		|$05-7 ERROR - illegal extension
+
+	.long	sslognp1	|$06-0 flognp1 norm
+	.long	szero		|$06-1 flognp1 zero
+	.long	sopr_inf	|$06-2 flognp1 inf
+	.long	src_nan		|$06-3 flognp1 nan
+	.long	slognp1d	|$06-4 flognp1 denorm
+	.long	serror		|$06-5 flognp1 ERROR
+	.long	serror		|$06-6 flognp1 ERROR
+	.long	serror		|$06-7 flognp1 ERROR
+
+	.long	serror		|$07-0 ERROR - illegal extension
+	.long	serror		|$07-1 ERROR - illegal extension
+	.long	serror		|$07-2 ERROR - illegal extension
+	.long	serror		|$07-3 ERROR - illegal extension
+	.long	serror		|$07-4 ERROR - illegal extension
+	.long	serror		|$07-5 ERROR - illegal extension
+	.long	serror		|$07-6 ERROR - illegal extension
+	.long	serror		|$07-7 ERROR - illegal extension
+
+	.long	setoxm1		|$08-0 fetoxm1 norm
+	.long	szero		|$08-1 fetoxm1 zero
+	.long	setoxm1i	|$08-2 fetoxm1 inf
+	.long	src_nan		|$08-3 fetoxm1 nan
+	.long	setoxm1d	|$08-4 fetoxm1 denorm
+	.long	serror		|$08-5 fetoxm1 ERROR
+	.long	serror		|$08-6 fetoxm1 ERROR
+	.long	serror		|$08-7 fetoxm1 ERROR
+
+	.long	stanh		|$09-0 ftanh norm
+	.long	szero		|$09-1 ftanh zero
+	.long	sone		|$09-2 ftanh inf
+	.long	src_nan		|$09-3 ftanh nan
+	.long	stanhd		|$09-4 ftanh denorm
+	.long	serror		|$09-5 ftanh ERROR
+	.long	serror		|$09-6 ftanh ERROR
+	.long	serror		|$09-7 ftanh ERROR
+
+	.long	satan		|$0a-0 fatan norm
+	.long	szero		|$0a-1 fatan zero
+	.long	spi_2		|$0a-2 fatan inf
+	.long	src_nan		|$0a-3 fatan nan
+	.long	satand		|$0a-4 fatan denorm
+	.long	serror		|$0a-5 fatan ERROR
+	.long	serror		|$0a-6 fatan ERROR
+	.long	serror		|$0a-7 fatan ERROR
+
+	.long	serror		|$0b-0 ERROR - illegal extension
+	.long	serror		|$0b-1 ERROR - illegal extension
+	.long	serror		|$0b-2 ERROR - illegal extension
+	.long	serror		|$0b-3 ERROR - illegal extension
+	.long	serror		|$0b-4 ERROR - illegal extension
+	.long	serror		|$0b-5 ERROR - illegal extension
+	.long	serror		|$0b-6 ERROR - illegal extension
+	.long	serror		|$0b-7 ERROR - illegal extension
+
+	.long	sasin		|$0c-0 fasin norm
+	.long	szero		|$0c-1 fasin zero
+	.long	t_operr		|$0c-2 fasin inf
+	.long	src_nan		|$0c-3 fasin nan
+	.long	sasind		|$0c-4 fasin denorm
+	.long	serror		|$0c-5 fasin ERROR
+	.long	serror		|$0c-6 fasin ERROR
+	.long	serror		|$0c-7 fasin ERROR
+
+	.long	satanh		|$0d-0 fatanh norm
+	.long	szero		|$0d-1 fatanh zero
+	.long	t_operr		|$0d-2 fatanh inf
+	.long	src_nan		|$0d-3 fatanh nan
+	.long	satanhd		|$0d-4 fatanh denorm
+	.long	serror		|$0d-5 fatanh ERROR
+	.long	serror		|$0d-6 fatanh ERROR
+	.long	serror		|$0d-7 fatanh ERROR
+
+	.long	ssin		|$0e-0 fsin norm
+	.long	szero		|$0e-1 fsin zero
+	.long	t_operr		|$0e-2 fsin inf
+	.long	src_nan		|$0e-3 fsin nan
+	.long	ssind		|$0e-4 fsin denorm
+	.long	serror		|$0e-5 fsin ERROR
+	.long	serror		|$0e-6 fsin ERROR
+	.long	serror		|$0e-7 fsin ERROR
+
+	.long	stan		|$0f-0 ftan norm
+	.long	szero		|$0f-1 ftan zero
+	.long	t_operr		|$0f-2 ftan inf
+	.long	src_nan		|$0f-3 ftan nan
+	.long	stand		|$0f-4 ftan denorm
+	.long	serror		|$0f-5 ftan ERROR
+	.long	serror		|$0f-6 ftan ERROR
+	.long	serror		|$0f-7 ftan ERROR
+
+	.long	setox		|$10-0 fetox norm
+	.long	ld_pone		|$10-1 fetox zero
+	.long	szr_inf		|$10-2 fetox inf
+	.long	src_nan		|$10-3 fetox nan
+	.long	setoxd		|$10-4 fetox denorm
+	.long	serror		|$10-5 fetox ERROR
+	.long	serror		|$10-6 fetox ERROR
+	.long	serror		|$10-7 fetox ERROR
+
+	.long	stwotox		|$11-0 ftwotox norm
+	.long	ld_pone		|$11-1 ftwotox zero
+	.long	szr_inf		|$11-2 ftwotox inf
+	.long	src_nan		|$11-3 ftwotox nan
+	.long	stwotoxd	|$11-4 ftwotox denorm
+	.long	serror		|$11-5 ftwotox ERROR
+	.long	serror		|$11-6 ftwotox ERROR
+	.long	serror		|$11-7 ftwotox ERROR
+
+	.long	stentox		|$12-0 ftentox norm
+	.long	ld_pone		|$12-1 ftentox zero
+	.long	szr_inf		|$12-2 ftentox inf
+	.long	src_nan		|$12-3 ftentox nan
+	.long	stentoxd	|$12-4 ftentox denorm
+	.long	serror		|$12-5 ftentox ERROR
+	.long	serror		|$12-6 ftentox ERROR
+	.long	serror		|$12-7 ftentox ERROR
+
+	.long	serror		|$13-0 ERROR - illegal extension
+	.long	serror		|$13-1 ERROR - illegal extension
+	.long	serror		|$13-2 ERROR - illegal extension
+	.long	serror		|$13-3 ERROR - illegal extension
+	.long	serror		|$13-4 ERROR - illegal extension
+	.long	serror		|$13-5 ERROR - illegal extension
+	.long	serror		|$13-6 ERROR - illegal extension
+	.long	serror		|$13-7 ERROR - illegal extension
+
+	.long	sslogn		|$14-0 flogn norm
+	.long	t_dz2		|$14-1 flogn zero
+	.long	sopr_inf	|$14-2 flogn inf
+	.long	src_nan		|$14-3 flogn nan
+	.long	sslognd		|$14-4 flogn denorm
+	.long	serror		|$14-5 flogn ERROR
+	.long	serror		|$14-6 flogn ERROR
+	.long	serror		|$14-7 flogn ERROR
+
+	.long	sslog10		|$15-0 flog10 norm
+	.long	t_dz2		|$15-1 flog10 zero
+	.long	sopr_inf	|$15-2 flog10 inf
+	.long	src_nan		|$15-3 flog10 nan
+	.long	sslog10d	|$15-4 flog10 denorm
+	.long	serror		|$15-5 flog10 ERROR
+	.long	serror		|$15-6 flog10 ERROR
+	.long	serror		|$15-7 flog10 ERROR
+
+	.long	sslog2		|$16-0 flog2 norm
+	.long	t_dz2		|$16-1 flog2 zero
+	.long	sopr_inf	|$16-2 flog2 inf
+	.long	src_nan		|$16-3 flog2 nan
+	.long	sslog2d		|$16-4 flog2 denorm
+	.long	serror		|$16-5 flog2 ERROR
+	.long	serror		|$16-6 flog2 ERROR
+	.long	serror		|$16-7 flog2 ERROR
+
+	.long	serror		|$17-0 ERROR - illegal extension
+	.long	serror		|$17-1 ERROR - illegal extension
+	.long	serror		|$17-2 ERROR - illegal extension
+	.long	serror		|$17-3 ERROR - illegal extension
+	.long	serror		|$17-4 ERROR - illegal extension
+	.long	serror		|$17-5 ERROR - illegal extension
+	.long	serror		|$17-6 ERROR - illegal extension
+	.long	serror		|$17-7 ERROR - illegal extension
+
+	.long	serror		|$18-0 ERROR - illegal extension
+	.long	serror		|$18-1 ERROR - illegal extension
+	.long	serror		|$18-2 ERROR - illegal extension
+	.long	serror		|$18-3 ERROR - illegal extension
+	.long	serror		|$18-4 ERROR - illegal extension
+	.long	serror		|$18-5 ERROR - illegal extension
+	.long	serror		|$18-6 ERROR - illegal extension
+	.long	serror		|$18-7 ERROR - illegal extension
+
+	.long	scosh		|$19-0 fcosh norm
+	.long	ld_pone		|$19-1 fcosh zero
+	.long	ld_pinf		|$19-2 fcosh inf
+	.long	src_nan		|$19-3 fcosh nan
+	.long	scoshd		|$19-4 fcosh denorm
+	.long	serror		|$19-5 fcosh ERROR
+	.long	serror		|$19-6 fcosh ERROR
+	.long	serror		|$19-7 fcosh ERROR
+
+	.long	serror		|$1a-0 ERROR - illegal extension
+	.long	serror		|$1a-1 ERROR - illegal extension
+	.long	serror		|$1a-2 ERROR - illegal extension
+	.long	serror		|$1a-3 ERROR - illegal extension
+	.long	serror		|$1a-4 ERROR - illegal extension
+	.long	serror		|$1a-5 ERROR - illegal extension
+	.long	serror		|$1a-6 ERROR - illegal extension
+	.long	serror		|$1a-7 ERROR - illegal extension
+
+	.long	serror		|$1b-0 ERROR - illegal extension
+	.long	serror		|$1b-1 ERROR - illegal extension
+	.long	serror		|$1b-2 ERROR - illegal extension
+	.long	serror		|$1b-3 ERROR - illegal extension
+	.long	serror		|$1b-4 ERROR - illegal extension
+	.long	serror		|$1b-5 ERROR - illegal extension
+	.long	serror		|$1b-6 ERROR - illegal extension
+	.long	serror		|$1b-7 ERROR - illegal extension
+
+	.long	sacos		|$1c-0 facos norm
+	.long	ld_ppi2		|$1c-1 facos zero
+	.long	t_operr		|$1c-2 facos inf
+	.long	src_nan		|$1c-3 facos nan
+	.long	sacosd		|$1c-4 facos denorm
+	.long	serror		|$1c-5 facos ERROR
+	.long	serror		|$1c-6 facos ERROR
+	.long	serror		|$1c-7 facos ERROR
+
+	.long	scos		|$1d-0 fcos norm
+	.long	ld_pone		|$1d-1 fcos zero
+	.long	t_operr		|$1d-2 fcos inf
+	.long	src_nan		|$1d-3 fcos nan
+	.long	scosd		|$1d-4 fcos denorm
+	.long	serror		|$1d-5 fcos ERROR
+	.long	serror		|$1d-6 fcos ERROR
+	.long	serror		|$1d-7 fcos ERROR
+
+	.long	sgetexp		|$1e-0 fgetexp norm
+	.long	szero		|$1e-1 fgetexp zero
+	.long	t_operr		|$1e-2 fgetexp inf
+	.long	src_nan		|$1e-3 fgetexp nan
+	.long	sgetexpd	|$1e-4 fgetexp denorm
+	.long	serror		|$1e-5 fgetexp ERROR
+	.long	serror		|$1e-6 fgetexp ERROR
+	.long	serror		|$1e-7 fgetexp ERROR
+
+	.long	sgetman		|$1f-0 fgetman norm
+	.long	szero		|$1f-1 fgetman zero
+	.long	t_operr		|$1f-2 fgetman inf
+	.long	src_nan		|$1f-3 fgetman nan
+	.long	sgetmand	|$1f-4 fgetman denorm
+	.long	serror		|$1f-5 fgetman ERROR
+	.long	serror		|$1f-6 fgetman ERROR
+	.long	serror		|$1f-7 fgetman ERROR
+
+	.long	serror		|$20-0 ERROR - illegal extension
+	.long	serror		|$20-1 ERROR - illegal extension
+	.long	serror		|$20-2 ERROR - illegal extension
+	.long	serror		|$20-3 ERROR - illegal extension
+	.long	serror		|$20-4 ERROR - illegal extension
+	.long	serror		|$20-5 ERROR - illegal extension
+	.long	serror		|$20-6 ERROR - illegal extension
+	.long	serror		|$20-7 ERROR - illegal extension
+
+	.long	pmod		|$21-0 fmod all
+	.long	pmod		|$21-1 fmod all
+	.long	pmod		|$21-2 fmod all
+	.long	pmod		|$21-3 fmod all
+	.long	pmod		|$21-4 fmod all
+	.long	serror		|$21-5 fmod ERROR
+	.long	serror		|$21-6 fmod ERROR
+	.long	serror		|$21-7 fmod ERROR
+
+	.long	serror		|$22-0 ERROR - illegal extension
+	.long	serror		|$22-1 ERROR - illegal extension
+	.long	serror		|$22-2 ERROR - illegal extension
+	.long	serror		|$22-3 ERROR - illegal extension
+	.long	serror		|$22-4 ERROR - illegal extension
+	.long	serror		|$22-5 ERROR - illegal extension
+	.long	serror		|$22-6 ERROR - illegal extension
+	.long	serror		|$22-7 ERROR - illegal extension
+
+	.long	serror		|$23-0 ERROR - illegal extension
+	.long	serror		|$23-1 ERROR - illegal extension
+	.long	serror		|$23-2 ERROR - illegal extension
+	.long	serror		|$23-3 ERROR - illegal extension
+	.long	serror		|$23-4 ERROR - illegal extension
+	.long	serror		|$23-5 ERROR - illegal extension
+	.long	serror		|$23-6 ERROR - illegal extension
+	.long	serror		|$23-7 ERROR - illegal extension
+
+	.long	serror		|$24-0 ERROR - illegal extension
+	.long	serror		|$24-1 ERROR - illegal extension
+	.long	serror		|$24-2 ERROR - illegal extension
+	.long	serror		|$24-3 ERROR - illegal extension
+	.long	serror		|$24-4 ERROR - illegal extension
+	.long	serror		|$24-5 ERROR - illegal extension
+	.long	serror		|$24-6 ERROR - illegal extension
+	.long	serror		|$24-7 ERROR - illegal extension
+
+	.long	prem		|$25-0 frem all
+	.long	prem		|$25-1 frem all
+	.long	prem		|$25-2 frem all
+	.long	prem		|$25-3 frem all
+	.long	prem		|$25-4 frem all
+	.long	serror		|$25-5 frem ERROR
+	.long	serror		|$25-6 frem ERROR
+	.long	serror		|$25-7 frem ERROR
+
+	.long	pscale		|$26-0 fscale all
+	.long	pscale		|$26-1 fscale all
+	.long	pscale		|$26-2 fscale all
+	.long	pscale		|$26-3 fscale all
+	.long	pscale		|$26-4 fscale all
+	.long	serror		|$26-5 fscale ERROR
+	.long	serror		|$26-6 fscale ERROR
+	.long	serror		|$26-7 fscale ERROR
+
+	.long	serror		|$27-0 ERROR - illegal extension
+	.long	serror		|$27-1 ERROR - illegal extension
+	.long	serror		|$27-2 ERROR - illegal extension
+	.long	serror		|$27-3 ERROR - illegal extension
+	.long	serror		|$27-4 ERROR - illegal extension
+	.long	serror		|$27-5 ERROR - illegal extension
+	.long	serror		|$27-6 ERROR - illegal extension
+	.long	serror		|$27-7 ERROR - illegal extension
+
+	.long	serror		|$28-0 ERROR - illegal extension
+	.long	serror		|$28-1 ERROR - illegal extension
+	.long	serror		|$28-2 ERROR - illegal extension
+	.long	serror		|$28-3 ERROR - illegal extension
+	.long	serror		|$28-4 ERROR - illegal extension
+	.long	serror		|$28-5 ERROR - illegal extension
+	.long	serror		|$28-6 ERROR - illegal extension
+	.long	serror		|$28-7 ERROR - illegal extension
+
+	.long	serror		|$29-0 ERROR - illegal extension
+	.long	serror		|$29-1 ERROR - illegal extension
+	.long	serror		|$29-2 ERROR - illegal extension
+	.long	serror		|$29-3 ERROR - illegal extension
+	.long	serror		|$29-4 ERROR - illegal extension
+	.long	serror		|$29-5 ERROR - illegal extension
+	.long	serror		|$29-6 ERROR - illegal extension
+	.long	serror		|$29-7 ERROR - illegal extension
+
+	.long	serror		|$2a-0 ERROR - illegal extension
+	.long	serror		|$2a-1 ERROR - illegal extension
+	.long	serror		|$2a-2 ERROR - illegal extension
+	.long	serror		|$2a-3 ERROR - illegal extension
+	.long	serror		|$2a-4 ERROR - illegal extension
+	.long	serror		|$2a-5 ERROR - illegal extension
+	.long	serror		|$2a-6 ERROR - illegal extension
+	.long	serror		|$2a-7 ERROR - illegal extension
+
+	.long	serror		|$2b-0 ERROR - illegal extension
+	.long	serror		|$2b-1 ERROR - illegal extension
+	.long	serror		|$2b-2 ERROR - illegal extension
+	.long	serror		|$2b-3 ERROR - illegal extension
+	.long	serror		|$2b-4 ERROR - illegal extension
+	.long	serror		|$2b-5 ERROR - illegal extension
+	.long	serror		|$2b-6 ERROR - illegal extension
+	.long	serror		|$2b-7 ERROR - illegal extension
+
+	.long	serror		|$2c-0 ERROR - illegal extension
+	.long	serror		|$2c-1 ERROR - illegal extension
+	.long	serror		|$2c-2 ERROR - illegal extension
+	.long	serror		|$2c-3 ERROR - illegal extension
+	.long	serror		|$2c-4 ERROR - illegal extension
+	.long	serror		|$2c-5 ERROR - illegal extension
+	.long	serror		|$2c-6 ERROR - illegal extension
+	.long	serror		|$2c-7 ERROR - illegal extension
+
+	.long	serror		|$2d-0 ERROR - illegal extension
+	.long	serror		|$2d-1 ERROR - illegal extension
+	.long	serror		|$2d-2 ERROR - illegal extension
+	.long	serror		|$2d-3 ERROR - illegal extension
+	.long	serror		|$2d-4 ERROR - illegal extension
+	.long	serror		|$2d-5 ERROR - illegal extension
+	.long	serror		|$2d-6 ERROR - illegal extension
+	.long	serror		|$2d-7 ERROR - illegal extension
+
+	.long	serror		|$2e-0 ERROR - illegal extension
+	.long	serror		|$2e-1 ERROR - illegal extension
+	.long	serror		|$2e-2 ERROR - illegal extension
+	.long	serror		|$2e-3 ERROR - illegal extension
+	.long	serror		|$2e-4 ERROR - illegal extension
+	.long	serror		|$2e-5 ERROR - illegal extension
+	.long	serror		|$2e-6 ERROR - illegal extension
+	.long	serror		|$2e-7 ERROR - illegal extension
+
+	.long	serror		|$2f-0 ERROR - illegal extension
+	.long	serror		|$2f-1 ERROR - illegal extension
+	.long	serror		|$2f-2 ERROR - illegal extension
+	.long	serror		|$2f-3 ERROR - illegal extension
+	.long	serror		|$2f-4 ERROR - illegal extension
+	.long	serror		|$2f-5 ERROR - illegal extension
+	.long	serror		|$2f-6 ERROR - illegal extension
+	.long	serror		|$2f-7 ERROR - illegal extension
+
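+|	Opcodes $30-$37 are all fsincos: the low three opcode bits
+|	select the FP register that receives the cosine result, so
+|	all eight encodings share the same handlers.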
+	.long	ssincos		|$30-0 fsincos norm
+	.long	ssincosz	|$30-1 fsincos zero
+	.long	ssincosi	|$30-2 fsincos inf
+	.long	ssincosnan	|$30-3 fsincos nan
+	.long	ssincosd	|$30-4 fsincos denorm
+	.long	serror		|$30-5 fsincos ERROR
+	.long	serror		|$30-6 fsincos ERROR
+	.long	serror		|$30-7 fsincos ERROR
+
+	.long	ssincos		|$31-0 fsincos norm
+	.long	ssincosz	|$31-1 fsincos zero
+	.long	ssincosi	|$31-2 fsincos inf
+	.long	ssincosnan	|$31-3 fsincos nan
+	.long	ssincosd	|$31-4 fsincos denorm
+	.long	serror		|$31-5 fsincos ERROR
+	.long	serror		|$31-6 fsincos ERROR
+	.long	serror		|$31-7 fsincos ERROR
+
+	.long	ssincos		|$32-0 fsincos norm
+	.long	ssincosz	|$32-1 fsincos zero
+	.long	ssincosi	|$32-2 fsincos inf
+	.long	ssincosnan	|$32-3 fsincos nan
+	.long	ssincosd	|$32-4 fsincos denorm
+	.long	serror		|$32-5 fsincos ERROR
+	.long	serror		|$32-6 fsincos ERROR
+	.long	serror		|$32-7 fsincos ERROR
+
+	.long	ssincos		|$33-0 fsincos norm
+	.long	ssincosz	|$33-1 fsincos zero
+	.long	ssincosi	|$33-2 fsincos inf
+	.long	ssincosnan	|$33-3 fsincos nan
+	.long	ssincosd	|$33-4 fsincos denorm
+	.long	serror		|$33-5 fsincos ERROR
+	.long	serror		|$33-6 fsincos ERROR
+	.long	serror		|$33-7 fsincos ERROR
+
+	.long	ssincos		|$34-0 fsincos norm
+	.long	ssincosz	|$34-1 fsincos zero
+	.long	ssincosi	|$34-2 fsincos inf
+	.long	ssincosnan	|$34-3 fsincos nan
+	.long	ssincosd	|$34-4 fsincos denorm
+	.long	serror		|$34-5 fsincos ERROR
+	.long	serror		|$34-6 fsincos ERROR
+	.long	serror		|$34-7 fsincos ERROR
+
+	.long	ssincos		|$35-0 fsincos norm
+	.long	ssincosz	|$35-1 fsincos zero
+	.long	ssincosi	|$35-2 fsincos inf
+	.long	ssincosnan	|$35-3 fsincos nan
+	.long	ssincosd	|$35-4 fsincos denorm
+	.long	serror		|$35-5 fsincos ERROR
+	.long	serror		|$35-6 fsincos ERROR
+	.long	serror		|$35-7 fsincos ERROR
+
+	.long	ssincos		|$36-0 fsincos norm
+	.long	ssincosz	|$36-1 fsincos zero
+	.long	ssincosi	|$36-2 fsincos inf
+	.long	ssincosnan	|$36-3 fsincos nan
+	.long	ssincosd	|$36-4 fsincos denorm
+	.long	serror		|$36-5 fsincos ERROR
+	.long	serror		|$36-6 fsincos ERROR
+	.long	serror		|$36-7 fsincos ERROR
+
+	.long	ssincos		|$37-0 fsincos norm
+	.long	ssincosz	|$37-1 fsincos zero
+	.long	ssincosi	|$37-2 fsincos inf
+	.long	ssincosnan	|$37-3 fsincos nan
+	.long	ssincosd	|$37-4 fsincos denorm
+	.long	serror		|$37-5 fsincos ERROR
+	.long	serror		|$37-6 fsincos ERROR
+	.long	serror		|$37-7 fsincos ERROR
+
+	|end
diff --git a/arch/m68k/fpsp040/util.S b/arch/m68k/fpsp040/util.S
new file mode 100644
index 0000000..452f3d6
--- /dev/null
+++ b/arch/m68k/fpsp040/util.S
@@ -0,0 +1,748 @@
+|
+|	util.sa 3.7 7/29/91
+|
+|	This file contains routines used by other programs.
+|
+|	ovf_res: used by overflow to force the correct
+|		 result. ovf_r_k, ovf_r_x2, ovf_r_x3 are
+|		 derivatives of this routine.
+|	get_fline: get user's opcode word
+|	g_dfmtou: returns the destination format.
+|	g_opcls: returns the opclass of the float instruction.
+|	g_rndpr: returns the rounding precision.
+|	reg_dest: write byte, word, or long data to Dn
+|
+|
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+|UTIL	idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	|xref	mem_read
+
+	.global	g_dfmtou
+	.global	g_opcls
+	.global	g_rndpr
+	.global	get_fline
+	.global	reg_dest
+
+|
+| Final result table for ovf_res. Note that the negative counterparts
+| are unnecessary as ovf_res always returns the sign separately from
+| the exponent.
+|					;+inf
+EXT_PINF:	.long	0x7fff0000,0x00000000,0x00000000,0x00000000
+|					;largest +ext
+EXT_PLRG:	.long	0x7ffe0000,0xffffffff,0xffffffff,0x00000000
+|					;largest magnitude +sgl in ext
+SGL_PLRG:	.long	0x407e0000,0xffffff00,0x00000000,0x00000000
+|					;largest magnitude +dbl in ext
+DBL_PLRG:	.long	0x43fe0000,0xffffffff,0xfffff800,0x00000000
+|					;largest -ext
+
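+| Jump table for ovf_res, indexed by (round precision << 2) + round
+| mode: precision 0=ext, 1=sgl, 2=dbl; mode 0=RN, 1=RZ, 2=RM, 3=RP.
+| The invalid precision code 3 falls into the error entries.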
+tblovfl:
+	.long	EXT_RN
+	.long	EXT_RZ
+	.long	EXT_RM
+	.long	EXT_RP
+	.long	SGL_RN
+	.long	SGL_RZ
+	.long	SGL_RM
+	.long	SGL_RP
+	.long	DBL_RN
+	.long	DBL_RZ
+	.long	DBL_RM
+	.long	DBL_RP
+	.long	error
+	.long	error
+	.long	error
+	.long	error
+
+
+|
+|	ovf_r_k --- overflow result calculation
+|
+| This entry point is used by kernel_ex.
+|
+| This forces the destination precision to be extended
+|
+| Input:	operand in ETEMP
+| Output:	a result is in ETEMP (internal extended format)
+|
+	.global	ovf_r_k
+ovf_r_k:
+	lea	ETEMP(%a6),%a0	|a0 points to source operand
+	bclrb	#sign_bit,ETEMP_EX(%a6)
+	sne	ETEMP_SGN(%a6)	|convert to internal IEEE format
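+|				;fall through to ovf_r_x2 below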
+
+|
+|	ovf_r_x2 --- overflow result calculation
+|
+| This entry point used by x_ovfl.  (opclass 0 and 2)
+|
+| Input		a0  points to an operand in the internal extended format
+| Output	a0  points to the result in the internal extended format
+|
+| This sets the round precision according to the user's FPCR unless the
+| instruction is fsgldiv or fsglmul or fsadd, fdadd, fssub, fdsub, fsmul,
+| fdmul, fsdiv, fddiv, fssqrt, fsmove, fdmove, fsabs, fdabs, fsneg, fdneg.
+| If the instruction is fsgldiv or fsglmul, the rounding precision must be
+| extended.  If the instruction is not fsgldiv or fsglmul but a force-
+| precision instruction, the rounding precision is then set to the force
+| precision.
+
+	.global	ovf_r_x2
+ovf_r_x2:
+	btstb	#E3,E_BYTE(%a6)		|check for nu exception
+	beql	ovf_e1_exc		|it is cu exception
+ovf_e3_exc:
+	movew	CMDREG3B(%a6),%d0		|get the command word
+	andiw	#0x00000060,%d0		|clear all bits except 6 and 5
+	cmpil	#0x00000040,%d0
+	beql	ovff_sgl		|force precision is single
+	cmpil	#0x00000060,%d0
+	beql	ovff_dbl		|force precision is double
+	movew	CMDREG3B(%a6),%d0		|get the command word again
+	andil	#0x7f,%d0			|clear all except operation
+	cmpil	#0x33,%d0
+	beql	ovf_fsgl		|fsglmul or fsgldiv
+	cmpil	#0x30,%d0
+	beql	ovf_fsgl
+	bra	ovf_fpcr		|instruction is none of the above
+|					;use FPCR
+ovf_e1_exc:
+	movew	CMDREG1B(%a6),%d0		|get command word
+	andil	#0x00000044,%d0		|clear all bits except 6 and 2
+	cmpil	#0x00000040,%d0
+	beql	ovff_sgl		|the instruction is force single
+	cmpil	#0x00000044,%d0
+	beql	ovff_dbl		|the instruction is force double
+	movew	CMDREG1B(%a6),%d0		|again get the command word
+	andil	#0x0000007f,%d0		|clear all except the op code
+	cmpil	#0x00000027,%d0
+	beql	ovf_fsgl		|fsglmul
+	cmpil	#0x00000024,%d0
+	beql	ovf_fsgl		|fsgldiv
+	bra	ovf_fpcr		|none of the above, use FPCR
+|
+|
+| Inst is either fsgldiv or fsglmul.  Force extended precision.
+|
+ovf_fsgl:
+	clrl	%d0
+	bra	ovf_res
+
+ovff_sgl:
+	movel	#0x00000001,%d0		|set single
+	bra	ovf_res
+ovff_dbl:
+	movel	#0x00000002,%d0		|set double
+	bra	ovf_res
+|
+| The precision is in the fpcr.
+|
+ovf_fpcr:
+	bfextu	FPCR_MODE(%a6){#0:#2},%d0 |set round precision
+	bra	ovf_res
+
+|
+|
+|	ovf_r_x3 --- overflow result calculation
+|
+| This entry point used by x_ovfl. (opclass 3 only)
+|
+| Input		a0  points to an operand in the internal extended format
+| Output	a0  points to the result in the internal extended format
+|
+| This sets the round precision according to the destination size.
+|
+	.global	ovf_r_x3
+ovf_r_x3:
+	bsr	g_dfmtou	|get dest fmt in d0{1:0}
+|				;for fmovout, the destination format
+|				;is the rounding precision
+
+|
+|	ovf_res --- overflow result calculation
+|
+| Input:
+|	a0	points to operand in internal extended format
+| Output:
+|	a0	points to result in internal extended format
+|
+	.global	ovf_res
+ovf_res:
+	lsll	#2,%d0		|move round precision to d0{3:2}
+	bfextu	FPCR_MODE(%a6){#2:#2},%d1 |set round mode
+	orl	%d1,%d0		|index is fmt:mode in d0{3:0}
+	leal	tblovfl,%a1	|load a1 with table address
+	movel	%a1@(%d0:l:4),%a1	|use d0 as index to the table
+	jmp	(%a1)		|go to the correct routine
+|
+|case DEST_FMT = EXT
+|
+EXT_RN:
+	leal	EXT_PINF,%a1	|answer is +/- infinity
+	bsetb	#inf_bit,FPSR_CC(%a6)
+	bra	set_sign	|now go set the sign
+EXT_RZ:
+	leal	EXT_PLRG,%a1	|answer is +/- large number
+	bra	set_sign	|now go set the sign
+EXT_RM:
+	tstb	LOCAL_SGN(%a0)	|if negative overflow
+	beqs	e_rm_pos
+e_rm_neg:
+	leal	EXT_PINF,%a1	|answer is negative infinity
+	orl	#neginf_mask,USER_FPSR(%a6)
+	bra	end_ovfr
+e_rm_pos:
+	leal	EXT_PLRG,%a1	|answer is large positive number
+	bra	end_ovfr
+EXT_RP:
+	tstb	LOCAL_SGN(%a0)	|if negative overflow
+	beqs	e_rp_pos
+e_rp_neg:
+	leal	EXT_PLRG,%a1	|answer is large negative number
+	bsetb	#neg_bit,FPSR_CC(%a6)
+	bra	end_ovfr
+e_rp_pos:
+	leal	EXT_PINF,%a1	|answer is positive infinity
+	bsetb	#inf_bit,FPSR_CC(%a6)
+	bra	end_ovfr
+|
+|case DEST_FMT = DBL
+|
+DBL_RN:
+	leal	EXT_PINF,%a1	|answer is +/- infinity
+	bsetb	#inf_bit,FPSR_CC(%a6)
+	bra	set_sign
+DBL_RZ:
+	leal	DBL_PLRG,%a1	|answer is +/- large number
+	bra	set_sign	|now go set the sign
+DBL_RM:
+	tstb	LOCAL_SGN(%a0)	|if negative overflow
+	beqs	d_rm_pos
+d_rm_neg:
+	leal	EXT_PINF,%a1	|answer is negative infinity
+	orl	#neginf_mask,USER_FPSR(%a6)
+	bra	end_ovfr	|inf is same for all precisions (ext,dbl,sgl)
+d_rm_pos:
+	leal	DBL_PLRG,%a1	|answer is large positive number
+	bra	end_ovfr
+DBL_RP:
+	tstb	LOCAL_SGN(%a0)	|if negative overflow
+	beqs	d_rp_pos
+d_rp_neg:
+	leal	DBL_PLRG,%a1	|answer is large negative number
+	bsetb	#neg_bit,FPSR_CC(%a6)
+	bra	end_ovfr
+d_rp_pos:
+	leal	EXT_PINF,%a1	|answer is positive infinity
+	bsetb	#inf_bit,FPSR_CC(%a6)
+	bra	end_ovfr
+|
+|case DEST_FMT = SGL
+|
+SGL_RN:
+	leal	EXT_PINF,%a1	|answer is +/-  infinity
+	bsetb	#inf_bit,FPSR_CC(%a6)
+	bras	set_sign
+SGL_RZ:
+	leal	SGL_PLRG,%a1	|answer is +/- large number
+	bras	set_sign
+SGL_RM:
+	tstb	LOCAL_SGN(%a0)	|if negative overflow
+	beqs	s_rm_pos
+s_rm_neg:
+	leal	EXT_PINF,%a1	|answer is negative infinity
+	orl	#neginf_mask,USER_FPSR(%a6)
+	bras	end_ovfr
+s_rm_pos:
+	leal	SGL_PLRG,%a1	|answer is large positive number
+	bras	end_ovfr
+SGL_RP:
+	tstb	LOCAL_SGN(%a0)	|if negative overflow
+	beqs	s_rp_pos
+s_rp_neg:
+	leal	SGL_PLRG,%a1	|answer is large negative number
+	bsetb	#neg_bit,FPSR_CC(%a6)
+	bras	end_ovfr
+s_rp_pos:
+	leal	EXT_PINF,%a1	|answer is positive infinity
+	bsetb	#inf_bit,FPSR_CC(%a6)
+	bras	end_ovfr
+
+set_sign:
+	tstb	LOCAL_SGN(%a0)	|if negative overflow
+	beqs	end_ovfr
+neg_sign:
+	bsetb	#neg_bit,FPSR_CC(%a6)
+
+end_ovfr:
+	movew	LOCAL_EX(%a1),LOCAL_EX(%a0) |do not overwrite sign
+	movel	LOCAL_HI(%a1),LOCAL_HI(%a0)
+	movel	LOCAL_LO(%a1),LOCAL_LO(%a0)
+	rts
+
+
+|
+|	ERROR
+|
+error:
+	rts
+|
+|	get_fline --- get f-line opcode of interrupted instruction
+|
+|	Returns opcode in the low word of d0.
+|
+get_fline:
+	movel	USER_FPIAR(%a6),%a0	|opcode address
+	movel	#0,-(%a7)	|reserve a long on the stack
+	leal	2(%a7),%a1	|point to low word of temporary
+	movel	#2,%d0		|count
+	bsrl	mem_read
+	movel	(%a7)+,%d0
+	rts
+|
+|	g_rndpr --- put rounding precision in d0{1:0}
+|
+|	valid return codes are:
+|		00 - extended
+|		01 - single
+|		10 - double
+|
+| begin
+| get rounding precision (cmdreg3b{6:5})
+| begin
+|  case	opclass = 011 (move out)
+|	get destination format - this is also the rounding precision
+|
+|  case	opclass = 0x0
+|	if E3
+|	    *case RndPr (from cmdreg3b{6:5}) = 11  then RND_PREC = DBL
+|	    *case RndPr (from cmdreg3b{6:5}) = 10  then RND_PREC = SGL
+|	     case RndPr (from cmdreg3b{6:5}) = 00 | 01
+|		use precision from FPCR{7:6}
+|			case 00 then RND_PREC = EXT
+|			case 01 then RND_PREC = SGL
+|			case 10 then RND_PREC = DBL
+|	else E1
+|	     use precision in FPCR{7:6}
+|	     case 00 then RND_PREC = EXT
+|	     case 01 then RND_PREC = SGL
+|	     case 10 then RND_PREC = DBL
+| end
+|
+g_rndpr:
+	bsr	g_opcls		|get opclass in d0{2:0}
+	cmpw	#0x0003,%d0	|check for opclass 011
+	bnes	op_0x0
+
+|
+| For move out instructions (opclass 011) the destination format
+| is the same as the rounding precision.  Pass results from g_dfmtou.
+|
+	bsr	g_dfmtou
+	rts
+op_0x0:
+	btstb	#E3,E_BYTE(%a6)
+	beql	unf_e1_exc	|branch to e1 underflow
+unf_e3_exc:
+	movel	CMDREG3B(%a6),%d0	|rounding precision in d0{10:9}
+	bfextu	%d0{#9:#2},%d0	|move the rounding prec bits to d0{1:0}
+	cmpil	#0x2,%d0
+	beql	unff_sgl	|force precision is single
+	cmpil	#0x3,%d0		|force precision is double
+	beql	unff_dbl
+	movew	CMDREG3B(%a6),%d0	|get the command word again
+	andil	#0x7f,%d0		|clear all except operation
+	cmpil	#0x33,%d0
+	beql	unf_fsgl	|fsglmul or fsgldiv
+	cmpil	#0x30,%d0
+	beql	unf_fsgl	|fsgldiv or fsglmul
+	bra	unf_fpcr
+unf_e1_exc:
+	movel	CMDREG1B(%a6),%d0	|get 32 bits off the stack, 1st 16 bits
+|				;are the command word
+	andil	#0x00440000,%d0	|clear all bits except bits 6 and 2
+	cmpil	#0x00400000,%d0
+	beql	unff_sgl	|force single
+	cmpil	#0x00440000,%d0	|force double
+	beql	unff_dbl
+	movel	CMDREG1B(%a6),%d0	|get the command word again
+	andil	#0x007f0000,%d0	|clear all bits except the operation
+	cmpil	#0x00270000,%d0
+	beql	unf_fsgl	|fsglmul
+	cmpil	#0x00240000,%d0
+	beql	unf_fsgl	|fsgldiv
+	bra	unf_fpcr
+
+|
+| Convert to return format.  The values from cmdreg3b and the return
+| values are:
+|	cmdreg3b	return	     precision
+|	--------	------	     ---------
+|	  00,01		  0		ext
+|	   10		  1		sgl
+|	   11		  2		dbl
+| Force single
+|
+unff_sgl:
+	movel	#1,%d0		|return 1
+	rts
+|
+| Force double
+|
+unff_dbl:
+	movel	#2,%d0		|return 2
+	rts
+|
+| Force extended
+|
+unf_fsgl:
+	movel	#0,%d0
+	rts
+|
+| Get rounding precision set in FPCR{7:6}.
+|
+unf_fpcr:
+	movel	USER_FPCR(%a6),%d0 |rounding precision bits in d0{7:6}
+	bfextu	%d0{#24:#2},%d0	|move the rounding prec bits to d0{1:0}
+	rts
+|
+|	g_opcls --- put opclass in d0{2:0}
+|
+g_opcls:
+	btstb	#E3,E_BYTE(%a6)
+	beqs	opc_1b		|if E3 is clear, get opclass from cmdreg1b
+opc_3b:
+	clrl	%d0		|if E3, only opclass 0x0 is possible
+	rts
+opc_1b:
+	movel	CMDREG1B(%a6),%d0
+	bfextu	%d0{#0:#3},%d0	|shift opclass bits d0{31:29} to d0{2:0}
+	rts
+|
+|	g_dfmtou --- put destination format in d0{1:0}
+|
+|	If E1, the format is from cmdreg1b{12:10}
+|	If E3, the format is extended.
+|
+|	Dest. Fmt.
+|		extended  010 -> 00
+|		single    001 -> 01
+|		double    101 -> 10
+|
+g_dfmtou:
+	btstb	#E3,E_BYTE(%a6)
+	beqs	op011
+	clrl	%d0		|if E3, size is always ext
+	rts
+op011:
+	movel	CMDREG1B(%a6),%d0
+	bfextu	%d0{#3:#3},%d0	|dest fmt from cmdreg1b{12:10}
+	cmpb	#1,%d0		|check for single
+	bnes	not_sgl
+	movel	#1,%d0
+	rts
+not_sgl:
+	cmpb	#5,%d0		|check for double
+	bnes	not_dbl
+	movel	#2,%d0
+	rts
+not_dbl:
+	clrl	%d0		|must be extended
+	rts
+
+|
+|
+| Final result table for unf_sub. Note that the negative counterparts
+| are unnecessary as unf_sub always returns the sign separately from
+| the exponent.
+|					;+zero
+EXT_PZRO:	.long	0x00000000,0x00000000,0x00000000,0x00000000
+|					;+zero
+SGL_PZRO:	.long	0x3f810000,0x00000000,0x00000000,0x00000000
+|					;+zero
+DBL_PZRO:	.long	0x3c010000,0x00000000,0x00000000,0x00000000
+|					;smallest +ext denorm
+EXT_PSML:	.long	0x00000000,0x00000000,0x00000001,0x00000000
+|					;smallest +sgl denorm
+SGL_PSML:	.long	0x3f810000,0x00000100,0x00000000,0x00000000
+|					;smallest +dbl denorm
+DBL_PSML:	.long	0x3c010000,0x00000000,0x00000800,0x00000000
+|
+|	UNF_SUB --- underflow result calculation
+|
+| Input:
+|	d0	contains round precision
+|	a0	points to input operand in the internal extended format
+|
+| Output:
+|	a0	points to correct internal extended precision result.
+|
+
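+| Jump table for unf_sub, indexed the same way as tblovfl above:
+| (round precision << 2) + round mode.  The four slots for the
+| invalid precision code 3 reuse the double-precision handlers.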
+tblunf:
+	.long	uEXT_RN
+	.long	uEXT_RZ
+	.long	uEXT_RM
+	.long	uEXT_RP
+	.long	uSGL_RN
+	.long	uSGL_RZ
+	.long	uSGL_RM
+	.long	uSGL_RP
+	.long	uDBL_RN
+	.long	uDBL_RZ
+	.long	uDBL_RM
+	.long	uDBL_RP
+	.long	uDBL_RN
+	.long	uDBL_RZ
+	.long	uDBL_RM
+	.long	uDBL_RP
+
+	.global	unf_sub
+unf_sub:
+	lsll	#2,%d0		|move round precision to d0{3:2}
+	bfextu	FPCR_MODE(%a6){#2:#2},%d1 |set round mode
+	orl	%d1,%d0		|index is fmt:mode in d0{3:0}
+	leal	tblunf,%a1	|load a1 with table address
+	movel	%a1@(%d0:l:4),%a1	|use d0 as index to the table
+	jmp	(%a1)		|go to the correct routine
+|
+|case DEST_FMT = EXT
+|
+uEXT_RN:
+	leal	EXT_PZRO,%a1	|answer is +/- zero
+	bsetb	#z_bit,FPSR_CC(%a6)
+	bra	uset_sign	|now go set the sign
+uEXT_RZ:
+	leal	EXT_PZRO,%a1	|answer is +/- zero
+	bsetb	#z_bit,FPSR_CC(%a6)
+	bra	uset_sign	|now go set the sign
+uEXT_RM:
+	tstb	LOCAL_SGN(%a0)	|if negative underflow
+	beqs	ue_rm_pos
+ue_rm_neg:
+	leal	EXT_PSML,%a1	|answer is negative smallest denorm
+	bsetb	#neg_bit,FPSR_CC(%a6)
+	bra	end_unfr
+ue_rm_pos:
+	leal	EXT_PZRO,%a1	|answer is positive zero
+	bsetb	#z_bit,FPSR_CC(%a6)
+	bra	end_unfr
+uEXT_RP:
+	tstb	LOCAL_SGN(%a0)	|if negative underflow
+	beqs	ue_rp_pos
+ue_rp_neg:
+	leal	EXT_PZRO,%a1	|answer is negative zero
+	oril	#negz_mask,USER_FPSR(%a6)
+	bra	end_unfr
+ue_rp_pos:
+	leal	EXT_PSML,%a1	|answer is positive smallest denorm
+	bra	end_unfr
+|
+|case DEST_FMT = DBL
+|
+uDBL_RN:
+	leal	DBL_PZRO,%a1	|answer is +/- zero
+	bsetb	#z_bit,FPSR_CC(%a6)
+	bra	uset_sign
+uDBL_RZ:
+	leal	DBL_PZRO,%a1	|answer is +/- zero
+	bsetb	#z_bit,FPSR_CC(%a6)
+	bra	uset_sign	|now go set the sign
+uDBL_RM:
+	tstb	LOCAL_SGN(%a0)	|if negative overflow
+	beqs	ud_rm_pos
+ud_rm_neg:
+	leal	DBL_PSML,%a1	|answer is smallest denormalized negative
+	bsetb	#neg_bit,FPSR_CC(%a6)
+	bra	end_unfr
+ud_rm_pos:
+	leal	DBL_PZRO,%a1	|answer is positive zero
+	bsetb	#z_bit,FPSR_CC(%a6)
+	bra	end_unfr
+uDBL_RP:
+	tstb	LOCAL_SGN(%a0)	|if negative overflow
+	beqs	ud_rp_pos
+ud_rp_neg:
+	leal	DBL_PZRO,%a1	|answer is negative zero
+	oril	#negz_mask,USER_FPSR(%a6)
+	bra	end_unfr
+ud_rp_pos:
+	leal	DBL_PSML,%a1	|answer is smallest denormalized positive
+	bra	end_unfr
+|
+|case DEST_FMT = SGL
+|
+uSGL_RN:
+	leal	SGL_PZRO,%a1	|answer is +/- zero
+	bsetb	#z_bit,FPSR_CC(%a6)
+	bras	uset_sign
+uSGL_RZ:
+	leal	SGL_PZRO,%a1	|answer is +/- zero
+	bsetb	#z_bit,FPSR_CC(%a6)
+	bras	uset_sign
+uSGL_RM:
+	tstb	LOCAL_SGN(%a0)	|if negative overflow
+	beqs	us_rm_pos
+us_rm_neg:
+	leal	SGL_PSML,%a1	|answer is smallest denormalized negative
+	bsetb	#neg_bit,FPSR_CC(%a6)
+	bras	end_unfr
+us_rm_pos:
+	leal	SGL_PZRO,%a1	|answer is positive zero
+	bsetb	#z_bit,FPSR_CC(%a6)
+	bras	end_unfr
+uSGL_RP:
+	tstb	LOCAL_SGN(%a0)	|if negative overflow
+	beqs	us_rp_pos
+us_rp_neg:
+	leal	SGL_PZRO,%a1	|answer is negative zero
+	oril	#negz_mask,USER_FPSR(%a6)
+	bras	end_unfr
+us_rp_pos:
+	leal	SGL_PSML,%a1	|answer is smallest denormalized positive
+	bras	end_unfr
+
+uset_sign:
+	tstb	LOCAL_SGN(%a0)	|if negative overflow
+	beqs	end_unfr
+uneg_sign:
+	bsetb	#neg_bit,FPSR_CC(%a6)
+
+end_unfr:
+	movew	LOCAL_EX(%a1),LOCAL_EX(%a0) |be careful not to overwrite sign
+	movel	LOCAL_HI(%a1),LOCAL_HI(%a0)
+	movel	LOCAL_LO(%a1),LOCAL_LO(%a0)
+	rts
+|
+|	reg_dest --- write byte, word, or long data to Dn
+|
+|
+| Input:
+|	L_SCR1: Data
+|	d1:     data size and dest register number formatted as:
+|
+|	31		5    4     3     2     1     0
+|       -----------------------------------------------
+|       |        0        |    Size   |  Dest Reg #   |
+|       -----------------------------------------------
+|
+|	Size is:
+|		0 - Byte
+|		1 - Word
+|		2 - Long/Single
+|
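+| Jump table indexed directly by d1 (Size*8 + Dest Reg #): eight
+| byte entries, then eight word entries, then eight long entries.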
+pregdst:
+	.long	byte_d0
+	.long	byte_d1
+	.long	byte_d2
+	.long	byte_d3
+	.long	byte_d4
+	.long	byte_d5
+	.long	byte_d6
+	.long	byte_d7
+	.long	word_d0
+	.long	word_d1
+	.long	word_d2
+	.long	word_d3
+	.long	word_d4
+	.long	word_d5
+	.long	word_d6
+	.long	word_d7
+	.long	long_d0
+	.long	long_d1
+	.long	long_d2
+	.long	long_d3
+	.long	long_d4
+	.long	long_d5
+	.long	long_d6
+	.long	long_d7
+
+reg_dest:
+	leal	pregdst,%a0
+	movel	%a0@(%d1:l:4),%a0
+	jmp	(%a0)
+
+byte_d0:
+	moveb	L_SCR1(%a6),USER_D0+3(%a6)
+	rts
+byte_d1:
+	moveb	L_SCR1(%a6),USER_D1+3(%a6)
+	rts
+byte_d2:
+	moveb	L_SCR1(%a6),%d2
+	rts
+byte_d3:
+	moveb	L_SCR1(%a6),%d3
+	rts
+byte_d4:
+	moveb	L_SCR1(%a6),%d4
+	rts
+byte_d5:
+	moveb	L_SCR1(%a6),%d5
+	rts
+byte_d6:
+	moveb	L_SCR1(%a6),%d6
+	rts
+byte_d7:
+	moveb	L_SCR1(%a6),%d7
+	rts
+word_d0:
+	movew	L_SCR1(%a6),USER_D0+2(%a6)
+	rts
+word_d1:
+	movew	L_SCR1(%a6),USER_D1+2(%a6)
+	rts
+word_d2:
+	movew	L_SCR1(%a6),%d2
+	rts
+word_d3:
+	movew	L_SCR1(%a6),%d3
+	rts
+word_d4:
+	movew	L_SCR1(%a6),%d4
+	rts
+word_d5:
+	movew	L_SCR1(%a6),%d5
+	rts
+word_d6:
+	movew	L_SCR1(%a6),%d6
+	rts
+word_d7:
+	movew	L_SCR1(%a6),%d7
+	rts
+long_d0:
+	movel	L_SCR1(%a6),USER_D0(%a6)
+	rts
+long_d1:
+	movel	L_SCR1(%a6),USER_D1(%a6)
+	rts
+long_d2:
+	movel	L_SCR1(%a6),%d2
+	rts
+long_d3:
+	movel	L_SCR1(%a6),%d3
+	rts
+long_d4:
+	movel	L_SCR1(%a6),%d4
+	rts
+long_d5:
+	movel	L_SCR1(%a6),%d5
+	rts
+long_d6:
+	movel	L_SCR1(%a6),%d6
+	rts
+long_d7:
+	movel	L_SCR1(%a6),%d7
+	rts
+	|end
diff --git a/arch/m68k/fpsp040/x_bsun.S b/arch/m68k/fpsp040/x_bsun.S
new file mode 100644
index 0000000..039247b
--- /dev/null
+++ b/arch/m68k/fpsp040/x_bsun.S
@@ -0,0 +1,47 @@
+|
+|	x_bsun.sa 3.3 7/1/91
+|
+|	fpsp_bsun --- FPSP handler for branch/set on unordered exception
+|
+|	Copy the PC to FPIAR to maintain 881/882 compatibility
+|
+|	The real_bsun handler will need to perform further corrective
+|	measures as outlined in the 040 User's Manual on pages
+|	9-41f, section 9.8.3.
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+X_BSUN:	|idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	|xref	real_bsun
+
+	.global	fpsp_bsun
+fpsp_bsun:
+|
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%a7)
+	moveml		%d0-%d1/%a0-%a1,USER_DA(%a6)
+	fmovemx	%fp0-%fp3,USER_FP0(%a6)
+	fmoveml	%fpcr/%fpsr/%fpiar,USER_FPCR(%a6)
+
+|
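+|	Copy the stacked PC into the saved FPIAR image so that the
+|	restore below leaves FPIAR pointing at the faulting
+|	instruction, as the 881/882 would.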
+	movel		EXC_PC(%a6),USER_FPIAR(%a6)
+|
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore	(%a7)+
+	unlk		%a6
+	bral		real_bsun
+|
+	|end
diff --git a/arch/m68k/fpsp040/x_fline.S b/arch/m68k/fpsp040/x_fline.S
new file mode 100644
index 0000000..3917710
--- /dev/null
+++ b/arch/m68k/fpsp040/x_fline.S
@@ -0,0 +1,104 @@
+|
+|	x_fline.sa 3.3 1/10/91
+|
+|	fpsp_fline --- FPSP handler for fline exception
+|
+|	First determine if the exception is one of the unimplemented
+|	floating point instructions.  If so, let fpsp_unimp handle it.
+|	Next, determine if the instruction is an fmovecr with a non-zero
+|	<ea> field.  If so, handle here and return.  Otherwise, it
+|	must be a real F-line exception.
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+X_FLINE:	|idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	|xref	real_fline
+	|xref	fpsp_unimp
+	|xref	uni_2
+	|xref	mem_read
+	|xref	fpsp_fmt_error
+
+	.global	fpsp_fline
+fpsp_fline:
+|
+|	check for unimplemented vector first.  Use EXC_VEC-4 because
+|	the equate is valid only after a 'link a6' has pushed one more
+|	long onto the stack.
+|
+	cmpw	#UNIMP_VEC,EXC_VEC-4(%a7)
+	beql	fpsp_unimp
+
+|
+|	fmovecr with non-zero <ea> handling here
+|
+	subl	#4,%a7		|4 accounts for 2-word difference
+|				;between six word frame (unimp) and
+|				;four word frame
+	link	%a6,#-LOCAL_SIZE
+	fsave	-(%a7)
+	moveml	%d0-%d1/%a0-%a1,USER_DA(%a6)
+	moveal	EXC_PC+4(%a6),%a0	|get address of fline instruction
+	leal	L_SCR1(%a6),%a1	|use L_SCR1 as scratch
+	movel	#4,%d0
+	addl	#4,%a6		|to offset the sub.l #4,a7 above so that
+|				;a6 can point correctly to the stack frame
+|				;before branching to mem_read
+	bsrl	mem_read
+	subl	#4,%a6
+	movel	L_SCR1(%a6),%d0	|d0 contains the fline and command word
+	bfextu	%d0{#4:#3},%d1	|extract coprocessor id
+	cmpib	#1,%d1		|check if cpid=1
+	bne	not_mvcr	|exit if not
+	bfextu	%d0{#16:#6},%d1
+	cmpib	#0x17,%d1		|check if it is an FMOVECR encoding
+	bne	not_mvcr
+|				;if an FMOVECR instruction, fix stack
+|				;and go to FPSP_UNIMP
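+|
+|	fix_stack grows the fsave frame on the stack to the size of an
+|	unimplemented-instruction frame for this FPSP revision ($40 or
+|	$41); fix_con then rebuilds the stacked sr, pc, vector and <ea>
+|	as an unimplemented-instruction frame, fills in CMDREG1B, and
+|	joins the unimplemented-instruction handler at uni_2.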
+fix_stack:
+	cmpib	#VER_40,(%a7)	|test for orig unimp frame
+	bnes	ck_rev
+	subl	#UNIMP_40_SIZE-4,%a7 |emulate an orig fsave
+	moveb	#VER_40,(%a7)
+	moveb	#UNIMP_40_SIZE-4,1(%a7)
+	clrw	2(%a7)
+	bras	fix_con
+ck_rev:
+	cmpib	#VER_41,(%a7)	|test for rev unimp frame
+	bnel	fpsp_fmt_error	|if not $40 or $41, exit with error
+	subl	#UNIMP_41_SIZE-4,%a7 |emulate a rev fsave
+	moveb	#VER_41,(%a7)
+	moveb	#UNIMP_41_SIZE-4,1(%a7)
+	clrw	2(%a7)
+fix_con:
+	movew	EXC_SR+4(%a6),EXC_SR(%a6) |move stacked sr to new position
+	movel	EXC_PC+4(%a6),EXC_PC(%a6) |move stacked pc to new position
+	fmovel	EXC_PC(%a6),%FPIAR |point FPIAR to fline inst
+	movel	#4,%d1
+	addl	%d1,EXC_PC(%a6)	|increment stacked pc value to next inst
+	movew	#0x202c,EXC_VEC(%a6) |reformat vector to unimp
+	clrl	EXC_EA(%a6)	|clear the EXC_EA field
+	movew	%d0,CMDREG1B(%a6) |move the lower word into CMDREG1B
+	clrl	E_BYTE(%a6)
+	bsetb	#UFLAG,T_BYTE(%a6)
+	moveml	USER_DA(%a6),%d0-%d1/%a0-%a1 |restore data registers
+	bral	uni_2
+
+not_mvcr:
+	moveml	USER_DA(%a6),%d0-%d1/%a0-%a1 |restore data registers
+	frestore (%a7)+
+	unlk	%a6
+	addl	#4,%a7
+	bral	real_fline
+
+	|end
diff --git a/arch/m68k/fpsp040/x_operr.S b/arch/m68k/fpsp040/x_operr.S
new file mode 100644
index 0000000..b0f54bc
--- /dev/null
+++ b/arch/m68k/fpsp040/x_operr.S
@@ -0,0 +1,356 @@
+|
+|	x_operr.sa 3.5 7/1/91
+|
+|	fpsp_operr --- FPSP handler for operand error exception
+|
+|	See 68040 User's Manual pp. 9-44f
+|
+| Note 1: For trap disabled, the 040 does the following:
+| If the dest is a fp reg, then an extended precision non_signaling
+| NAN is stored in the dest reg.  If the dest format is b, w, or l and
+| the source op is a NAN, then garbage is stored as the result (actually
+| the upper 32 bits of the mantissa are sent to the integer unit). If
+| the dest format is integer (b, w, l) and the operr is caused by
+| integer overflow, or the source op is inf, then the result stored is
+| garbage.
+| There are three cases in which operr is incorrectly signaled on the
+| 040.  This occurs for move_out of format b, w, or l for the largest
+| negative integer (-2^7 for b, -2^15 for w, -2^31 for l).
+|
+|	  On opclass = 011 fmove.(b,w,l) that causes a conversion
+|	  overflow -> OPERR, the exponent in wbte (and fpte) is:
+|		byte    56 - (62 - exp)
+|		word    48 - (62 - exp)
+|		long    32 - (62 - exp)
+|
+|			where exp = (true exp) - 1
+|
+|  So, wbtemp and fptemp will contain the following on erroneously
+|	  signalled operr:
+|			fpts = 1
+|			fpte = $4000  (15 bit externally)
+|		byte	fptm = $ffffffff ffffff80
+|		word	fptm = $ffffffff ffff8000
+|		long	fptm = $ffffffff 80000000
+|
+| Note 2: For trap enabled, the 040 does the following:
+| If the inst is move_out, then same as Note 1.
+| If the inst is not move_out, the dest is not modified.
+| The exceptional operand is not defined for integer overflow
+| during a move_out.
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+X_OPERR:	|idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	|xref	mem_write
+	|xref	real_operr
+	|xref	real_inex
+	|xref	get_fline
+	|xref	fpsp_done
+	|xref	reg_dest
+
+	.global	fpsp_operr
+fpsp_operr:
+|
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%a7)
+	moveml		%d0-%d1/%a0-%a1,USER_DA(%a6)
+	fmovemx	%fp0-%fp3,USER_FP0(%a6)
+	fmoveml	%fpcr/%fpsr/%fpiar,USER_FPCR(%a6)
+
+|
+| Check if this is an opclass 3 instruction.
+|  If so, fall through, else branch to operr_end
+|
+	btstb	#TFLAG,T_BYTE(%a6)
+	beqs	operr_end
+
+|
+| If the destination size is B,W,or L, the operr must be
+| handled here.
+|
+	movel	CMDREG1B(%a6),%d0
+	bfextu	%d0{#3:#3},%d0	|0=long, 4=word, 6=byte
+	cmpib	#0,%d0		|determine size; check long
+	beq	operr_long
+	cmpib	#4,%d0		|check word
+	beq	operr_word
+	cmpib	#6,%d0		|check byte
+	beq	operr_byte
+
+|
+| The size is not B,W,or L, so the operr is handled by the
+| kernel handler.  Set the operr bits and clean up, leaving
+| only the integer exception frame on the stack, and the
+| fpu in the original exceptional state.
+|
+operr_end:
+	bsetb		#operr_bit,FPSR_EXCEPT(%a6)
+	bsetb		#aiop_bit,FPSR_AEXCEPT(%a6)
+
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore	(%a7)+
+	unlk		%a6
+	bral		real_operr
+
+operr_long:
+	moveql	#4,%d1		|write size to d1
+	moveb	STAG(%a6),%d0	|test stag for nan
+	andib	#0xe0,%d0		|clr all but tag
+	cmpib	#0x60,%d0		|check for nan
+	beq	operr_nan
+	cmpil	#0x80000000,FPTEMP_LO(%a6) |test if ls lword is special
+	bnes	chklerr		|if not equal, check for incorrect operr
+	bsr	check_upper	|check if exp and ms mant are special
+	tstl	%d0
+	bnes	chklerr		|if d0 is true, check for incorrect operr
+	movel	#0x80000000,%d0	|store special case result
+	bsr	operr_store
+	bra	not_enabled	|clean and exit
+|
+|	CHECK FOR INCORRECTLY GENERATED OPERR EXCEPTION HERE
+|
+chklerr:
+	movew	FPTEMP_EX(%a6),%d0
+	andw	#0x7FFF,%d0	|ignore sign bit
+	cmpw	#0x3FFE,%d0	|this is the only possible exponent value
+	bnes	chklerr2
+fixlong:
+	movel	FPTEMP_LO(%a6),%d0
+	bsr	operr_store
+	bra	not_enabled
+chklerr2:
+	movew	FPTEMP_EX(%a6),%d0
+	andw	#0x7FFF,%d0	|ignore sign bit
+	cmpw	#0x4000,%d0
+	bcc	store_max	|exponent out of range
+
+	movel	FPTEMP_LO(%a6),%d0
+	andl	#0x7FFF0000,%d0	|look for all 1's on bits 30-16
+	cmpl	#0x7FFF0000,%d0
+	beqs	fixlong
+
+	tstl	FPTEMP_LO(%a6)
+	bpls	chklepos
+	cmpl	#0xFFFFFFFF,FPTEMP_HI(%a6)
+	beqs	fixlong
+	bra	store_max
+chklepos:
+	tstl	FPTEMP_HI(%a6)
+	beqs	fixlong
+	bra	store_max
+
+operr_word:
+	moveql	#2,%d1		|write size to d1
+	moveb	STAG(%a6),%d0	|test stag for nan
+	andib	#0xe0,%d0		|clr all but tag
+	cmpib	#0x60,%d0		|check for nan
+	beq	operr_nan
+	cmpil	#0xffff8000,FPTEMP_LO(%a6) |test if ls lword is special
+	bnes	chkwerr		|if not equal, check for incorrect operr
+	bsr	check_upper	|check if exp and ms mant are special
+	tstl	%d0
+	bnes	chkwerr		|if d0 is true, check for incorrect operr
+	movel	#0x80000000,%d0	|store special case result
+	bsr	operr_store
+	bra	not_enabled	|clean and exit
+|
+|	CHECK FOR INCORRECTLY GENERATED OPERR EXCEPTION HERE
+|
+chkwerr:
+	movew	FPTEMP_EX(%a6),%d0
+	andw	#0x7FFF,%d0	|ignore sign bit
+	cmpw	#0x3FFE,%d0	|this is the only possible exponent value
+	bnes	store_max
+	movel	FPTEMP_LO(%a6),%d0
+	swap	%d0
+	bsr	operr_store
+	bra	not_enabled
+
+operr_byte:
+	moveql	#1,%d1		|write size to d1
+	moveb	STAG(%a6),%d0	|test stag for nan
+	andib	#0xe0,%d0		|clr all but tag
+	cmpib	#0x60,%d0		|check for nan
+	beqs	operr_nan
+	cmpil	#0xffffff80,FPTEMP_LO(%a6) |test if ls lword is special
+	bnes	chkberr		|if not equal, check for incorrect operr
+	bsr	check_upper	|check if exp and ms mant are special
+	tstl	%d0
+	bnes	chkberr		|if d0 is true, check for incorrect operr
+	movel	#0x80000000,%d0	|store special case result
+	bsr	operr_store
+	bra	not_enabled	|clean and exit
+|
+|	CHECK FOR INCORRECTLY GENERATED OPERR EXCEPTION HERE
+|
+chkberr:
+	movew	FPTEMP_EX(%a6),%d0
+	andw	#0x7FFF,%d0	|ignore sign bit
+	cmpw	#0x3FFE,%d0	|this is the only possible exponent value
+	bnes	store_max
+	movel	FPTEMP_LO(%a6),%d0
+	asll	#8,%d0
+	swap	%d0
+	bsr	operr_store
+	bra	not_enabled
+
+|
+| This operr condition is not of the special case.  Set operr
+| and aiop and write the portion of the nan to memory for the
+| given size.
+|
+operr_nan:
+	orl	#opaop_mask,USER_FPSR(%a6) |set operr & aiop
+
+	movel	ETEMP_HI(%a6),%d0	|output will be from upper 32 bits
+	bsr	operr_store
+	bra	end_operr
+|
+| Store_max loads the max pos or negative for the size, sets
+| the operr and aiop bits, and clears inex and ainex, incorrectly
+| set by the 040.
+|
+store_max:
+	orl	#opaop_mask,USER_FPSR(%a6) |set operr & aiop
+	bclrb	#inex2_bit,FPSR_EXCEPT(%a6)
+	bclrb	#ainex_bit,FPSR_AEXCEPT(%a6)
+	fmovel	#0,%FPSR
+
+	tstw	FPTEMP_EX(%a6)	|check sign
+	blts	load_neg
+	movel	#0x7fffffff,%d0
+	bsr	operr_store
+	bra	end_operr
+load_neg:
+	movel	#0x80000000,%d0
+	bsr	operr_store
+	bra	end_operr
+
+|
+| This routine stores the data in d0, for the given size in d1,
+| to memory or data register as required.  A read of the fline
+| is required to determine the destination.
+|
+operr_store:
+	movel	%d0,L_SCR1(%a6)	|move write data to L_SCR1
+	movel	%d1,-(%a7)	|save register size
+	bsrl	get_fline	|fline returned in d0
+	movel	(%a7)+,%d1
+	bftst	%d0{#26:#3}		|if mode is zero, dest is Dn
+	bnes	dest_mem
+|
+| Destination is Dn.  Get register number from d0. Data is on
+| the stack at (a7). D1 has size: 1=byte,2=word,4=long/single
+|
+	andil	#7,%d0		|isolate register number
+	cmpil	#4,%d1
+	beqs	op_long		|the most frequent case
+	cmpil	#2,%d1
+	bnes	op_con
+	orl	#8,%d0
+	bras	op_con
+op_long:
+	orl	#0x10,%d0
+op_con:
+	movel	%d0,%d1		|format size:reg for reg_dest
+	bral	reg_dest	|call to reg_dest returns to caller
+|				;of operr_store
+|
+| Destination is memory.  Get <ea> from integer exception frame
+| and call mem_write.
+|
+dest_mem:
+	leal	L_SCR1(%a6),%a0	|put ptr to write data in a0
+	movel	EXC_EA(%a6),%a1	|put user destination address in a1
+	movel	%d1,%d0		|put size in d0
+	bsrl	mem_write
+	rts
+|
+| Check the exponent for $c000 or $bfff and the upper 32 bits of the
+| mantissa for $ffffffff.  If both are true, return d0 clr
+| and store the lower n bits of the least lword of FPTEMP
+| to d0 for write out.  If not, it is a real operr, and set d0.
+|
+check_upper:
+	cmpil	#0xffffffff,FPTEMP_HI(%a6) |check if upper 32 bits of mantissa are all 1's
+	bnes	true_operr	|if not all 1's then was true operr
+	cmpiw	#0xc000,FPTEMP_EX(%a6) |check if incorrectly signalled
+	beqs	not_true_operr	|branch if not true operr
+	cmpiw	#0xbfff,FPTEMP_EX(%a6) |check if incorrectly signalled
+	beqs	not_true_operr	|branch if not true operr
+true_operr:
+	movel	#1,%d0		|signal real operr
+	rts
+not_true_operr:
+	clrl	%d0		|signal no real operr
+	rts
+
+|
+| End_operr tests for operr enabled.  If not, it cleans up the stack
+| and does an rte.  If enabled, it cleans up the stack and branches
+| to the kernel operr handler with only the integer exception
+| frame on the stack and the fpu in the original exceptional state
+| with correct data written to the destination.
+|
+end_operr:
+	btstb		#operr_bit,FPCR_ENABLE(%a6)
+	beqs		not_enabled
+enabled:
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore	(%a7)+
+	unlk		%a6
+	bral		real_operr
+
+not_enabled:
+|
+| It is possible to have either inex2 or inex1 exceptions with the
+| operr.  If the inex enable bit is set in the FPCR, and either
+| inex2 or inex1 occurred, we must clean up and branch to the
+| real inex handler.
+|
+ck_inex:
+	moveb	FPCR_ENABLE(%a6),%d0
+	andb	FPSR_EXCEPT(%a6),%d0
+	andib	#0x3,%d0
+	beq	operr_exit
+|
+| Inexact enabled and reported, and we must take an inexact exception.
+|
+take_inex:
+	moveb		#INEX_VEC,EXC_VEC+1(%a6)
+	movel		USER_FPSR(%a6),FPSR_SHADOW(%a6)
+	orl		#sx_mask,E_BYTE(%a6)
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore	(%a7)+
+	unlk		%a6
+	bral		real_inex
+|
+| Since operr is only an E1 exception, there is no need to frestore
+| any state back to the fpu.
+|
+operr_exit:
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	unlk		%a6
+	bral		fpsp_done
+
+	|end
diff --git a/arch/m68k/fpsp040/x_ovfl.S b/arch/m68k/fpsp040/x_ovfl.S
new file mode 100644
index 0000000..22cb8b4
--- /dev/null
+++ b/arch/m68k/fpsp040/x_ovfl.S
@@ -0,0 +1,186 @@
+|
+|	x_ovfl.sa 3.5 7/1/91
+|
+|	fpsp_ovfl --- FPSP handler for overflow exception
+|
+|	Overflow occurs when a floating-point intermediate result is
+|	too large to be represented in a floating-point data register,
+|	or when storing to memory, the contents of a floating-point
+|	data register are too large to be represented in the
+|	destination format.
+|
+| Trap disabled results
+|
+| If the instruction is move_out, then garbage is stored in the
+| destination.  If the instruction is not move_out, then the
+| destination is not affected.  For 68881 compatibility, the
+| following values should be stored at the destination, based
+| on the current rounding mode:
+|
+|  RN	Infinity with the sign of the intermediate result.
+|  RZ	Largest magnitude number, with the sign of the
+|	intermediate result.
+|  RM   For pos overflow, the largest pos number. For neg overflow,
+|	-infinity
+|  RP   For pos overflow, +infinity. For neg overflow, the largest
+|	neg number
+|
+| Trap enabled results
+| All trap disabled code applies.  In addition, the exceptional
+| operand needs to be made available to the user's exception handler
+| with a bias of $6000 subtracted from the exponent.
+|
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+X_OVFL:	|idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	|xref	ovf_r_x2
+	|xref	ovf_r_x3
+	|xref	store
+	|xref	real_ovfl
+	|xref	real_inex
+	|xref	fpsp_done
+	|xref	g_opcls
+	|xref	b1238_fix
+
+	.global	fpsp_ovfl
+fpsp_ovfl:
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%a7)
+	moveml		%d0-%d1/%a0-%a1,USER_DA(%a6)
+	fmovemx	%fp0-%fp3,USER_FP0(%a6)
+	fmoveml	%fpcr/%fpsr/%fpiar,USER_FPCR(%a6)
+
+|
+|	The 040 doesn't set the AINEX bit in the FPSR; the following
+|	line temporarily rectifies this error.
+|
+	bsetb	#ainex_bit,FPSR_AEXCEPT(%a6)
+|
+	bsrl	ovf_adj		|denormalize, round & store interm op
+|
+|	if overflow traps not enabled check for inexact exception
+|
+	btstb	#ovfl_bit,FPCR_ENABLE(%a6)
+	beqs	ck_inex
+|
+	btstb		#E3,E_BYTE(%a6)
+	beqs		no_e3_1
+	bfextu		CMDREG3B(%a6){#6:#3},%d0	|get dest reg no
+	bclrb		%d0,FPR_DIRTY_BITS(%a6)	|clr dest dirty bit
+	bsrl		b1238_fix
+	movel		USER_FPSR(%a6),FPSR_SHADOW(%a6)
+	orl		#sx_mask,E_BYTE(%a6)
+no_e3_1:
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore	(%a7)+
+	unlk		%a6
+	bral		real_ovfl
+|
+| It is possible to have either inex2 or inex1 exceptions with the
+| ovfl.  If the inex enable bit is set in the FPCR, and either
+| inex2 or inex1 occurred, we must clean up and branch to the
+| real inex handler.
+|
+ck_inex:
+|	move.b		FPCR_ENABLE(%a6),%d0
+|	and.b		FPSR_EXCEPT(%a6),%d0
+|	andi.b		#$3,%d0
+	btstb		#inex2_bit,FPCR_ENABLE(%a6)
+	beqs		ovfl_exit
+|
+| Inexact enabled and reported, and we must take an inexact exception.
+|
+take_inex:
+	btstb		#E3,E_BYTE(%a6)
+	beqs		no_e3_2
+	bfextu		CMDREG3B(%a6){#6:#3},%d0	|get dest reg no
+	bclrb		%d0,FPR_DIRTY_BITS(%a6)	|clr dest dirty bit
+	bsrl		b1238_fix
+	movel		USER_FPSR(%a6),FPSR_SHADOW(%a6)
+	orl		#sx_mask,E_BYTE(%a6)
+no_e3_2:
+	moveb		#INEX_VEC,EXC_VEC+1(%a6)
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore	(%a7)+
+	unlk		%a6
+	bral		real_inex
+
+ovfl_exit:
+	bclrb	#E3,E_BYTE(%a6)	|test and clear E3 bit
+	beqs	e1_set
+|
+| Clear dirty bit on dest register in the frame before branching
+| to b1238_fix.
+|
+	bfextu		CMDREG3B(%a6){#6:#3},%d0	|get dest reg no
+	bclrb		%d0,FPR_DIRTY_BITS(%a6)	|clr dest dirty bit
+	bsrl		b1238_fix		|test for bug1238 case
+
+	movel		USER_FPSR(%a6),FPSR_SHADOW(%a6)
+	orl		#sx_mask,E_BYTE(%a6)
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore	(%a7)+
+	unlk		%a6
+	bral		fpsp_done
+e1_set:
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	unlk		%a6
+	bral		fpsp_done
+
+|
+|	ovf_adj
+|
+ovf_adj:
+|
+| Have a0 point to the correct operand.
+|
+	btstb	#E3,E_BYTE(%a6)	|test E3 bit
+	beqs	ovf_e1
+
+	lea	WBTEMP(%a6),%a0
+	bras	ovf_com
+ovf_e1:
+	lea	ETEMP(%a6),%a0
+
+ovf_com:
+	bclrb	#sign_bit,LOCAL_EX(%a0)
+	sne	LOCAL_SGN(%a0)
+
+	bsrl	g_opcls		|returns opclass in d0
+	cmpiw	#3,%d0		|check for opclass3
+	bnes	not_opc011
+
+|
+| FPSR_CC is saved and restored because ovf_r_x3 affects it. The
+| CCs are defined to be 'not affected' for the opclass3 instruction.
+|
+	moveb	FPSR_CC(%a6),L_SCR1(%a6)
+	bsrl	ovf_r_x3	|returns a0 pointing to result
+	moveb	L_SCR1(%a6),FPSR_CC(%a6)
+	bral	store		|stores to memory or register
+
+not_opc011:
+	bsrl	ovf_r_x2	|returns a0 pointing to result
+	bral	store		|stores to memory or register
+
+	|end
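
The rounding-mode table in the x_ovfl.S header above lists the 68881-compatible default results delivered when the overflow trap is disabled. A minimal C sketch of that table, purely as an illustration (it uses IEEE-754 double via <math.h>/<float.h> rather than the FPSP's 96-bit extended format):

#include <math.h>	/* HUGE_VAL is +infinity on IEEE-754 hosts */
#include <float.h>	/* DBL_MAX is the largest finite magnitude */

enum rnd_mode { RND_NEAREST, RND_ZERO, RND_MINUS, RND_PLUS };

/* Default overflow result as described above: RN returns signed infinity,
 * RZ the signed largest magnitude, RM/RP pick between the two depending
 * on the sign of the overflowed intermediate result.
 */
static double ovfl_default(int negative, enum rnd_mode mode)
{
	switch (mode) {
	case RND_NEAREST:
		return negative ? -HUGE_VAL : HUGE_VAL;
	case RND_ZERO:
		return negative ? -DBL_MAX : DBL_MAX;
	case RND_MINUS:
		return negative ? -HUGE_VAL : DBL_MAX;
	case RND_PLUS:
		return negative ? -DBL_MAX : HUGE_VAL;
	}
	return 0.0;	/* unreachable */
}

For example, ovfl_default(1, RND_PLUS) is -DBL_MAX, matching the RP row for negative overflow.
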
diff --git a/arch/m68k/fpsp040/x_snan.S b/arch/m68k/fpsp040/x_snan.S
new file mode 100644
index 0000000..039af573
--- /dev/null
+++ b/arch/m68k/fpsp040/x_snan.S
@@ -0,0 +1,277 @@
+|
+|	x_snan.sa 3.3 7/1/91
+|
+| fpsp_snan --- FPSP handler for signalling NAN exception
+|
+| SNAN for float -> integer conversions (integer conversion of
+| an SNAN) is a non-maskable run-time exception.
+|
+| For trap disabled the 040 does the following:
+| If the dest data format is s, d, or x, then the SNAN bit in the NAN
+| is set to one and the resulting non-signaling NAN (truncated if
+| necessary) is transferred to the dest.  If the dest format is b, w,
+| or l, then garbage is written to the dest (actually the upper 32 bits
+| of the mantissa are sent to the integer unit).
+|
+| For trap enabled the 040 does the following:
+| If the inst is move_out, then the results are the same as for trap
+| disabled with the exception posted.  If the instruction is not move_
+| out, the dest. is not modified, and the exception is posted.
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+X_SNAN:	|idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	|xref	get_fline
+	|xref	mem_write
+	|xref	real_snan
+	|xref	real_inex
+	|xref	fpsp_done
+	|xref	reg_dest
+
+	.global	fpsp_snan
+fpsp_snan:
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%a7)
+	moveml		%d0-%d1/%a0-%a1,USER_DA(%a6)
+	fmovemx	%fp0-%fp3,USER_FP0(%a6)
+	fmoveml	%fpcr/%fpsr/%fpiar,USER_FPCR(%a6)
+
+|
+| Check if trap enabled
+|
+	btstb		#snan_bit,FPCR_ENABLE(%a6)
+	bnes		ena		|If enabled, then branch
+
+	bsrl		move_out	|else SNAN disabled
+|
+| It is possible to have an inex1 exception with the
+| snan.  If the inex enable bit is set in the FPCR, and either
+| inex2 or inex1 occurred, we must clean up and branch to the
+| real inex handler.
+|
+ck_inex:
+	moveb	FPCR_ENABLE(%a6),%d0
+	andb	FPSR_EXCEPT(%a6),%d0
+	andib	#0x3,%d0
+	beq	end_snan
+|
+| Inexact enabled and reported, and we must take an inexact exception.
+|
+take_inex:
+	moveb		#INEX_VEC,EXC_VEC+1(%a6)
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore	(%a7)+
+	unlk		%a6
+	bral		real_inex
+|
+| SNAN is enabled.  Check if inst is move_out.
+| Make any corrections to the 040 output as necessary.
+|
+ena:
+	btstb		#5,CMDREG1B(%a6) |if set, inst is move out
+	beq		not_out
+
+	bsrl		move_out
+
+report_snan:
+	moveb		(%a7),VER_TMP(%a6)
+	cmpib		#VER_40,(%a7)	|test for orig unimp frame
+	bnes		ck_rev
+	moveql		#13,%d0		|need to zero 14 lwords
+	bras		rep_con
+ck_rev:
+	moveql		#11,%d0		|need to zero 12 lwords
+rep_con:
+	clrl		(%a7)
+loop1:
+	clrl		-(%a7)		|clear and dec a7
+	dbra		%d0,loop1
+	moveb		VER_TMP(%a6),(%a7) |format a busy frame
+	moveb		#BUSY_SIZE-4,1(%a7)
+	movel		USER_FPSR(%a6),FPSR_SHADOW(%a6)
+	orl		#sx_mask,E_BYTE(%a6)
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore	(%a7)+
+	unlk		%a6
+	bral		real_snan
+|
+| Exit snan handler by expanding the unimp frame into a busy frame
+|
+end_snan:
+	bclrb		#E1,E_BYTE(%a6)
+
+	moveb		(%a7),VER_TMP(%a6)
+	cmpib		#VER_40,(%a7)	|test for orig unimp frame
+	bnes		ck_rev2
+	moveql		#13,%d0		|need to zero 14 lwords
+	bras		rep_con2
+ck_rev2:
+	moveql		#11,%d0		|need to zero 12 lwords
+rep_con2:
+	clrl		(%a7)
+loop2:
+	clrl		-(%a7)		|clear and dec a7
+	dbra		%d0,loop2
+	moveb		VER_TMP(%a6),(%a7) |format a busy frame
+	moveb		#BUSY_SIZE-4,1(%a7) |write busy size
+	movel		USER_FPSR(%a6),FPSR_SHADOW(%a6)
+	orl		#sx_mask,E_BYTE(%a6)
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore	(%a7)+
+	unlk		%a6
+	bral		fpsp_done
+
+|
+| Move_out
+|
+move_out:
+	movel		EXC_EA(%a6),%a0	|get <ea> from exc frame
+
+	bfextu		CMDREG1B(%a6){#3:#3},%d0 |move rx field to d0{2:0}
+	cmpil		#0,%d0		|check for long
+	beqs		sto_long	|branch if move_out long
+
+	cmpil		#4,%d0		|check for word
+	beqs		sto_word	|branch if move_out word
+
+	cmpil		#6,%d0		|check for byte
+	beqs		sto_byte	|branch if move_out byte
+
+|
+| Not byte, word or long
+|
+	rts
+|
+| Get the 32 most significant bits of etemp mantissa
+|
+sto_long:
+	movel		ETEMP_HI(%a6),%d1
+	movel		#4,%d0		|load byte count
+|
+| Set signalling nan bit
+|
+	bsetl		#30,%d1
+|
+| Store to the user's destination address
+|
+	tstl		%a0		|check if <ea> is 0
+	beqs		wrt_dn		|destination is a data register
+
+	movel		%d1,-(%a7)	|move the snan onto the stack
+	movel		%a0,%a1		|load dest addr into a1
+	movel		%a7,%a0		|load src addr of snan into a0
+	bsrl		mem_write	|write snan to user memory
+	movel		(%a7)+,%d1	|clear off stack
+	rts
+|
+| Get the 16 most significant bits of etemp mantissa
+|
+sto_word:
+	movel		ETEMP_HI(%a6),%d1
+	movel		#2,%d0		|load byte count
+|
+| Set signalling nan bit
+|
+	bsetl		#30,%d1
+|
+| Store to the user's destination address
+|
+	tstl		%a0		|check if <ea> is 0
+	beqs		wrt_dn		|destination is a data register
+
+	movel		%d1,-(%a7)	|move the snan onto the stack
+	movel		%a0,%a1		|load dest addr into a1
+	movel		%a7,%a0		|point to low word
+	bsrl		mem_write	|write snan to user memory
+	movel		(%a7)+,%d1	|clear off stack
+	rts
+|
+| Get the 8 most significant bits of etemp mantissa
+|
+sto_byte:
+	movel		ETEMP_HI(%a6),%d1
+	movel		#1,%d0		|load byte count
+|
+| Set signalling nan bit
+|
+	bsetl		#30,%d1
+|
+| Store to the user's destination address
+|
+	tstl		%a0		|check if <ea> is 0
+	beqs		wrt_dn		|destination is a data register
+	movel		%d1,-(%a7)	|move the snan onto the stack
+	movel		%a0,%a1		|load dest addr into a1
+	movel		%a7,%a0		|point to source byte
+	bsrl		mem_write	|write snan to user memory
+	movel		(%a7)+,%d1	|clear off stack
+	rts
+
+|
+|	wrt_dn --- write to a data register
+|
+|	We get here with D1 containing the data to write and D0 the
+|	number of bytes to write: 1=byte,2=word,4=long.
+|
+wrt_dn:
+	movel		%d1,L_SCR1(%a6)	|data
+	movel		%d0,-(%a7)	|size
+	bsrl		get_fline	|returns fline word in d0
+	movel		%d0,%d1
+	andil		#0x7,%d1		|d1 now holds register number
+	movel		(%sp)+,%d0	|get original size
+	cmpil		#4,%d0
+	beqs		wrt_long
+	cmpil		#2,%d0
+	bnes		wrt_byte
+wrt_word:
+	orl		#0x8,%d1
+	bral		reg_dest
+wrt_long:
+	orl		#0x10,%d1
+	bral		reg_dest
+wrt_byte:
+	bral		reg_dest
+|
+| Check if it is a src nan or dst nan
+|
+not_out:
+	movel		DTAG(%a6),%d0
+	bfextu		%d0{#0:#3},%d0	|isolate dtag in lsbs
+
+	cmpib		#3,%d0		|check for nan in destination
+	bnes		issrc		|destination nan has priority
+dst_nan:
+	btstb		#6,FPTEMP_HI(%a6) |check if dest nan is an snan
+	bnes		issrc		|no, so check source for snan
+	movew		FPTEMP_EX(%a6),%d0
+	bras		cont
+issrc:
+	movew		ETEMP_EX(%a6),%d0
+cont:
+	btstl		#15,%d0		|test for sign of snan
+	beqs		clr_neg
+	bsetb		#neg_bit,FPSR_CC(%a6)
+	bra		report_snan
+clr_neg:
+	bclrb		#neg_bit,FPSR_CC(%a6)
+	bra		report_snan
+
+	|end
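
The x_snan.S header above describes delivering a non-signalling NaN when the trap is disabled; sto_long/sto_word/sto_byte do this by setting bit 30 of the upper mantissa word before the store. The same idea for IEEE-754 single precision, shown only as an illustration (the FPSP itself operates on the extended operand in ETEMP):

#include <stdint.h>

/* Quiet a single-precision signalling NaN by setting the most significant
 * fraction bit (bit 22); this mirrors the bset #30 of ETEMP_HI above.
 */
static uint32_t quiet_snan_single(uint32_t bits)
{
	uint32_t exp  = (bits >> 23) & 0xff;
	uint32_t frac = bits & 0x007fffff;

	if (exp == 0xff && frac != 0)	/* it is a NaN at all */
		bits |= 1u << 22;	/* make it quiet */
	return bits;
}
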
diff --git a/arch/m68k/fpsp040/x_store.S b/arch/m68k/fpsp040/x_store.S
new file mode 100644
index 0000000..4282fa6
--- /dev/null
+++ b/arch/m68k/fpsp040/x_store.S
@@ -0,0 +1,256 @@
+|
+|	x_store.sa 3.2 1/24/91
+|
+|	store --- store operand to memory or register
+|
+|	Used by underflow and overflow handlers.
+|
+|	a6 = points to fp value to be stored.
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+X_STORE:	|idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+fpreg_mask:
+	.byte	0x80,0x40,0x20,0x10,0x08,0x04,0x02,0x01
+
+#include "fpsp.h"
+
+	|xref	mem_write
+	|xref	get_fline
+	|xref	g_opcls
+	|xref	g_dfmtou
+	|xref	reg_dest
+
+	.global	dest_ext
+	.global	dest_dbl
+	.global	dest_sgl
+
+	.global	store
+store:
+	btstb	#E3,E_BYTE(%a6)
+	beqs	E1_sto
+E3_sto:
+	movel	CMDREG3B(%a6),%d0
+	bfextu	%d0{#6:#3},%d0		|isolate dest. reg from cmdreg3b
+sto_fp:
+	lea	fpreg_mask,%a1
+	moveb	(%a1,%d0.w),%d0		|convert reg# to dynamic register mask
+	tstb	LOCAL_SGN(%a0)
+	beqs	is_pos
+	bsetb	#sign_bit,LOCAL_EX(%a0)
+is_pos:
+	fmovemx (%a0),%d0		|move to correct register
+|
+|	if fp0-fp3 is being modified, we must put a copy
+|	in the USER_FPn variable on the stack because all exception
+|	handlers restore fp0-fp3 from there.
+|
+	cmpb	#0x80,%d0
+	bnes	not_fp0
+	fmovemx %fp0-%fp0,USER_FP0(%a6)
+	rts
+not_fp0:
+	cmpb	#0x40,%d0
+	bnes	not_fp1
+	fmovemx %fp1-%fp1,USER_FP1(%a6)
+	rts
+not_fp1:
+	cmpb	#0x20,%d0
+	bnes	not_fp2
+	fmovemx %fp2-%fp2,USER_FP2(%a6)
+	rts
+not_fp2:
+	cmpb	#0x10,%d0
+	bnes	not_fp3
+	fmovemx %fp3-%fp3,USER_FP3(%a6)
+	rts
+not_fp3:
+	rts
+
+E1_sto:
+	bsrl	g_opcls		|returns opclass in d0
+	cmpib	#3,%d0
+	beq	opc011		|branch if opclass 3
+	movel	CMDREG1B(%a6),%d0
+	bfextu	%d0{#6:#3},%d0	|extract destination register
+	bras	sto_fp
+
+opc011:
+	bsrl	g_dfmtou	|returns dest format in d0
+|				;ext=00, sgl=01, dbl=10
+	movel	%a0,%a1		|save source addr in a1
+	movel	EXC_EA(%a6),%a0	|get the address
+	cmpil	#0,%d0		|if dest format is extended
+	beq	dest_ext	|then branch
+	cmpil	#1,%d0		|if dest format is single
+	beq	dest_sgl	|then branch
+|
+|	fall through to dest_dbl
+|
+
+|
+|	dest_dbl --- write double precision value to user space
+|
+|Input
+|	a0 -> destination address
+|	a1 -> source in extended precision
+|Output
+|	a0 -> destroyed
+|	a1 -> destroyed
+|	d0 -> 0
+|
+|Changes extended precision to double precision.
+| Note: no attempt is made to round the extended value to double.
+|	dbl_sign = ext_sign
+|	dbl_exp = ext_exp - $3fff(ext bias) + $3ff(dbl bias)
+|	get rid of ext integer bit
+|	dbl_mant = ext_mant{62:11}
+|
+|		---------------   ---------------    ---------------
+|  extended ->  |s|    exp    |   |1| ms mant   |    | ls mant     |
+|		---------------   ---------------    ---------------
+|		 95	    64    63 62	      32      31     11	  0
+|				     |			     |
+|				     |			     |
+|				     |			     |
+|			             v			     v
+|			      ---------------   ---------------
+|  double   ->		      |s|exp| mant  |   |  mant       |
+|			      ---------------   ---------------
+|			      63     51   32   31	       0
+|
+dest_dbl:
+	clrl	%d0		|clear d0
+	movew	LOCAL_EX(%a1),%d0	|get exponent
+	subw	#0x3fff,%d0	|subtract extended precision bias
+	cmpw	#0x4000,%d0	|check if inf
+	beqs	inf		|if so, special case
+	addw	#0x3ff,%d0	|add double precision bias
+	swap	%d0		|d0 now in upper word
+	lsll	#4,%d0		|d0 now in proper place for dbl prec exp
+	tstb	LOCAL_SGN(%a1)
+	beqs	get_mant	|if positive, go process mantissa
+	bsetl	#31,%d0		|if negative, put in sign information
+|				; before continuing
+	bras	get_mant	|go process mantissa
+inf:
+	movel	#0x7ff00000,%d0	|load dbl inf exponent
+	clrl	LOCAL_HI(%a1)	|clear msb
+	tstb	LOCAL_SGN(%a1)
+	beqs	dbl_inf		|if positive, go ahead and write it
+	bsetl	#31,%d0		|if negative put in sign information
+dbl_inf:
+	movel	%d0,LOCAL_EX(%a1)	|put the new exp back on the stack
+	bras	dbl_wrt
+get_mant:
+	movel	LOCAL_HI(%a1),%d1	|get ms mantissa
+	bfextu	%d1{#1:#20},%d1	|get upper 20 bits of ms
+	orl	%d1,%d0		|put these bits in ms word of double
+	movel	%d0,LOCAL_EX(%a1)	|put the new exp back on the stack
+	movel	LOCAL_HI(%a1),%d1	|get ms mantissa
+	movel	#21,%d0		|load shift count
+	lsll	%d0,%d1		|put lower 11 bits in upper bits
+	movel	%d1,LOCAL_HI(%a1)	|build lower lword in memory
+	movel	LOCAL_LO(%a1),%d1	|get ls mantissa
+	bfextu	%d1{#0:#21},%d0	|get ls 21 bits of double
+	orl	%d0,LOCAL_HI(%a1)	|put them in double result
+dbl_wrt:
+	movel	#0x8,%d0		|byte count for double precision number
+	exg	%a0,%a1		|a0=supervisor source, a1=user dest
+	bsrl	mem_write	|move the number to the user's memory
+	rts
+|
+|	dest_sgl --- write single precision value to user space
+|
+|Input
+|	a0 -> destination address
+|	a1 -> source in extended precision
+|
+|Output
+|	a0 -> destroyed
+|	a1 -> destroyed
+|	d0 -> 0
+|
+|Changes extended precision to single precision.
+|	sgl_sign = ext_sign
+|	sgl_exp = ext_exp - $3fff(ext bias) + $7f(sgl bias)
+|	get rid of ext integer bit
+|	sgl_mant = ext_mant{62:40}
+|
+|		---------------   ---------------    ---------------
+|  extended ->  |s|    exp    |   |1| ms mant   |    | ls mant     |
+|		---------------   ---------------    ---------------
+|		 95	    64    63 62	   40 32      31     12	  0
+|				     |	   |
+|				     |	   |
+|				     |	   |
+|			             v     v
+|			      ---------------
+|  single   ->		      |s|exp| mant  |
+|			      ---------------
+|			      31     22     0
+|
+dest_sgl:
+	clrl	%d0
+	movew	LOCAL_EX(%a1),%d0	|get exponent
+	subw	#0x3fff,%d0	|subtract extended precision bias
+	cmpw	#0x4000,%d0	|check if inf
+	beqs	sinf		|if so, special case
+	addw	#0x7f,%d0		|add single precision bias
+	swap	%d0		|put exp in upper word of d0
+	lsll	#7,%d0		|shift it into single exp bits
+	tstb	LOCAL_SGN(%a1)
+	beqs	get_sman	|if positive, continue
+	bsetl	#31,%d0		|if negative, put in sign first
+	bras	get_sman	|get mantissa
+sinf:
+	movel	#0x7f800000,%d0	|load single inf exp to d0
+	tstb	LOCAL_SGN(%a1)
+	beqs	sgl_wrt		|if positive, continue
+	bsetl	#31,%d0		|if negative, put in sign info
+	bras	sgl_wrt
+
+get_sman:
+	movel	LOCAL_HI(%a1),%d1	|get ms mantissa
+	bfextu	%d1{#1:#23},%d1	|get upper 23 bits of ms
+	orl	%d1,%d0		|put these bits in ms word of single
+
+sgl_wrt:
+	movel	%d0,L_SCR1(%a6)	|put the new exp back on the stack
+	movel	#0x4,%d0		|byte count for single precision number
+	tstl	%a0		|user's destination address
+	beqs	sgl_Dn		|destination is a data register
+	exg	%a0,%a1		|a0=supervisor source, a1=user dest
+	leal	L_SCR1(%a6),%a0	|point a0 to data
+	bsrl	mem_write	|move the number to the user's memory
+	rts
+sgl_Dn:
+	bsrl	get_fline	|returns fline word in d0
+	andw	#0x7,%d0		|isolate register number
+	movel	%d0,%d1		|d1 has size:reg formatted for reg_dest
+	orl	#0x10,%d1		|reg_dest wants size added to reg#
+	bral	reg_dest	|size is X, rts in reg_dest will
+|				;return to caller of dest_sgl
+
+dest_ext:
+	tstb	LOCAL_SGN(%a1)	|put back sign into exponent word
+	beqs	dstx_cont
+	bsetb	#sign_bit,LOCAL_EX(%a1)
+dstx_cont:
+	clrb	LOCAL_SGN(%a1)	|clear out the sign byte
+
+	movel	#0x0c,%d0		|byte count for extended number
+	exg	%a0,%a1		|a0=supervisor source, a1=user dest
+	bsrl	mem_write	|move the number to the user's memory
+	rts
+
+	|end
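
The dest_dbl comment block above gives the extended-to-double repacking rule: rebias the exponent, drop the explicit integer bit, and truncate the mantissa with no rounding. Below is a host-side C sketch of the same repacking; the struct layout and the restriction to finite, normal values are assumptions made for the illustration.

#include <stdint.h>

/* 96-bit extended operand: sign and 15-bit exponent in the first longword,
 * then the mantissa with its explicit integer bit in mant_hi bit 31.
 */
struct ext96 {
	uint32_t sign_exp;	/* sign in bit 31, exponent in bits 30..16 */
	uint32_t mant_hi;
	uint32_t mant_lo;
};

static uint64_t ext_to_dbl_bits(const struct ext96 *x)
{
	uint64_t sign = (uint64_t)(x->sign_exp >> 31) << 63;
	/* dbl_exp = ext_exp - $3fff (ext bias) + $3ff (dbl bias) */
	uint64_t exp  = ((x->sign_exp >> 16) & 0x7fff) - 0x3fff + 0x3ff;
	/* 52 fraction bits: mantissa bits 62..32 then 31..11, truncated */
	uint64_t mant = ((uint64_t)(x->mant_hi & 0x7fffffff) << 21) |
			(x->mant_lo >> 11);

	return sign | (exp << 52) | mant;
}

dest_sgl above follows the same pattern with a $7f bias and 23 fraction bits (mantissa bits 62..40).
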
diff --git a/arch/m68k/fpsp040/x_unfl.S b/arch/m68k/fpsp040/x_unfl.S
new file mode 100644
index 0000000..077fcc2
--- /dev/null
+++ b/arch/m68k/fpsp040/x_unfl.S
@@ -0,0 +1,269 @@
+|
+|	x_unfl.sa 3.4 7/1/91
+|
+|	fpsp_unfl --- FPSP handler for underflow exception
+|
+| Trap disabled results
+|	For 881/2 compatibility, software must denormalize the intermediate
+| result, then store the result.  Denormalization is accomplished
+| by taking the intermediate result (which is always normalized) and
+| shifting the mantissa right while incrementing the exponent until
+| it is equal to the denormalized exponent for the destination
+| format.  After denormalization, the result is rounded to the
+| destination format.
+|
+| Trap enabled results
+|	All trap disabled code applies.	In addition the exceptional
+| operand needs to be made available to the user with a bias of $6000
+| added to the exponent.
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+X_UNFL:	|idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	|xref	denorm
+	|xref	round
+	|xref	store
+	|xref	g_rndpr
+	|xref	g_opcls
+	|xref	g_dfmtou
+	|xref	real_unfl
+	|xref	real_inex
+	|xref	fpsp_done
+	|xref	b1238_fix
+
+	.global	fpsp_unfl
+fpsp_unfl:
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%a7)
+	moveml		%d0-%d1/%a0-%a1,USER_DA(%a6)
+	fmovemx	%fp0-%fp3,USER_FP0(%a6)
+	fmoveml	%fpcr/%fpsr/%fpiar,USER_FPCR(%a6)
+
+|
+	bsrl		unf_res	|denormalize, round & store interm op
+|
+| If underflow exceptions are not enabled, check for inexact
+| exception
+|
+	btstb		#unfl_bit,FPCR_ENABLE(%a6)
+	beqs		ck_inex
+
+	btstb		#E3,E_BYTE(%a6)
+	beqs		no_e3_1
+|
+| Clear dirty bit on dest register in the frame before branching
+| to b1238_fix.
+|
+	bfextu		CMDREG3B(%a6){#6:#3},%d0	|get dest reg no
+	bclrb		%d0,FPR_DIRTY_BITS(%a6)	|clr dest dirty bit
+	bsrl		b1238_fix		|test for bug1238 case
+	movel		USER_FPSR(%a6),FPSR_SHADOW(%a6)
+	orl		#sx_mask,E_BYTE(%a6)
+no_e3_1:
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore	(%a7)+
+	unlk		%a6
+	bral		real_unfl
+|
+| It is possible to have either inex2 or inex1 exceptions with the
+| unfl.  If the inex enable bit is set in the FPCR, and either
+| inex2 or inex1 occurred, we must clean up and branch to the
+| real inex handler.
+|
+ck_inex:
+	moveb		FPCR_ENABLE(%a6),%d0
+	andb		FPSR_EXCEPT(%a6),%d0
+	andib		#0x3,%d0
+	beqs		unfl_done
+
+|
+| Inexact enabled and reported, and we must take an inexact exception
+|
+take_inex:
+	btstb		#E3,E_BYTE(%a6)
+	beqs		no_e3_2
+|
+| Clear dirty bit on dest register in the frame before branching
+| to b1238_fix.
+|
+	bfextu		CMDREG3B(%a6){#6:#3},%d0	|get dest reg no
+	bclrb		%d0,FPR_DIRTY_BITS(%a6)	|clr dest dirty bit
+	bsrl		b1238_fix		|test for bug1238 case
+	movel		USER_FPSR(%a6),FPSR_SHADOW(%a6)
+	orl		#sx_mask,E_BYTE(%a6)
+no_e3_2:
+	moveb		#INEX_VEC,EXC_VEC+1(%a6)
+	moveml         USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx        USER_FP0(%a6),%fp0-%fp3
+	fmoveml        USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore        (%a7)+
+	unlk            %a6
+	bral		real_inex
+
+unfl_done:
+	bclrb		#E3,E_BYTE(%a6)
+	beqs		e1_set		|if set then branch
+|
+| Clear dirty bit on dest register in the frame before branching
+| to b1238_fix.
+|
+	bfextu		CMDREG3B(%a6){#6:#3},%d0		|get dest reg no
+	bclrb		%d0,FPR_DIRTY_BITS(%a6)	|clr dest dirty bit
+	bsrl		b1238_fix		|test for bug1238 case
+	movel		USER_FPSR(%a6),FPSR_SHADOW(%a6)
+	orl		#sx_mask,E_BYTE(%a6)
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	frestore	(%a7)+
+	unlk		%a6
+	bral		fpsp_done
+e1_set:
+	moveml		USER_DA(%a6),%d0-%d1/%a0-%a1
+	fmovemx	USER_FP0(%a6),%fp0-%fp3
+	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
+	unlk		%a6
+	bral		fpsp_done
+|
+|	unf_res --- underflow result calculation
+|
+unf_res:
+	bsrl		g_rndpr		|returns RND_PREC in d0 0=ext,
+|					;1=sgl, 2=dbl
+|					;we need the RND_PREC in the
+|					;upper word for round
+	movew		#0,-(%a7)
+	movew		%d0,-(%a7)	|copy RND_PREC to stack
+|
+|
+| If the exception bit set is E3, the exceptional operand from the
+| fpu is in WBTEMP; else it is in FPTEMP.
+|
+	btstb		#E3,E_BYTE(%a6)
+	beqs		unf_E1
+unf_E3:
+	lea		WBTEMP(%a6),%a0	|a0 now points to operand
+|
+| Test for fsgldiv and fsglmul.  If the inst was one of these, then
+| force the precision to extended for the denorm routine.  Use
+| the user's precision for the round routine.
+|
+	movew		CMDREG3B(%a6),%d1	|check for fsgldiv or fsglmul
+	andiw		#0x7f,%d1
+	cmpiw		#0x30,%d1		|check for sgldiv
+	beqs		unf_sgl
+	cmpiw		#0x33,%d1		|check for sglmul
+	bnes		unf_cont	|if not, use fpcr prec in round
+unf_sgl:
+	clrl		%d0
+	movew		#0x1,(%a7)	|override g_rndpr precision
+|					;force single
+	bras		unf_cont
+unf_E1:
+	lea		FPTEMP(%a6),%a0	|a0 now points to operand
+unf_cont:
+	bclrb		#sign_bit,LOCAL_EX(%a0)	|clear sign bit
+	sne		LOCAL_SGN(%a0)		|store sign
+
+	bsrl		denorm		|returns denorm, a0 points to it
+|
+| WARNING:
+|				;d0 has guard,round sticky bit
+|				;make sure that it is not corrupted
+|				;before it reaches the round subroutine
+|				;also ensure that a0 isn't corrupted
+
+|
+| Set up d1 for round subroutine d1 contains the PREC/MODE
+| information respectively on upper/lower register halves.
+|
+	bfextu		FPCR_MODE(%a6){#2:#2},%d1	|get mode from FPCR
+|						;mode in lower d1
+	addl		(%a7)+,%d1		|merge PREC/MODE
+|
+| WARNING: a0 and d0 are assumed to be intact between the denorm and
+| round subroutines. All code between these two subroutines
+| must not corrupt a0 and d0.
+|
+|
+| Perform Round
+|	Input:		a0 points to input operand
+|			d0{31:29} has guard, round, sticky
+|			d1{01:00} has rounding mode
+|			d1{17:16} has rounding precision
+|	Output:		a0 points to rounded operand
+|
+
+	bsrl		round		|returns rounded denorm at (a0)
+|
+| Differentiate between store to memory vs. store to register
+|
+unf_store:
+	bsrl		g_opcls		|returns opclass in d0{2:0}
+	cmpib		#0x3,%d0
+	bnes		not_opc011
+|
+| At this point, a store to memory is pending
+|
+opc011:
+	bsrl		g_dfmtou
+	tstb		%d0
+	beqs		ext_opc011	|If extended, do not subtract
+|				;If destination format is sgl/dbl,
+	tstb		LOCAL_HI(%a0)	|If rounded result is normal,don't
+|					;subtract
+	bmis		ext_opc011
+	subqw		#1,LOCAL_EX(%a0)	|account for denorm bias vs.
+|				;normalized bias
+|				;          normalized   denormalized
+|				;single       $7f           $7e
+|				;double       $3ff          $3fe
+|
+ext_opc011:
+	bsrl		store		|stores to memory
+	bras		unf_done	|finish up
+
+|
+| At this point, a store to a float register is pending
+|
+not_opc011:
+	bsrl		store	|stores to float register
+|				;a0 is not corrupted on a store to a
+|				;float register.
+|
+| Set the condition codes according to result
+|
+	tstl		LOCAL_HI(%a0)	|check upper mantissa
+	bnes		ck_sgn
+	tstl		LOCAL_LO(%a0)	|check lower mantissa
+	bnes		ck_sgn
+	bsetb		#z_bit,FPSR_CC(%a6) |set condition codes if zero
+ck_sgn:
+	btstb		#sign_bit,LOCAL_EX(%a0)	|check the sign bit
+	beqs		unf_done
+	bsetb		#neg_bit,FPSR_CC(%a6)
+
+|
+| Finish.
+|
+unf_done:
+	btstb		#inex2_bit,FPSR_EXCEPT(%a6)
+	beqs		no_aunfl
+	bsetb		#aunfl_bit,FPSR_AEXCEPT(%a6)
+no_aunfl:
+	rts
+
+	|end
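
The x_unfl.S header above describes denormalization as shifting the mantissa right while incrementing the exponent until it equals the destination format's denormalized exponent, after which the result is rounded. A minimal sketch of just that loop, with the guard/round/sticky collection that the real denorm routine also performs left out:

#include <stdint.h>

/* Shift right and bump the exponent until it reaches min_exp; the bits
 * shifted out would feed the guard/round/sticky inputs of the rounder.
 */
static uint64_t denormalize(uint64_t mant, int *exp, int min_exp)
{
	while (*exp < min_exp) {
		mant >>= 1;
		(*exp)++;
	}
	return mant;
}
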
diff --git a/arch/m68k/fpsp040/x_unimp.S b/arch/m68k/fpsp040/x_unimp.S
new file mode 100644
index 0000000..920cb94
--- /dev/null
+++ b/arch/m68k/fpsp040/x_unimp.S
@@ -0,0 +1,77 @@
+|
+|	x_unimp.sa 3.3 7/1/91
+|
+|	fpsp_unimp --- FPSP handler for unimplemented instruction
+|	exception.
+|
+| Invoked when the user program encounters a floating-point
+| op-code that hardware does not support.  Trap vector# 11
+| (See table 8-1 MC68030 User's Manual).
+|
+|
+| Note: An fsave for an unimplemented inst. will create a short
+| fsave stack.
+|
+|  Input: 1. Six word stack frame for unimplemented inst, four word
+|            for illegal
+|            (See table 8-7 MC68030 User's Manual).
+|         2. Unimp (short) fsave state frame created here by fsave
+|            instruction.
+|
+|
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+X_UNIMP:	|idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	|xref	get_op
+	|xref	do_func
+	|xref	sto_res
+	|xref	gen_except
+	|xref	fpsp_fmt_error
+
+	.global	fpsp_unimp
+	.global	uni_2
+fpsp_unimp:
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%a7)
+uni_2:
+	moveml		%d0-%d1/%a0-%a1,USER_DA(%a6)
+	fmovemx	%fp0-%fp3,USER_FP0(%a6)
+	fmoveml	%fpcr/%fpsr/%fpiar,USER_FPCR(%a6)
+	moveb		(%a7),%d0		|test for valid version num
+	andib		#0xf0,%d0		|test for $4x
+	cmpib		#VER_4,%d0	|must be $4x or exit
+	bnel		fpsp_fmt_error
+|
+|	Temporary D25B Fix
+|	The following lines are used to ensure that the FPSR
+|	exception byte and condition codes are clear before proceeding
+|
+	movel		USER_FPSR(%a6),%d0
+	andl		#0xFF00FF,%d0	|clear all but accrued exceptions
+	movel		%d0,USER_FPSR(%a6)
+	fmovel		#0,%FPSR |clear all user bits
+	fmovel		#0,%FPCR	|clear all user exceptions for FPSP
+
+	clrb		UFLG_TMP(%a6)	|clr flag for unsupp data
+
+	bsrl		get_op		|go get operand(s)
+	clrb		STORE_FLG(%a6)
+	bsrl		do_func		|do the function
+	fsave		-(%a7)		|capture possible exc state
+	tstb		STORE_FLG(%a6)
+	bnes		no_store	|if STORE_FLG is set, no store
+	bsrl		sto_res		|store the result in user space
+no_store:
+	bral		gen_except	|post any exceptions and return
+
+	|end
diff --git a/arch/m68k/fpsp040/x_unsupp.S b/arch/m68k/fpsp040/x_unsupp.S
new file mode 100644
index 0000000..4ec5728
--- /dev/null
+++ b/arch/m68k/fpsp040/x_unsupp.S
@@ -0,0 +1,83 @@
+|
+|	x_unsupp.sa 3.3 7/1/91
+|
+|	fpsp_unsupp --- FPSP handler for unsupported data type exception
+|
+| Trap vector #55	(See table 8-1 MC68030 User's Manual).
+| Invoked when the user program encounters a data format (packed) or a
+| data type (denormalized or unnormalized numbers) that the hardware
+| does not support.
+| Normalizes denorms and unnorms, unpacks packed numbers, then stores
+| them back into the machine to let the 040 finish the operation.
+|
+| Unsupp calls two routines:
+|	1. get_op -  gets the operand(s)
+|	2. res_func - restore the function back into the 040 or
+|			if fmove.p fpm,<ea> then pack source (fpm)
+|			and store in user's memory <ea>.
+|
+|  Input: Long fsave stack frame
+|
+|
+
+|		Copyright (C) Motorola, Inc. 1990
+|			All Rights Reserved
+|
+|	THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
+|	The copyright notice above does not evidence any
+|	actual or intended publication of such source code.
+
+X_UNSUPP:	|idnt    2,1 | Motorola 040 Floating Point Software Package
+
+	|section	8
+
+#include "fpsp.h"
+
+	|xref	get_op
+	|xref	res_func
+	|xref	gen_except
+	|xref	fpsp_fmt_error
+
+	.global	fpsp_unsupp
+fpsp_unsupp:
+|
+	link		%a6,#-LOCAL_SIZE
+	fsave		-(%a7)
+	moveml		%d0-%d1/%a0-%a1,USER_DA(%a6)
+	fmovemx	%fp0-%fp3,USER_FP0(%a6)
+	fmoveml	%fpcr/%fpsr/%fpiar,USER_FPCR(%a6)
+
+
+	moveb		(%a7),VER_TMP(%a6) |save version number
+	moveb		(%a7),%d0		|test for valid version num
+	andib		#0xf0,%d0		|test for $4x
+	cmpib		#VER_4,%d0	|must be $4x or exit
+	bnel		fpsp_fmt_error
+
+	fmovel		#0,%FPSR		|clear all user status bits
+	fmovel		#0,%FPCR		|clear all user control bits
+|
+|	The following lines are used to ensure that the FPSR
+|	exception byte and condition codes are clear before proceeding,
+|	except in the case of fmove, which leaves the cc's intact.
+|
+unsupp_con:
+	movel		USER_FPSR(%a6),%d1
+	btst		#5,CMDREG1B(%a6)	|looking for fmove out
+	bne		fmove_con
+	andl		#0xFF00FF,%d1	|clear all but aexcs and qbyte
+	bras		end_fix
+fmove_con:
+	andl		#0x0FFF40FF,%d1	|clear all but cc's, snan bit, aexcs, and qbyte
+end_fix:
+	movel		%d1,USER_FPSR(%a6)
+
+	st		UFLG_TMP(%a6)	|set flag for unsupp data
+
+	bsrl		get_op		|everything okay, go get operand(s)
+	bsrl		res_func	|fix up stack frame so can restore it
+	clrl		-(%a7)
+	moveb		VER_TMP(%a6),(%a7) |move idle fmt word to top of stack
+	bral		gen_except
+|
+	|end
diff --git a/arch/m68k/hp300/Makefile b/arch/m68k/hp300/Makefile
new file mode 100644
index 0000000..89b6317
--- /dev/null
+++ b/arch/m68k/hp300/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Linux arch/m68k/hp300 source directory
+#
+
+obj-y		:= ksyms.o config.o ints.o time.o reboot.o
diff --git a/arch/m68k/hp300/README.hp300 b/arch/m68k/hp300/README.hp300
new file mode 100644
index 0000000..47073fb
--- /dev/null
+++ b/arch/m68k/hp300/README.hp300
@@ -0,0 +1,14 @@
+HP300 notes
+-----------
+
+The Linux/HP web page is at <http://www.tazenda.demon.co.uk/phil/linux-hp/>
+
+Currently only 9000/340 machines have been tested.  Any amount of RAM should
+work now but I've only tried 16MB and 12MB.
+
+The serial console is probably broken at the moment but the Topcat/HIL keyboard
+combination seems to work for me.  Your mileage may vary.
+
+The LANCE driver works after a fashion but only if you reset the chip before
+every packet.  This doesn't make for very speedy operation.
+
diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c
new file mode 100644
index 0000000..a0b854f
--- /dev/null
+++ b/arch/m68k/hp300/config.c
@@ -0,0 +1,279 @@
+/*
+ *  linux/arch/m68k/hp300/config.c
+ *
+ *  Copyright (C) 1998 Philip Blundell <philb@gnu.org>
+ *
+ *  This file contains the HP300-specific initialisation code.  It gets
+ *  called by setup.c.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/console.h>
+
+#include <asm/bootinfo.h>
+#include <asm/machdep.h>
+#include <asm/blinken.h>
+#include <asm/io.h>                               /* readb() and writeb() */
+#include <asm/hp300hw.h>
+#include <asm/rtc.h>
+
+#include "ints.h"
+#include "time.h"
+
+unsigned long hp300_model;
+unsigned long hp300_uart_scode = -1;
+unsigned char ledstate;
+
+static char s_hp330[] __initdata = "330";
+static char s_hp340[] __initdata = "340";
+static char s_hp345[] __initdata = "345";
+static char s_hp360[] __initdata = "360";
+static char s_hp370[] __initdata = "370";
+static char s_hp375[] __initdata = "375";
+static char s_hp380[] __initdata = "380";
+static char s_hp385[] __initdata = "385";
+static char s_hp400[] __initdata = "400";
+static char s_hp425t[] __initdata = "425t";
+static char s_hp425s[] __initdata = "425s";
+static char s_hp425e[] __initdata = "425e";
+static char s_hp433t[] __initdata = "433t";
+static char s_hp433s[] __initdata = "433s";
+static char *hp300_models[] __initdata = {
+	[HP_320]	= NULL,
+	[HP_330]	= s_hp330,
+	[HP_340]	= s_hp340,
+	[HP_345]	= s_hp345,
+	[HP_350]	= NULL,
+	[HP_360]	= s_hp360,
+	[HP_370]	= s_hp370,
+	[HP_375]	= s_hp375,
+	[HP_380]	= s_hp380,
+	[HP_385]	= s_hp385,
+	[HP_400]	= s_hp400,
+	[HP_425T]	= s_hp425t,
+	[HP_425S]	= s_hp425s,
+	[HP_425E]	= s_hp425e,
+	[HP_433T]	= s_hp433t,
+	[HP_433S]	= s_hp433s,
+};
+
+static char hp300_model_name[13] = "HP9000/";
+
+extern void hp300_reset(void);
+extern irqreturn_t (*hp300_default_handler[])(int, void *, struct pt_regs *);
+extern int show_hp300_interrupts(struct seq_file *, void *);
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+extern int hp300_setup_serial_console(void) __init;
+#endif
+
+int __init hp300_parse_bootinfo(const struct bi_record *record)
+{
+	int unknown = 0;
+	const unsigned long *data = record->data;
+
+	switch (record->tag) {
+	case BI_HP300_MODEL:
+		hp300_model = *data;
+		break;
+
+	case BI_HP300_UART_SCODE:
+		hp300_uart_scode = *data;
+		break;
+
+	case BI_HP300_UART_ADDR:
+		/* serial port address: ignored here */
+		break;
+
+        default:
+		unknown = 1;
+	}
+
+	return unknown;
+}
+
+#ifdef CONFIG_HEARTBEAT
+static void hp300_pulse(int x)
+{
+	if (x)
+		blinken_leds(0x10, 0);
+	else
+		blinken_leds(0, 0x10);
+}
+#endif
+
+static void hp300_get_model(char *model)
+{
+	strcpy(model, hp300_model_name);
+}
+
+#define RTCBASE			0xf0420000
+#define RTC_DATA		0x1
+#define RTC_CMD			0x3
+
+#define	RTC_BUSY		0x02
+#define	RTC_DATA_RDY		0x01
+
+#define rtc_busy()		(in_8(RTCBASE + RTC_CMD) & RTC_BUSY)
+#define rtc_data_available()	(in_8(RTCBASE + RTC_CMD) & RTC_DATA_RDY)
+#define rtc_status()		(in_8(RTCBASE + RTC_CMD))
+#define rtc_command(x)		out_8(RTCBASE + RTC_CMD, (x))
+#define rtc_read_data()		(in_8(RTCBASE + RTC_DATA))
+#define rtc_write_data(x)	out_8(RTCBASE + RTC_DATA, (x))
+
+#define RTC_SETREG	0xe0
+#define RTC_WRITEREG	0xc2
+#define RTC_READREG	0xc3
+
+#define RTC_REG_SEC2	0
+#define RTC_REG_SEC1	1
+#define RTC_REG_MIN2	2
+#define RTC_REG_MIN1	3
+#define RTC_REG_HOUR2	4
+#define RTC_REG_HOUR1	5
+#define RTC_REG_WDAY	6
+#define RTC_REG_DAY2	7
+#define RTC_REG_DAY1	8
+#define RTC_REG_MON2	9
+#define RTC_REG_MON1	10
+#define RTC_REG_YEAR2	11
+#define RTC_REG_YEAR1	12
+
+#define RTC_HOUR1_24HMODE 0x8
+
+#define RTC_STAT_MASK	0xf0
+#define RTC_STAT_RDY	0x40
+
+static inline unsigned char hp300_rtc_read(unsigned char reg)
+{
+	unsigned char s, ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	while (rtc_busy());
+	rtc_command(RTC_SETREG);
+	while (rtc_busy());
+	rtc_write_data(reg);
+	while (rtc_busy());
+	rtc_command(RTC_READREG);
+
+	do {
+		while (!rtc_data_available());
+		s = rtc_status();
+		ret = rtc_read_data();
+	} while ((s & RTC_STAT_MASK) != RTC_STAT_RDY);
+
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static inline unsigned char hp300_rtc_write(unsigned char reg,
+					    unsigned char val)
+{
+	unsigned char s, ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	while (rtc_busy());
+	rtc_command(RTC_SETREG);
+	while (rtc_busy());
+	rtc_write_data((val << 4) | reg);
+	while (rtc_busy());
+	rtc_command(RTC_WRITEREG);
+	while (rtc_busy());
+	rtc_command(RTC_READREG);
+
+	do {
+		while (!rtc_data_available());
+		s = rtc_status();
+		ret = rtc_read_data();
+	} while ((s & RTC_STAT_MASK) != RTC_STAT_RDY);
+
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static int hp300_hwclk(int op, struct rtc_time *t)
+{
+	if (!op) { /* read */
+		t->tm_sec  = hp300_rtc_read(RTC_REG_SEC1) * 10 +
+			hp300_rtc_read(RTC_REG_SEC2);
+		t->tm_min  = hp300_rtc_read(RTC_REG_MIN1) * 10 +
+			hp300_rtc_read(RTC_REG_MIN2);
+		t->tm_hour = (hp300_rtc_read(RTC_REG_HOUR1) & 3) * 10 +
+			hp300_rtc_read(RTC_REG_HOUR2);
+		t->tm_wday = -1;
+		t->tm_mday = hp300_rtc_read(RTC_REG_DAY1) * 10 +
+			hp300_rtc_read(RTC_REG_DAY2);
+		t->tm_mon  = hp300_rtc_read(RTC_REG_MON1) * 10 +
+			hp300_rtc_read(RTC_REG_MON2) - 1;
+		t->tm_year = hp300_rtc_read(RTC_REG_YEAR1) * 10 +
+			hp300_rtc_read(RTC_REG_YEAR2);
+		if (t->tm_year <= 69)
+			t->tm_year += 100;
+	} else {
+		hp300_rtc_write(RTC_REG_SEC1, t->tm_sec / 10);
+		hp300_rtc_write(RTC_REG_SEC2, t->tm_sec % 10);
+		hp300_rtc_write(RTC_REG_MIN1, t->tm_min / 10);
+		hp300_rtc_write(RTC_REG_MIN2, t->tm_min % 10);
+		hp300_rtc_write(RTC_REG_HOUR1,
+				((t->tm_hour / 10) & 3) | RTC_HOUR1_24HMODE);
+		hp300_rtc_write(RTC_REG_HOUR2, t->tm_hour % 10);
+		hp300_rtc_write(RTC_REG_DAY1, t->tm_mday / 10);
+		hp300_rtc_write(RTC_REG_DAY2, t->tm_mday % 10);
+		hp300_rtc_write(RTC_REG_MON1, (t->tm_mon + 1) / 10);
+		hp300_rtc_write(RTC_REG_MON2, (t->tm_mon + 1) % 10);
+		if (t->tm_year >= 100)
+			t->tm_year -= 100;
+		hp300_rtc_write(RTC_REG_YEAR1, t->tm_year / 10);
+		hp300_rtc_write(RTC_REG_YEAR2, t->tm_year % 10);
+	}
+
+	return 0;
+}
+
+static unsigned int hp300_get_ss(void)
+{
+	return hp300_rtc_read(RTC_REG_SEC1) * 10 +
+		hp300_rtc_read(RTC_REG_SEC2);
+}
+
+void __init config_hp300(void)
+{
+	mach_sched_init      = hp300_sched_init;
+	mach_init_IRQ        = hp300_init_IRQ;
+	mach_request_irq     = hp300_request_irq;
+	mach_free_irq        = hp300_free_irq;
+	mach_get_model       = hp300_get_model;
+	mach_get_irq_list    = show_hp300_interrupts;
+	mach_gettimeoffset   = hp300_gettimeoffset;
+	mach_default_handler = &hp300_default_handler;
+	mach_hwclk	     = hp300_hwclk;
+	mach_get_ss	     = hp300_get_ss;
+	mach_reset           = hp300_reset;
+#ifdef CONFIG_HEARTBEAT
+	mach_heartbeat       = hp300_pulse;
+#endif
+#ifdef CONFIG_DUMMY_CONSOLE
+	conswitchp	     = &dummy_con;
+#endif
+	mach_max_dma_address = 0xffffffff;
+
+	if (hp300_model >= HP_330 && hp300_model <= HP_433S && hp300_model != HP_350) {
+		printk(KERN_INFO "Detected HP9000 model %s\n", hp300_models[hp300_model-HP_320]);
+		strcat(hp300_model_name, hp300_models[hp300_model-HP_320]);
+	}
+	else {
+		panic("Unknown HP9000 Model");
+	}
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+	hp300_setup_serial_console();
+#endif
+}
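
The RTC accessors above keep each clock field in two single-digit registers, a tens register and a units register, which is why hp300_hwclk() multiplies and divides by 10. A hypothetical helper (not part of this file) that reads such a pair:

/* Read a tens/units register pair as one integer, e.g.
 * rtc_read_pair(RTC_REG_SEC1, RTC_REG_SEC2) gives the seconds 0..59.
 */
static int rtc_read_pair(unsigned char tens_reg, unsigned char units_reg)
{
	return hp300_rtc_read(tens_reg) * 10 + hp300_rtc_read(units_reg);
}
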
diff --git a/arch/m68k/hp300/hp300map.map b/arch/m68k/hp300/hp300map.map
new file mode 100644
index 0000000..6b45f0a
--- /dev/null
+++ b/arch/m68k/hp300/hp300map.map
@@ -0,0 +1,252 @@
+# HP300 kernel keymap. This uses 7 modifier combinations.
+keymaps 0-2,4-5,8,12
+# Change the above line into
+#	keymaps 0-2,4-6,8,12
+# in case you want the entries
+#	altgr   control keycode  83 = Boot
+#	altgr   control keycode 111 = Boot
+# below.
+#
+# In fact AltGr is used very little, and one more keymap can
+# be saved by mapping AltGr to Alt (and adapting a few entries):
+# keycode 100 = Alt
+#
+keycode   1 =
+keycode   2 = Alt
+keycode   3 = Alt
+keycode   4 = Shift
+keycode   5 = Shift
+keycode   6 = Control
+keycode   7 =
+keycode   8 =
+keycode   9 =
+keycode  10 =
+keycode  11 =
+keycode  12 =
+keycode  13 =
+keycode  14 =
+keycode  15 =
+keycode  16 =
+keycode  17 =
+keycode  18 =
+keycode  19 =
+keycode  20 =
+keycode  21 =
+keycode  22 =
+keycode  23 =
+keycode  24 = b
+keycode  25 = v
+keycode  26 = c
+keycode  27 = x
+keycode  28 = z
+keycode  29 =
+keycode  30 =
+keycode  31 = Escape		Delete
+keycode  32 =
+keycode  33 =
+keycode  34 =
+keycode  35 =
+keycode  36 =
+keycode  37 =
+keycode  38 =
+keycode  39 =
+keycode  40 = h
+keycode  41 = g
+keycode  42 = f
+keycode  43 = d
+keycode  44 = s
+keycode  45 = a
+keycode  46 =
+keycode  47 = Caps_Lock
+keycode  48 = u
+keycode  49 = y
+keycode  50 = t
+keycode  51 = r
+keycode  52 = e
+keycode  53 = w
+keycode  54 = q
+keycode  55 = Tab		Tab
+	alt     keycode    55 = Meta_Tab
+keycode  56 = seven		ampersand
+keycode  57 = six		asciicircum
+keycode  58 = five		percent
+keycode  59 = four		dollar
+keycode  60 = three		numbersign
+keycode  61 = two		at		at
+keycode  62 = one		exclam		exclam
+keycode  63 = grave		asciitilde
+	control keycode    63 = nul
+	alt     keycode    63 = Meta_grave
+keycode  64 =
+keycode  65 =
+keycode  66 =
+keycode  67 =
+keycode  68 =
+keycode  69 =
+keycode  70 =
+keycode  71 =
+keycode  72 =
+keycode  73 = F4
+	control keycode	   73 = Console_4
+keycode  74 = F3
+	control keycode	   74 = Console_3
+keycode  75 = F2
+	control keycode	   75 = Console_2
+keycode  76 = F1
+	control keycode	   76 = Console_1
+keycode  77 =
+keycode  78 =
+keycode  79 =
+keycode  80 =
+keycode  81 = F5
+	control keycode	   81 = Console_5
+keycode  82 = F6
+	control keycode	   82 = Console_6
+keycode  83 = F7
+	control keycode	   83 = Console_7
+keycode  84 = F8
+	control keycode	   84 = Console_8
+keycode  85 =
+keycode  86 =
+keycode  87 =
+keycode  88 = eight		asterisk	asterisk
+keycode  89 = nine		parenleft	bracketleft
+keycode  90 = zero		parenright	bracketright
+keycode  91 = minus		underscore
+keycode  92 = equal		plus
+keycode  93 = BackSpace
+keycode  94 =
+keycode  95 =
+keycode  96 = i
+keycode  97 = o
+keycode  98 = p
+keycode  99 = bracketleft	braceleft
+keycode 100 = bracketright	braceright
+keycode 101 = backslash		bar
+	control keycode   101 = Control_backslash
+	alt     keycode   101 = Meta_backslash
+keycode 102 =
+keycode 103 =
+keycode 104 = j
+keycode 105 = k
+keycode 106 = l
+keycode 107 = semicolon		colon
+	alt     keycode   107 = Meta_semicolon
+keycode 108 = apostrophe	quotedbl
+	control keycode   108 = Control_g
+	alt     keycode   108 = Meta_apostrophe
+keycode 109 = Return
+keycode 110 =
+keycode 111 =
+keycode 112 = m
+keycode 113 = comma		less
+keycode 114 = period		greater
+keycode 115 = slash		question
+keycode 116 =
+keycode 117 =
+keycode 118 =
+keycode 119 =
+keycode 120 = n
+keycode 121 = space		space
+keycode 122 =
+keycode 123 =
+keycode 124 = Left
+keycode 125 = Down
+keycode 126 = Up
+keycode 127 = Right
+string F1 = "\033[[A"
+string F2 = "\033[[B"
+string F3 = "\033[[C"
+string F4 = "\033[[D"
+string F5 = "\033[[E"
+string F6 = "\033[17~"
+string F7 = "\033[18~"
+string F8 = "\033[19~"
+string F9 = "\033[20~"
+string F10 = "\033[21~"
+string F11 = "\033[23~"
+string F12 = "\033[24~"
+string F13 = "\033[25~"
+string F14 = "\033[26~"
+string F15 = "\033[28~"
+string F16 = "\033[29~"
+string F17 = "\033[31~"
+string F18 = "\033[32~"
+string F19 = "\033[33~"
+string F20 = "\033[34~"
+string Find = "\033[1~"
+string Insert = "\033[2~"
+string Remove = "\033[3~"
+string Select = "\033[4~"
+string Prior = "\033[5~"
+string Next = "\033[6~"
+string Macro = "\033[M"
+string Pause = "\033[P"
+compose '`' 'A' to 'À'
+compose '`' 'a' to 'à'
+compose '\'' 'A' to 'Á'
+compose '\'' 'a' to 'á'
+compose '^' 'A' to 'Â'
+compose '^' 'a' to 'â'
+compose '~' 'A' to 'Ã'
+compose '~' 'a' to 'ã'
+compose '"' 'A' to 'Ä'
+compose '"' 'a' to 'ä'
+compose 'O' 'A' to 'Å'
+compose 'o' 'a' to 'å'
+compose '0' 'A' to 'Å'
+compose '0' 'a' to 'å'
+compose 'A' 'A' to 'Å'
+compose 'a' 'a' to 'å'
+compose 'A' 'E' to 'Æ'
+compose 'a' 'e' to 'æ'
+compose ',' 'C' to 'Ç'
+compose ',' 'c' to 'ç'
+compose '`' 'E' to 'È'
+compose '`' 'e' to 'è'
+compose '\'' 'E' to 'É'
+compose '\'' 'e' to 'é'
+compose '^' 'E' to 'Ê'
+compose '^' 'e' to 'ê'
+compose '"' 'E' to 'Ë'
+compose '"' 'e' to 'ë'
+compose '`' 'I' to 'Ì'
+compose '`' 'i' to 'ì'
+compose '\'' 'I' to 'Í'
+compose '\'' 'i' to 'í'
+compose '^' 'I' to 'Î'
+compose '^' 'i' to 'î'
+compose '"' 'I' to 'Ï'
+compose '"' 'i' to 'ï'
+compose '-' 'D' to 'Ð'
+compose '-' 'd' to 'ð'
+compose '~' 'N' to 'Ñ'
+compose '~' 'n' to 'ñ'
+compose '`' 'O' to 'Ò'
+compose '`' 'o' to 'ò'
+compose '\'' 'O' to 'Ó'
+compose '\'' 'o' to 'ó'
+compose '^' 'O' to 'Ô'
+compose '^' 'o' to 'ô'
+compose '~' 'O' to 'Õ'
+compose '~' 'o' to 'õ'
+compose '"' 'O' to 'Ö'
+compose '"' 'o' to 'ö'
+compose '/' 'O' to 'Ø'
+compose '/' 'o' to 'ø'
+compose '`' 'U' to 'Ù'
+compose '`' 'u' to 'ù'
+compose '\'' 'U' to 'Ú'
+compose '\'' 'u' to 'ú'
+compose '^' 'U' to 'Û'
+compose '^' 'u' to 'û'
+compose '"' 'U' to 'Ü'
+compose '"' 'u' to 'ü'
+compose '\'' 'Y' to 'Ý'
+compose '\'' 'y' to 'ý'
+compose 'T' 'H' to 'Þ'
+compose 't' 'h' to 'þ'
+compose 's' 's' to 'ß'
+compose '"' 'y' to 'ÿ'
+compose 's' 'z' to 'ß'
+compose 'i' 'j' to 'ÿ'
diff --git a/arch/m68k/hp300/ints.c b/arch/m68k/hp300/ints.c
new file mode 100644
index 0000000..0c5bb40
--- /dev/null
+++ b/arch/m68k/hp300/ints.c
@@ -0,0 +1,175 @@
+/*
+ *  linux/arch/m68k/hp300/ints.c
+ *
+ *  Copyright (C) 1998 Philip Blundell <philb@gnu.org>
+ *
+ *  This file contains the HP300-specific interrupt handling.
+ *  We only use the autovector interrupts, and therefore we need to
+ *  maintain lists of devices sharing each ipl.
+ *  [ipl list code added by Peter Maydell <pmaydell@chiark.greenend.org.uk> 06/1998]
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+#include <asm/ptrace.h>
+#include <asm/errno.h>
+#include "ints.h"
+
+/* Each ipl has a linked list of interrupt service routines.
+ * Service routines are added via hp300_request_irq() and removed
+ * via hp300_free_irq(). The device driver should set IRQ_FLG_FAST
+ * if it needs to be serviced early (eg FIFOless UARTs); this will
+ * cause it to be added at the front of the queue rather than
+ * the back.
+ * Currently IRQ_FLG_SLOW and flags=0 are treated identically; if
+ * we needed three levels of priority we could distinguish them
+ * but this strikes me as mildly ugly...
+ */
+
+/* we start with no entries in any list */
+static irq_node_t *hp300_irq_list[HP300_NUM_IRQS];
+
+static spinlock_t irqlist_lock;
+
+/* This handler receives all interrupts, dispatching them to the registered handlers */
+static irqreturn_t hp300_int_handler(int irq, void *dev_id, struct pt_regs *fp)
+{
+        irq_node_t *t;
+        /* We just give every handler on the chain an opportunity to handle
+         * the interrupt, in priority order.
+         */
+        for(t = hp300_irq_list[irq]; t; t=t->next)
+                t->handler(irq, t->dev_id, fp);
+        /* We could put in some accounting routines, checks for stray interrupts,
+         * etc, in here. Note that currently we can't tell whether or not
+         * a handler handles the interrupt, though.
+         */
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t hp300_badint(int irq, void *dev_id, struct pt_regs *fp)
+{
+	num_spurious += 1;
+	return IRQ_NONE;
+}
+
+irqreturn_t (*hp300_default_handler[SYS_IRQS])(int, void *, struct pt_regs *) = {
+	[0] = hp300_badint,
+	[1] = hp300_int_handler,
+	[2] = hp300_int_handler,
+	[3] = hp300_int_handler,
+	[4] = hp300_int_handler,
+	[5] = hp300_int_handler,
+	[6] = hp300_int_handler,
+	[7] = hp300_int_handler
+};
+
+/* dev_id had better be unique to each handler because it's the only way we have
+ * to distinguish handlers when removing them...
+ *
+ * It would be pretty easy to support IRQ_FLG_LOCK (handler is not replaceable)
+ * and IRQ_FLG_REPLACE (handler replaces existing one with this dev_id)
+ * if we wanted to. IRQ_FLG_FAST is needed for devices where interrupt latency
+ * matters (eg the dreaded FIFOless UART...)
+ */
+int hp300_request_irq(unsigned int irq,
+                      irqreturn_t (*handler) (int, void *, struct pt_regs *),
+                      unsigned long flags, const char *devname, void *dev_id)
+{
+        irq_node_t *t, *n = new_irq_node();
+
+        if (!n)                                   /* oops, no free nodes */
+                return -ENOMEM;
+
+	spin_lock_irqsave(&irqlist_lock, flags);
+
+        if (!hp300_irq_list[irq]) {
+                /* no list yet */
+                hp300_irq_list[irq] = n;
+                n->next = NULL;
+        } else if (flags & IRQ_FLG_FAST) {
+                /* insert at head of list */
+                n->next = hp300_irq_list[irq];
+                hp300_irq_list[irq] = n;
+        } else {
+                /* insert at end of list */
+                for(t = hp300_irq_list[irq]; t->next; t = t->next)
+                        /* do nothing */;
+                n->next = NULL;
+                t->next = n;
+        }
+
+        /* Fill in n appropriately */
+        n->handler = handler;
+        n->flags = flags;
+        n->dev_id = dev_id;
+        n->devname = devname;
+	spin_unlock_irqrestore(&irqlist_lock, flags);
+	return 0;
+}
+
+void hp300_free_irq(unsigned int irq, void *dev_id)
+{
+        irq_node_t *t;
+        unsigned long flags;
+
+        spin_lock_irqsave(&irqlist_lock, flags);
+
+        t = hp300_irq_list[irq];
+        if (!t)                                   /* no handlers at all for that IRQ */
+        {
+                printk(KERN_ERR "hp300_free_irq: attempt to remove nonexistent handler for IRQ %d\n", irq);
+                spin_unlock_irqrestore(&irqlist_lock, flags);
+		return;
+        }
+
+        if (t->dev_id == dev_id)
+        {                                         /* removing first handler on chain */
+                t->flags = IRQ_FLG_STD;           /* we probably don't really need these */
+                t->dev_id = NULL;
+                t->devname = NULL;
+                t->handler = NULL;                /* frees this irq_node_t */
+                hp300_irq_list[irq] = t->next;
+		spin_unlock_irqrestore(&irqlist_lock, flags);
+		return;
+        }
+
+        /* OK, must be removing from middle of the chain */
+
+        for (t = hp300_irq_list[irq]; t->next && t->next->dev_id != dev_id; t = t->next)
+                /* do nothing */;
+        if (!t->next)
+        {
+                printk(KERN_ERR "hp300_free_irq: attempt to remove nonexistent handler for IRQ %d\n", irq);
+		spin_unlock_irqrestore(&irqlist_lock, flags);
+		return;
+        }
+        /* remove the entry after t: */
+        t->next->flags = IRQ_FLG_STD;
+        t->next->dev_id = NULL;
+	t->next->devname = NULL;
+	t->next->handler = NULL;
+        t->next = t->next->next;
+
+	spin_unlock_irqrestore(&irqlist_lock, flags);
+}
+
+int show_hp300_interrupts(struct seq_file *p, void *v)
+{
+	return 0;
+}
+
+void __init hp300_init_IRQ(void)
+{
+	spin_lock_init(&irqlist_lock);
+}
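
As the comments above explain, every handler on an ipl chain is called in list order and IRQ_FLG_FAST inserts at the head of the chain. A hypothetical usage sketch; the ISR names and dev_id pointers are made up for the illustration:

/* The FIFOless UART asks to be serviced first, the LANCE is appended to
 * the tail, so hp300_int_handler() runs uart_isr() before lance_isr().
 */
static void example_register_ipl5(void)
{
	hp300_request_irq(5, uart_isr, IRQ_FLG_FAST, "uart", &uart_dev);
	hp300_request_irq(5, lance_isr, 0, "lance", &lance_dev);
}

/* Removal is keyed on dev_id, which is why it must be unique per handler. */
static void example_unregister_ipl5(void)
{
	hp300_free_irq(5, &uart_dev);
	hp300_free_irq(5, &lance_dev);
}
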
diff --git a/arch/m68k/hp300/ints.h b/arch/m68k/hp300/ints.h
new file mode 100644
index 0000000..8cfabe2
--- /dev/null
+++ b/arch/m68k/hp300/ints.h
@@ -0,0 +1,9 @@
+extern void hp300_init_IRQ(void);
+extern void (*hp300_handlers[8])(int, void *, struct pt_regs *);
+extern void hp300_free_irq(unsigned int irq, void *dev_id);
+extern int hp300_request_irq(unsigned int irq,
+		irqreturn_t (*handler) (int, void *, struct pt_regs *),
+		unsigned long flags, const char *devname, void *dev_id);
+
+/* number of interrupts, includes 0 (what's that?) */
+#define HP300_NUM_IRQS 8
diff --git a/arch/m68k/hp300/ksyms.c b/arch/m68k/hp300/ksyms.c
new file mode 100644
index 0000000..8202830
--- /dev/null
+++ b/arch/m68k/hp300/ksyms.c
@@ -0,0 +1,9 @@
+/*
+ *  linux/arch/m68k/hp300/ksyms.c
+ *
+ *  Copyright (C) 1998 Philip Blundell <philb@gnu.org>
+ *
+ *  This file contains the HP300-specific kernel symbols.  None yet. :-)
+ */
+
+#include <linux/module.h>
diff --git a/arch/m68k/hp300/reboot.S b/arch/m68k/hp300/reboot.S
new file mode 100644
index 0000000..52eb852
--- /dev/null
+++ b/arch/m68k/hp300/reboot.S
@@ -0,0 +1,16 @@
+/*
+ *  linux/arch/m68k/hp300/reboot.S
+ *
+ *  Copyright (C) 1998 Philip Blundell <philb@gnu.org>
+ *
+ *  Do the dirty work of rebooting the machine.  Basically we need to undo all the
+ *  good stuff that head.S did when we started up.  The caches and MMU must be
+ *  disabled and then we jump back to the PROM.  This is a bit gruesome but we put
+ *  a brave face on it.
+ */
+
+/* XXX Doesn't work yet.  Not sure why and can't be bothered to fix it at the moment. */
+
+	.globl	hp300_reset
+hp300_reset:
+	jmp	hp300_reset
diff --git a/arch/m68k/hp300/time.c b/arch/m68k/hp300/time.c
new file mode 100644
index 0000000..8da5b1b
--- /dev/null
+++ b/arch/m68k/hp300/time.c
@@ -0,0 +1,78 @@
+/*
+ *  linux/arch/m68k/hp300/time.c
+ *
+ *  Copyright (C) 1998 Philip Blundell <philb@gnu.org>
+ *
+ *  This file contains the HP300-specific time handling code.
+ */
+
+#include <asm/ptrace.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+#include <asm/blinken.h>
+#include "ints.h"
+
+/* Clock hardware definitions */
+
+#define CLOCKBASE	0xf05f8000
+
+#define	CLKCR1		0x1
+#define	CLKCR2		0x3
+#define	CLKCR3		CLKCR1
+#define	CLKSR		CLKCR2
+#define	CLKMSB1		0x5
+#define	CLKMSB2		0x9
+#define	CLKMSB3		0xD
+
+/* This is for machines which generate the exact clock. */
+#define USECS_PER_JIFFY (1000000/HZ)
+
+#define INTVAL ((10000 / 4) - 1)
+
+static irqreturn_t hp300_tick(int irq, void *dev_id, struct pt_regs *regs)
+{
+	unsigned long tmp;
+	irqreturn_t (*vector)(int, void *, struct pt_regs *) = dev_id;
+	in_8(CLOCKBASE + CLKSR);
+	asm volatile ("movpw %1@(5),%0" : "=d" (tmp) : "a" (CLOCKBASE));
+	/* Turn off the network and SCSI leds */
+	blinken_leds(0, 0xe0);
+	return vector(irq, NULL, regs);
+}
+
+unsigned long hp300_gettimeoffset(void)
+{
+  /* Read current timer 1 value */
+  unsigned char lsb, msb1, msb2;
+  unsigned short ticks;
+
+  msb1 = in_8(CLOCKBASE + 5);
+  lsb = in_8(CLOCKBASE + 7);
+  msb2 = in_8(CLOCKBASE + 5);
+  if (msb1 != msb2)
+    /* A carry happened while we were reading.  Read it again */
+    lsb = in_8(CLOCKBASE + 7);
+  ticks = INTVAL - ((msb2 << 8) | lsb);
+  return (USECS_PER_JIFFY * ticks) / INTVAL;
+}
+
+void __init hp300_sched_init(irqreturn_t (*vector)(int, void *, struct pt_regs *))
+{
+  out_8(CLOCKBASE + CLKCR2, 0x1);		/* select CR1 */
+  out_8(CLOCKBASE + CLKCR1, 0x1);		/* reset */
+
+  asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
+
+  cpu_request_irq(6, hp300_tick, IRQ_FLG_STD, "timer tick", vector);
+
+  out_8(CLOCKBASE + CLKCR2, 0x1);		/* select CR1 */
+  out_8(CLOCKBASE + CLKCR1, 0x40);		/* enable irq */
+}
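
hp300_gettimeoffset() above reads the 16-bit counter, re-reading the LSB if the MSB changed mid-read, and then scales (INTVAL - counter) ticks into microseconds. A standalone worked example of that arithmetic; HZ = 100 and the counter value are assumptions for the illustration:

#include <stdio.h>

int main(void)
{
	const unsigned long usecs_per_jiffy = 1000000 / 100;	/* HZ = 100 */
	const unsigned long intval = (10000 / 4) - 1;		/* 2499 */
	unsigned short counter = 1250;		/* hypothetical chip reading */
	unsigned long ticks = intval - counter;			/* 1249 */

	/* prints 4997, roughly half of the 10000 microsecond jiffy */
	printf("%lu\n", (usecs_per_jiffy * ticks) / intval);
	return 0;
}
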
diff --git a/arch/m68k/hp300/time.h b/arch/m68k/hp300/time.h
new file mode 100644
index 0000000..8ef9987
--- /dev/null
+++ b/arch/m68k/hp300/time.h
@@ -0,0 +1,4 @@
+extern void hp300_sched_init(irqreturn_t (*vector)(int, void *, struct pt_regs *));
+extern unsigned long hp300_gettimeoffset (void);
+
+
diff --git a/arch/m68k/ifpsp060/CHANGES b/arch/m68k/ifpsp060/CHANGES
new file mode 100644
index 0000000..c1e712d
--- /dev/null
+++ b/arch/m68k/ifpsp060/CHANGES
@@ -0,0 +1,120 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+CHANGES SINCE LAST RELEASE:
+---------------------------
+
+1) "movep" emulation where data was being read from memory
+was reading the intermediate bytes. Emulation now only
+reads the required bytes.
+
+2) "flogn", "flog2", and "flog10" of "1" were setting the
+Inexact FPSR bit. Emulation now does not set Inexact for
+this case.
+
+3) For an opclass three FP instruction where the effective addressing
+mode was pre-decrement or post-increment and the address register
+was A0 or A1, the address register was not being updated as a result
+of the operation. This has been corrected.
+
+4) Beta B.2 version had the following erratum:
+
+	Scenario:
+	---------
+	If {i,d}mem_{read,write}_{byte,word,long}() returns
+	a failing value to the 68060SP, the package ignores
+	this return value and continues with program execution
+	as if it never received a failing value.
+
+	Effect:
+	-------
+	For example, if a user executed "fsin.x ADDR,fp0" where
+	ADDR should cause a "segmentation violation", the memory read
+	requested by the package should return a failing value
+	to the package. Since the package currently ignores this
+	return value, the user program will continue to the
+	next instruction, and the result created in fp0 will be
+	undefined.
+
+	Fix:
+	----
+	This has been fixed in the current release.
+
+	Notes:
+	------
+	Upon receiving a non-zero (failing) return value from
+	a {i,d}mem_{read,write}_{byte,word,long}() "call-out",
+	the package creates a 16-byte access error stack frame
+	from the current exception stack frame and exits
+	through the "call-out" _real_access(). This is the process
+	as described in the MC68060 User's Manual.
+
+	For instruction read access errors, the info stacked is:
+		SR	= SR at time of exception
+		PC	= PC of instruction being emulated
+		VOFF	= $4008 (stack frame format type)
+		ADDRESS	= PC of instruction being emulated
+		FSLW	= FAULT STATUS LONGWORD
+
+	The valid FSLW bits are:
+		bit 27		= 1	(misaligned bit)
+		bit 24		= 1	(read)
+		bit 23		= 0	(write)
+		bit 22:21	= 10	(SIZE = word)
+		bit 20:19	= 00	(TT)
+		bit 18:16	= x10	(TM; x = 1 for supervisor mode)
+		bit 15		= 1	(IO)
+		bit 0		= 1	(Software Emulation Error)
+
+	All other bits are EQUAL TO ZERO and can be set as appropriate by
+	the user in the _real_access() "call-out" stub. The MC68060 User's
+	Manual stated that ONLY "bit 0" would be set. The 060SP attempts to
+	set a few other bits.
+
+	For data read/write access errors, the info stacked is:
+		SR	= SR at time of exception
+		PC	= PC of instruction being emulated
+		VOFF	= $4008 (stack frame format type)
+		ADDRESS	= Address of source or destination operand
+		FSLW	= FAULT STATUS LONGWORD
+
+	The valid FSLW bits are:
+		bit 27		= 0	(misaligned bit)
+		bit 24		= x	(read; 1 if read, 0 if write)
+		bit 23		= x	(write; 1 if write, 0 if read)
+		bit 22:21	= xx	(SIZE; see MC68060 User's Manual)
+		bit 20:19	= 00	(TT)
+		bit 18:16	= x01	(TM; x = 1 for supervisor mode)
+		bit 15		= 0	(IO)
+		bit 0		= 1	(Software Emulation Error)
+
+	All other bits are EQUAL TO ZERO and can be set as appropriate by
+	the user in the _real_access() "call-out" stub. The MC68060 User's
+	Manual stated that ONLY "bit 0" would be set. The 060SP attempts to
+	set a few other bits.
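+
+	As a minimal sketch only (it is not part of this release, and the
+	equate names are illustrative), a _real_access() "call-out" stub
+	could pick the FSLW apart as shown below, assuming the standard
+	16-byte format $4 layout (SR at 0, PC at 2, VOFF at 6, ADDRESS at 8,
+	and FSLW at 12 bytes from the stack pointer):
+
+FSLW_SEE	set	0x00000001	# bit 0:  Software Emulation Error
+FSLW_IO		set	0x00008000	# bit 15: IO
+FSLW_WRITE	set	0x00800000	# bit 23: write access
+FSLW_READ	set	0x01000000	# bit 24: read access
+FSLW_MA		set	0x08000000	# bit 27: misaligned access
+
+_real_access:
+	mov.l	0xc(%sp),%d0		# fetch the stacked FSLW
+	btst	&0,%d0			# Software Emulation Error set?
+	beq.b	real_fault		# no: a true access error
+	.				# yes: handle the emulated access
+real_fault:
+	.				# system-specific bus error handling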
diff --git a/arch/m68k/ifpsp060/MISC b/arch/m68k/ifpsp060/MISC
new file mode 100644
index 0000000..b7e644b
--- /dev/null
+++ b/arch/m68k/ifpsp060/MISC
@@ -0,0 +1,201 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+RELEASE FILE VERSIONS:
+-----------------------
+
+fpsp.sa
+----------
+freal.s     : 2.4
+hdr.fpu     : 2.4
+x_fovfl.s   : 2.16
+x_funfl.s   : 2.19
+x_funsupp.s : 2.27
+x_effadd.s  : 2.21
+x_foperr.s  : 2.9
+x_fsnan.s   : 2.12
+x_finex.s   : 2.14
+x_fdz.s     : 2.5
+x_fline.s   : 2.5
+x_funimp.s  : 2.27
+fsin.s      : 2.6
+ftan.s      : 2.6
+fatan.s     : 2.3
+fasin.s     : 2.3
+facos.s     : 2.5
+fetox.s     : 2.4
+fgetem.s    : 2.5
+fcosh.s     : 2.4
+fsinh.s     : 2.5
+ftanh.s     : 2.3
+flogn.s     : 2.6
+fatanh.s    : 2.4
+flog2.s     : 2.4
+ftwotox.s   : 2.4
+fmovecr.s   : 2.5
+fscale.s    : 2.5
+frem_mod.s  : 2.6
+fkern.s     : 2.6
+fkern2.s    : 2.5
+fgen_except.s: 2.7
+foptbl.s    : 2.3
+fmul.s      : 2.5
+fin.s       : 2.4
+fdiv.s      : 2.5
+fneg.s      : 2.4
+ftst.s      : 2.3
+fint.s      : 2.3
+fintrz.s    : 2.3
+fabs.s      : 2.4
+fcmp.s      : 2.4
+fsglmul.s   : 2.5
+fsgldiv.s   : 2.8
+fadd.s      : 2.6
+fsub.s      : 2.6
+fsqrt.s     : 2.4
+fmisc.s     : 2.3
+fdbcc.s     : 2.8
+ftrapcc.s   : 2.5
+fscc.s      : 2.6
+fmovm.s     : 2.15
+fctrl.s     : 2.6
+fcalc_ea.s  : 2.7
+fmem.s      : 2.9
+fout.s      : 2.9
+ireg.s      : 2.6
+fdenorm.s   : 2.3
+fround.s    : 2.4
+fnorm.s     : 2.3
+foptag_set.s: 2.4
+fresult.s   : 2.3
+fpack.s     : 2.6
+fdecbin.s   : 2.4
+fbindec.s   : 2.5
+fbinstr.s   : 2.3
+faccess.s   : 2.3
+
+pfpsp.sa
+----------
+freal.s     : 2.4
+hdr.fpu     : 2.4
+x_fovfl.s   : 2.16
+x_funfl.s   : 2.19
+x_funsupp.s : 2.27
+x_effadd.s  : 2.21
+x_foperr.s  : 2.9
+x_fsnan.s   : 2.12
+x_finex.s   : 2.14
+x_fdz.s     : 2.5
+x_fline2.s  : 2.3
+fcalc_ea.s  : 2.7
+foptbl2.s   : 2.4
+fmovm.s     : 2.15
+fctrl.s     : 2.6
+fmisc.s     : 2.3
+fdenorm.s   : 2.3
+fround.s    : 2.4
+fnorm.s     : 2.3
+foptag_set.s: 2.4
+fresult.s   : 2.3
+fout.s      : 2.9
+fmul.s      : 2.5
+fin.s       : 2.4
+fdiv.s      : 2.5
+fneg.s      : 2.4
+ftst.s      : 2.3
+fint.s      : 2.3
+fintrz.s    : 2.3
+fabs.s      : 2.4
+fcmp.s      : 2.4
+fsglmul.s   : 2.5
+fsgldiv.s   : 2.8
+fadd.s      : 2.6
+fsub.s      : 2.6
+fsqrt.s     : 2.4
+ireg.s      : 2.6
+fpack.s     : 2.6
+fdecbin.s   : 2.4
+fbindec.s   : 2.5
+fbinstr.s   : 2.3
+faccess.s   : 2.3
+
+fplsp.sa
+----------
+lfptop.s    : 2.3
+hdr.fpu     : 2.4
+fsin.s      : 2.6
+ftan.s      : 2.6
+fatan.s     : 2.3
+fasin.s     : 2.3
+facos.s     : 2.5
+fetox.s     : 2.4
+fgetem.s    : 2.5
+fcosh.s     : 2.4
+fsinh.s     : 2.5
+ftanh.s     : 2.3
+flogn.s     : 2.6
+fatanh.s    : 2.4
+flog2.s     : 2.4
+ftwotox.s   : 2.4
+fscale.s    : 2.5
+frem_mod.s  : 2.6
+l_support.s : 2.15
+fnorm.s     : 2.3
+
+isp.sa
+----------
+ireal.s     : 2.4
+hdr.int     : 2.4
+x_uieh.s    : 2.13
+icalc_ea.s  : 2.11
+imovep.s    : 2.8
+ichk2cmp2.s : 2.6
+idiv64.s    : 2.10
+imul64.s    :
+icas2.s     : 2.11
+icas.s      : 2.12
+icas2_core.s: 2.6
+icas_core.s : 2.6
+
+ilsp.sa
+----------
+litop.s     : 2.2
+l_idiv64.s  : 2.8
+l_imul64.s  : 2.6
+l_ichk2cmp2.s: 2.5
+
+ex. files
+----------
+wrk/fskeleton.s: 2.2
+wrk/iskeleton.s: 2.2
+wrk/os.s    : 2.1
+
+tests
+----------
+itest.s     : 2.2
+ftest.s     : 2.1
diff --git a/arch/m68k/ifpsp060/Makefile b/arch/m68k/ifpsp060/Makefile
new file mode 100644
index 0000000..2fe8472
--- /dev/null
+++ b/arch/m68k/ifpsp060/Makefile
@@ -0,0 +1,10 @@
+# Makefile for 680x0 Linux 68060 integer/floating point support package
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "README.legal" in the main directory of this archive
+# for more details.
+
+obj-y := fskeleton.o iskeleton.o os.o
+
+EXTRA_AFLAGS := -traditional
+EXTRA_LDFLAGS := -x
diff --git a/arch/m68k/ifpsp060/README b/arch/m68k/ifpsp060/README
new file mode 100644
index 0000000..e3bced4
--- /dev/null
+++ b/arch/m68k/ifpsp060/README
@@ -0,0 +1,71 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Files in this directory:
+-------------------------
+
+fpsp.sa		Full FP Kernel Module - hex image
+fpsp.s		Full FP Kernel Module - source code
+fpsp.doc	Full FP Kernel Module - on-line documentation
+
+pfpsp.sa	Partial FP Kernel Module - hex image
+pfpsp.s		Partial FP Kernel Module - source code
+
+fplsp.sa	FP Library Module - hex image
+fplsp.s		FP Library Module - source code
+fplsp.doc	FP Library Module - on-line documentation
+
+isp.sa		Integer Unimplemented Kernel Module - hex image
+isp.s		Integer Unimplemented Kernel Module - source code
+isp.doc		Integer Unimplemented Kernel Module - on-line doc
+
+ilsp.sa		Integer Unimplemented Library Module - hex image
+ilsp.s		Integer Unimplemented Library Module - source code
+ilsp.doc	Integer Unimplemented Library Module - on-line doc
+
+fskeleton.s	Sample Call-outs needed by fpsp.sa and pfpsp.sa
+
+iskeleton.s	Sample Call-outs needed by isp.sa
+
+os.s		Sample Call-outs needed by fpsp.sa, pfpsp.sa, and isp.sa
+
+ftest.sa	Simple test program to test that {p}fpsp.sa
+		was connected properly; hex image
+ftest.s		above test; source code
+
+itest.sa	Simple test program to test that isp.sa was
+		connected properly; hex image
+itest.s		above test; source code
+
+test.doc	on-line documentation for {i,f}test.sa
+
+README		This file
+
+ERRATA		Known errata for this release
+
+MISC		Release file version numbers
diff --git a/arch/m68k/ifpsp060/TEST.DOC b/arch/m68k/ifpsp060/TEST.DOC
new file mode 100644
index 0000000..5e5900c
--- /dev/null
+++ b/arch/m68k/ifpsp060/TEST.DOC
@@ -0,0 +1,208 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+68060 SOFTWARE PACKAGE (Kernel version) SIMPLE TESTS
+-----------------------------------------------------
+
+The files itest.sa and ftest.sa contain simple tests to check
+the state of the 68060ISP and 68060FPSP once they have been installed.
+
+Release file format:
+--------------------
+The release files itest.sa and ftest.sa are essentially
+hexadecimal images of the actual tests. This format is the
+ONLY format that will be supported. The hex images were created
+by assembling the source code and then converting the resulting
+binary output images into ASCII text files. The hexadecimal
+numbers are listed using the Motorola Assembly syntax assembler
+directive "dc.l" (define constant longword). The files can be
+converted to other assembly syntaxes by using any word processor
+with a global search and replace function.
+
+To assist in assembling and linking these modules with other modules,
+the installer should add symbolic labels to the top of the files.
+This will allow the calling routines to access the entry points
+of these packages.
+
+The source code itest.s and ftest.s have been included but only
+for documentation purposes.
+
+Release file structure:
+-----------------------
+
+(top of module)
+	-----------------
+	|		| - 128 byte-sized section
+   (1)  |   Call-Out	| - 4 bytes per entry (user fills these in)
+	|		|
+	-----------------
+	|		| - 8 bytes per entry
+   (2)  | Entry Point	| - user does "bsr" or "jsr" to this address
+	|		|
+	-----------------
+	|		| - code section
+   (3)  ~		~
+	|		|
+	-----------------
+(bottom of module)
+
+The first section of this module is the "Call-out" section. This section
+is NOT INCLUDED in {i,f}test.sa (an example "Call-out" section is provided at
+the end of this file). The purpose of this section is to allow the test
+routines to reference external printing functions that must be provided
+by the host operating system. This section MUST be exactly 128 bytes in
+size. There are 32 fields, each 4 bytes in size. Each field corresponds
+to a function required by the test packages (these functions and their
+location are listed in "68060{ISP,FPSP}-TEST call-outs" below). Each field
+entry should contain the address of the corresponding function RELATIVE to
+the starting address of the "call-out" section. The "Call-out" section must
+sit adjacent to the {i,f}test.sa image in memory. Since itest.sa and ftest.sa
+are individual tests, they each require their own "Call-out" sections.
+
+The second section, the "Entry-point" section, is used by external routines
+to access the test routines. Since the {i,f}test.sa hex files contain
+no symbol names, this section contains function entry points that are fixed
+with respect to the top of the package. The currently defined entry-points
+are listed in section "68060{ISP,FPSP}-TEST entry points" below. A calling
+routine would simply execute a "bsr" or "jsr" that jumped to the selected
+function entry-point.
+
+For example, to run the 060ISP test, write a program that includes the
+itest.sa data and execute something similar to:
+
+	bsr	_060ISP_TEST+128+0
+
+(_060ISP_TEST is the starting address of the "Call-out" section; the "Call-out"
+section is 128 bytes long; and the 68060ISP test entry point is located
+0 bytes from the top of the "Entry-point" section.)
+
+The third section is the code section. After entering through an "Entry-point",
+the entry code jumps to the appropriate test code within the code section.
+
+68060ISP-TEST Call-outs:
+------------------------
+0x0: _print_string()
+0x4: _print_number()
+
+68060FPSP-TEST Call-outs:
+-------------------------
+0x0: _print_string()
+0x4: _print_number()
+
+The test packages call _print_string() and _print_number()
+as subroutines and expect the main program to print a string
+or a number to a file or to the screen.
+In "C"-like fashion, the test program calls:
+
+	print_string("Test passed");
+
+		or
+
+	print_number(20);
+
+For _print_string(), the test programs pass a longword address
+of the string on the stack. For _print_number(), the test programs pass
+a longword number to be printed.
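+
+As an illustration only (this sketch is not part of the release), the
+call-out stubs might fetch their single longword argument from just
+above the return address pushed by the test's subroutine call and then
+hand it to whatever output routine the system provides:
+
+_print_str:
+	mov.l	0x4(%sp),%a0		# a0 = address of the string
+	.				# system-specific output of the string
+	rts
+
+_print_num:
+	mov.l	0x4(%sp),%d0		# d0 = longword number to print
+	.				# system-specific output of the number
+	rts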
+
+For debugging purposes, after the main program performs a "print"
+for a test package, it should flush the output so that it is not
+buffered. In this way, if the test program crashes, at least the
+statements printed before the crash will be seen.
+
+68060ISP-TEST Entry-points:
+---------------------------
+0x0: integer test
+
+68060FPSP-TEST Entry-points:
+----------------------------
+0x00: main fp test
+0x08: FP unimplemented test
+0x10: FP enabled snan/operr/ovfl/unfl/dz/inex
+
+The floating-point unit test has 3 entry points which will require
+3 different calls to the package if each of the three following tests
+is desired:
+
+main fp test: tests (1) unimp effective address exception
+		    (2) unsupported data type exceptions
+		    (3) non-maskable overflow/underflow exceptions
+
+FP unimplemented: tests the FP unimplemented exception. This one is
+		  separate from the previous tests for systems that don't
+		  want FP unimplemented instructions.
+
+FP enabled: tests enabled snan/operr/ovfl/unfl/dz/inex.
+	    Basically, it enables each of these exceptions and forces
+	    each using an implemented FP instruction. This process
+	    exercises _fpsp_{snan,operr,ovfl,unfl,dz,inex}() and
+	    _real_{snan,operr,ovfl,unfl,dz,inex}(). The test expects
+	    _real_XXXX() to do nothing except clear the exception
+	    and "rte". If a system's _real_XXXX() handler creates an
+	    alternate result, the test will print "failed", but this
+	    is acceptable.
+
+Miscellaneous:
+--------------
+Again, itest.sa and ftest.sa are simple tests and do not thoroughly
+test all 68060SP connections. For example, they do not test connections
+to _real_access(), _real_trace(), _real_trap(), etc., because these
+will be implemented differently on each system and the test packages
+must remain system independent.
+
+Example test package set-up:
+----------------------------
+_print_str:
+	.			# provided by system
+	rts
+
+_print_num:
+	.			# provided by system
+	rts
+
+	.
+	.
+	bsr	_060FPSP_TEST+128+0
+	.
+	.
+	rts
+
+# beginning of "Call-out" section; provided by integrator.
+# MUST be 128 bytes long.
+_060FPSP_TEST:
+	long	_print_str - _060FPSP_TEST
+	long	_print_num - _060FPSP_TEST
+	space	120
+
+# ftest.sa starts here; start of "Entry-point" section.
+	long	0x60ff0000, 0x00002346
+	long	0x60ff0000, 0x00018766
+	long	0x60ff0000, 0x00023338
+	long	0x24377299, 0xab2643ea
+		.
+		.
+		.
diff --git a/arch/m68k/ifpsp060/fplsp.doc b/arch/m68k/ifpsp060/fplsp.doc
new file mode 100644
index 0000000..fb637c4
--- /dev/null
+++ b/arch/m68k/ifpsp060/fplsp.doc
@@ -0,0 +1,231 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+68060 FLOATING-POINT SOFTWARE PACKAGE (Library version)
+--------------------------------------------------------
+
+The file fplsp.sa contains the "Library version" of the
+68060SP Floating-Point Software Package. The routines
+included in this module can be used to emulate the
+FP instructions not implemented in 68060 hardware. These
+instructions normally take exception vector #11
+"FP Unimplemented Instruction".
+
+By re-compiling a program that uses these instructions so that it
+makes subroutine calls in place of the unimplemented
+instructions, the program can avoid the overhead associated
+with taking the exception.
+
+Release file format:
+--------------------
+The file fplsp.sa is essentially a hexadecimal image of the
+release package. This is the ONLY format which will be supported.
+The hex image was created by assembling the source code and
+then converting the resulting binary output image into an
+ASCII text file. The hexadecimal numbers are listed
+using the Motorola Assembly Syntax assembler directive "dc.l"
+(define constant longword). The file can be converted to other
+assembly syntaxes by using any word processor with a global
+search and replace function.
+
+To assist in assembling and linking this module with other modules,
+the installer should add a symbolic label to the top of the file.
+This will allow calling routines to access the entry points
+of this package.
+
+The source code fplsp.s has also been included but only for
+documentation purposes.
+
+Release file structure:
+-----------------------
+The file fplsp.sa contains an "Entry-Point" section and a
+code section. The FPLSP has no "Call-Out" section. The first section
+is the "Entry-Point" section. In order to access a function in the
+package, a program must "bsr" or "jsr" to the location listed
+below in "68060FPLSP entry points" that corresponds to the desired
+function. A branch instruction located at the selected entry point
+within the package will then enter the correct emulation code routine.
+
+The entry point addresses at the beginning of the package will remain
+fixed so that a program calling the routines will not have to be
+re-compiled with every new 68060FPLSP release.
+
+There are 3 entry-points for each instruction type: single precision,
+double precision, and extended precision.
+
+As an example, the "fsin" library instruction can be passed an
+extended precision operand if the program executes:
+
+# fsin.x fp0
+
+	fmovm.x	&0x01,-(%sp)	# pass operand on stack
+	bsr.l	_060FPLSP_TOP+0x1a8 # branch to fsin routine
+	add.l	&0xc,%sp	# clear operand from stack
+
+Upon return, fp0 holds the correct result. The FPSR is
+set correctly. The FPCR is unchanged. The FPIAR is undefined.
+
+Another example. This time, a dyadic operation:
+
+# frem.s %fp1,%fp0
+
+	fmov.s	%fp1,-(%sp)	# pass src operand
+	fmov.s	%fp0,-(%sp)	# pass dst operand
+	bsr.l	_060FPLSP_TOP+0x168 # branch to frem routine
+	addq.l	&0x8,%sp	# clear operands from stack
+
+Again, the result is returned in fp0. Note that BOTH operands
+are passed in single precision format.
+
+Exception reporting:
+--------------------
+The package takes exceptions according to the FPCR value upon subroutine
+entry. If an exception should be reported, then the package forces
+this exception using implemented floating-point instructions.
+For example, if the instruction being emulated should cause a
+floating-point Operand Error exception, then the library routine
+executes an FMUL of a zero and an infinity to force the OPERR
+exception. Although the FPIAR will be undefined for the enabled
+Operand Error exception handler, the user will at least be able
+to record that the event occurred.
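+
+As a sketch of that idea only (this is not code from the package, and
+the labels are illustrative), forcing an enabled OPERR with implemented
+instructions might look like:
+
+OPERR_ZERO:
+	long	0x00000000,0x00000000,0x00000000 # +0.0, extended precision
+OPERR_INF:
+	long	0x7fff0000,0x00000000,0x00000000 # +infinity, extended precision
+
+force_operr:
+	fmov.x	OPERR_ZERO(%pc),%fp0	# fp0 = +0.0
+	fmul.x	OPERR_INF(%pc),%fp0	# 0 x infinity is invalid: OPERR
+	rts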
+
+Miscellaneous:
+--------------
+The package does not attempt to correctly emulate instructions
+with Signalling NAN inputs. Use of SNANs should be avoided with
+this package.
+
+The fabs/fadd/fdiv/fint/fintrz/fmul/fneg/fsqrt/fsub entry points
+are provided for the convenience of older compilers that make
+subroutine calls for all fp instructions. The code does NOT emulate
+the instruction but rather simply executes it.
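+
+For instance, assuming the same dyadic single-precision calling
+convention as the frem.s example above (the 0x258 offset comes from the
+entry-point table below), an add could be routed through the library as:
+
+# fadd.s %fp1,%fp0 via the library
+
+	fmov.s	%fp1,-(%sp)	# pass src operand
+	fmov.s	%fp0,-(%sp)	# pass dst operand
+	bsr.l	_060FPLSP_TOP+0x258 # branch to _060LSP__fadds_
+	addq.l	&0x8,%sp	# clear operands from stack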
+
+68060FPLSP entry points:
+------------------------
+_060FPLSP_TOP:
+0x000:	_060LSP__facoss_
+0x008:	_060LSP__facosd_
+0x010:	_060LSP__facosx_
+0x018:	_060LSP__fasins_
+0x020:	_060LSP__fasind_
+0x028:	_060LSP__fasinx_
+0x030:	_060LSP__fatans_
+0x038:	_060LSP__fatand_
+0x040:	_060LSP__fatanx_
+0x048:	_060LSP__fatanhs_
+0x050:	_060LSP__fatanhd_
+0x058:	_060LSP__fatanhx_
+0x060:	_060LSP__fcoss_
+0x068:	_060LSP__fcosd_
+0x070:	_060LSP__fcosx_
+0x078:	_060LSP__fcoshs_
+0x080:	_060LSP__fcoshd_
+0x088:	_060LSP__fcoshx_
+0x090:	_060LSP__fetoxs_
+0x098:	_060LSP__fetoxd_
+0x0a0:	_060LSP__fetoxx_
+0x0a8:	_060LSP__fetoxm1s_
+0x0b0:	_060LSP__fetoxm1d_
+0x0b8:	_060LSP__fetoxm1x_
+0x0c0:	_060LSP__fgetexps_
+0x0c8:	_060LSP__fgetexpd_
+0x0d0:	_060LSP__fgetexpx_
+0x0d8:	_060LSP__fgetmans_
+0x0e0:	_060LSP__fgetmand_
+0x0e8:	_060LSP__fgetmanx_
+0x0f0:	_060LSP__flog10s_
+0x0f8:	_060LSP__flog10d_
+0x100:	_060LSP__flog10x_
+0x108:	_060LSP__flog2s_
+0x110:	_060LSP__flog2d_
+0x118:	_060LSP__flog2x_
+0x120:	_060LSP__flogns_
+0x128:	_060LSP__flognd_
+0x130:	_060LSP__flognx_
+0x138:	_060LSP__flognp1s_
+0x140:	_060LSP__flognp1d_
+0x148:	_060LSP__flognp1x_
+0x150:	_060LSP__fmods_
+0x158:	_060LSP__fmodd_
+0x160:	_060LSP__fmodx_
+0x168:	_060LSP__frems_
+0x170:	_060LSP__fremd_
+0x178:	_060LSP__fremx_
+0x180:	_060LSP__fscales_
+0x188:	_060LSP__fscaled_
+0x190:	_060LSP__fscalex_
+0x198:	_060LSP__fsins_
+0x1a0:	_060LSP__fsind_
+0x1a8:	_060LSP__fsinx_
+0x1b0:	_060LSP__fsincoss_
+0x1b8:	_060LSP__fsincosd_
+0x1c0:	_060LSP__fsincosx_
+0x1c8:	_060LSP__fsinhs_
+0x1d0:	_060LSP__fsinhd_
+0x1d8:	_060LSP__fsinhx_
+0x1e0:	_060LSP__ftans_
+0x1e8:	_060LSP__ftand_
+0x1f0:	_060LSP__ftanx_
+0x1f8:	_060LSP__ftanhs_
+0x200:	_060LSP__ftanhd_
+0x208:	_060LSP__ftanhx_
+0x210:	_060LSP__ftentoxs_
+0x218:	_060LSP__ftentoxd_
+0x220:	_060LSP__ftentoxx_
+0x228:	_060LSP__ftwotoxs_
+0x230:	_060LSP__ftwotoxd_
+0x238:	_060LSP__ftwotoxx_
+
+0x240:	_060LSP__fabss_
+0x248:	_060LSP__fabsd_
+0x250:	_060LSP__fabsx_
+0x258:	_060LSP__fadds_
+0x260:	_060LSP__faddd_
+0x268:	_060LSP__faddx_
+0x270:	_060LSP__fdivs_
+0x278:	_060LSP__fdivd_
+0x280:	_060LSP__fdivx_
+0x288:	_060LSP__fints_
+0x290:	_060LSP__fintd_
+0x298:	_060LSP__fintx_
+0x2a0:	_060LSP__fintrzs_
+0x2a8:	_060LSP__fintrzd_
+0x2b0:	_060LSP__fintrzx_
+0x2b8:	_060LSP__fmuls_
+0x2c0:	_060LSP__fmuld_
+0x2c8:	_060LSP__fmulx_
+0x2d0:	_060LSP__fnegs_
+0x2d8:	_060LSP__fnegd_
+0x2e0:	_060LSP__fnegx_
+0x2e8:	_060LSP__fsqrts_
+0x2f0:	_060LSP__fsqrtd_
+0x2f8:	_060LSP__fsqrtx_
+0x300:	_060LSP__fsubs_
+0x308:	_060LSP__fsubd_
+0x310:	_060LSP__fsubx_
diff --git a/arch/m68k/ifpsp060/fplsp.sa b/arch/m68k/ifpsp060/fplsp.sa
new file mode 100644
index 0000000..8826df0
--- /dev/null
+++ b/arch/m68k/ifpsp060/fplsp.sa
@@ -0,0 +1,1946 @@
+	dc.l	$60ff0000,$238e0000,$60ff0000,$24200000
+	dc.l	$60ff0000,$24b60000,$60ff0000,$11060000
+	dc.l	$60ff0000,$11980000,$60ff0000,$122e0000
+	dc.l	$60ff0000,$0f160000,$60ff0000,$0fa80000
+	dc.l	$60ff0000,$103e0000,$60ff0000,$12ae0000
+	dc.l	$60ff0000,$13400000,$60ff0000,$13d60000
+	dc.l	$60ff0000,$05ae0000,$60ff0000,$06400000
+	dc.l	$60ff0000,$06d60000,$60ff0000,$213e0000
+	dc.l	$60ff0000,$21d00000,$60ff0000,$22660000
+	dc.l	$60ff0000,$16160000,$60ff0000,$16a80000
+	dc.l	$60ff0000,$173e0000,$60ff0000,$0aee0000
+	dc.l	$60ff0000,$0b800000,$60ff0000,$0c160000
+	dc.l	$60ff0000,$24a60000,$60ff0000,$25380000
+	dc.l	$60ff0000,$25ce0000,$60ff0000,$26660000
+	dc.l	$60ff0000,$26f80000,$60ff0000,$278e0000
+	dc.l	$60ff0000,$1d160000,$60ff0000,$1da80000
+	dc.l	$60ff0000,$1e3e0000,$60ff0000,$1ed60000
+	dc.l	$60ff0000,$1f680000,$60ff0000,$1ffe0000
+	dc.l	$60ff0000,$1b0e0000,$60ff0000,$1ba00000
+	dc.l	$60ff0000,$1c360000,$60ff0000,$08860000
+	dc.l	$60ff0000,$09180000,$60ff0000,$09ae0000
+	dc.l	$60ff0000,$2bf00000,$60ff0000,$2ca40000
+	dc.l	$60ff0000,$2d580000,$60ff0000,$29980000
+	dc.l	$60ff0000,$2a4c0000,$60ff0000,$2b000000
+	dc.l	$60ff0000,$2e000000,$60ff0000,$2eb40000
+	dc.l	$60ff0000,$2f680000,$60ff0000,$029e0000
+	dc.l	$60ff0000,$03300000,$60ff0000,$03c60000
+	dc.l	$60ff0000,$27660000,$60ff0000,$27fe0000
+	dc.l	$60ff0000,$289a0000,$60ff0000,$061e0000
+	dc.l	$60ff0000,$06b00000,$60ff0000,$07460000
+	dc.l	$60ff0000,$12ee0000,$60ff0000,$13800000
+	dc.l	$60ff0000,$14160000,$60ff0000,$0b760000
+	dc.l	$60ff0000,$0c080000,$60ff0000,$0c9e0000
+	dc.l	$60ff0000,$18460000,$60ff0000,$18d80000
+	dc.l	$60ff0000,$196e0000,$60ff0000,$16560000
+	dc.l	$60ff0000,$16e80000,$60ff0000,$177e0000
+	dc.l	$60ff0000,$72fe0000,$60ff0000,$72fe0000
+	dc.l	$60ff0000,$72fe0000,$60ff0000,$71be0000
+	dc.l	$60ff0000,$71d40000,$60ff0000,$71ea0000
+	dc.l	$60ff0000,$72840000,$60ff0000,$729a0000
+	dc.l	$60ff0000,$72b00000,$60ff0000,$72fe0000
+	dc.l	$60ff0000,$72fe0000,$60ff0000,$72fe0000
+	dc.l	$60ff0000,$72fe0000,$60ff0000,$72fe0000
+	dc.l	$60ff0000,$72fe0000,$60ff0000,$71f20000
+	dc.l	$60ff0000,$72080000,$60ff0000,$721e0000
+	dc.l	$60ff0000,$72860000,$60ff0000,$72860000
+	dc.l	$60ff0000,$72860000,$60ff0000,$72860000
+	dc.l	$60ff0000,$72860000,$60ff0000,$72860000
+	dc.l	$60ff0000,$71600000,$60ff0000,$71760000
+	dc.l	$60ff0000,$718c0000,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$40c62d38,$d3d64634,$3d6f90ae,$b1e75cc7
+	dc.l	$40000000,$c90fdaa2,$2168c235,$00000000
+	dc.l	$3fff0000,$c90fdaa2,$2168c235,$00000000
+	dc.l	$3fe45f30,$6dc9c883,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$0000f22e,$44000008,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$00006c76,$1d40ff4e
+	dc.l	$120002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$4a016608,$61ff0000,$2ddc6030,$0c010001
+	dc.l	$660861ff,$00007124,$60220c01,$00026608
+	dc.l	$61ff0000,$6d226014,$0c010003,$660861ff
+	dc.l	$00006f4c,$600661ff,$00002f8e,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+	dc.l	$4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+	dc.l	$ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+	dc.l	$f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+	dc.l	$61ff0000,$6bdc1d40,$ff4e1200,$02ae00ff
+	dc.l	$00ffff64,$4280102e,$ff631d41,$ff4e4a01
+	dc.l	$660861ff,$00002d3e,$60300c01,$00016608
+	dc.l	$61ff0000,$70866022,$0c010002,$660861ff
+	dc.l	$00006c84,$60140c01,$00036608,$61ff0000
+	dc.l	$6eae6006,$61ff0000,$2ef04cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$000041ee
+	dc.l	$ff6c216e,$00080000,$216e000c,$0004216e
+	dc.l	$00100008,$61ff0000,$6b381d40,$ff4e1200
+	dc.l	$02ae00ff,$00ffff64,$4280102e,$ff634a01
+	dc.l	$660861ff,$00002c9e,$60300c01,$00016608
+	dc.l	$61ff0000,$6fe66022,$0c010002,$660861ff
+	dc.l	$00006be4,$60140c01,$00036608,$61ff0000
+	dc.l	$6e0e6006,$61ff0000,$2e504cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+	dc.l	$44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+	dc.l	$00006a9e,$1d40ff4e,$120002ae,$00ff00ff
+	dc.l	$ff644280,$102eff63,$4a016608,$61ff0000
+	dc.l	$2c0e6030,$0c010001,$660861ff,$00006fc8
+	dc.l	$60220c01,$00026608,$61ff0000,$6b4a6014
+	dc.l	$0c010003,$660861ff,$00006d74,$600661ff
+	dc.l	$00002dbc,$4cee0303,$ff9cf22e,$9800ff60
+	dc.l	$f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+	dc.l	$0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+	dc.l	$f23c9000,$00000000,$f22e5400,$0008f22e
+	dc.l	$6800ff6c,$41eeff6c,$61ff0000,$6a041d40
+	dc.l	$ff4e1200,$02ae00ff,$00ffff64,$4280102e
+	dc.l	$ff631d41,$ff4e4a01,$660861ff,$00002b70
+	dc.l	$60300c01,$00016608,$61ff0000,$6f2a6022
+	dc.l	$0c010002,$660861ff,$00006aac,$60140c01
+	dc.l	$00036608,$61ff0000,$6cd66006,$61ff0000
+	dc.l	$2d1e4cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$000041ee,$ff6c216e,$00080000
+	dc.l	$216e000c,$0004216e,$00100008,$61ff0000
+	dc.l	$69601d40,$ff4e1200,$02ae00ff,$00ffff64
+	dc.l	$4280102e,$ff634a01,$660861ff,$00002ad0
+	dc.l	$60300c01,$00016608,$61ff0000,$6e8a6022
+	dc.l	$0c010002,$660861ff,$00006a0c,$60140c01
+	dc.l	$00036608,$61ff0000,$6c366006,$61ff0000
+	dc.l	$2c7e4cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$0000f22e,$44000008,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$000068c6,$1d40ff4e
+	dc.l	$120002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$4a016608,$61ff0000,$4e686030,$0c010001
+	dc.l	$660861ff,$00006d74,$60220c01,$00026608
+	dc.l	$61ff0000,$6d946014,$0c010003,$660861ff
+	dc.l	$00006b9c,$600661ff,$00004f14,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+	dc.l	$4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+	dc.l	$ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+	dc.l	$f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+	dc.l	$61ff0000,$682c1d40,$ff4e1200,$02ae00ff
+	dc.l	$00ffff64,$4280102e,$ff631d41,$ff4e4a01
+	dc.l	$660861ff,$00004dca,$60300c01,$00016608
+	dc.l	$61ff0000,$6cd66022,$0c010002,$660861ff
+	dc.l	$00006cf6,$60140c01,$00036608,$61ff0000
+	dc.l	$6afe6006,$61ff0000,$4e764cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$000041ee
+	dc.l	$ff6c216e,$00080000,$216e000c,$0004216e
+	dc.l	$00100008,$61ff0000,$67881d40,$ff4e1200
+	dc.l	$02ae00ff,$00ffff64,$4280102e,$ff634a01
+	dc.l	$660861ff,$00004d2a,$60300c01,$00016608
+	dc.l	$61ff0000,$6c366022,$0c010002,$660861ff
+	dc.l	$00006c56,$60140c01,$00036608,$61ff0000
+	dc.l	$6a5e6006,$61ff0000,$4dd64cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+	dc.l	$44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+	dc.l	$000066ee,$1d40ff4e,$120002ae,$00ff00ff
+	dc.l	$ff644280,$102eff63,$4a016608,$61ff0000
+	dc.l	$59b26030,$0c010001,$660861ff,$00006b9c
+	dc.l	$60220c01,$00026608,$61ff0000,$6bf26014
+	dc.l	$0c010003,$660861ff,$000069c4,$600661ff
+	dc.l	$00005ad4,$4cee0303,$ff9cf22e,$9800ff60
+	dc.l	$f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+	dc.l	$0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+	dc.l	$f23c9000,$00000000,$f22e5400,$0008f22e
+	dc.l	$6800ff6c,$41eeff6c,$61ff0000,$66541d40
+	dc.l	$ff4e1200,$02ae00ff,$00ffff64,$4280102e
+	dc.l	$ff631d41,$ff4e4a01,$660861ff,$00005914
+	dc.l	$60300c01,$00016608,$61ff0000,$6afe6022
+	dc.l	$0c010002,$660861ff,$00006b54,$60140c01
+	dc.l	$00036608,$61ff0000,$69266006,$61ff0000
+	dc.l	$5a364cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$000041ee,$ff6c216e,$00080000
+	dc.l	$216e000c,$0004216e,$00100008,$61ff0000
+	dc.l	$65b01d40,$ff4e1200,$02ae00ff,$00ffff64
+	dc.l	$4280102e,$ff634a01,$660861ff,$00005874
+	dc.l	$60300c01,$00016608,$61ff0000,$6a5e6022
+	dc.l	$0c010002,$660861ff,$00006ab4,$60140c01
+	dc.l	$00036608,$61ff0000,$68866006,$61ff0000
+	dc.l	$59964cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$0000f22e,$44000008,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$00006516,$1d40ff4e
+	dc.l	$120002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$4a016608,$61ff0000,$46c46030,$0c010001
+	dc.l	$660861ff,$000069c4,$60220c01,$00026608
+	dc.l	$61ff0000,$6a246014,$0c010003,$660861ff
+	dc.l	$000067ec,$600661ff,$00004948,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+	dc.l	$4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+	dc.l	$ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+	dc.l	$f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+	dc.l	$61ff0000,$647c1d40,$ff4e1200,$02ae00ff
+	dc.l	$00ffff64,$4280102e,$ff631d41,$ff4e4a01
+	dc.l	$660861ff,$00004626,$60300c01,$00016608
+	dc.l	$61ff0000,$69266022,$0c010002,$660861ff
+	dc.l	$00006986,$60140c01,$00036608,$61ff0000
+	dc.l	$674e6006,$61ff0000,$48aa4cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$000041ee
+	dc.l	$ff6c216e,$00080000,$216e000c,$0004216e
+	dc.l	$00100008,$61ff0000,$63d81d40,$ff4e1200
+	dc.l	$02ae00ff,$00ffff64,$4280102e,$ff634a01
+	dc.l	$660861ff,$00004586,$60300c01,$00016608
+	dc.l	$61ff0000,$68866022,$0c010002,$660861ff
+	dc.l	$000068e6,$60140c01,$00036608,$61ff0000
+	dc.l	$66ae6006,$61ff0000,$480a4cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+	dc.l	$44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+	dc.l	$0000633e,$1d40ff4e,$120002ae,$00ff00ff
+	dc.l	$ff644280,$102eff63,$4a016608,$61ff0000
+	dc.l	$49c46030,$0c010001,$660861ff,$000067ec
+	dc.l	$60220c01,$00026608,$61ff0000,$68546014
+	dc.l	$0c010003,$660861ff,$00006614,$600661ff
+	dc.l	$00004afa,$4cee0303,$ff9cf22e,$9800ff60
+	dc.l	$f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+	dc.l	$0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+	dc.l	$f23c9000,$00000000,$f22e5400,$0008f22e
+	dc.l	$6800ff6c,$41eeff6c,$61ff0000,$62a41d40
+	dc.l	$ff4e1200,$02ae00ff,$00ffff64,$4280102e
+	dc.l	$ff631d41,$ff4e4a01,$660861ff,$00004926
+	dc.l	$60300c01,$00016608,$61ff0000,$674e6022
+	dc.l	$0c010002,$660861ff,$000067b6,$60140c01
+	dc.l	$00036608,$61ff0000,$65766006,$61ff0000
+	dc.l	$4a5c4cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$000041ee,$ff6c216e,$00080000
+	dc.l	$216e000c,$0004216e,$00100008,$61ff0000
+	dc.l	$62001d40,$ff4e1200,$02ae00ff,$00ffff64
+	dc.l	$4280102e,$ff634a01,$660861ff,$00004886
+	dc.l	$60300c01,$00016608,$61ff0000,$66ae6022
+	dc.l	$0c010002,$660861ff,$00006716,$60140c01
+	dc.l	$00036608,$61ff0000,$64d66006,$61ff0000
+	dc.l	$49bc4cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$0000f22e,$44000008,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$00006166,$1d40ff4e
+	dc.l	$120002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$4a016608,$61ff0000,$391c6030,$0c010001
+	dc.l	$660861ff,$00006614,$60220c01,$00026608
+	dc.l	$61ff0000,$66b86014,$0c010003,$660861ff
+	dc.l	$0000643c,$600661ff,$00003b28,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+	dc.l	$4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+	dc.l	$ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+	dc.l	$f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+	dc.l	$61ff0000,$60cc1d40,$ff4e1200,$02ae00ff
+	dc.l	$00ffff64,$4280102e,$ff631d41,$ff4e4a01
+	dc.l	$660861ff,$0000387e,$60300c01,$00016608
+	dc.l	$61ff0000,$65766022,$0c010002,$660861ff
+	dc.l	$0000661a,$60140c01,$00036608,$61ff0000
+	dc.l	$639e6006,$61ff0000,$3a8a4cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$000041ee
+	dc.l	$ff6c216e,$00080000,$216e000c,$0004216e
+	dc.l	$00100008,$61ff0000,$60281d40,$ff4e1200
+	dc.l	$02ae00ff,$00ffff64,$4280102e,$ff634a01
+	dc.l	$660861ff,$000037de,$60300c01,$00016608
+	dc.l	$61ff0000,$64d66022,$0c010002,$660861ff
+	dc.l	$0000657a,$60140c01,$00036608,$61ff0000
+	dc.l	$62fe6006,$61ff0000,$39ea4cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+	dc.l	$44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+	dc.l	$00005f8e,$1d40ff4e,$120002ae,$00ff00ff
+	dc.l	$ff644280,$102eff63,$4a016608,$61ff0000
+	dc.l	$39886030,$0c010001,$660861ff,$0000643c
+	dc.l	$60220c01,$00026608,$61ff0000,$603a6014
+	dc.l	$0c010003,$660861ff,$00006264,$600661ff
+	dc.l	$00003a04,$4cee0303,$ff9cf22e,$9800ff60
+	dc.l	$f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+	dc.l	$0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+	dc.l	$f23c9000,$00000000,$f22e5400,$0008f22e
+	dc.l	$6800ff6c,$41eeff6c,$61ff0000,$5ef41d40
+	dc.l	$ff4e1200,$02ae00ff,$00ffff64,$4280102e
+	dc.l	$ff631d41,$ff4e4a01,$660861ff,$000038ea
+	dc.l	$60300c01,$00016608,$61ff0000,$639e6022
+	dc.l	$0c010002,$660861ff,$00005f9c,$60140c01
+	dc.l	$00036608,$61ff0000,$61c66006,$61ff0000
+	dc.l	$39664cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$000041ee,$ff6c216e,$00080000
+	dc.l	$216e000c,$0004216e,$00100008,$61ff0000
+	dc.l	$5e501d40,$ff4e1200,$02ae00ff,$00ffff64
+	dc.l	$4280102e,$ff634a01,$660861ff,$0000384a
+	dc.l	$60300c01,$00016608,$61ff0000,$62fe6022
+	dc.l	$0c010002,$660861ff,$00005efc,$60140c01
+	dc.l	$00036608,$61ff0000,$61266006,$61ff0000
+	dc.l	$38c64cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$0000f22e,$44000008,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$00005db6,$1d40ff4e
+	dc.l	$120002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$4a016608,$61ff0000,$51d46030,$0c010001
+	dc.l	$660861ff,$00006264,$60220c01,$00026608
+	dc.l	$61ff0000,$5e626014,$0c010003,$660861ff
+	dc.l	$0000608c,$600661ff,$00005224,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+	dc.l	$4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+	dc.l	$ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+	dc.l	$f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+	dc.l	$61ff0000,$5d1c1d40,$ff4e1200,$02ae00ff
+	dc.l	$00ffff64,$4280102e,$ff631d41,$ff4e4a01
+	dc.l	$660861ff,$00005136,$60300c01,$00016608
+	dc.l	$61ff0000,$61c66022,$0c010002,$660861ff
+	dc.l	$00005dc4,$60140c01,$00036608,$61ff0000
+	dc.l	$5fee6006,$61ff0000,$51864cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$000041ee
+	dc.l	$ff6c216e,$00080000,$216e000c,$0004216e
+	dc.l	$00100008,$61ff0000,$5c781d40,$ff4e1200
+	dc.l	$02ae00ff,$00ffff64,$4280102e,$ff634a01
+	dc.l	$660861ff,$00005096,$60300c01,$00016608
+	dc.l	$61ff0000,$61266022,$0c010002,$660861ff
+	dc.l	$00005d24,$60140c01,$00036608,$61ff0000
+	dc.l	$5f4e6006,$61ff0000,$50e64cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+	dc.l	$44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+	dc.l	$00005bde,$1d40ff4e,$120002ae,$00ff00ff
+	dc.l	$ff644280,$102eff63,$4a016608,$61ff0000
+	dc.l	$28066030,$0c010001,$660861ff,$0000608c
+	dc.l	$60220c01,$00026608,$61ff0000,$5c8a6014
+	dc.l	$0c010003,$660861ff,$00005eb4,$600661ff
+	dc.l	$00002938,$4cee0303,$ff9cf22e,$9800ff60
+	dc.l	$f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+	dc.l	$0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+	dc.l	$f23c9000,$00000000,$f22e5400,$0008f22e
+	dc.l	$6800ff6c,$41eeff6c,$61ff0000,$5b441d40
+	dc.l	$ff4e1200,$02ae00ff,$00ffff64,$4280102e
+	dc.l	$ff631d41,$ff4e4a01,$660861ff,$00002768
+	dc.l	$60300c01,$00016608,$61ff0000,$5fee6022
+	dc.l	$0c010002,$660861ff,$00005bec,$60140c01
+	dc.l	$00036608,$61ff0000,$5e166006,$61ff0000
+	dc.l	$289a4cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$000041ee,$ff6c216e,$00080000
+	dc.l	$216e000c,$0004216e,$00100008,$61ff0000
+	dc.l	$5aa01d40,$ff4e1200,$02ae00ff,$00ffff64
+	dc.l	$4280102e,$ff634a01,$660861ff,$000026c8
+	dc.l	$60300c01,$00016608,$61ff0000,$5f4e6022
+	dc.l	$0c010002,$660861ff,$00005b4c,$60140c01
+	dc.l	$00036608,$61ff0000,$5d766006,$61ff0000
+	dc.l	$27fa4cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$0000f22e,$44000008,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$00005a06,$1d40ff4e
+	dc.l	$120002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$4a016608,$61ff0000,$39e46030,$0c010001
+	dc.l	$660861ff,$00005f30,$60220c01,$00026608
+	dc.l	$61ff0000,$5f026014,$0c010003,$660861ff
+	dc.l	$00005cdc,$600661ff,$00003b5e,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+	dc.l	$4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+	dc.l	$ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+	dc.l	$f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+	dc.l	$61ff0000,$596c1d40,$ff4e1200,$02ae00ff
+	dc.l	$00ffff64,$4280102e,$ff631d41,$ff4e4a01
+	dc.l	$660861ff,$00003946,$60300c01,$00016608
+	dc.l	$61ff0000,$5e926022,$0c010002,$660861ff
+	dc.l	$00005e64,$60140c01,$00036608,$61ff0000
+	dc.l	$5c3e6006,$61ff0000,$3ac04cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$000041ee
+	dc.l	$ff6c216e,$00080000,$216e000c,$0004216e
+	dc.l	$00100008,$61ff0000,$58c81d40,$ff4e1200
+	dc.l	$02ae00ff,$00ffff64,$4280102e,$ff634a01
+	dc.l	$660861ff,$000038a6,$60300c01,$00016608
+	dc.l	$61ff0000,$5df26022,$0c010002,$660861ff
+	dc.l	$00005dc4,$60140c01,$00036608,$61ff0000
+	dc.l	$5b9e6006,$61ff0000,$3a204cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+	dc.l	$44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+	dc.l	$0000582e,$1d40ff4e,$120002ae,$00ff00ff
+	dc.l	$ff644280,$102eff63,$4a016608,$61ff0000
+	dc.l	$522e6030,$0c010001,$660861ff,$00005d58
+	dc.l	$60220c01,$00026608,$61ff0000,$5d2a6014
+	dc.l	$0c010003,$660861ff,$00005b04,$600661ff
+	dc.l	$000052d6,$4cee0303,$ff9cf22e,$9800ff60
+	dc.l	$f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+	dc.l	$0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+	dc.l	$f23c9000,$00000000,$f22e5400,$0008f22e
+	dc.l	$6800ff6c,$41eeff6c,$61ff0000,$57941d40
+	dc.l	$ff4e1200,$02ae00ff,$00ffff64,$4280102e
+	dc.l	$ff631d41,$ff4e4a01,$660861ff,$00005190
+	dc.l	$60300c01,$00016608,$61ff0000,$5cba6022
+	dc.l	$0c010002,$660861ff,$00005c8c,$60140c01
+	dc.l	$00036608,$61ff0000,$5a666006,$61ff0000
+	dc.l	$52384cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$000041ee,$ff6c216e,$00080000
+	dc.l	$216e000c,$0004216e,$00100008,$61ff0000
+	dc.l	$56f01d40,$ff4e1200,$02ae00ff,$00ffff64
+	dc.l	$4280102e,$ff634a01,$660861ff,$000050f0
+	dc.l	$60300c01,$00016608,$61ff0000,$5c1a6022
+	dc.l	$0c010002,$660861ff,$00005bec,$60140c01
+	dc.l	$00036608,$61ff0000,$59c66006,$61ff0000
+	dc.l	$51984cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$0000f22e,$44000008,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$00005656,$1d40ff4e
+	dc.l	$120002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$4a016608,$61ff0000,$514e6030,$0c010001
+	dc.l	$660861ff,$00005b80,$60220c01,$00026608
+	dc.l	$61ff0000,$5b526014,$0c010003,$660861ff
+	dc.l	$0000592c,$600661ff,$0000524c,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+	dc.l	$4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+	dc.l	$ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+	dc.l	$f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+	dc.l	$61ff0000,$55bc1d40,$ff4e1200,$02ae00ff
+	dc.l	$00ffff64,$4280102e,$ff631d41,$ff4e4a01
+	dc.l	$660861ff,$000050b0,$60300c01,$00016608
+	dc.l	$61ff0000,$5ae26022,$0c010002,$660861ff
+	dc.l	$00005ab4,$60140c01,$00036608,$61ff0000
+	dc.l	$588e6006,$61ff0000,$51ae4cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$000041ee
+	dc.l	$ff6c216e,$00080000,$216e000c,$0004216e
+	dc.l	$00100008,$61ff0000,$55181d40,$ff4e1200
+	dc.l	$02ae00ff,$00ffff64,$4280102e,$ff634a01
+	dc.l	$660861ff,$00005010,$60300c01,$00016608
+	dc.l	$61ff0000,$5a426022,$0c010002,$660861ff
+	dc.l	$00005a14,$60140c01,$00036608,$61ff0000
+	dc.l	$57ee6006,$61ff0000,$510e4cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+	dc.l	$44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+	dc.l	$0000547e,$1d40ff4e,$120002ae,$00ff00ff
+	dc.l	$ff644280,$102eff63,$4a016608,$61ff0000
+	dc.l	$45026030,$0c010001,$660861ff,$000054c8
+	dc.l	$60220c01,$00026608,$61ff0000,$59826014
+	dc.l	$0c010003,$660861ff,$00005754,$600661ff
+	dc.l	$00004682,$4cee0303,$ff9cf22e,$9800ff60
+	dc.l	$f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+	dc.l	$0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+	dc.l	$f23c9000,$00000000,$f22e5400,$0008f22e
+	dc.l	$6800ff6c,$41eeff6c,$61ff0000,$53e41d40
+	dc.l	$ff4e1200,$02ae00ff,$00ffff64,$4280102e
+	dc.l	$ff631d41,$ff4e4a01,$660861ff,$00004464
+	dc.l	$60300c01,$00016608,$61ff0000,$542a6022
+	dc.l	$0c010002,$660861ff,$000058e4,$60140c01
+	dc.l	$00036608,$61ff0000,$56b66006,$61ff0000
+	dc.l	$45e44cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$000041ee,$ff6c216e,$00080000
+	dc.l	$216e000c,$0004216e,$00100008,$61ff0000
+	dc.l	$53401d40,$ff4e1200,$02ae00ff,$00ffff64
+	dc.l	$4280102e,$ff634a01,$660861ff,$000043c4
+	dc.l	$60300c01,$00016608,$61ff0000,$538a6022
+	dc.l	$0c010002,$660861ff,$00005844,$60140c01
+	dc.l	$00036608,$61ff0000,$56166006,$61ff0000
+	dc.l	$45444cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$0000f22e,$44000008,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$000052a6,$1d40ff4e
+	dc.l	$120002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$4a016608,$61ff0000,$476c6030,$0c010001
+	dc.l	$660861ff,$000052f0,$60220c01,$00026608
+	dc.l	$61ff0000,$57aa6014,$0c010003,$660861ff
+	dc.l	$0000557c,$600661ff,$0000476a,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+	dc.l	$4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+	dc.l	$ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+	dc.l	$f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+	dc.l	$61ff0000,$520c1d40,$ff4e1200,$02ae00ff
+	dc.l	$00ffff64,$4280102e,$ff631d41,$ff4e4a01
+	dc.l	$660861ff,$000046ce,$60300c01,$00016608
+	dc.l	$61ff0000,$52526022,$0c010002,$660861ff
+	dc.l	$0000570c,$60140c01,$00036608,$61ff0000
+	dc.l	$54de6006,$61ff0000,$46cc4cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$000041ee
+	dc.l	$ff6c216e,$00080000,$216e000c,$0004216e
+	dc.l	$00100008,$61ff0000,$51681d40,$ff4e1200
+	dc.l	$02ae00ff,$00ffff64,$4280102e,$ff634a01
+	dc.l	$660861ff,$0000462e,$60300c01,$00016608
+	dc.l	$61ff0000,$51b26022,$0c010002,$660861ff
+	dc.l	$0000566c,$60140c01,$00036608,$61ff0000
+	dc.l	$543e6006,$61ff0000,$462c4cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+	dc.l	$44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+	dc.l	$000050ce,$1d40ff4e,$120002ae,$00ff00ff
+	dc.l	$ff644280,$102eff63,$4a016608,$61ff0000
+	dc.l	$45e46030,$0c010001,$660861ff,$00005118
+	dc.l	$60220c01,$00026608,$61ff0000,$55d26014
+	dc.l	$0c010003,$660861ff,$000053a4,$600661ff
+	dc.l	$0000460c,$4cee0303,$ff9cf22e,$9800ff60
+	dc.l	$f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+	dc.l	$0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+	dc.l	$f23c9000,$00000000,$f22e5400,$0008f22e
+	dc.l	$6800ff6c,$41eeff6c,$61ff0000,$50341d40
+	dc.l	$ff4e1200,$02ae00ff,$00ffff64,$4280102e
+	dc.l	$ff631d41,$ff4e4a01,$660861ff,$00004546
+	dc.l	$60300c01,$00016608,$61ff0000,$507a6022
+	dc.l	$0c010002,$660861ff,$00005534,$60140c01
+	dc.l	$00036608,$61ff0000,$53066006,$61ff0000
+	dc.l	$456e4cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$000041ee,$ff6c216e,$00080000
+	dc.l	$216e000c,$0004216e,$00100008,$61ff0000
+	dc.l	$4f901d40,$ff4e1200,$02ae00ff,$00ffff64
+	dc.l	$4280102e,$ff634a01,$660861ff,$000044a6
+	dc.l	$60300c01,$00016608,$61ff0000,$4fda6022
+	dc.l	$0c010002,$660861ff,$00005494,$60140c01
+	dc.l	$00036608,$61ff0000,$52666006,$61ff0000
+	dc.l	$44ce4cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$0000f22e,$44000008,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$00004ef6,$1d40ff4e
+	dc.l	$120002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$4a016608,$61ff0000,$33da6030,$0c010001
+	dc.l	$660861ff,$00005420,$60220c01,$00026608
+	dc.l	$61ff0000,$53ca6014,$0c010003,$660861ff
+	dc.l	$000051cc,$600661ff,$0000344c,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+	dc.l	$4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+	dc.l	$ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+	dc.l	$f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+	dc.l	$61ff0000,$4e5c1d40,$ff4e1200,$02ae00ff
+	dc.l	$00ffff64,$4280102e,$ff631d41,$ff4e4a01
+	dc.l	$660861ff,$0000333c,$60300c01,$00016608
+	dc.l	$61ff0000,$53826022,$0c010002,$660861ff
+	dc.l	$0000532c,$60140c01,$00036608,$61ff0000
+	dc.l	$512e6006,$61ff0000,$33ae4cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$000041ee
+	dc.l	$ff6c216e,$00080000,$216e000c,$0004216e
+	dc.l	$00100008,$61ff0000,$4db81d40,$ff4e1200
+	dc.l	$02ae00ff,$00ffff64,$4280102e,$ff634a01
+	dc.l	$660861ff,$0000329c,$60300c01,$00016608
+	dc.l	$61ff0000,$52e26022,$0c010002,$660861ff
+	dc.l	$0000528c,$60140c01,$00036608,$61ff0000
+	dc.l	$508e6006,$61ff0000,$330e4cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+	dc.l	$44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+	dc.l	$00004d1e,$1d40ff4e,$120002ae,$00ff00ff
+	dc.l	$ff644280,$102eff63,$4a016608,$61ff0000
+	dc.l	$27cc6030,$0c010001,$660861ff,$00005284
+	dc.l	$60220c01,$00026608,$61ff0000,$4dca6014
+	dc.l	$0c010003,$660861ff,$00004ff4,$600661ff
+	dc.l	$0000282a,$4cee0303,$ff9cf22e,$9800ff60
+	dc.l	$f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+	dc.l	$0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+	dc.l	$f23c9000,$00000000,$f22e5400,$0008f22e
+	dc.l	$6800ff6c,$41eeff6c,$61ff0000,$4c841d40
+	dc.l	$ff4e1200,$02ae00ff,$00ffff64,$4280102e
+	dc.l	$ff631d41,$ff4e4a01,$660861ff,$0000272e
+	dc.l	$60300c01,$00016608,$61ff0000,$51e66022
+	dc.l	$0c010002,$660861ff,$00004d2c,$60140c01
+	dc.l	$00036608,$61ff0000,$4f566006,$61ff0000
+	dc.l	$278c4cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$000041ee,$ff6c216e,$00080000
+	dc.l	$216e000c,$0004216e,$00100008,$61ff0000
+	dc.l	$4be01d40,$ff4e1200,$02ae00ff,$00ffff64
+	dc.l	$4280102e,$ff634a01,$660861ff,$0000268e
+	dc.l	$60300c01,$00016608,$61ff0000,$51466022
+	dc.l	$0c010002,$660861ff,$00004c8c,$60140c01
+	dc.l	$00036608,$61ff0000,$4eb66006,$61ff0000
+	dc.l	$26ec4cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$0000f22e,$44000008,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$00004b46,$1d40ff4e
+	dc.l	$120002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$4a016608,$61ff0000,$2fb06030,$0c010001
+	dc.l	$660861ff,$00004ff4,$60220c01,$00026608
+	dc.l	$61ff0000,$4bf26014,$0c010003,$660861ff
+	dc.l	$00004e1c,$600661ff,$00002f9a,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+	dc.l	$4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+	dc.l	$ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+	dc.l	$f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+	dc.l	$61ff0000,$4aac1d40,$ff4e1200,$02ae00ff
+	dc.l	$00ffff64,$4280102e,$ff631d41,$ff4e4a01
+	dc.l	$660861ff,$00002f12,$60300c01,$00016608
+	dc.l	$61ff0000,$4f566022,$0c010002,$660861ff
+	dc.l	$00004b54,$60140c01,$00036608,$61ff0000
+	dc.l	$4d7e6006,$61ff0000,$2efc4cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$000041ee
+	dc.l	$ff6c216e,$00080000,$216e000c,$0004216e
+	dc.l	$00100008,$61ff0000,$4a081d40,$ff4e1200
+	dc.l	$02ae00ff,$00ffff64,$4280102e,$ff634a01
+	dc.l	$660861ff,$00002e72,$60300c01,$00016608
+	dc.l	$61ff0000,$4eb66022,$0c010002,$660861ff
+	dc.l	$00004ab4,$60140c01,$00036608,$61ff0000
+	dc.l	$4cde6006,$61ff0000,$2e5c4cee,$0303ff9c
+	dc.l	$f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+	dc.l	$4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+	dc.l	$f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+	dc.l	$44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+	dc.l	$0000496e,$1d40ff4e,$120002ae,$00ff00ff
+	dc.l	$ff644280,$102eff63,$4a016608,$61ff0000
+	dc.l	$2e0c6030,$0c010001,$660861ff,$00004e1c
+	dc.l	$60220c01,$00026608,$61ff0000,$4a1a6014
+	dc.l	$0c010003,$660861ff,$00004c44,$600661ff
+	dc.l	$00002e08,$4cee0303,$ff9cf22e,$9800ff60
+	dc.l	$f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+	dc.l	$0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+	dc.l	$f23c9000,$00000000,$f22e5400,$0008f22e
+	dc.l	$6800ff6c,$41eeff6c,$61ff0000,$48d41d40
+	dc.l	$ff4e1200,$02ae00ff,$00ffff64,$4280102e
+	dc.l	$ff631d41,$ff4e4a01,$660861ff,$00002d6e
+	dc.l	$60300c01,$00016608,$61ff0000,$4d7e6022
+	dc.l	$0c010002,$660861ff,$0000497c,$60140c01
+	dc.l	$00036608,$61ff0000,$4ba66006,$61ff0000
+	dc.l	$2d6a4cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$000041ee,$ff6c216e,$00080000
+	dc.l	$216e000c,$0004216e,$00100008,$61ff0000
+	dc.l	$48301d40,$ff4e1200,$02ae00ff,$00ffff64
+	dc.l	$4280102e,$ff634a01,$660861ff,$00002cce
+	dc.l	$60300c01,$00016608,$61ff0000,$4cde6022
+	dc.l	$0c010002,$660861ff,$000048dc,$60140c01
+	dc.l	$00036608,$61ff0000,$4b066006,$61ff0000
+	dc.l	$2cca4cee,$0303ff9c,$f22e9800,$ff60f22e
+	dc.l	$d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$0000f22e,$44000008,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$00004796,$1d40ff4e
+	dc.l	$120002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$4a016608,$61ff0000,$0af46030,$0c010001
+	dc.l	$660861ff,$00004d18,$60220c01,$00026608
+	dc.l	$61ff0000,$4d386014,$0c010003,$660861ff
+	dc.l	$00004d34,$600661ff,$00000d58,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f227e003,$f21fd040
+	dc.l	$f21fd080,$4e5e4e75,$4e56ff40,$48ee0303
+	dc.l	$ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+	dc.l	$90000000,$0000f22e,$54000008,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$000046f6,$1d40ff4e
+	dc.l	$120002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$1d41ff4e,$4a016608,$61ff0000,$0a506030
+	dc.l	$0c010001,$660861ff,$00004c74,$60220c01
+	dc.l	$00026608,$61ff0000,$4c946014,$0c010003
+	dc.l	$660861ff,$00004c90,$600661ff,$00000cb4
+	dc.l	$4cee0303,$ff9cf22e,$9800ff60,$f227e003
+	dc.l	$f21fd040,$f21fd080,$4e5e4e75,$4e56ff40
+	dc.l	$48ee0303,$ff9cf22e,$b800ff60,$f22ef0c0
+	dc.l	$ffdcf23c,$90000000,$000041ee,$ff6c216e
+	dc.l	$00080000,$216e000c,$0004216e,$00100008
+	dc.l	$61ff0000,$464c1d40,$ff4e1200,$02ae00ff
+	dc.l	$00ffff64,$4280102e,$ff634a01,$660861ff
+	dc.l	$000009aa,$60300c01,$00016608,$61ff0000
+	dc.l	$4bce6022,$0c010002,$660861ff,$00004bee
+	dc.l	$60140c01,$00036608,$61ff0000,$4bea6006
+	dc.l	$61ff0000,$0c0e4cee,$0303ff9c,$f22e9800
+	dc.l	$ff60f227,$e003f21f,$d040f21f,$d0804e5e
+	dc.l	$4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+	dc.l	$ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+	dc.l	$f22e4400,$0008f22e,$6800ff78,$41eeff78
+	dc.l	$61ff0000,$45ac1d40,$ff4ff22e,$4400000c
+	dc.l	$f22e6800,$ff6c41ee,$ff6c61ff,$00004592
+	dc.l	$1d40ff4e,$220002ae,$00ff00ff,$ff644280
+	dc.l	$102eff63,$41eeff6c,$43eeff78,$4a016608
+	dc.l	$61ff0000,$4c466030,$0c010001,$660861ff
+	dc.l	$00004c64,$60220c01,$00026608,$61ff0000
+	dc.l	$4c846014,$0c010003,$660861ff,$00004d16
+	dc.l	$600661ff,$00004c14,$4cee0303,$ff9cf22e
+	dc.l	$9800ff60,$f22ed040,$ffe84e5e,$4e754e56
+	dc.l	$ff4048ee,$0303ff9c,$f22eb800,$ff60f22e
+	dc.l	$f0c0ffdc,$f23c9000,$00000000,$f22e5400
+	dc.l	$0008f22e,$6800ff78,$41eeff78,$61ff0000
+	dc.l	$44f01d40,$ff4ff22e,$54000010,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$000044d6,$1d40ff4e
+	dc.l	$220002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$41eeff6c,$43eeff78,$4a016608,$61ff0000
+	dc.l	$4b8a6030,$0c010001,$660861ff,$00004ba8
+	dc.l	$60220c01,$00026608,$61ff0000,$4bc86014
+	dc.l	$0c010003,$660861ff,$00004c5a,$600661ff
+	dc.l	$00004b58,$4cee0303,$ff9cf22e,$9800ff60
+	dc.l	$f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+	dc.l	$0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+	dc.l	$f23c9000,$00000000,$41eeff78,$216e0008
+	dc.l	$0000216e,$000c0004,$216e0010,$000861ff
+	dc.l	$0000442e,$1d40ff4f,$41eeff6c,$216e0014
+	dc.l	$0000216e,$00180004,$216e001c,$000861ff
+	dc.l	$0000440e,$1d40ff4e,$220002ae,$00ff00ff
+	dc.l	$ff644280,$102eff63,$41eeff6c,$43eeff78
+	dc.l	$4a016608,$61ff0000,$4ac26030,$0c010001
+	dc.l	$660861ff,$00004ae0,$60220c01,$00026608
+	dc.l	$61ff0000,$4b006014,$0c010003,$660861ff
+	dc.l	$00004b92,$600661ff,$00004a90,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+	dc.l	$4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+	dc.l	$ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+	dc.l	$f22e4400,$0008f22e,$6800ff78,$41eeff78
+	dc.l	$61ff0000,$436c1d40,$ff4ff22e,$4400000c
+	dc.l	$f22e6800,$ff6c41ee,$ff6c61ff,$00004352
+	dc.l	$1d40ff4e,$220002ae,$00ff00ff,$ff644280
+	dc.l	$102eff63,$41eeff6c,$43eeff78,$4a016608
+	dc.l	$61ff0000,$491c6030,$0c010001,$660861ff
+	dc.l	$0000493a,$60220c01,$00026608,$61ff0000
+	dc.l	$495a6014,$0c010003,$660861ff,$00004ad6
+	dc.l	$600661ff,$000048ea,$4cee0303,$ff9cf22e
+	dc.l	$9800ff60,$f22ed040,$ffe84e5e,$4e754e56
+	dc.l	$ff4048ee,$0303ff9c,$f22eb800,$ff60f22e
+	dc.l	$f0c0ffdc,$f23c9000,$00000000,$f22e5400
+	dc.l	$0008f22e,$6800ff78,$41eeff78,$61ff0000
+	dc.l	$42b01d40,$ff4ff22e,$54000010,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$00004296,$1d40ff4e
+	dc.l	$220002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$41eeff6c,$43eeff78,$4a016608,$61ff0000
+	dc.l	$48606030,$0c010001,$660861ff,$0000487e
+	dc.l	$60220c01,$00026608,$61ff0000,$489e6014
+	dc.l	$0c010003,$660861ff,$00004a1a,$600661ff
+	dc.l	$0000482e,$4cee0303,$ff9cf22e,$9800ff60
+	dc.l	$f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+	dc.l	$0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+	dc.l	$f23c9000,$00000000,$41eeff78,$216e0008
+	dc.l	$0000216e,$000c0004,$216e0010,$000861ff
+	dc.l	$000041ee,$1d40ff4f,$41eeff6c,$216e0014
+	dc.l	$0000216e,$00180004,$216e001c,$000861ff
+	dc.l	$000041ce,$1d40ff4e,$220002ae,$00ff00ff
+	dc.l	$ff644280,$102eff63,$41eeff6c,$43eeff78
+	dc.l	$4a016608,$61ff0000,$47986030,$0c010001
+	dc.l	$660861ff,$000047b6,$60220c01,$00026608
+	dc.l	$61ff0000,$47d66014,$0c010003,$660861ff
+	dc.l	$00004952,$600661ff,$00004766,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+	dc.l	$4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+	dc.l	$ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+	dc.l	$f22e4400,$0008f22e,$6800ff78,$41eeff78
+	dc.l	$61ff0000,$412c1d40,$ff4ff22e,$4400000c
+	dc.l	$f22e6800,$ff6c41ee,$ff6c61ff,$00004112
+	dc.l	$1d40ff4e,$220002ae,$00ff00ff,$ff644280
+	dc.l	$102eff63,$41eeff6c,$43eeff78,$4a016608
+	dc.l	$61ff0000,$484a6030,$0c010001,$660861ff
+	dc.l	$0000486a,$60220c01,$00026608,$61ff0000
+	dc.l	$488a6014,$0c010003,$660861ff,$00004896
+	dc.l	$600661ff,$00004818,$4cee0303,$ff9cf22e
+	dc.l	$9800ff60,$f22ed040,$ffe84e5e,$4e754e56
+	dc.l	$ff4048ee,$0303ff9c,$f22eb800,$ff60f22e
+	dc.l	$f0c0ffdc,$f23c9000,$00000000,$f22e5400
+	dc.l	$0008f22e,$6800ff78,$41eeff78,$61ff0000
+	dc.l	$40701d40,$ff4ff22e,$54000010,$f22e6800
+	dc.l	$ff6c41ee,$ff6c61ff,$00004056,$1d40ff4e
+	dc.l	$220002ae,$00ff00ff,$ff644280,$102eff63
+	dc.l	$41eeff6c,$43eeff78,$4a016608,$61ff0000
+	dc.l	$478e6030,$0c010001,$660861ff,$000047ae
+	dc.l	$60220c01,$00026608,$61ff0000,$47ce6014
+	dc.l	$0c010003,$660861ff,$000047da,$600661ff
+	dc.l	$0000475c,$4cee0303,$ff9cf22e,$9800ff60
+	dc.l	$f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+	dc.l	$0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+	dc.l	$f23c9000,$00000000,$41eeff78,$216e0008
+	dc.l	$0000216e,$000c0004,$216e0010,$000861ff
+	dc.l	$00003fae,$1d40ff4f,$41eeff6c,$216e0014
+	dc.l	$0000216e,$00180004,$216e001c,$000861ff
+	dc.l	$00003f8e,$1d40ff4e,$220002ae,$00ff00ff
+	dc.l	$ff644280,$102eff63,$41eeff6c,$43eeff78
+	dc.l	$4a016608,$61ff0000,$46c66030,$0c010001
+	dc.l	$660861ff,$000046e6,$60220c01,$00026608
+	dc.l	$61ff0000,$47066014,$0c010003,$660861ff
+	dc.l	$00004712,$600661ff,$00004694,$4cee0303
+	dc.l	$ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+	dc.l	$4e75bd6a,$aa77ccc9,$94f53de6,$12097aae
+	dc.l	$8da1be5a,$e6452a11,$8ae43ec7,$1de3a534
+	dc.l	$1531bf2a,$01a01a01,$8b590000,$00000000
+	dc.l	$00003ff8,$00008888,$88888888,$59af0000
+	dc.l	$0000bffc,$0000aaaa,$aaaaaaaa,$aa990000
+	dc.l	$00003d2a,$c4d0d601,$1ee3bda9,$396f9f45
+	dc.l	$ac193e21,$eed90612,$c972be92,$7e4fb79d
+	dc.l	$9fcf3efa,$01a01a01,$d4230000,$00000000
+	dc.l	$0000bff5,$0000b60b,$60b60b61,$d4380000
+	dc.l	$00003ffa,$0000aaaa,$aaaaaaaa,$ab5ebf00
+	dc.l	$00002d7c,$00000000,$ff5c6008,$2d7c0000
+	dc.l	$0001ff5c,$f2104800,$f22e6800,$ff842210
+	dc.l	$32280004,$02817fff,$ffff0c81,$3fd78000
+	dc.l	$6c046000,$01780c81,$4004bc7e,$6d046000
+	dc.l	$0468f200,$0080f23a,$54a3d186,$43fb0170
+	dc.l	$00000866,$f22e6080,$ff58222e,$ff58e981
+	dc.l	$d3c1f219,$4828f211,$4428222e,$ff58d2ae
+	dc.l	$ff5ce299,$0c810000,$00006d00,$0088f227
+	dc.l	$e00cf22e,$6800ff84,$f2000023,$f23a5580
+	dc.l	$fed2f23a,$5500fed4,$f2000080,$f20004a3
+	dc.l	$e2990281,$80000000,$b3aeff84,$f20005a3
+	dc.l	$f2000523,$f23a55a2,$febaf23a,$5522febc
+	dc.l	$f20005a3,$f2000523,$f23a55a2,$feb6f23a
+	dc.l	$4922fec0,$f2000ca3,$f2000123,$f23a48a2
+	dc.l	$fec2f22e,$4823ff84,$f20008a2,$f2000423
+	dc.l	$f21fd030,$f2009000,$f22e4822,$ff8460ff
+	dc.l	$00004006,$f227e00c,$f2000023,$f23a5500
+	dc.l	$fea2f23a,$5580fea4,$f2000080,$f20004a3
+	dc.l	$f22e6800,$ff84e299,$02818000,$0000f200
+	dc.l	$0523b3ae,$ff840281,$80000000,$f20005a3
+	dc.l	$00813f80,$00002d41,$ff54f23a,$5522fe74
+	dc.l	$f23a55a2,$fe76f200,$0523f200,$05a3f23a
+	dc.l	$5522fe70,$f23a49a2,$fe7af200,$0523f200
+	dc.l	$0ca3f23a,$4922fe7c,$f23a44a2,$fe82f200
+	dc.l	$0823f200,$0422f22e,$4823ff84,$f21fd030
+	dc.l	$f2009000,$f22e4422,$ff5460ff,$00003f6a
+	dc.l	$0c813fff,$80006eff,$00000300,$222eff5c
+	dc.l	$0c810000,$00006e14,$f2009000,$123c0003
+	dc.l	$f22e4800,$ff8460ff,$00003f36,$f23c4400
+	dc.l	$3f800000,$f2009000,$f23c4422,$80800000
+	dc.l	$60ff0000,$3f2c60ff,$00003f64,$f23c4400
+	dc.l	$3f800000,$60ff0000,$3f182d7c,$00000004
+	dc.l	$ff5cf210,$4800f22e,$6800ff84,$22103228
+	dc.l	$00040281,$7fffffff,$0c813fd7,$80006c04
+	dc.l	$60000240,$0c814004,$bc7e6d04,$6000027a
+	dc.l	$f2000080,$f23a54a3,$cf9843fb,$01700000
+	dc.l	$0678f22e,$6080ff58,$222eff58,$e981d3c1
+	dc.l	$f2194828,$f2114428,$222eff58,$e2990c81
+	dc.l	$00000000,$6c000106,$f227e004,$f22e6800
+	dc.l	$ff84f200,$0023f23a,$5480fce8,$f23a5500
+	dc.l	$fd32f200,$00a3f200,$01232f02,$2401e29a
+	dc.l	$02828000,$0000b382,$02828000,$0000f23a
+	dc.l	$54a2fcc8,$f23a5522,$fd12f200,$00a3b5ae
+	dc.l	$ff84241f,$f2000123,$e2990281,$80000000
+	dc.l	$2d7c3f80,$0000ff54,$b3aeff54,$f23a54a2
+	dc.l	$fca2f23a,$5522fcec,$f20000a3,$f2000123
+	dc.l	$f22e6800,$ff90f23a,$54a2fc90,$b3aeff90
+	dc.l	$f23a5522,$fcd6f200,$00a3f200,$0123f23a
+	dc.l	$54a2fc80,$f23a5522,$fccaf200,$00a3f200
+	dc.l	$0123f23a,$48a2fc7c,$f23a4922,$fcc6f200
+	dc.l	$00a3f200,$0123f23a,$48a2fc78,$f23a4922
+	dc.l	$fcc2f200,$00a3f200,$0823f22e,$48a3ff84
+	dc.l	$f23a4422,$fcbaf22e,$4823ff90,$f21fd020
+	dc.l	$f2009000,$f22e48a2,$ff8461ff,$00003e22
+	dc.l	$f22e4422,$ff5460ff,$00003d9e,$f227e004
+	dc.l	$f22e6800,$ff84f200,$0023f23a,$5480fc34
+	dc.l	$f23a5500,$fbdef200,$00a3f22e,$6800ff90
+	dc.l	$f2000123,$e2990281,$80000000,$f23a54a2
+	dc.l	$fc1af23a,$5522fbc4,$b3aeff84,$b3aeff90
+	dc.l	$f20000a3,$00813f80,$00002d41,$ff54f200
+	dc.l	$0123f23a,$54a2fbfc,$f23a5522,$fba6f200
+	dc.l	$00a3f200,$0123f23a,$54a2fbf0,$f23a5522
+	dc.l	$fb9af200,$00a3f200,$0123f23a,$54a2fbe4
+	dc.l	$f23a5522,$fb8ef200,$00a3f200,$0123f23a
+	dc.l	$48a2fbe0,$f23a4922,$fb8af200,$00a3f200
+	dc.l	$0123f23a,$48a2fbdc,$f23a4922,$fb86f200
+	dc.l	$00a3f200,$0823f23a,$44a2fbd4,$f22e4823
+	dc.l	$ff84f22e,$48a3ff90,$f21fd020,$f2009000
+	dc.l	$f22e44a2,$ff5461ff,$00003d36,$f22e4822
+	dc.l	$ff8460ff,$00003cb2,$0c813fff,$80006e00
+	dc.l	$0048f23c,$44803f80,$0000f200,$9000f23c
+	dc.l	$44a80080,$000061ff,$00003d06,$f200b000
+	dc.l	$123c0003,$f22e4800,$ff8460ff,$00003c72
+	dc.l	$2f00f23c,$44803f80,$000061ff,$00003ce2
+	dc.l	$201f60ff,$00003ca8,$f227e03c,$2f02f23c
+	dc.l	$44800000,$00000c81,$7ffeffff,$66523d7c
+	dc.l	$7ffeff84,$2d7cc90f,$daa2ff88,$42aeff8c
+	dc.l	$3d7c7fdc,$ff902d7c,$85a308d3,$ff9442ae
+	dc.l	$ff98f200,$003af294,$000e002e,$0080ff84
+	dc.l	$002e0080,$ff90f22e,$4822ff84,$f2000080
+	dc.l	$f22e4822,$ff90f200,$00a8f22e,$48a2ff90
+	dc.l	$f22e6800,$ff84322e,$ff842241,$02810000
+	dc.l	$7fff0481,$00003fff,$0c810000,$001c6f0e
+	dc.l	$04810000,$001b1d7c,$0000ff58,$60084281
+	dc.l	$1d7c0001,$ff58243c,$00003ffe,$94812d7c
+	dc.l	$a2f9836e,$ff882d7c,$4e44152a,$ff8c3d42
+	dc.l	$ff84f200,$0100f22e,$4923ff84,$24094842
+	dc.l	$02828000,$00000082,$5f000000,$2d42ff54
+	dc.l	$f22e4522,$ff54f22e,$4528ff54,$24010682
+	dc.l	$00003fff,$3d42ff84,$2d7cc90f,$daa2ff88
+	dc.l	$42aeff8c,$06810000,$3fdd3d41,$ff902d7c
+	dc.l	$85a308d3,$ff9442ae,$ff98122e,$ff58f200
+	dc.l	$0a00f22e,$4a23ff84,$f2000a80,$f22e4aa3
+	dc.l	$ff90f200,$1180f200,$15a2f200,$0e28f200
+	dc.l	$0c28f200,$1622f200,$0180f200,$10a8f200
+	dc.l	$04220c01,$00006e00,$000ef200,$01a8f200
+	dc.l	$0ca26000,$ff0cf22e,$6100ff58,$241ff21f
+	dc.l	$d03c222e,$ff5c0c81,$00000004,$6d00fa4c
+	dc.l	$6000fc36,$3ea0b759,$f50f8688,$bef2baa5
+	dc.l	$a8924f04,$bf346f59,$b39ba65f,$00000000
+	dc.l	$00000000,$3ff60000,$e073d3fc,$199c4a00
+	dc.l	$00000000,$3ff90000,$d23cd684,$15d95fa1
+	dc.l	$00000000,$bffc0000,$8895a6c5,$fb423bca
+	dc.l	$00000000,$bffd0000,$eef57e0d,$a84bc8ce
+	dc.l	$00000000,$3ffc0000,$a2f9836e,$4e44152a
+	dc.l	$00000000,$40010000,$c90fdaa2,$00000000
+	dc.l	$00000000,$3fdf0000,$85a308d4,$00000000
+	dc.l	$00000000,$c0040000,$c90fdaa2,$2168c235
+	dc.l	$21800000,$c0040000,$c2c75bcd,$105d7c23
+	dc.l	$a0d00000,$c0040000,$bc7edcf7,$ff523611
+	dc.l	$a1e80000,$c0040000,$b6365e22,$ee46f000
+	dc.l	$21480000,$c0040000,$afeddf4d,$dd3ba9ee
+	dc.l	$a1200000,$c0040000,$a9a56078,$cc3063dd
+	dc.l	$21fc0000,$c0040000,$a35ce1a3,$bb251dcb
+	dc.l	$21100000,$c0040000,$9d1462ce,$aa19d7b9
+	dc.l	$a1580000,$c0040000,$96cbe3f9,$990e91a8
+	dc.l	$21e00000,$c0040000,$90836524,$88034b96
+	dc.l	$20b00000,$c0040000,$8a3ae64f,$76f80584
+	dc.l	$a1880000,$c0040000,$83f2677a,$65ecbf73
+	dc.l	$21c40000,$c0030000,$fb53d14a,$a9c2f2c2
+	dc.l	$20000000,$c0030000,$eec2d3a0,$87ac669f
+	dc.l	$21380000,$c0030000,$e231d5f6,$6595da7b
+	dc.l	$a1300000,$c0030000,$d5a0d84c,$437f4e58
+	dc.l	$9fc00000,$c0030000,$c90fdaa2,$2168c235
+	dc.l	$21000000,$c0030000,$bc7edcf7,$ff523611
+	dc.l	$a1680000,$c0030000,$afeddf4d,$dd3ba9ee
+	dc.l	$a0a00000,$c0030000,$a35ce1a3,$bb251dcb
+	dc.l	$20900000,$c0030000,$96cbe3f9,$990e91a8
+	dc.l	$21600000,$c0030000,$8a3ae64f,$76f80584
+	dc.l	$a1080000,$c0020000,$fb53d14a,$a9c2f2c2
+	dc.l	$1f800000,$c0020000,$e231d5f6,$6595da7b
+	dc.l	$a0b00000,$c0020000,$c90fdaa2,$2168c235
+	dc.l	$20800000,$c0020000,$afeddf4d,$dd3ba9ee
+	dc.l	$a0200000,$c0020000,$96cbe3f9,$990e91a8
+	dc.l	$20e00000,$c0010000,$fb53d14a,$a9c2f2c2
+	dc.l	$1f000000,$c0010000,$c90fdaa2,$2168c235
+	dc.l	$20000000,$c0010000,$96cbe3f9,$990e91a8
+	dc.l	$20600000,$c0000000,$c90fdaa2,$2168c235
+	dc.l	$1f800000,$bfff0000,$c90fdaa2,$2168c235
+	dc.l	$1f000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$3fff0000,$c90fdaa2,$2168c235
+	dc.l	$9f000000,$40000000,$c90fdaa2,$2168c235
+	dc.l	$9f800000,$40010000,$96cbe3f9,$990e91a8
+	dc.l	$a0600000,$40010000,$c90fdaa2,$2168c235
+	dc.l	$a0000000,$40010000,$fb53d14a,$a9c2f2c2
+	dc.l	$9f000000,$40020000,$96cbe3f9,$990e91a8
+	dc.l	$a0e00000,$40020000,$afeddf4d,$dd3ba9ee
+	dc.l	$20200000,$40020000,$c90fdaa2,$2168c235
+	dc.l	$a0800000,$40020000,$e231d5f6,$6595da7b
+	dc.l	$20b00000,$40020000,$fb53d14a,$a9c2f2c2
+	dc.l	$9f800000,$40030000,$8a3ae64f,$76f80584
+	dc.l	$21080000,$40030000,$96cbe3f9,$990e91a8
+	dc.l	$a1600000,$40030000,$a35ce1a3,$bb251dcb
+	dc.l	$a0900000,$40030000,$afeddf4d,$dd3ba9ee
+	dc.l	$20a00000,$40030000,$bc7edcf7,$ff523611
+	dc.l	$21680000,$40030000,$c90fdaa2,$2168c235
+	dc.l	$a1000000,$40030000,$d5a0d84c,$437f4e58
+	dc.l	$1fc00000,$40030000,$e231d5f6,$6595da7b
+	dc.l	$21300000,$40030000,$eec2d3a0,$87ac669f
+	dc.l	$a1380000,$40030000,$fb53d14a,$a9c2f2c2
+	dc.l	$a0000000,$40040000,$83f2677a,$65ecbf73
+	dc.l	$a1c40000,$40040000,$8a3ae64f,$76f80584
+	dc.l	$21880000,$40040000,$90836524,$88034b96
+	dc.l	$a0b00000,$40040000,$96cbe3f9,$990e91a8
+	dc.l	$a1e00000,$40040000,$9d1462ce,$aa19d7b9
+	dc.l	$21580000,$40040000,$a35ce1a3,$bb251dcb
+	dc.l	$a1100000,$40040000,$a9a56078,$cc3063dd
+	dc.l	$a1fc0000,$40040000,$afeddf4d,$dd3ba9ee
+	dc.l	$21200000,$40040000,$b6365e22,$ee46f000
+	dc.l	$a1480000,$40040000,$bc7edcf7,$ff523611
+	dc.l	$21e80000,$40040000,$c2c75bcd,$105d7c23
+	dc.l	$20d00000,$40040000,$c90fdaa2,$2168c235
+	dc.l	$a1800000,$f2104800,$22103228,$00040281
+	dc.l	$7fffffff,$0c813fd7,$80006c04,$60000134
+	dc.l	$0c814004,$bc7e6d04,$60000144,$f2000080
+	dc.l	$f23a54a3,$c6dc43fa,$fdbcf201,$6080e981
+	dc.l	$d3c1f219,$4828f211,$4428ea99,$02818000
+	dc.l	$0000f227,$e00c0c81,$00000000,$6d000072
+	dc.l	$f2000080,$f20004a3,$f23a5580,$faf8f23a
+	dc.l	$5500fafa,$f20005a3,$f2000523,$f23a55a2
+	dc.l	$faf4f23a,$4922fafe,$f20005a3,$f2000523
+	dc.l	$f23a49a2,$fb00f23a,$4922fb0a,$f20005a3
+	dc.l	$f2000523,$f23a49a2,$fb0cf200,$0123f200
+	dc.l	$0ca3f200,$0822f23c,$44a23f80,$0000f21f
+	dc.l	$d030f200,$9000f200,$042060ff,$0000357a
+	dc.l	$f2000080,$f2000023,$f23a5580,$fa88f23a
+	dc.l	$5500fa8a,$f20001a3,$f2000123,$f23a55a2
+	dc.l	$fa84f23a,$4922fa8e,$f20001a3,$f2000123
+	dc.l	$f23a49a2,$fa90f23a,$4922fa9a,$f20001a3
+	dc.l	$f2000123,$f23a49a2,$fa9cf200,$0523f200
+	dc.l	$0c23f200,$08a2f23c,$44223f80,$0000f21f
+	dc.l	$d030f227,$68800a97,$80000000,$f2009000
+	dc.l	$f21f4820,$60ff0000,$35000c81,$3fff8000
+	dc.l	$6e1cf227,$6800f200,$9000123c,$0003f21f
+	dc.l	$480060ff,$000034da,$60ff0000,$3522f227
+	dc.l	$e03c2f02,$f23c4480,$00000000,$0c817ffe
+	dc.l	$ffff6652,$3d7c7ffe,$ff842d7c,$c90fdaa2
+	dc.l	$ff8842ae,$ff8c3d7c,$7fdcff90,$2d7c85a3
+	dc.l	$08d3ff94,$42aeff98,$f200003a,$f294000e
+	dc.l	$002e0080,$ff84002e,$0080ff90,$f22e4822
+	dc.l	$ff84f200,$0080f22e,$4822ff90,$f20000a8
+	dc.l	$f22e48a2,$ff90f22e,$6800ff84,$322eff84
+	dc.l	$22410281,$00007fff,$04810000,$3fff0c81
+	dc.l	$0000001c,$6f0e0481,$0000001b,$1d7c0000
+	dc.l	$ff586008,$42811d7c,$0001ff58,$243c0000
+	dc.l	$3ffe9481,$2d7ca2f9,$836eff88,$2d7c4e44
+	dc.l	$152aff8c,$3d42ff84,$f2000100,$f22e4923
+	dc.l	$ff842409,$48420282,$80000000,$00825f00
+	dc.l	$00002d42,$ff54f22e,$4522ff54,$f22e4528
+	dc.l	$ff542401,$06820000,$3fff3d42,$ff842d7c
+	dc.l	$c90fdaa2,$ff8842ae,$ff8c0681,$00003fdd
+	dc.l	$3d41ff90,$2d7c85a3,$08d3ff94,$42aeff98
+	dc.l	$122eff58,$f2000a00,$f22e4a23,$ff84f200
+	dc.l	$0a80f22e,$4aa3ff90,$f2001180,$f20015a2
+	dc.l	$f2000e28,$f2000c28,$f2001622,$f2000180
+	dc.l	$f20010a8,$f2000422,$0c010000,$6e00000e
+	dc.l	$f20001a8,$f2000ca2,$6000ff0c,$f22e6100
+	dc.l	$ff54241f,$f21fd03c,$222eff54,$e2996000
+	dc.l	$fd72bff6,$687e3149,$87d84002,$ac6934a2
+	dc.l	$6db3bfc2,$476f4e1d,$a28e3fb3,$44447f87
+	dc.l	$6989bfb7,$44ee7faf,$45db3fbc,$71c64694
+	dc.l	$0220bfc2,$49249218,$72f93fc9,$99999999
+	dc.l	$8fa9bfd5,$55555555,$5555bfb7,$0bf39853
+	dc.l	$9e6a3fbc,$7187962d,$1d7dbfc2,$49248271
+	dc.l	$07b83fc9,$99999996,$263ebfd5,$55555555
+	dc.l	$55363fff,$0000c90f,$daa22168,$c2350000
+	dc.l	$0000bfff,$0000c90f,$daa22168,$c2350000
+	dc.l	$00000001,$00008000,$00000000,$00000000
+	dc.l	$00008001,$00008000,$00000000,$00000000
+	dc.l	$00003ffb,$000083d1,$52c5060b,$7a510000
+	dc.l	$00003ffb,$00008bc8,$54456549,$8b8b0000
+	dc.l	$00003ffb,$000093be,$40601762,$6b0d0000
+	dc.l	$00003ffb,$00009bb3,$078d35ae,$c2020000
+	dc.l	$00003ffb,$0000a3a6,$9a525ddc,$e7de0000
+	dc.l	$00003ffb,$0000ab98,$e9436276,$56190000
+	dc.l	$00003ffb,$0000b389,$e502f9c5,$98620000
+	dc.l	$00003ffb,$0000bb79,$7e436b09,$e6fb0000
+	dc.l	$00003ffb,$0000c367,$a5c739e5,$f4460000
+	dc.l	$00003ffb,$0000cb54,$4c61cff7,$d5c60000
+	dc.l	$00003ffb,$0000d33f,$62f82488,$533e0000
+	dc.l	$00003ffb,$0000db28,$da816240,$4c770000
+	dc.l	$00003ffb,$0000e310,$a4078ad3,$4f180000
+	dc.l	$00003ffb,$0000eaf6,$b0a8188e,$e1eb0000
+	dc.l	$00003ffb,$0000f2da,$f1949dbe,$79d50000
+	dc.l	$00003ffb,$0000fabd,$581361d4,$7e3e0000
+	dc.l	$00003ffc,$00008346,$ac210959,$ecc40000
+	dc.l	$00003ffc,$00008b23,$2a083042,$82d80000
+	dc.l	$00003ffc,$000092fb,$70b8d29a,$e2f90000
+	dc.l	$00003ffc,$00009acf,$476f5ccd,$1cb40000
+	dc.l	$00003ffc,$0000a29e,$76304954,$f23f0000
+	dc.l	$00003ffc,$0000aa68,$c5d08ab8,$52300000
+	dc.l	$00003ffc,$0000b22d,$fffd9d53,$9f830000
+	dc.l	$00003ffc,$0000b9ed,$ef453e90,$0ea50000
+	dc.l	$00003ffc,$0000c1a8,$5f1cc75e,$3ea50000
+	dc.l	$00003ffc,$0000c95d,$1be82813,$8de60000
+	dc.l	$00003ffc,$0000d10b,$f300840d,$2de40000
+	dc.l	$00003ffc,$0000d8b4,$b2ba6bc0,$5e7a0000
+	dc.l	$00003ffc,$0000e057,$2a6bb423,$35f60000
+	dc.l	$00003ffc,$0000e7f3,$2a70ea9c,$aa8f0000
+	dc.l	$00003ffc,$0000ef88,$843264ec,$efaa0000
+	dc.l	$00003ffc,$0000f717,$0a28ecc0,$66660000
+	dc.l	$00003ffd,$0000812f,$d288332d,$ad320000
+	dc.l	$00003ffd,$000088a8,$d1b1218e,$4d640000
+	dc.l	$00003ffd,$00009012,$ab3f23e4,$aee80000
+	dc.l	$00003ffd,$0000976c,$c3d411e7,$f1b90000
+	dc.l	$00003ffd,$00009eb6,$89493889,$a2270000
+	dc.l	$00003ffd,$0000a5ef,$72c34487,$361b0000
+	dc.l	$00003ffd,$0000ad17,$00baf07a,$72270000
+	dc.l	$00003ffd,$0000b42c,$bcfafd37,$efb70000
+	dc.l	$00003ffd,$0000bb30,$3a940ba8,$0f890000
+	dc.l	$00003ffd,$0000c221,$15c6fcae,$bbaf0000
+	dc.l	$00003ffd,$0000c8fe,$f3e68633,$12210000
+	dc.l	$00003ffd,$0000cfc9,$8330b400,$0c700000
+	dc.l	$00003ffd,$0000d680,$7aa1102c,$5bf90000
+	dc.l	$00003ffd,$0000dd23,$99bc3125,$2aa30000
+	dc.l	$00003ffd,$0000e3b2,$a8556b8f,$c5170000
+	dc.l	$00003ffd,$0000ea2d,$764f6431,$59890000
+	dc.l	$00003ffd,$0000f3bf,$5bf8bad1,$a21d0000
+	dc.l	$00003ffe,$0000801c,$e39e0d20,$5c9a0000
+	dc.l	$00003ffe,$00008630,$a2dada1e,$d0660000
+	dc.l	$00003ffe,$00008c1a,$d445f3e0,$9b8c0000
+	dc.l	$00003ffe,$000091db,$8f1664f3,$50e20000
+	dc.l	$00003ffe,$00009773,$1420365e,$538c0000
+	dc.l	$00003ffe,$00009ce1,$c8e6a0b8,$cdba0000
+	dc.l	$00003ffe,$0000a228,$32dbcada,$ae090000
+	dc.l	$00003ffe,$0000a746,$f2ddb760,$22940000
+	dc.l	$00003ffe,$0000ac3e,$c0fb997d,$d6a20000
+	dc.l	$00003ffe,$0000b110,$688aebdc,$6f6a0000
+	dc.l	$00003ffe,$0000b5bc,$c49059ec,$c4b00000
+	dc.l	$00003ffe,$0000ba44,$bc7dd470,$782f0000
+	dc.l	$00003ffe,$0000bea9,$4144fd04,$9aac0000
+	dc.l	$00003ffe,$0000c2eb,$4abb6616,$28b60000
+	dc.l	$00003ffe,$0000c70b,$d54ce602,$ee140000
+	dc.l	$00003ffe,$0000cd00,$0549adec,$71590000
+	dc.l	$00003ffe,$0000d484,$57d2d8ea,$4ea30000
+	dc.l	$00003ffe,$0000db94,$8da712de,$ce3b0000
+	dc.l	$00003ffe,$0000e238,$55f969e8,$096a0000
+	dc.l	$00003ffe,$0000e877,$1129c435,$32590000
+	dc.l	$00003ffe,$0000ee57,$c16e0d37,$9c0d0000
+	dc.l	$00003ffe,$0000f3e1,$0211a87c,$37790000
+	dc.l	$00003ffe,$0000f919,$039d758b,$8d410000
+	dc.l	$00003ffe,$0000fe05,$8b8f6493,$5fb30000
+	dc.l	$00003fff,$00008155,$fb497b68,$5d040000
+	dc.l	$00003fff,$00008388,$9e3549d1,$08e10000
+	dc.l	$00003fff,$0000859c,$fa76511d,$724b0000
+	dc.l	$00003fff,$00008795,$2ecfff81,$31e70000
+	dc.l	$00003fff,$00008973,$2fd19557,$641b0000
+	dc.l	$00003fff,$00008b38,$cad10193,$2a350000
+	dc.l	$00003fff,$00008ce7,$a8d8301e,$e6b50000
+	dc.l	$00003fff,$00008f46,$a39e2eae,$52810000
+	dc.l	$00003fff,$0000922d,$a7d79188,$84870000
+	dc.l	$00003fff,$000094d1,$9fcbdedf,$52410000
+	dc.l	$00003fff,$0000973a,$b94419d2,$a08b0000
+	dc.l	$00003fff,$0000996f,$f00e08e1,$0b960000
+	dc.l	$00003fff,$00009b77,$3f951232,$1da70000
+	dc.l	$00003fff,$00009d55,$cc320f93,$56240000
+	dc.l	$00003fff,$00009f10,$0575006c,$c5710000
+	dc.l	$00003fff,$0000a0a9,$c290d97c,$c06c0000
+	dc.l	$00003fff,$0000a226,$59ebebc0,$630a0000
+	dc.l	$00003fff,$0000a388,$b4aff6ef,$0ec90000
+	dc.l	$00003fff,$0000a4d3,$5f1061d2,$92c40000
+	dc.l	$00003fff,$0000a608,$95dcfbe3,$187e0000
+	dc.l	$00003fff,$0000a72a,$51dc7367,$beac0000
+	dc.l	$00003fff,$0000a83a,$51530956,$168f0000
+	dc.l	$00003fff,$0000a93a,$20077539,$546e0000
+	dc.l	$00003fff,$0000aa9e,$7245023b,$26050000
+	dc.l	$00003fff,$0000ac4c,$84ba6fe4,$d58f0000
+	dc.l	$00003fff,$0000adce,$4a4a606b,$97120000
+	dc.l	$00003fff,$0000af2a,$2dcd8d26,$3c9c0000
+	dc.l	$00003fff,$0000b065,$6f81f222,$65c70000
+	dc.l	$00003fff,$0000b184,$65150f71,$496a0000
+	dc.l	$00003fff,$0000b28a,$aa156f9a,$da350000
+	dc.l	$00003fff,$0000b37b,$44ff3766,$b8950000
+	dc.l	$00003fff,$0000b458,$c3dce963,$04330000
+	dc.l	$00003fff,$0000b525,$529d5622,$46bd0000
+	dc.l	$00003fff,$0000b5e2,$cca95f9d,$88cc0000
+	dc.l	$00003fff,$0000b692,$cada7aca,$1ada0000
+	dc.l	$00003fff,$0000b736,$aea7a692,$58380000
+	dc.l	$00003fff,$0000b7cf,$ab287e9f,$7b360000
+	dc.l	$00003fff,$0000b85e,$cc66cb21,$98350000
+	dc.l	$00003fff,$0000b8e4,$fd5a20a5,$93da0000
+	dc.l	$00003fff,$0000b99f,$41f64aff,$9bb50000
+	dc.l	$00003fff,$0000ba7f,$1e17842b,$be7b0000
+	dc.l	$00003fff,$0000bb47,$12857637,$e17d0000
+	dc.l	$00003fff,$0000bbfa,$be8a4788,$df6f0000
+	dc.l	$00003fff,$0000bc9d,$0fad2b68,$9d790000
+	dc.l	$00003fff,$0000bd30,$6a39471e,$cd860000
+	dc.l	$00003fff,$0000bdb6,$c731856a,$f18a0000
+	dc.l	$00003fff,$0000be31,$cac502e8,$0d700000
+	dc.l	$00003fff,$0000bea2,$d55ce331,$94e20000
+	dc.l	$00003fff,$0000bf0b,$10b7c031,$28f00000
+	dc.l	$00003fff,$0000bf6b,$7a18dacb,$778d0000
+	dc.l	$00003fff,$0000bfc4,$ea4663fa,$18f60000
+	dc.l	$00003fff,$0000c018,$1bde8b89,$a4540000
+	dc.l	$00003fff,$0000c065,$b066cfbf,$64390000
+	dc.l	$00003fff,$0000c0ae,$345f5634,$0ae60000
+	dc.l	$00003fff,$0000c0f2,$22919cb9,$e6a70000
+	dc.l	$0000f210,$48002210,$32280004,$f22e6800
+	dc.l	$ff840281,$7fffffff,$0c813ffb,$80006c04
+	dc.l	$600000d0,$0c814002,$ffff6f04,$6000014c
+	dc.l	$02aef800,$0000ff88,$00ae0400,$0000ff88
+	dc.l	$2d7c0000,$0000ff8c,$f2000080,$f22e48a3
+	dc.l	$ff84f22e,$4828ff84,$f23c44a2,$3f800000
+	dc.l	$f2000420,$2f022401,$02810000,$78000282
+	dc.l	$7fff0000,$04823ffb,$0000e282,$d282ee81
+	dc.l	$43faf780,$d3c12d59,$ff902d59,$ff942d59
+	dc.l	$ff98222e,$ff840281,$80000000,$83aeff90
+	dc.l	$241ff227,$e004f200,$0080f200,$04a3f23a
+	dc.l	$5500f6a0,$f2000522,$f2000523,$f20000a3
+	dc.l	$f23a5522,$f696f23a,$54a3f698,$f20008a3
+	dc.l	$f2000422,$f21fd020,$f2009000,$f22e4822
+	dc.l	$ff9060ff,$000029d2,$0c813fff,$80006e00
+	dc.l	$008a0c81,$3fd78000,$6d00006c,$f227e00c
+	dc.l	$f2000023,$f2000080,$f20004a3,$f23a5500
+	dc.l	$f65af23a,$5580f65c,$f2000523,$f20005a3
+	dc.l	$f23a5522,$f656f23a,$55a2f658,$f2000523
+	dc.l	$f2000ca3,$f23a5522,$f652f23a,$54a2f654
+	dc.l	$f2000123,$f22e4823,$ff84f200,$08a2f200
+	dc.l	$0423f21f,$d030f200,$9000f22e,$4822ff84
+	dc.l	$60ff0000,$2954f200,$9000123c,$0003f22e
+	dc.l	$4800ff84,$60ff0000,$29380c81,$40638000
+	dc.l	$6e00008e,$f227e00c,$f23c4480,$bf800000
+	dc.l	$f20000a0,$f2000400,$f2000023,$f22e6880
+	dc.l	$ff84f200,$0080f200,$04a3f23a,$5580f5ec
+	dc.l	$f23a5500,$f5eef200,$05a3f200,$0523f23a
+	dc.l	$55a2f5e8,$f23a5522,$f5eaf200,$0ca3f200
+	dc.l	$0123f23a,$54a2f5e4,$f22e4823,$ff84f200
+	dc.l	$08a2f200,$0423f22e,$4822ff84,$f21fd030
+	dc.l	$f2009000,$4a106a0c,$f23a4822,$f5d660ff
+	dc.l	$000028c6,$f23a4822,$f5ba60ff,$000028b2
+	dc.l	$4a106a16,$f23a4800,$f5baf200,$9000f23a
+	dc.l	$4822f5c0,$60ff0000,$28a0f23a,$4800f594
+	dc.l	$f2009000,$f23a4822,$f5ba60ff,$00002882
+	dc.l	$60ff0000,$28baf210,$48002210,$32280004
+	dc.l	$02817fff,$ffff0c81,$3fff8000,$6c4e0c81
+	dc.l	$3fd78000,$6d00007c,$f23c4480,$3f800000
+	dc.l	$f20000a8,$f227e004,$f23c4500,$3f800000
+	dc.l	$f2000122,$f20008a3,$f21fd020,$f2000484
+	dc.l	$f2000420,$f227e001,$41d761ff,$fffffd66
+	dc.l	$dffc0000,$000c60ff,$0000280e,$f2000018
+	dc.l	$f23c4438,$3f800000,$f2d20000,$265af23a
+	dc.l	$4800b8ae,$22100281,$80000000,$00813f80
+	dc.l	$00002f01,$f2009000,$f21f4423,$60ff0000
+	dc.l	$27d8f200,$9000123c,$0003f210,$480060ff
+	dc.l	$000027be,$60ff0000,$2806f210,$48002210
+	dc.l	$32280004,$02817fff,$ffff0c81,$3fff8000
+	dc.l	$6c44f23c,$44803f80,$0000f200,$00a2f200
+	dc.l	$001af23c,$44223f80,$0000f200,$0420f200
+	dc.l	$00042f00,$4280f227,$e00141d7,$61ffffff
+	dc.l	$fcc4dffc,$0000000c,$f21f9000,$f2000022
+	dc.l	$60ff0000,$276cf200,$0018f23c,$44383f80
+	dc.l	$0000f2d2,$000025b0,$4a106a18,$f23a4800
+	dc.l	$b7f0f200,$9000f23c,$44220080,$000060ff
+	dc.l	$0000273e,$60ff0000,$2988f200,$9000f23a
+	dc.l	$4800b7de,$60ff0000,$27283fdc,$000082e3
+	dc.l	$08654361,$c4c60000,$00003fa5,$55555555
+	dc.l	$4cc13fc5,$55555555,$4a543f81,$11111117
+	dc.l	$43853fa5,$55555555,$4f5a3fc5,$55555555
+	dc.l	$55550000,$00000000,$00003ec7,$1de3a577
+	dc.l	$46823efa,$01a019d7,$cb683f2a,$01a01a01
+	dc.l	$9df33f56,$c16c16c1,$70e23f81,$11111111
+	dc.l	$11113fa5,$55555555,$55553ffc,$0000aaaa
+	dc.l	$aaaaaaaa,$aaab0000,$000048b0,$00000000
+	dc.l	$00003730,$00000000,$00003fff,$00008000
+	dc.l	$00000000,$00000000,$00003fff,$00008164
+	dc.l	$d1f3bc03,$07749f84,$1a9b3fff,$000082cd
+	dc.l	$8698ac2b,$a1d89fc1,$d5b93fff,$0000843a
+	dc.l	$28c3acde,$4048a072,$83693fff,$000085aa
+	dc.l	$c367cc48,$7b141fc5,$c95c3fff,$0000871f
+	dc.l	$61969e8d,$10101ee8,$5c9f3fff,$00008898
+	dc.l	$0e8092da,$85289fa2,$07293fff,$00008a14
+	dc.l	$d575496e,$fd9ca07b,$f9af3fff,$00008b95
+	dc.l	$c1e3ea8b,$d6e8a002,$0dcf3fff,$00008d1a
+	dc.l	$df5b7e5b,$a9e4205a,$63da3fff,$00008ea4
+	dc.l	$398b45cd,$53c01eb7,$00513fff,$00009031
+	dc.l	$dc431466,$b1dc1f6e,$b0293fff,$000091c3
+	dc.l	$d373ab11,$c338a078,$14943fff,$0000935a
+	dc.l	$2b2f13e6,$e92c9eb3,$19b03fff,$000094f4
+	dc.l	$efa8fef7,$09602017,$457d3fff,$00009694
+	dc.l	$2d372018,$5a001f11,$d5373fff,$00009837
+	dc.l	$f0518db8,$a9709fb9,$52dd3fff,$000099e0
+	dc.l	$459320b7,$fa641fe4,$30873fff,$00009b8d
+	dc.l	$39b9d54e,$55381fa2,$a8183fff,$00009d3e
+	dc.l	$d9a72cff,$b7501fde,$494d3fff,$00009ef5
+	dc.l	$326091a1,$11ac2050,$48903fff,$0000a0b0
+	dc.l	$510fb971,$4fc4a073,$691c3fff,$0000a270
+	dc.l	$43030c49,$68181f9b,$7a053fff,$0000a435
+	dc.l	$15ae09e6,$80a0a079,$71263fff,$0000a5fe
+	dc.l	$d6a9b151,$38eca071,$a1403fff,$0000a7cd
+	dc.l	$93b4e965,$3568204f,$62da3fff,$0000a9a1
+	dc.l	$5ab4ea7c,$0ef81f28,$3c4a3fff,$0000ab7a
+	dc.l	$39b5a93e,$d3389f9a,$7fdc3fff,$0000ad58
+	dc.l	$3eea42a1,$4ac8a05b,$3fac3fff,$0000af3b
+	dc.l	$78ad690a,$43741fdf,$26103fff,$0000b123
+	dc.l	$f581d2ac,$25909f70,$5f903fff,$0000b311
+	dc.l	$c412a911,$2488201f,$678a3fff,$0000b504
+	dc.l	$f333f9de,$64841f32,$fb133fff,$0000b6fd
+	dc.l	$91e328d1,$77902003,$8b303fff,$0000b8fb
+	dc.l	$af4762fb,$9ee8200d,$c3cc3fff,$0000baff
+	dc.l	$5ab2133e,$45fc9f8b,$2ae63fff,$0000bd08
+	dc.l	$a39f580c,$36c0a02b,$bf703fff,$0000bf17
+	dc.l	$99b67a73,$1084a00b,$f5183fff,$0000c12c
+	dc.l	$4cca6670,$9458a041,$dd413fff,$0000c346
+	dc.l	$ccda2497,$64089fdf,$137b3fff,$0000c567
+	dc.l	$2a115506,$dadc201f,$15683fff,$0000c78d
+	dc.l	$74c8abb9,$b15c1fc1,$3a2e3fff,$0000c9b9
+	dc.l	$bd866e2f,$27a4a03f,$8f033fff,$0000cbec
+	dc.l	$14fef272,$7c5c1ff4,$907d3fff,$0000ce24
+	dc.l	$8c151f84,$80e49e6e,$53e43fff,$0000d063
+	dc.l	$33daef2b,$25941fd6,$d45c3fff,$0000d2a8
+	dc.l	$1d91f12a,$e45ca076,$edb93fff,$0000d4f3
+	dc.l	$5aabcfed,$fa209fa6,$de213fff,$0000d744
+	dc.l	$fccad69d,$6af41ee6,$9a2f3fff,$0000d99d
+	dc.l	$15c278af,$d7b4207f,$439f3fff,$0000dbfb
+	dc.l	$b797daf2,$3754201e,$c2073fff,$0000de60
+	dc.l	$f4825e0e,$91249e8b,$e1753fff,$0000e0cc
+	dc.l	$deec2a94,$e1102003,$2c4b3fff,$0000e33f
+	dc.l	$8972be8a,$5a502004,$dff53fff,$0000e5b9
+	dc.l	$06e77c83,$48a81e72,$f47a3fff,$0000e839
+	dc.l	$6a503c4b,$dc681f72,$2f223fff,$0000eac0
+	dc.l	$c6e7dd24,$3930a017,$e9453fff,$0000ed4f
+	dc.l	$301ed994,$2b841f40,$1a5b3fff,$0000efe4
+	dc.l	$b99bdcda,$f5cc9fb9,$a9e33fff,$0000f281
+	dc.l	$773c59ff,$b1382074,$4c053fff,$0000f525
+	dc.l	$7d152486,$cc2c1f77,$3a193fff,$0000f7d0
+	dc.l	$df730ad1,$3bb81ffe,$90d53fff,$0000fa83
+	dc.l	$b2db722a,$033ca041,$ed223fff,$0000fd3e
+	dc.l	$0c0cf486,$c1741f85,$3f3a2210,$02817fff
+	dc.l	$00000c81,$3fbe0000,$6c0660ff,$00000108
+	dc.l	$32280004,$0c81400c,$b1676d06,$60ff0000
+	dc.l	$010cf210,$4800f200,$0080f23c,$442342b8
+	dc.l	$aa3bf227,$e00c2d7c,$00000000,$ff58f201
+	dc.l	$600043fa,$fbb6f201,$40002d41,$ff540281
+	dc.l	$0000003f,$e989d3c1,$222eff54,$ec810641
+	dc.l	$3fff3d7a,$fb06ff54,$f2000100,$f23c4423
+	dc.l	$bc317218,$f23a4923,$faf2f200,$0422f200
+	dc.l	$0822f200,$0080f200,$04a3f23c,$45003ab6
+	dc.l	$0b70f200,$0523f200,$0580f23c,$45a33c08
+	dc.l	$8895f23a,$5522fad4,$f23a55a2,$fad6f200
+	dc.l	$05233d41,$ff842d7c,$80000000,$ff8842ae
+	dc.l	$ff8cf200,$05a3f23c,$45223f00,$0000f200
+	dc.l	$01a3f200,$0523f200,$0c22f219,$4880f200
+	dc.l	$0822f200,$0423f21f,$d030f211,$4422f200
+	dc.l	$0422222e,$ff584a81,$6706f22e,$4823ff90
+	dc.l	$f2009000,$123c0000,$f22e4823,$ff8460ff
+	dc.l	$0000216e,$f210d080,$f2009000,$f23c4422
+	dc.l	$3f800000,$60ff0000,$21680c81,$400cb27c
+	dc.l	$6e66f210,$4800f200,$0080f23c,$442342b8
+	dc.l	$aa3bf227,$e00c2d7c,$00000001,$ff58f201
+	dc.l	$600043fa,$faa6f201,$40002d41,$ff540281
+	dc.l	$0000003f,$e989d3c1,$222eff54,$ec812d41
+	dc.l	$ff54e281,$93aeff54,$06413fff,$3d41ff90
+	dc.l	$2d7c8000,$0000ff94,$42aeff98,$222eff54
+	dc.l	$06413fff,$6000fed2,$4a106bff,$00001fbc
+	dc.l	$60ff0000,$20ae2f10,$02978000,$00000097
+	dc.l	$00800000,$f23c4400,$3f800000,$f2009000
+	dc.l	$f21f4422,$60ff0000,$20c82210,$02817fff
+	dc.l	$00000c81,$3ffd0000,$6c0660ff,$0000015e
+	dc.l	$32280004,$0c814004,$c2156f06,$60ff0000
+	dc.l	$026cf210,$4800f200,$0080f23c,$442342b8
+	dc.l	$aa3bf227,$e00cf201,$600043fa,$f9eef201
+	dc.l	$40002d41,$ff540281,$0000003f,$e989d3c1
+	dc.l	$222eff54,$ec812d41,$ff54f200,$0100f23c
+	dc.l	$4423bc31,$7218f23a,$4923f930,$f2000422
+	dc.l	$f2000822,$06413fff,$f2000080,$f20004a3
+	dc.l	$f23c4500,$3950097b,$f2000523,$f2000580
+	dc.l	$f23c45a3,$3ab60b6a,$f23a5522,$f91ef23a
+	dc.l	$55a2f920,$3d41ff84,$2d7c8000,$0000ff88
+	dc.l	$42aeff8c,$f2000523,$222eff54,$4441f200
+	dc.l	$05a30641,$3ffff23a,$5522f900,$f23c45a2
+	dc.l	$3f000000,$f2000523,$00418000,$3d41ff90
+	dc.l	$2d7c8000,$0000ff94,$42aeff98,$f2000ca3
+	dc.l	$f2000123,$f2000422,$f2000822,$f21fd030
+	dc.l	$f2114823,$222eff54,$0c810000,$003f6f1a
+	dc.l	$f2294480,$000cf22e,$48a2ff90,$f2000422
+	dc.l	$f2114822,$60ff0000,$00340c81,$fffffffd
+	dc.l	$6c16f229,$4422000c,$f2114822,$f22e4822
+	dc.l	$ff9060ff,$00000016,$f2194880,$f2114422
+	dc.l	$f22e48a2,$ff90f200,$0422f200,$9000f22e
+	dc.l	$4823ff84,$60ff0000,$1f500c81,$3fbe0000
+	dc.l	$6c6c0c81,$00330000,$6d2c2d7c,$80010000
+	dc.l	$ff842d7c,$80000000,$ff8842ae,$ff8cf210
+	dc.l	$4800f200,$9000123c,$0002f22e,$4822ff84
+	dc.l	$60ff0000,$1f0cf210,$4800f23a,$5423f86c
+	dc.l	$2d7c8001,$0000ff84,$2d7c8000,$0000ff88
+	dc.l	$42aeff8c,$f22e4822,$ff84f200,$9000123c
+	dc.l	$0000f23a,$5423f84c,$60ff0000,$1ed4f210
+	dc.l	$4800f200,$0023f227,$e00cf23c,$44802f30
+	dc.l	$caa8f200,$00a3f23c,$4500310f,$8290f23c
+	dc.l	$44a232d7,$3220f200,$0123f200,$00a3f23c
+	dc.l	$45223493,$f281f23a,$54a2f7c0,$f2000123
+	dc.l	$f20000a3,$f23a5522,$f7baf23a,$54a2f7bc
+	dc.l	$f2000123,$f20000a3,$f23a5522,$f7b6f23a
+	dc.l	$54a2f7b8,$f2000123,$f20000a3,$f23a5522
+	dc.l	$f7b2f23a,$48a2f7b4,$f2000123,$f20000a3
+	dc.l	$f2000123,$f21048a3,$f23c4423,$3f000000
+	dc.l	$f20008a2,$f21fd030,$f2000422,$f2009000
+	dc.l	$f2104822,$60ff0000,$1e302210,$0c810000
+	dc.l	$00006e00,$fbacf23c,$4400bf80,$0000f200
+	dc.l	$9000f23c,$44220080,$000060ff,$00001e1a
+	dc.l	$60ff0000,$1e4a3028,$00000880,$000f0440
+	dc.l	$3ffff200,$50006d02,$4e751d7c,$0008ff64
+	dc.l	$4e7561ff,$00002342,$44400440,$3ffff200
+	dc.l	$50001d7c,$0008ff64,$4e753028,$00000040
+	dc.l	$7fff0880,$000e2d68,$0004ff88,$2d680008
+	dc.l	$ff8c3d40,$ff84f22e,$4800ff84,$6b024e75
+	dc.l	$1d7c0008,$ff644e75,$61ff0000,$22fc60ca
+	dc.l	$7ffb0000,$80000000,$00000000,$00000000
+	dc.l	$f2104800,$22103228,$00040281,$7fffffff
+	dc.l	$0c81400c,$b1676e42,$f2000018,$2f004280
+	dc.l	$f227e001,$41d761ff,$fffffad2,$dffc0000
+	dc.l	$000cf23c,$44233f00,$0000201f,$f23c4480
+	dc.l	$3e800000,$f20000a0,$f2009000,$123c0002
+	dc.l	$f2000422,$60ff0000,$1d280c81,$400cb2b3
+	dc.l	$6e3cf200,$0018f23a,$5428adb6,$f23a5428
+	dc.l	$adb82f00,$4280f227,$e00141d7,$61ffffff
+	dc.l	$fa7cdffc,$0000000c,$201ff200,$9000123c
+	dc.l	$0000f23a,$4823ff5a,$60ff0000,$1ce460ff
+	dc.l	$00001cb0,$f23c4400,$3f800000,$f2009000
+	dc.l	$f23c4422,$00800000,$60ff0000,$1cd4f210
+	dc.l	$48002210,$32280004,$22410281,$7fffffff
+	dc.l	$0c81400c,$b1676e62,$f2000018,$48e78040
+	dc.l	$f227e001,$41d74280,$61ffffff,$fbe0dffc
+	dc.l	$0000000c,$f23c9000,$00000000,$4cdf0201
+	dc.l	$f2000080,$f23c44a2,$3f800000,$f2276800
+	dc.l	$f2000420,$22090281,$80000000,$00813f00
+	dc.l	$0000f21f,$48222f01,$f2009000,$123c0000
+	dc.l	$f21f4423,$60ff0000,$1c480c81,$400cb2b3
+	dc.l	$6eff0000,$1bc2f200,$0018f23a,$5428acd2
+	dc.l	$2f3c0000,$00002f3c,$80000000,$22090281
+	dc.l	$80000000,$00817ffb,$00002f01,$f23a5428
+	dc.l	$acb82f00,$4280f227,$e00141d7,$61ffffff
+	dc.l	$f97cdffc,$0000000c,$201ff200,$9000123c
+	dc.l	$0000f21f,$482360ff,$00001be6,$60ff0000
+	dc.l	$1c2ef210,$4800f22e,$6800ff84,$22103228
+	dc.l	$00042d41,$ff840281,$7fffffff,$0c813fd7
+	dc.l	$80006d00,$00740c81,$3fffddce,$6e00006a
+	dc.l	$222eff84,$2d41ff5c,$02817fff,$00000681
+	dc.l	$00010000,$2d41ff84,$02ae8000,$0000ff5c
+	dc.l	$f22e4800,$ff842f00,$4280f227,$e00141d7
+	dc.l	$61ffffff,$fac8dffc,$0000000c,$201ff200
+	dc.l	$0080f23c,$44a24000,$0000222e,$ff5cf22e
+	dc.l	$6880ff84,$b3aeff84,$f2009000,$f22e4820
+	dc.l	$ff8460ff,$00001b52,$0c813fff,$80006d00
+	dc.l	$00880c81,$40048aa1,$6e000092,$222eff84
+	dc.l	$2d41ff5c,$02817fff,$00000681,$00010000
+	dc.l	$2d41ff84,$02ae8000,$0000ff5c,$222eff5c
+	dc.l	$f22e4800,$ff842f00,$4280f227,$e00141d7
+	dc.l	$61ffffff,$f878dffc,$0000000c,$201f222e
+	dc.l	$ff5cf23c,$44223f80,$00000a81,$c0000000
+	dc.l	$f2014480,$f20000a0,$222eff5c,$00813f80
+	dc.l	$0000f201,$4400f200,$9000123c,$0002f200
+	dc.l	$042260ff,$00001ac2,$f2009000,$123c0003
+	dc.l	$f22e4800,$ff8460ff,$00001aa6,$222eff84
+	dc.l	$02818000,$00000081,$3f800000,$f2014400
+	dc.l	$02818000,$00000a81,$80800000,$f2009000
+	dc.l	$f2014422,$60ff0000,$1a8060ff,$00001ac0
+	dc.l	$3ffe0000,$b17217f7,$d1cf79ac,$00000000
+	dc.l	$3f800000,$00000000,$7f800000,$bf800000
+	dc.l	$3fc2499a,$b5e4040b,$bfc555b5,$848cb7db
+	dc.l	$3fc99999,$987d8730,$bfcfffff,$ff6f7e97
+	dc.l	$3fd55555,$555555a4,$bfe00000,$00000008
+	dc.l	$3f175496,$add7dad6,$3f3c71c2,$fe80c7e0
+	dc.l	$3f624924,$928bccff,$3f899999,$999995ec
+	dc.l	$3fb55555,$55555555,$40000000,$00000000
+	dc.l	$3f990000,$80000000,$00000000,$00000000
+	dc.l	$3ffe0000,$fe03f80f,$e03f80fe,$00000000
+	dc.l	$3ff70000,$ff015358,$833c47e2,$00000000
+	dc.l	$3ffe0000,$fa232cf2,$52138ac0,$00000000
+	dc.l	$3ff90000,$bdc8d83e,$ad88d549,$00000000
+	dc.l	$3ffe0000,$f6603d98,$0f6603da,$00000000
+	dc.l	$3ffa0000,$9cf43dcf,$f5eafd48,$00000000
+	dc.l	$3ffe0000,$f2b9d648,$0f2b9d65,$00000000
+	dc.l	$3ffa0000,$da16eb88,$cb8df614,$00000000
+	dc.l	$3ffe0000,$ef2eb71f,$c4345238,$00000000
+	dc.l	$3ffb0000,$8b29b775,$1bd70743,$00000000
+	dc.l	$3ffe0000,$ebbdb2a5,$c1619c8c,$00000000
+	dc.l	$3ffb0000,$a8d839f8,$30c1fb49,$00000000
+	dc.l	$3ffe0000,$e865ac7b,$7603a197,$00000000
+	dc.l	$3ffb0000,$c61a2eb1,$8cd907ad,$00000000
+	dc.l	$3ffe0000,$e525982a,$f70c880e,$00000000
+	dc.l	$3ffb0000,$e2f2a47a,$de3a18af,$00000000
+	dc.l	$3ffe0000,$e1fc780e,$1fc780e2,$00000000
+	dc.l	$3ffb0000,$ff64898e,$df55d551,$00000000
+	dc.l	$3ffe0000,$dee95c4c,$a037ba57,$00000000
+	dc.l	$3ffc0000,$8db956a9,$7b3d0148,$00000000
+	dc.l	$3ffe0000,$dbeb61ee,$d19c5958,$00000000
+	dc.l	$3ffc0000,$9b8fe100,$f47ba1de,$00000000
+	dc.l	$3ffe0000,$d901b203,$6406c80e,$00000000
+	dc.l	$3ffc0000,$a9372f1d,$0da1bd17,$00000000
+	dc.l	$3ffe0000,$d62b80d6,$2b80d62c,$00000000
+	dc.l	$3ffc0000,$b6b07f38,$ce90e46b,$00000000
+	dc.l	$3ffe0000,$d3680d36,$80d3680d,$00000000
+	dc.l	$3ffc0000,$c3fd0329,$06488481,$00000000
+	dc.l	$3ffe0000,$d0b69fcb,$d2580d0b,$00000000
+	dc.l	$3ffc0000,$d11de0ff,$15ab18ca,$00000000
+	dc.l	$3ffe0000,$ce168a77,$25080ce1,$00000000
+	dc.l	$3ffc0000,$de1433a1,$6c66b150,$00000000
+	dc.l	$3ffe0000,$cb8727c0,$65c393e0,$00000000
+	dc.l	$3ffc0000,$eae10b5a,$7ddc8add,$00000000
+	dc.l	$3ffe0000,$c907da4e,$871146ad,$00000000
+	dc.l	$3ffc0000,$f7856e5e,$e2c9b291,$00000000
+	dc.l	$3ffe0000,$c6980c69,$80c6980c,$00000000
+	dc.l	$3ffd0000,$82012ca5,$a68206d7,$00000000
+	dc.l	$3ffe0000,$c4372f85,$5d824ca6,$00000000
+	dc.l	$3ffd0000,$882c5fcd,$7256a8c5,$00000000
+	dc.l	$3ffe0000,$c1e4bbd5,$95f6e947,$00000000
+	dc.l	$3ffd0000,$8e44c60b,$4ccfd7de,$00000000
+	dc.l	$3ffe0000,$bfa02fe8,$0bfa02ff,$00000000
+	dc.l	$3ffd0000,$944ad09e,$f4351af6,$00000000
+	dc.l	$3ffe0000,$bd691047,$07661aa3,$00000000
+	dc.l	$3ffd0000,$9a3eecd4,$c3eaa6b2,$00000000
+	dc.l	$3ffe0000,$bb3ee721,$a54d880c,$00000000
+	dc.l	$3ffd0000,$a0218434,$353f1de8,$00000000
+	dc.l	$3ffe0000,$b92143fa,$36f5e02e,$00000000
+	dc.l	$3ffd0000,$a5f2fcab,$bbc506da,$00000000
+	dc.l	$3ffe0000,$b70fbb5a,$19be3659,$00000000
+	dc.l	$3ffd0000,$abb3b8ba,$2ad362a5,$00000000
+	dc.l	$3ffe0000,$b509e68a,$9b94821f,$00000000
+	dc.l	$3ffd0000,$b1641795,$ce3ca97b,$00000000
+	dc.l	$3ffe0000,$b30f6352,$8917c80b,$00000000
+	dc.l	$3ffd0000,$b7047551,$5d0f1c61,$00000000
+	dc.l	$3ffe0000,$b11fd3b8,$0b11fd3c,$00000000
+	dc.l	$3ffd0000,$bc952afe,$ea3d13e1,$00000000
+	dc.l	$3ffe0000,$af3addc6,$80af3ade,$00000000
+	dc.l	$3ffd0000,$c2168ed0,$f458ba4a,$00000000
+	dc.l	$3ffe0000,$ad602b58,$0ad602b6,$00000000
+	dc.l	$3ffd0000,$c788f439,$b3163bf1,$00000000
+	dc.l	$3ffe0000,$ab8f69e2,$8359cd11,$00000000
+	dc.l	$3ffd0000,$ccecac08,$bf04565d,$00000000
+	dc.l	$3ffe0000,$a9c84a47,$a07f5638,$00000000
+	dc.l	$3ffd0000,$d2420487,$2dd85160,$00000000
+	dc.l	$3ffe0000,$a80a80a8,$0a80a80b,$00000000
+	dc.l	$3ffd0000,$d7894992,$3bc3588a,$00000000
+	dc.l	$3ffe0000,$a655c439,$2d7b73a8,$00000000
+	dc.l	$3ffd0000,$dcc2c4b4,$9887dacc,$00000000
+	dc.l	$3ffe0000,$a4a9cf1d,$96833751,$00000000
+	dc.l	$3ffd0000,$e1eebd3e,$6d6a6b9e,$00000000
+	dc.l	$3ffe0000,$a3065e3f,$ae7cd0e0,$00000000
+	dc.l	$3ffd0000,$e70d785c,$2f9f5bdc,$00000000
+	dc.l	$3ffe0000,$a16b312e,$a8fc377d,$00000000
+	dc.l	$3ffd0000,$ec1f392c,$5179f283,$00000000
+	dc.l	$3ffe0000,$9fd809fd,$809fd80a,$00000000
+	dc.l	$3ffd0000,$f12440d3,$e36130e6,$00000000
+	dc.l	$3ffe0000,$9e4cad23,$dd5f3a20,$00000000
+	dc.l	$3ffd0000,$f61cce92,$346600bb,$00000000
+	dc.l	$3ffe0000,$9cc8e160,$c3fb19b9,$00000000
+	dc.l	$3ffd0000,$fb091fd3,$8145630a,$00000000
+	dc.l	$3ffe0000,$9b4c6f9e,$f03a3caa,$00000000
+	dc.l	$3ffd0000,$ffe97042,$bfa4c2ad,$00000000
+	dc.l	$3ffe0000,$99d722da,$bde58f06,$00000000
+	dc.l	$3ffe0000,$825efced,$49369330,$00000000
+	dc.l	$3ffe0000,$9868c809,$868c8098,$00000000
+	dc.l	$3ffe0000,$84c37a7a,$b9a905c9,$00000000
+	dc.l	$3ffe0000,$97012e02,$5c04b809,$00000000
+	dc.l	$3ffe0000,$87224c2e,$8e645fb7,$00000000
+	dc.l	$3ffe0000,$95a02568,$095a0257,$00000000
+	dc.l	$3ffe0000,$897b8cac,$9f7de298,$00000000
+	dc.l	$3ffe0000,$94458094,$45809446,$00000000
+	dc.l	$3ffe0000,$8bcf55de,$c4cd05fe,$00000000
+	dc.l	$3ffe0000,$92f11384,$0497889c,$00000000
+	dc.l	$3ffe0000,$8e1dc0fb,$89e125e5,$00000000
+	dc.l	$3ffe0000,$91a2b3c4,$d5e6f809,$00000000
+	dc.l	$3ffe0000,$9066e68c,$955b6c9b,$00000000
+	dc.l	$3ffe0000,$905a3863,$3e06c43b,$00000000
+	dc.l	$3ffe0000,$92aade74,$c7be59e0,$00000000
+	dc.l	$3ffe0000,$8f1779d9,$fdc3a219,$00000000
+	dc.l	$3ffe0000,$94e9bff6,$15845643,$00000000
+	dc.l	$3ffe0000,$8dda5202,$37694809,$00000000
+	dc.l	$3ffe0000,$9723a1b7,$20134203,$00000000
+	dc.l	$3ffe0000,$8ca29c04,$6514e023,$00000000
+	dc.l	$3ffe0000,$995899c8,$90eb8990,$00000000
+	dc.l	$3ffe0000,$8b70344a,$139bc75a,$00000000
+	dc.l	$3ffe0000,$9b88bdaa,$3a3dae2f,$00000000
+	dc.l	$3ffe0000,$8a42f870,$5669db46,$00000000
+	dc.l	$3ffe0000,$9db4224f,$ffe1157c,$00000000
+	dc.l	$3ffe0000,$891ac73a,$e9819b50,$00000000
+	dc.l	$3ffe0000,$9fdadc26,$8b7a12da,$00000000
+	dc.l	$3ffe0000,$87f78087,$f78087f8,$00000000
+	dc.l	$3ffe0000,$a1fcff17,$ce733bd4,$00000000
+	dc.l	$3ffe0000,$86d90544,$7a34acc6,$00000000
+	dc.l	$3ffe0000,$a41a9e8f,$5446fb9f,$00000000
+	dc.l	$3ffe0000,$85bf3761,$2cee3c9b,$00000000
+	dc.l	$3ffe0000,$a633cd7e,$6771cd8b,$00000000
+	dc.l	$3ffe0000,$84a9f9c8,$084a9f9d,$00000000
+	dc.l	$3ffe0000,$a8489e60,$0b435a5e,$00000000
+	dc.l	$3ffe0000,$83993052,$3fbe3368,$00000000
+	dc.l	$3ffe0000,$aa59233c,$cca4bd49,$00000000
+	dc.l	$3ffe0000,$828cbfbe,$b9a020a3,$00000000
+	dc.l	$3ffe0000,$ac656dae,$6bcc4985,$00000000
+	dc.l	$3ffe0000,$81848da8,$faf0d277,$00000000
+	dc.l	$3ffe0000,$ae6d8ee3,$60bb2468,$00000000
+	dc.l	$3ffe0000,$80808080,$80808081,$00000000
+	dc.l	$3ffe0000,$b07197a2,$3c46c654,$00000000
+	dc.l	$f2104800,$2d7c0000,$0000ff54,$22103228
+	dc.l	$00042d50,$ff842d68,$0004ff88,$2d680008
+	dc.l	$ff8c0c81,$00000000,$6d000182,$0c813ffe
+	dc.l	$f07d6d0a,$0c813fff,$88416f00,$00e2e081
+	dc.l	$e0810481,$00003fff,$d2aeff54,$41faf7b2
+	dc.l	$f2014080,$2d7c3fff,$0000ff84,$2d6eff88
+	dc.l	$ff9402ae,$fe000000,$ff9400ae,$01000000
+	dc.l	$ff94222e,$ff940281,$7e000000,$e081e081
+	dc.l	$e881d1c1,$f22e4800,$ff842d7c,$3fff0000
+	dc.l	$ff9042ae,$ff98f22e,$4828ff90,$f227e00c
+	dc.l	$f2104823,$f23a48a3,$f6c8f200,$0100f200
+	dc.l	$0923f22e,$6880ff84,$f2000980,$f2000880
+	dc.l	$f23a54a3,$f6ccf23a,$5523f6ce,$f23a54a2
+	dc.l	$f6d0f23a,$5522f6d2,$f2000ca3,$f2000d23
+	dc.l	$f23a54a2,$f6ccf23a,$5522f6ce,$f2000ca3
+	dc.l	$d1fc0000,$0010f200,$0d23f200,$00a3f200
+	dc.l	$0822f210,$48a2f21f,$d030f200,$0422f200
+	dc.l	$9000f22e,$4822ff84,$60ff0000,$10ccf23c
+	dc.l	$58380001,$f2c10000,$1318f200,$0080f23a
+	dc.l	$44a8f64e,$f23a4422,$f648f200,$04a2f200
+	dc.l	$00a0f227,$e00cf200,$0400f200,$0023f22e
+	dc.l	$6880ff84,$f2000080,$f20004a3,$f23a5580
+	dc.l	$f660f23a,$5500f662,$f20005a3,$f2000523
+	dc.l	$f23a55a2,$f65cf23a,$5522f65e,$f2000ca3
+	dc.l	$f2000123,$f23a54a2,$f658f22e,$4823ff84
+	dc.l	$f20008a2,$f21fd030,$f2000423,$f2009000
+	dc.l	$f22e4822,$ff8460ff,$0000103e,$60ff0000
+	dc.l	$0e962d7c,$ffffff9c,$ff5448e7,$3f002610
+	dc.l	$28280004,$2a280008,$42824a84,$66342805
+	dc.l	$42857420,$4286edc4,$6000edac,$d4862d43
+	dc.l	$ff842d44,$ff882d45,$ff8c4482,$2d42ff54
+	dc.l	$f22e4800,$ff844cdf,$00fc41ee,$ff846000
+	dc.l	$fe0c4286,$edc46000,$2406edac,$2e05edad
+	dc.l	$44860686,$00000020,$ecaf8887,$2d43ff84
+	dc.l	$2d44ff88,$2d45ff8c,$44822d42,$ff54f22e
+	dc.l	$4800ff84,$4cdf00fc,$41eeff84,$6000fdce
+	dc.l	$f2104800,$f2000018,$f23a4838,$f5a4f292
+	dc.l	$0014f200,$9000123c,$0003f210,$480060ff
+	dc.l	$00000f7e,$f2104800,$2d7c0000,$0000ff54
+	dc.l	$f2000080,$f23a4422,$f508f22e,$6800ff84
+	dc.l	$3d6eff88,$ff86222e,$ff840c81,$00000000
+	dc.l	$6f0000da,$0c813ffe,$80006d00,$fda20c81
+	dc.l	$3fffc000,$6e00fd98,$0c813ffe,$f07d6d00
+	dc.l	$001a0c81,$3fff8841,$6e000010,$f20004a2
+	dc.l	$f23a4422,$f4bc6000,$fe762d6e,$ff88ff94
+	dc.l	$02aefe00,$0000ff94,$00ae0100,$0000ff94
+	dc.l	$0c813fff,$80006c44,$f23a4400,$f4fc2d7c
+	dc.l	$3fff0000,$ff9042ae,$ff98f22e,$4828ff90
+	dc.l	$222eff94,$02817e00,$0000e081,$e081e881
+	dc.l	$f20004a2,$f227e00c,$f2000422,$41faf4e2
+	dc.l	$d1c1f23a,$4480f466,$6000fd76,$f23a4400
+	dc.l	$f4502d7c,$3fff0000,$ff9042ae,$ff98f22e
+	dc.l	$4828ff90,$222eff94,$02817e00,$0000e081
+	dc.l	$e081e881,$f2000422,$f227e00c,$41faf4a2
+	dc.l	$d1c1f23a,$4480f41e,$6000fd36,$0c810000
+	dc.l	$00006d10,$f23a4400,$f414f200,$900060ff
+	dc.l	$00000c4e,$f23a4400,$f3fcf200,$900060ff
+	dc.l	$00000cb4,$60ff0000,$0e962210,$32280004
+	dc.l	$02817fff,$ffff0c81,$3fff8000,$6c56f210
+	dc.l	$4818f200,$0080f200,$049af200,$0022f23c
+	dc.l	$44a23f80,$0000f200,$04202210,$02818000
+	dc.l	$00000081,$3f000000,$2f012f00,$4280f227
+	dc.l	$e00141d7,$61ffffff,$fe5adffc,$0000000c
+	dc.l	$201ff200,$9000123c,$0000f21f,$442360ff
+	dc.l	$00000dde,$f2104818,$f23c4438,$3f800000
+	dc.l	$f2d20000,$0c3260ff,$00000bb6,$60ff0000
+	dc.l	$0e0e3ffd,$0000de5b,$d8a93728,$71950000
+	dc.l	$00003fff,$0000b8aa,$3b295c17,$f0bc0000
+	dc.l	$0000f23c,$58000001,$f2104838,$f2c10000
+	dc.l	$0ff02210,$6d000090,$2f004280,$61ffffff
+	dc.l	$fba2f21f,$9000f23a,$4823ffb8,$60ff0000
+	dc.l	$0d782210,$6d000070,$2f004280,$61ffffff
+	dc.l	$fd34f21f,$9000f23a,$4823ff98,$60ff0000
+	dc.l	$0d682210,$6d000050,$22280008,$662e2228
+	dc.l	$00040281,$7fffffff,$66223210,$02810000
+	dc.l	$7fff0481,$00003fff,$67ff0000,$0f84f200
+	dc.l	$9000f201,$400060ff,$00000d1e,$2f004280
+	dc.l	$61ffffff,$fb2ef21f,$9000f23a,$4823ff54
+	dc.l	$60ff0000,$0d0460ff,$00000b5c,$22106d00
+	dc.l	$fff62f00,$428061ff,$fffffcba,$f21f9000
+	dc.l	$f23a4823,$ff2e60ff,$00000cee,$406a934f
+	dc.l	$0979a371,$3f734413,$509f8000,$bfcd0000
+	dc.l	$c0219dc1,$da994fd2,$00000000,$40000000
+	dc.l	$935d8ddd,$aaa8ac17,$00000000,$3ffe0000
+	dc.l	$b17217f7,$d1cf79ac,$00000000,$3f56c16d
+	dc.l	$6f7bd0b2,$3f811112,$302c712c,$3fa55555
+	dc.l	$55554cc1,$3fc55555,$55554a54,$3fe00000
+	dc.l	$00000000,$00000000,$00000000,$3fff0000
+	dc.l	$80000000,$00000000,$3f738000,$3fff0000
+	dc.l	$8164d1f3,$bc030773,$3fbef7ca,$3fff0000
+	dc.l	$82cd8698,$ac2ba1d7,$3fbdf8a9,$3fff0000
+	dc.l	$843a28c3,$acde4046,$3fbcd7c9,$3fff0000
+	dc.l	$85aac367,$cc487b15,$bfbde8da,$3fff0000
+	dc.l	$871f6196,$9e8d1010,$3fbde85c,$3fff0000
+	dc.l	$88980e80,$92da8527,$3fbebbf1,$3fff0000
+	dc.l	$8a14d575,$496efd9a,$3fbb80ca,$3fff0000
+	dc.l	$8b95c1e3,$ea8bd6e7,$bfba8373,$3fff0000
+	dc.l	$8d1adf5b,$7e5ba9e6,$bfbe9670,$3fff0000
+	dc.l	$8ea4398b,$45cd53c0,$3fbdb700,$3fff0000
+	dc.l	$9031dc43,$1466b1dc,$3fbeeeb0,$3fff0000
+	dc.l	$91c3d373,$ab11c336,$3fbbfd6d,$3fff0000
+	dc.l	$935a2b2f,$13e6e92c,$bfbdb319,$3fff0000
+	dc.l	$94f4efa8,$fef70961,$3fbdba2b,$3fff0000
+	dc.l	$96942d37,$20185a00,$3fbe91d5,$3fff0000
+	dc.l	$9837f051,$8db8a96f,$3fbe8d5a,$3fff0000
+	dc.l	$99e04593,$20b7fa65,$bfbcde7b,$3fff0000
+	dc.l	$9b8d39b9,$d54e5539,$bfbebaaf,$3fff0000
+	dc.l	$9d3ed9a7,$2cffb751,$bfbd86da,$3fff0000
+	dc.l	$9ef53260,$91a111ae,$bfbebedd,$3fff0000
+	dc.l	$a0b0510f,$b9714fc2,$3fbcc96e,$3fff0000
+	dc.l	$a2704303,$0c496819,$bfbec90b,$3fff0000
+	dc.l	$a43515ae,$09e6809e,$3fbbd1db,$3fff0000
+	dc.l	$a5fed6a9,$b15138ea,$3fbce5eb,$3fff0000
+	dc.l	$a7cd93b4,$e965356a,$bfbec274,$3fff0000
+	dc.l	$a9a15ab4,$ea7c0ef8,$3fbea83c,$3fff0000
+	dc.l	$ab7a39b5,$a93ed337,$3fbecb00,$3fff0000
+	dc.l	$ad583eea,$42a14ac6,$3fbe9301,$3fff0000
+	dc.l	$af3b78ad,$690a4375,$bfbd8367,$3fff0000
+	dc.l	$b123f581,$d2ac2590,$bfbef05f,$3fff0000
+	dc.l	$b311c412,$a9112489,$3fbdfb3c,$3fff0000
+	dc.l	$b504f333,$f9de6484,$3fbeb2fb,$3fff0000
+	dc.l	$b6fd91e3,$28d17791,$3fbae2cb,$3fff0000
+	dc.l	$b8fbaf47,$62fb9ee9,$3fbcdc3c,$3fff0000
+	dc.l	$baff5ab2,$133e45fb,$3fbee9aa,$3fff0000
+	dc.l	$bd08a39f,$580c36bf,$bfbeaefd,$3fff0000
+	dc.l	$bf1799b6,$7a731083,$bfbcbf51,$3fff0000
+	dc.l	$c12c4cca,$66709456,$3fbef88a,$3fff0000
+	dc.l	$c346ccda,$24976407,$3fbd83b2,$3fff0000
+	dc.l	$c5672a11,$5506dadd,$3fbdf8ab,$3fff0000
+	dc.l	$c78d74c8,$abb9b15d,$bfbdfb17,$3fff0000
+	dc.l	$c9b9bd86,$6e2f27a3,$bfbefe3c,$3fff0000
+	dc.l	$cbec14fe,$f2727c5d,$bfbbb6f8,$3fff0000
+	dc.l	$ce248c15,$1f8480e4,$bfbcee53,$3fff0000
+	dc.l	$d06333da,$ef2b2595,$bfbda4ae,$3fff0000
+	dc.l	$d2a81d91,$f12ae45a,$3fbc9124,$3fff0000
+	dc.l	$d4f35aab,$cfedfa1f,$3fbeb243,$3fff0000
+	dc.l	$d744fcca,$d69d6af4,$3fbde69a,$3fff0000
+	dc.l	$d99d15c2,$78afd7b6,$bfb8bc61,$3fff0000
+	dc.l	$dbfbb797,$daf23755,$3fbdf610,$3fff0000
+	dc.l	$de60f482,$5e0e9124,$bfbd8be1,$3fff0000
+	dc.l	$e0ccdeec,$2a94e111,$3fbacb12,$3fff0000
+	dc.l	$e33f8972,$be8a5a51,$3fbb9bfe,$3fff0000
+	dc.l	$e5b906e7,$7c8348a8,$3fbcf2f4,$3fff0000
+	dc.l	$e8396a50,$3c4bdc68,$3fbef22f,$3fff0000
+	dc.l	$eac0c6e7,$dd24392f,$bfbdbf4a,$3fff0000
+	dc.l	$ed4f301e,$d9942b84,$3fbec01a,$3fff0000
+	dc.l	$efe4b99b,$dcdaf5cb,$3fbe8cac,$3fff0000
+	dc.l	$f281773c,$59ffb13a,$bfbcbb3f,$3fff0000
+	dc.l	$f5257d15,$2486cc2c,$3fbef73a,$3fff0000
+	dc.l	$f7d0df73,$0ad13bb9,$bfb8b795,$3fff0000
+	dc.l	$fa83b2db,$722a033a,$3fbef84b,$3fff0000
+	dc.l	$fd3e0c0c,$f486c175,$bfbef581,$f210d080
+	dc.l	$22103228,$0004f22e,$6800ff84,$02817fff
+	dc.l	$ffff0c81,$3fb98000,$6c046000,$00880c81
+	dc.l	$400d80c0,$6f046000,$007cf200,$0080f23c
+	dc.l	$44a34280,$0000f22e,$6080ff54,$2f0243fa
+	dc.l	$fbbcf22e,$4080ff54,$222eff54,$24010281
+	dc.l	$0000003f,$e981d3c1,$ec822202,$e2819481
+	dc.l	$06820000,$3ffff227,$e00cf23c,$44a33c80
+	dc.l	$00002d59,$ff842d59,$ff882d59,$ff8c3d59
+	dc.l	$ff90f200,$04283d59,$ff94426e,$ff9642ae
+	dc.l	$ff98d36e,$ff84f23a,$4823fb22,$d36eff90
+	dc.l	$60000100,$0c813fff,$80006e12,$f2009000
+	dc.l	$f23c4422,$3f800000,$60ff0000,$07b4222e
+	dc.l	$ff840c81,$00000000,$6d0660ff,$00000764
+	dc.l	$60ff0000,$0666f200,$9000f23c,$44003f80
+	dc.l	$00002210,$00810080,$0001f201,$442260ff
+	dc.l	$0000077e,$f210d080,$22103228,$0004f22e
+	dc.l	$6800ff84,$02817fff,$ffff0c81,$3fb98000
+	dc.l	$6c046000,$ff900c81,$400b9b07,$6f046000
+	dc.l	$ff84f200,$0080f23a,$54a3fa62,$f22e6080
+	dc.l	$ff542f02,$43fafac6,$f22e4080,$ff54222e
+	dc.l	$ff542401,$02810000,$003fe981,$d3c1ec82
+	dc.l	$2202e281,$94810682,$00003fff,$f227e00c
+	dc.l	$f2000500,$f23a54a3,$fa2c2d59,$ff84f23a
+	dc.l	$4923fa2a,$2d59ff88,$2d59ff8c,$f2000428
+	dc.l	$3d59ff90,$f2000828,$3d59ff94,$426eff96
+	dc.l	$42aeff98,$f23a4823,$fa14d36e,$ff84d36e
+	dc.l	$ff90f200,$0080f200,$04a3f23a,$5500fa1e
+	dc.l	$f23a5580,$fa20f200,$0523f200,$05a3f23a
+	dc.l	$5522fa1a,$f23a55a2,$fa1cf200,$0523f200
+	dc.l	$05a3f23a,$5522fa16,$f20001a3,$f2000523
+	dc.l	$f2000c22,$f2000822,$f21fd030,$f22e4823
+	dc.l	$ff84f22e,$4822ff90,$f22e4822,$ff84f200
+	dc.l	$90003d42,$ff84241f,$2d7c8000,$0000ff88
+	dc.l	$42aeff8c,$123c0000,$f22e4823,$ff8460ff
+	dc.l	$0000063e,$f2009000,$f23c4400,$3f800000
+	dc.l	$22100081,$00800001,$f2014422,$60ff0000
+	dc.l	$06302f00,$32290000,$5beeff54,$02810000
+	dc.l	$7fff3028,$00000240,$7fff0c40,$3fff6d00
+	dc.l	$00c00c40,$400c6e00,$00a4f228,$48030000
+	dc.l	$f2006000,$f23c8800,$00000000,$4a290004
+	dc.l	$6b5e2f00,$3d690000,$ff842d69,$0004ff88
+	dc.l	$2d690008,$ff8c41ee,$ff8461ff,$00000b2a
+	dc.l	$4480d09f,$f22ed080,$ff840c40,$c0016c36
+	dc.l	$f21f9000,$223c8000,$00000480,$ffffc001
+	dc.l	$44800c00,$00206c0a,$e0a942a7,$2f0142a7
+	dc.l	$60280400,$0020e0a9,$2f0142a7,$42a7601a
+	dc.l	$f229d080,$0000f21f,$90000640,$3fff4840
+	dc.l	$42a72f3c,$80000000,$2f00f200,$b000123c
+	dc.l	$0000f21f,$482360ff,$0000054c,$201fc149
+	dc.l	$4a290000,$6bff0000,$041c60ff,$00000464
+	dc.l	$4a290004,$6a16201f,$f2009000,$123c0003
+	dc.l	$f2294800,$000060ff,$0000051c,$201f2049
+	dc.l	$60ff0000,$05860001,$00008000,$00000000
+	dc.l	$00000000,$0000422e,$ff652f00,$422eff5c
+	dc.l	$600c422e,$ff652f00,$1d7c0001,$ff5c48e7
+	dc.l	$3f003628,$00003d43,$ff580283,$00007fff
+	dc.l	$28280004,$2a280008,$4a83663c,$263c0000
+	dc.l	$3ffe4a84,$66162805,$42850483,$00000020
+	dc.l	$4286edc4,$6000edac,$96866022,$4286edc4
+	dc.l	$60009686,$edac2e05,$edad4486,$06860000
+	dc.l	$0020ecaf,$88876006,$06830000,$3ffe3029
+	dc.l	$00003d40,$ff5a322e,$ff58b181,$02810000
+	dc.l	$80003d41,$ff5e0280,$00007fff,$22290004
+	dc.l	$24290008,$4a80663c,$203c0000,$3ffe4a81
+	dc.l	$66162202,$42820480,$00000020,$4286edc1
+	dc.l	$6000eda9,$90866022,$4286edc1,$60009086
+	dc.l	$eda92e02,$edaa4486,$06860000,$0020ecaf
+	dc.l	$82876006,$06800000,$3ffe2d43,$ff542f00
+	dc.l	$90834286,$4283227c,$00000000,$4a806c06
+	dc.l	$201f6000,$006a588f,$4a866e0e,$b2846608
+	dc.l	$b4856604,$60000136,$65089485,$93844286
+	dc.l	$52834a80,$670ed683,$d482e391,$55c65289
+	dc.l	$538060d4,$202eff54,$4a816616,$22024282
+	dc.l	$04800000,$00204286,$edc16000,$eda99086
+	dc.l	$601c4286,$edc16000,$6b149086,$eda92e02
+	dc.l	$edaa4486,$06860000,$0020ecaf,$82870c80
+	dc.l	$000041fe,$6c2a3d40,$ff902d41,$ff942d42
+	dc.l	$ff982c2e,$ff543d46,$ff842d44,$ff882d45
+	dc.l	$ff8cf22e,$4800ff90,$1d7c0001,$ff5d6036
+	dc.l	$2d41ff94,$2d42ff98,$04800000,$3ffe3d40
+	dc.l	$ff902c2e,$ff540486,$00003ffe,$2d46ff54
+	dc.l	$f22e4800,$ff903d46,$ff842d44,$ff882d45
+	dc.l	$ff8c422e,$ff5d4a2e,$ff5c6722,$2c2eff54
+	dc.l	$5386b086,$6d186e0e,$b2846608,$b4856604
+	dc.l	$6000007a,$6508f22e,$4828ff84,$52833c2e
+	dc.l	$ff5a6c04,$f200001a,$42863c2e,$ff5e7e08
+	dc.l	$eeae0283,$0000007f,$86861d43,$ff654cdf
+	dc.l	$00fc201f,$f2009000,$4a2eff5d,$6710123c
+	dc.l	$0000f23a,$4823fdc0,$60ff0000,$02ca123c
+	dc.l	$0003f200,$000060ff,$000002bc,$52830c80
+	dc.l	$00000008,$6c04e1ab,$60024283,$f23c4400
+	dc.l	$00000000,$422eff5d,$6000ff94,$2c030286
+	dc.l	$00000001,$4a866700,$ff865283,$3c2eff5a
+	dc.l	$0a860000,$80003d46,$ff5a6000,$ff723028
+	dc.l	$00000240,$7fff0c40,$7fff6738,$08280007
+	dc.l	$00046706,$103c0000,$4e754a40,$66184aa8
+	dc.l	$0004660c,$4aa80008,$6606103c,$00014e75
+	dc.l	$103c0004,$4e7561ff,$000007f6,$4e75103c
+	dc.l	$00064e75,$4aa80008,$66122028,$00040280
+	dc.l	$7fffffff,$6606103c,$00024e75,$103c0003
+	dc.l	$4e757fff,$0000ffff,$ffffffff,$ffff4a28
+	dc.l	$00006a38,$00ae0a00,$0410ff64,$082e0002
+	dc.l	$ff62660a,$f23c4400,$ff800000,$4e75f22e
+	dc.l	$d080ffdc,$f22e9000,$ff60f23c,$4480bf80
+	dc.l	$0000f23c,$44a00000,$00004e75,$00ae0200
+	dc.l	$0410ff64,$082e0002,$ff62660a,$f23c4400
+	dc.l	$7f800000,$4e75f22e,$d080ffdc,$f22e9000
+	dc.l	$ff60f23c,$44803f80,$0000f23c,$44a00000
+	dc.l	$00004e75,$00ae0100,$2080ff64,$082e0005
+	dc.l	$ff626608,$f23ad080,$ff6a4e75,$f22ed080
+	dc.l	$ffdcf22e,$9000ff60,$f227e004,$f23c4500
+	dc.l	$7f800000,$f23c4523,$00000000,$f21fd020
+	dc.l	$4e757ffe,$0000ffff,$ffffffff,$fffffffe
+	dc.l	$0000ffff,$ffffffff,$ffff0000,$00008000
+	dc.l	$00000000,$00008000,$00008000,$00000000
+	dc.l	$00004a28,$00006a26,$00ae0800,$0a28ff64
+	dc.l	$f22e9000,$ff60f23a,$d080ffdc,$f23a4823
+	dc.l	$ffcaf200,$a800e198,$1d40ff64,$4e75006e
+	dc.l	$0a28ff66,$f22e9000,$ff60f23a,$d080ffac
+	dc.l	$f2000023,$f200a800,$e1981d40,$ff644e75
+	dc.l	$00ae0000,$1048ff64,$12000201,$00c06700
+	dc.l	$005a3d68,$0000ff84,$2d680004,$ff882d68
+	dc.l	$0008ff8c,$41eeff84,$48e7c080,$61ff0000
+	dc.l	$06184cdf,$01030c01,$00406610,$4aa80008
+	dc.l	$66184a28,$00076612,$60000020,$22280008
+	dc.l	$02810000,$07ff6700,$001200ae,$00000200
+	dc.l	$ff646006,$006e1248,$ff664a28,$00006a22
+	dc.l	$f22e9000,$ff60f23a,$d080ff14,$f23a4823
+	dc.l	$ff02f200,$a800e198,$00000000,$1d40ff64
+	dc.l	$4e75f22e,$9000ff60,$f23ad080,$fee6f23a
+	dc.l	$4823fee0,$f200a800,$e1981d40,$ff644e75
+	dc.l	$006e1248,$ff66f22e,$9000ff60,$f23ad080
+	dc.l	$fec2f23a,$4823febc,$f200a800,$e1981d40
+	dc.l	$ff644e75,$f200a800,$81aeff64,$6020f200
+	dc.l	$a80081ae,$ff64f294,$000ef281,$0032006e
+	dc.l	$0208ff66,$600800ae,$08000208,$ff64082e
+	dc.l	$0001ff62,$66024e75,$f22e9000,$ff60f23c
+	dc.l	$44803f80,$0000f23a,$48a2fe80,$4e751d7c
+	dc.l	$0004ff64,$006e0208,$ff664e75,$f22e9000
+	dc.l	$ff60f228,$48000000,$f200a800,$00800000
+	dc.l	$0a2881ae,$ff644e75,$f22e9000,$ff60f228
+	dc.l	$48000000,$f200a800,$81aeff64,$4e754e75
+	dc.l	$f2294800,$00004a29,$00006b08,$1d7c0001
+	dc.l	$ff644e75,$1d7c0009,$ff644e75,$f2284800
+	dc.l	$00004a28,$00006b08,$1d7c0001,$ff644e75
+	dc.l	$1d7c0009,$ff644e75,$f227b000,$f23c9000
+	dc.l	$00000000,$f22f4400,$0008f21f,$9000f22f
+	dc.l	$44220008,$4e75f227,$b000f23c,$90000000
+	dc.l	$0000f22f,$54000008,$f21f9000,$f22f5422
+	dc.l	$000c4e75,$f22fd080,$0004f22f,$48220010
+	dc.l	$4e75f227,$b000f23c,$90000000,$0000f22f
+	dc.l	$44000008,$f21f9000,$f22f4428,$00084e75
+	dc.l	$f227b000,$f23c9000,$00000000,$f22f5400
+	dc.l	$0008f21f,$9000f22f,$5428000c,$4e75f22f
+	dc.l	$d0800004,$f22f4828,$00104e75,$f227b000
+	dc.l	$f23c9000,$00000000,$f22f4400,$0008f21f
+	dc.l	$9000f22f,$44230008,$4e75f227,$b000f23c
+	dc.l	$90000000,$0000f22f,$54000008,$f21f9000
+	dc.l	$f22f5423,$000c4e75,$f22fd080,$0004f22f
+	dc.l	$48230010,$4e75f227,$b000f23c,$90000000
+	dc.l	$0000f22f,$44000008,$f21f9000,$f22f4420
+	dc.l	$00084e75,$f227b000,$f23c9000,$00000000
+	dc.l	$f22f5400,$0008f21f,$9000f22f,$5420000c
+	dc.l	$4e75f22f,$d0800004,$f22f4820,$00104e75
+	dc.l	$f22f4418,$00044e75,$f22f5418,$00044e75
+	dc.l	$f22f4818,$00044e75,$f22f441a,$00044e75
+	dc.l	$f22f541a,$00044e75,$f22f481a,$00044e75
+	dc.l	$f22f4404,$00044e75,$f22f5404,$00044e75
+	dc.l	$f22f4804,$00044e75,$f22f4401,$00044e75
+	dc.l	$f22f5401,$00044e75,$f22f4801,$00044e75
+	dc.l	$f22f4403,$00044e75,$f22f5403,$00044e75
+	dc.l	$f22f4803,$00044e75,$4a280000,$6b10f23c
+	dc.l	$44000000,$00001d7c,$0004ff64,$4e75f23c
+	dc.l	$44008000,$00001d7c,$000cff64,$4e754a29
+	dc.l	$00006bea,$60d84a28,$00006b10,$f23c4400
+	dc.l	$7f800000,$1d7c0002,$ff644e75,$f23c4400
+	dc.l	$ff800000,$1d7c000a,$ff644e75,$4a290000
+	dc.l	$6bea60d8,$4a280000,$6ba460d0,$4a280000
+	dc.l	$6b00fba2,$60c64a28,$00006b16,$60be4a28
+	dc.l	$00006b0e,$f23c4400,$3f800000,$422eff64
+	dc.l	$4e75f23c,$4400bf80,$00001d7c,$0008ff64
+	dc.l	$4e753fff,$0000c90f,$daa22168,$c235bfff
+	dc.l	$0000c90f,$daa22168,$c2354a28,$00006b0e
+	dc.l	$f2009000,$f23a4800,$ffda6000,$fcf2f200
+	dc.l	$9000f23a,$4800ffd8,$6000fcec,$f23c4480
+	dc.l	$3f800000,$4a280000,$6a10f23c,$44008000
+	dc.l	$00001d7c,$000cff64,$4e75f23c,$44000000
+	dc.l	$00001d7c,$0004ff64,$4e75f23a,$4880fa84
+	dc.l	$6000fb02,$f2284880,$00006000,$fd30122e
+	dc.l	$ff4f67ff,$fffff782,$0c010001,$67000078
+	dc.l	$0c010002,$67ffffff,$fade0c01,$000467ff
+	dc.l	$fffff766,$60ffffff,$fcea122e,$ff4f67ff
+	dc.l	$fffffac4,$0c010001,$67ffffff,$faba0c01
+	dc.l	$000267ff,$fffffab0,$0c010004,$67ffffff
+	dc.l	$faa660ff,$fffffcbc,$122eff4f,$67ff0000
+	dc.l	$00440c01,$000167ff,$0000001e,$0c010002
+	dc.l	$67ffffff,$fa820c01,$000467ff,$00000026
+	dc.l	$60ffffff,$fc8e1228,$00001029,$0000b101
+	dc.l	$02010080,$1d41ff65,$4a006a00,$fe526000
+	dc.l	$fe5e422e,$ff652f00,$12280000,$10290000
+	dc.l	$b1010201,$00801d41,$ff650c2e,$0004ff4f
+	dc.l	$660c41e9,$0000201f,$60ffffff,$fc2ef21f
+	dc.l	$9000f229,$48000000,$4a290000,$6b024e75
+	dc.l	$1d7c0008,$ff644e75,$122eff4f,$67ffffff
+	dc.l	$f6a40c01,$00016700,$ff8e0c01,$000267ff
+	dc.l	$fffff9f4,$0c010004,$67ffffff,$f68860ff
+	dc.l	$fffffc00,$122eff4f,$67ffffff,$f9da0c01
+	dc.l	$000167ff,$fffff9d0,$0c010002,$67ffffff
+	dc.l	$f9c60c01,$000467ff,$fffff9bc,$60ffffff
+	dc.l	$fbd2122e,$ff4f6700,$ff5a0c01,$00016700
+	dc.l	$ff360c01,$000267ff,$fffff99c,$0c010004
+	dc.l	$67ffffff,$ff4060ff,$fffffba8,$122eff4f
+	dc.l	$67ffffff,$f5000c01,$000167ff,$fffffd92
+	dc.l	$0c010002,$67ffffff,$fdb60c01,$000467ff
+	dc.l	$fffff4e2,$60ffffff,$fb7a122e,$ff4f67ff
+	dc.l	$fffff4d2,$0c010001,$67ffffff,$fd640c01
+	dc.l	$000267ff,$fffffd88,$0c010004,$67ffffff
+	dc.l	$f4b460ff,$fffffb4c,$122eff4f,$67ffffff
+	dc.l	$f9260c01,$000367ff,$fffffb38,$60ffffff
+	dc.l	$f916122e,$ff4f0c01,$000367ff,$fffffb24
+	dc.l	$60ffffff,$fb3a2f02,$2f032028,$00042228
+	dc.l	$0008edc0,$2000671a,$e5a8e9c1,$30228083
+	dc.l	$e5a92140,$00042141,$00082002,$261f241f
+	dc.l	$4e75edc1,$2000e5a9,$06820000,$00202141
+	dc.l	$000442a8,$00082002,$261f241f,$4e75ede8
+	dc.l	$00000004,$660eede8,$00000008,$67000074
+	dc.l	$06400020,$42813228,$00000241,$7fffb041
+	dc.l	$6e1c9240,$30280000,$02408000,$82403141
+	dc.l	$000061ff,$ffffff82,$103c0000,$4e750c01
+	dc.l	$00206e20,$e9e80840,$00042140,$00042028
+	dc.l	$0008e3a8,$21400008,$02688000,$0000103c
+	dc.l	$00044e75,$04410020,$20280008,$e3a82140
+	dc.l	$000442a8,$00080268,$80000000,$103c0004
+	dc.l	$4e750268,$80000000,$103c0001,$4e7551fc
diff --git a/arch/m68k/ifpsp060/fpsp.doc b/arch/m68k/ifpsp060/fpsp.doc
new file mode 100644
index 0000000..4083152
--- /dev/null
+++ b/arch/m68k/ifpsp060/fpsp.doc
@@ -0,0 +1,295 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OUT OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+68060 FLOATING-POINT SOFTWARE PACKAGE (Kernel version)
+-------------------------------------------------------
+
+The file fpsp.sa contains the 68060 Floating-Point Software
+Package. This package is essentially a set of exception handlers
+that can be integrated into an operating system.
+These exception handlers emulate Unimplemented FP instructions,
+instructions using unimplemented data types, and instructions
+using unimplemented addressing modes. In addition, this package
+includes exception handlers to provide full IEEE-754 compliant
+exception handling.
+
+Release file format:
+--------------------
+The file fpsp.sa is essentially a hexadecimal image of the
+release package. This is the ONLY format which will be supported.
+The hex image was created by assembling the source code and
+then converting the resulting binary output image into an
+ASCII text file. The hexadecimal numbers are listed
+using the Motorola Assembly Syntax assembler directive "dc.l"
+(define constant longword). The file can be converted to other
+assembly syntaxes by using any word processor with a global
+search and replace function.
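+
+As an illustration (the data values below are simply the first longwords
+of the image), a line written in Motorola syntax as
+
+	dc.l	$60ff0000,$17400000,$60ff0000,$15f40000
+
+becomes, after replacing "dc.l" with ".long" and "$" with "0x",
+
+	.long	0x60ff0000,0x17400000,0x60ff0000,0x15f40000
+
+which is the form accepted by the GNU assembler.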
+
+To assist in assembling and linking this module with other modules,
+the installer should add a symbolic label to the top of the file.
+This will allow calling routines to access the entry points
+of this package.
+
+The source code fpsp.s has also been included but only for
+documentation purposes.
+
+Release file structure:
+-----------------------
+
+(top of module)
+	-----------------
+	|		| - 128 byte-sized section
+   (1)  |   Call-Out	| - 4 bytes per entry (user fills these in)
+	|		| - example routines in fskeleton.s
+	-----------------
+	|		| - 8 bytes per entry
+   (2)  | Entry Point	| - user does "bra" or "jmp" to this address
+	|		|
+	-----------------
+	|		| - code section
+   (3)  ~		~
+	|		|
+	-----------------
+(bottom of module)
+
+The first section of this module is the "Call-out" section. This section
+is NOT INCLUDED in fpsp.sa (an example "Call-out" section is provided at
+the end of the file fskeleton.s). The purpose of this section is to allow
+the FPSP routines to reference external functions that must be provided
+by the host operating system. This section MUST be exactly 128 bytes in
+size. There are 32 fields, each 4 bytes in size. Each field corresponds
+to a function required by the FPSP (these functions and their locations are
+listed in "68060 FPSP call-outs" below). Each field entry should contain
+the address of the corresponding function RELATIVE to the starting address
+of the "call-out" section. The "Call-out" section must sit adjacent to the
+fpsp.sa image in memory.
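+
+A minimal sketch of such a table is shown below; fskeleton.s contains the
+authoritative example, and the fragment here is illustrative only (the
+label _060FPSP_TOP is the one used in the example further below, and the
+routine names are the first three entries of the call-out list):
+
+_060FPSP_TOP:					start of "Call-out" section
+	dc.l	_060_real_bsun-_060FPSP_TOP	field 0x000
+	dc.l	_060_real_snan-_060FPSP_TOP	field 0x004
+	dc.l	_060_real_operr-_060FPSP_TOP	field 0x008
+	...					remaining 29 fields, followed
+						by the fpsp.sa image itself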
+
+The second section, the "Entry-point" section, is used by external routines
+to access the functions within the FPSP. Since the fpsp.sa hex file contains
+no symbol names, this section contains function entry points that are fixed
+with respect to the top of the package. The currently defined entry-points
+are listed in section "68060 FPSP entry points" below. A calling routine
+would simply execute a "bra" or "jmp" that jumped to the selected function
+entry-point.
+
+For example, if the 68060 hardware took a "Line-F Emulator" exception
+(vector #11), the operating system should execute something similar to:
+
+	bra	_060FPSP_TOP+128+48
+
+(_060FPSP_TOP is the starting address of the "Call-out" section; the "Call-out"
+section is 128 bytes long; and the F-Line FPSP handler entry point is located
+48 bytes from the top of the "Entry-point" section.)
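+
+One possible arrangement, shown here only as an illustrative sketch (the
+register choices and the idea of writing the entry-point address straight
+into the vector table are assumptions, not requirements of the package):
+the Line-F exception uses vector #11, i.e. offset $2c in the vector table,
+so the operating system could point that vector at the package entry point:
+
+	lea	_060FPSP_TOP+128+48,a0		F-line FPSP entry point
+	movec	vbr,a1				vector base register
+	move.l	a0,$2c(a1)			vector #11 = offset $2c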
+
+The third section is the code section. After entering through an "Entry-point",
+the entry code jumps to the appropriate emulation code within the code section.
+
+68060 FPSP call-outs: (details in fskeleton.s)
+--------------------
+0x000:	_060_real_bsun
+0x004:	_060_real_snan
+0x008:	_060_real_operr
+0x00c:	_060_real_ovfl
+0x010:	_060_real_unfl
+0x014:	_060_real_dz
+0x018:	_060_real_inex
+0x01c:	_060_real_fline
+0x020:	_060_real_fpu_disabled
+0x024:	_060_real_trap
+0x028:	_060_real_trace
+0x02c:	_060_real_access
+0x030:	_060_fpsp_done
+
+0x034:	(Motorola reserved)
+0x038:	(Motorola reserved)
+0x03c:	(Motorola reserved)
+
+0x040:	_060_imem_read
+0x044:	_060_dmem_read
+0x048:	_060_dmem_write
+0x04c:	_060_imem_read_word
+0x050:	_060_imem_read_long
+0x054:	_060_dmem_read_byte
+0x058:	_060_dmem_read_word
+0x05c:	_060_dmem_read_long
+0x060:	_060_dmem_write_byte
+0x064:	_060_dmem_write_word
+0x068:	_060_dmem_write_long
+
+0x06c:	(Motorola reserved)
+0x070:	(Motorola reserved)
+0x074:	(Motorola reserved)
+0x078:	(Motorola reserved)
+0x07c:	(Motorola reserved)
+
+68060 FPSP entry points:
+-----------------------
+0x000:	_060_fpsp_snan
+0x008:	_060_fpsp_operr
+0x010:	_060_fpsp_ovfl
+0x018:	_060_fpsp_unfl
+0x020:	_060_fpsp_dz
+0x028:	_060_fpsp_inex
+0x030:	_060_fpsp_fline
+0x038:	_060_fpsp_unsupp
+0x040:	_060_fpsp_effadd
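+
+For instance, _060_fpsp_fline sits at offset 0x030 (48 decimal) in this
+table, so its absolute address is _060FPSP_TOP + 128 + 48, which is
+exactly the target of the "bra" shown in the example above.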
+
+
+
+Miscellaneous:
+--------------
+
+_060_fpsp_snan:
+----------------
+- documented in 3.5 of 060SP spec.
+- Basic flow:
+	exception taken ---> enter _060_fpsp_snan --|
+						    |
+            always exits through _060_real_snan <----
+
+_060_fpsp_operr:
+----------------
+- documented in 3.5 of 060SP spec.
+- Basic flow:
+	exception taken ---> enter _060_fpsp_operr --|
+						     |
+           always exits through _060_real_operr <-----
+
+_060_fpsp_dz:
+----------------
+- documented in 3.7 of 060SP spec.
+- Basic flow:
+	exception taken ---> enter _060_fpsp_dz   --|
+						    |
+            always exits through _060_real_dz   <----
+
+_060_fpsp_inex:
+----------------
+- documented in 3.6 of 060SP spec.
+- Basic flow:
+	exception taken ---> enter _060_fpsp_inex --|
+						    |
+            always exits through _060_real_inex <----
+
+
+_060_fpsp_ovfl:
+----------------
+- documented in 3.4 of 060SP spec.
+- Basic flow:
+	exception taken ---> enter _060_fpsp_ovfl --|
+						    |
+            may exit through _060_real_inex     <---|
+						 or |
+            may exit through _060_real_ovfl     <---|
+						 or |
+            may exit through _060_fpsp_done     <---|
+
+_060_fpsp_unfl:
+----------------
+- documented in 3.4 of 060SP spec.
+- Basic flow:
+	exception taken ---> enter _060_fpsp_unfl --|
+						    |
+            may exit through _060_real_inex     <---|
+						 or |
+            may exit through _060_real_unfl     <---|
+						 or |
+            may exit through _060_fpsp_done     <---|
+
+
+_060_fpsp_fline:
+-----------------
+- not fully documented in 060SP spec.
+- Basic flow:
+	exception taken ---> enter _060_fpsp_fline --|
+						     |
+            -------------------------------------------
+            |               |                     |
+            v               v                     v
+   (unimplemented   (fpu disabled)       (possible F-line illegal)
+     stack frame)           |                     v
+            |               v               special case "fmovecr"?
+            |         exit through                    |
+            |     _060_real_fpu_disabled        -------------
+            |                                   |           |
+            |               ^                   v           v
+            |               |                 (yes)        (no)
+            |               |                   v           v
+            |               |             fpu disabled?   exit through
+            |               |                   |         _060_real_fline
+            v               |             -------------
+            |               |             |           |
+            |               |             v           v
+            |               |-----------(yes)        (no)
+            |                                         |
+            |----<------------------------------------|
+            |
+            |
+            |----> may exit through _060_real_trace
+            |
+            |----> may exit through _060_real_trap
+            |
+            |----> may exit through _060_real_bsun
+            |
+            |----> may exit through _060_fpsp_done
+
+
+_060_fpsp_unsupp:
+------------------
+- documented in 3.1 of 060SP spec.
+- Basic flow:
+	exception taken ---> enter _060_fpsp_unsupp --|
+						      |
+						      |
+            may exit through _060_real_snan      <----|
+						  or  |
+            may exit through _060_real_operr     <----|
+						  or  |
+            may exit through _060_real_ovfl      <----|
+						  or  |
+            may exit through _060_real_unfl      <----|
+						  or  |
+            may exit through _060_real_inex      <----|
+						  or  |
+            may exit through _060_real_trace     <----|
+						  or  |
+            may exit through _060_fpsp_done      <----|
+
+
+_060_fpsp_effadd:
+------------------
+- documented in 3.3 of 060SP spec.
+- Basic flow:
+	exception taken ---> enter _060_fpsp_effadd --|
+						      |
+						      |
+            may exit through _060_real_trace     <----|
+						  or  |
+     may exit through _060_real_fpu_disabled     <----|
+						  or  |
+            may exit through _060_fpsp_done      <----|
diff --git a/arch/m68k/ifpsp060/fpsp.sa b/arch/m68k/ifpsp060/fpsp.sa
new file mode 100644
index 0000000..d69486a
--- /dev/null
+++ b/arch/m68k/ifpsp060/fpsp.sa
@@ -0,0 +1,3401 @@
+	.long	0x60ff0000,0x17400000,0x60ff0000,0x15f40000
+	.long	0x60ff0000,0x02b60000,0x60ff0000,0x04700000
+	.long	0x60ff0000,0x1b100000,0x60ff0000,0x19aa0000
+	.long	0x60ff0000,0x1b5a0000,0x60ff0000,0x062e0000
+	.long	0x60ff0000,0x102c0000,0x51fc51fc,0x51fc51fc
+	.long	0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+	.long	0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+	.long	0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+	.long	0x2f00203a,0xff2c487b,0x0930ffff,0xfef8202f
+	.long	0x00044e74,0x00042f00,0x203afef2,0x487b0930
+	.long	0xfffffee2,0x202f0004,0x4e740004,0x2f00203a
+	.long	0xfee0487b,0x0930ffff,0xfecc202f,0x00044e74
+	.long	0x00042f00,0x203afed2,0x487b0930,0xfffffeb6
+	.long	0x202f0004,0x4e740004,0x2f00203a,0xfea4487b
+	.long	0x0930ffff,0xfea0202f,0x00044e74,0x00042f00
+	.long	0x203afe96,0x487b0930,0xfffffe8a,0x202f0004
+	.long	0x4e740004,0x2f00203a,0xfe7c487b,0x0930ffff
+	.long	0xfe74202f,0x00044e74,0x00042f00,0x203afe76
+	.long	0x487b0930,0xfffffe5e,0x202f0004,0x4e740004
+	.long	0x2f00203a,0xfe68487b,0x0930ffff,0xfe48202f
+	.long	0x00044e74,0x00042f00,0x203afe56,0x487b0930
+	.long	0xfffffe32,0x202f0004,0x4e740004,0x2f00203a
+	.long	0xfe44487b,0x0930ffff,0xfe1c202f,0x00044e74
+	.long	0x00042f00,0x203afe32,0x487b0930,0xfffffe06
+	.long	0x202f0004,0x4e740004,0x2f00203a,0xfe20487b
+	.long	0x0930ffff,0xfdf0202f,0x00044e74,0x00042f00
+	.long	0x203afe1e,0x487b0930,0xfffffdda,0x202f0004
+	.long	0x4e740004,0x2f00203a,0xfe0c487b,0x0930ffff
+	.long	0xfdc4202f,0x00044e74,0x00042f00,0x203afdfa
+	.long	0x487b0930,0xfffffdae,0x202f0004,0x4e740004
+	.long	0x2f00203a,0xfde8487b,0x0930ffff,0xfd98202f
+	.long	0x00044e74,0x00042f00,0x203afdd6,0x487b0930
+	.long	0xfffffd82,0x202f0004,0x4e740004,0x2f00203a
+	.long	0xfdc4487b,0x0930ffff,0xfd6c202f,0x00044e74
+	.long	0x00042f00,0x203afdb2,0x487b0930,0xfffffd56
+	.long	0x202f0004,0x4e740004,0x2f00203a,0xfda0487b
+	.long	0x0930ffff,0xfd40202f,0x00044e74,0x00042f00
+	.long	0x203afd8e,0x487b0930,0xfffffd2a,0x202f0004
+	.long	0x4e740004,0x2f00203a,0xfd7c487b,0x0930ffff
+	.long	0xfd14202f,0x00044e74,0x00042f00,0x203afd6a
+	.long	0x487b0930,0xfffffcfe,0x202f0004,0x4e740004
+	.long	0x40c62d38,0xd3d64634,0x3d6f90ae,0xb1e75cc7
+	.long	0x40000000,0xc90fdaa2,0x2168c235,0x00000000
+	.long	0x3fff0000,0xc90fdaa2,0x2168c235,0x00000000
+	.long	0x3fe45f30,0x6dc9c883,0x4e56ff40,0xf32eff6c
+	.long	0x48ee0303,0xff9cf22e,0xbc00ff60,0xf22ef0c0
+	.long	0xffdc2d6e,0xff68ff44,0x206eff44,0x58aeff44
+	.long	0x61ffffff,0xff042d40,0xff40082e,0x0005ff42
+	.long	0x66000116,0x41eeff6c,0x61ff0000,0x051c41ee
+	.long	0xff6c61ff,0x0000c1dc,0x1d40ff4e,0x082e0005
+	.long	0xff436726,0xe9ee0183,0xff4261ff,0x0000bd22
+	.long	0x41eeff78,0x61ff0000,0xc1ba0c00,0x00066606
+	.long	0x61ff0000,0xc11e1d40,0xff4f4280,0x102eff63
+	.long	0x122eff43,0x0241007f,0x02ae00ff,0x01ffff64
+	.long	0xf23c9000,0x00000000,0xf23c8800,0x00000000
+	.long	0x41eeff6c,0x43eeff78,0x223b1530,0x00007112
+	.long	0x4ebb1930,0x0000710a,0xe9ee0183,0xff4261ff
+	.long	0x0000bd4e,0x082e0004,0xff626622,0x082e0001
+	.long	0xff626644,0xf22ed0c0,0xffdcf22e,0x9c00ff60
+	.long	0x4cee0303,0xff9c4e5e,0x60ffffff,0xfcc6f22e
+	.long	0xf040ff6c,0x3d7ce005,0xff6ef22e,0xd0c0ffdc
+	.long	0xf22e9c00,0xff604cee,0x0303ff9c,0xf36eff6c
+	.long	0x4e5e60ff,0xfffffcb2,0xf22ef040,0xff6c1d7c
+	.long	0x00c4000b,0x3d7ce001,0xff6ef22e,0xd0c0ffdc
+	.long	0xf22e9c00,0xff604cee,0x0303ff9c,0xf36eff6c
+	.long	0x4e5e60ff,0xfffffcae,0x1d7c0000,0xff4e4280
+	.long	0x102eff63,0x02aeffff,0x00ffff64,0xf23c9000
+	.long	0x00000000,0xf23c8800,0x00000000,0x41eeff6c
+	.long	0x61ff0000,0xb2ce082e,0x0004ff62,0x6600ff70
+	.long	0x082e0001,0xff626600,0xff90f22e,0xd0c0ffdc
+	.long	0xf22e9c00,0xff604cee,0x0303ff9c,0x4e5e0817
+	.long	0x000767ff,0xfffffc0c,0xf22fa400,0x00083f7c
+	.long	0x20240006,0x60ffffff,0xfcec4e56,0xff40f32e
+	.long	0xff6c48ee,0x0303ff9c,0xf22ebc00,0xff60f22e
+	.long	0xf0c0ffdc,0x2d6eff68,0xff44206e,0xff4458ae
+	.long	0xff4461ff,0xfffffd42,0x2d40ff40,0x082e0005
+	.long	0xff426600,0x013241ee,0xff6c61ff,0x0000035a
+	.long	0x41eeff6c,0x61ff0000,0xc01a1d40,0xff4e082e
+	.long	0x0005ff43,0x672e082e,0x0004ff43,0x6626e9ee
+	.long	0x0183ff42,0x61ff0000,0xbb5841ee,0xff7861ff
+	.long	0x0000bff0,0x0c000006,0x660661ff,0x0000bf54
+	.long	0x1d40ff4f,0x4280102e,0xff63122e,0xff430241
+	.long	0x007f02ae,0x00ff01ff,0xff64f23c,0x90000000
+	.long	0x0000f23c,0x88000000,0x000041ee,0xff6c43ee
+	.long	0xff78223b,0x15300000,0x6f484ebb,0x19300000
+	.long	0x6f40e9ee,0x0183ff42,0x61ff0000,0xbb84082e
+	.long	0x0003ff62,0x6622082e,0x0001ff62,0x664ef22e
+	.long	0xd0c0ffdc,0xf22e9c00,0xff604cee,0x0303ff9c
+	.long	0x4e5e60ff,0xfffffafc,0x082e0003,0xff666700
+	.long	0xffd6f22e,0xf040ff6c,0x3d7ce003,0xff6ef22e
+	.long	0xd0c0ffdc,0xf22e9c00,0xff604cee,0x0303ff9c
+	.long	0xf36eff6c,0x4e5e60ff,0xfffffaf4,0x082e0001
+	.long	0xff666700,0xffaaf22e,0xf040ff6c,0x1d7c00c4
+	.long	0x000b3d7c,0xe001ff6e,0xf22ed0c0,0xffdcf22e
+	.long	0x9c00ff60,0x4cee0303,0xff9cf36e,0xff6c4e5e
+	.long	0x60ffffff,0xfad01d7c,0x0000ff4e,0x4280102e
+	.long	0xff6302ae,0xffff00ff,0xff64f23c,0x90000000
+	.long	0x0000f23c,0x88000000,0x000041ee,0xff6c61ff
+	.long	0x0000b0f0,0x082e0003,0xff626600,0xff66082e
+	.long	0x0001ff62,0x6600ff90,0xf22ed0c0,0xffdcf22e
+	.long	0x9c00ff60,0x4cee0303,0xff9c4e5e,0x08170007
+	.long	0x67ffffff,0xfa2ef22f,0xa4000008,0x3f7c2024
+	.long	0x000660ff,0xfffffb0e,0x4e56ff40,0xf32eff6c
+	.long	0x48ee0303,0xff9cf22e,0xbc00ff60,0xf22ef0c0
+	.long	0xffdc082e,0x00050004,0x66084e68,0x2d48ffd8
+	.long	0x600841ee,0x00102d48,0xffd82d6e,0xff68ff44
+	.long	0x206eff44,0x58aeff44,0x61ffffff,0xfb4c2d40
+	.long	0xff40422e,0xff4a082e,0x0005ff42,0x66000208
+	.long	0xe9ee0006,0xff420c00,0x00136700,0x049e02ae
+	.long	0x00ff00ff,0xff64f23c,0x90000000,0x0000f23c
+	.long	0x88000000,0x000041ee,0xff6c61ff,0x0000013a
+	.long	0x41eeff6c,0x61ff0000,0xbdfa0c00,0x00066606
+	.long	0x61ff0000,0xbd5e1d40,0xff4ee9ee,0x0183ff42
+	.long	0x082e0005,0xff436728,0x0c2e003a,0xff436720
+	.long	0x61ff0000,0xb92c41ee,0xff7861ff,0x0000bdc4
+	.long	0x0c000006,0x660661ff,0x0000bd28,0x1d40ff4f
+	.long	0x4280102e,0xff63e9ee,0x1047ff43,0x41eeff6c
+	.long	0x43eeff78,0x223b1d30,0x00006d36,0x4ebb1930
+	.long	0x00006d2e,0x102eff62,0x6634102e,0xff430200
+	.long	0x00380c00,0x0038670c,0xe9ee0183,0xff4261ff
+	.long	0x0000b95e,0xf22ed0c0,0xffdcf22e,0x9c00ff60
+	.long	0x4cee0303,0xff9c4e5e,0x60ffffff,0xf8e6c02e
+	.long	0xff66edc0,0x06086614,0x082e0004,0xff6667ba
+	.long	0x082e0001,0xff6267b2,0x60000066,0x04800000
+	.long	0x00180c00,0x00066614,0x082e0003,0xff666600
+	.long	0x004a082e,0x0004ff66,0x66000046,0x2f0061ff
+	.long	0x000007e0,0x201f3d7b,0x0222ff6e,0xf22ed0c0
+	.long	0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9cf36e
+	.long	0xff6c4e5e,0x60ffffff,0xf87ae000,0xe006e004
+	.long	0xe005e003,0xe002e001,0xe001303c,0x000460bc
+	.long	0x303c0003,0x60b6e9ee,0x0006ff42,0x0c000011
+	.long	0x67080c00,0x00156750,0x4e753028,0x00000240
+	.long	0x7fff0c40,0x3f806708,0x0c40407f,0x672c4e75
+	.long	0x02a87fff,0xffff0004,0x671861ff,0x0000bbbc
+	.long	0x44400640,0x3f810268,0x80000000,0x81680000
+	.long	0x4e750268,0x80000000,0x4e750228,0x007f0004
+	.long	0x00687fff,0x00004e75,0x30280000,0x02407fff
+	.long	0x0c403c00,0x67080c40,0x43ff67de,0x4e7502a8
+	.long	0x7fffffff,0x00046606,0x4aa80008,0x67c461ff
+	.long	0x0000bb68,0x44400640,0x3c010268,0x80000000
+	.long	0x81680000,0x4e75e9ee,0x00c3ff42,0x0c000003
+	.long	0x670004a2,0x0c000007,0x6700049a,0x02aeffff
+	.long	0x00ffff64,0xf23c9000,0x00000000,0xf23c8800
+	.long	0x00000000,0x302eff6c,0x02407fff,0x671041ee
+	.long	0xff6c61ff,0x0000bb5c,0x1d40ff4e,0x60061d7c
+	.long	0x0004ff4e,0x4280102e,0xff6341ee,0xff6c2d56
+	.long	0xffd461ff,0x0000adec,0x102eff62,0x66000086
+	.long	0x2caeffd4,0x082e0005,0x00046626,0x206effd8
+	.long	0x4e60f22e,0xd0c0ffdc,0xf22e9c00,0xff604cee
+	.long	0x0303ff9c,0x4e5e0817,0x0007667a,0x60ffffff
+	.long	0xf7220c2e,0x0008ff4a,0x66d8f22e,0xf080ff6c
+	.long	0xf22ed0c0,0xffdcf22e,0x9c00ff60,0x4cee0303
+	.long	0xff9c2c56,0x2f6f00c4,0x00b82f6f,0x00c800bc
+	.long	0x2f6f002c,0x00c42f6f,0x003000c8,0x2f6f0034
+	.long	0x00ccdffc,0x000000b8,0x08170007,0x662860ff
+	.long	0xfffff6d0,0xc02eff66,0xedc00608,0x662a082e
+	.long	0x0004ff66,0x6700ff6a,0x082e0001,0xff626700
+	.long	0xff606000,0x01663f7c,0x20240006,0xf22fa400
+	.long	0x000860ff,0xfffff78e,0x04800000,0x0018303b
+	.long	0x020a4efb,0x00064afc,0x00080000,0x0000003a
+	.long	0x00640094,0x00000140,0x0000f22e,0xd0c0ffdc
+	.long	0xf22e9c00,0xff604cee,0x0303ff9c,0x3d7c30d8
+	.long	0x000a3d7c,0xe006ff6e,0xf36eff6c,0x4e5e60ff
+	.long	0xfffff6d4,0xf22ed0c0,0xffdcf22e,0x9c00ff60
+	.long	0x4cee0303,0xff9c3d7c,0x30d0000a,0x3d7ce004
+	.long	0xff6ef36e,0xff6c4e5e,0x60ffffff,0xf694f22e
+	.long	0xf040ff6c,0xf22ed0c0,0xffdcf22e,0x9c00ff60
+	.long	0x4cee0303,0xff9c3d7c,0x30d4000a,0x3d7ce005
+	.long	0xff6ef36e,0xff6c4e5e,0x60ffffff,0xf60c2cae
+	.long	0xffd4082e,0x00050004,0x66000038,0x206effd8
+	.long	0x4e60f22e,0xf040ff6c,0xf22ed0c0,0xffdcf22e
+	.long	0x9c00ff60,0x4cee0303,0xff9c3d7c,0x30cc000a
+	.long	0x3d7ce003,0xff6ef36e,0xff6c4e5e,0x60ffffff
+	.long	0xf5de0c2e,0x0008ff4a,0x66c8f22e,0xf080ff6c
+	.long	0xf22ef040,0xff78f22e,0xd0c0ffdc,0xf22e9c00
+	.long	0xff604cee,0x0303ff9c,0x3d7c30cc,0x000a3d7c
+	.long	0xe003ff7a,0xf36eff78,0x2c562f6f,0x00c400b8
+	.long	0x2f6f00c8,0x00bc2f6f,0x00cc00c0,0x2f6f002c
+	.long	0x00c42f6f,0x003000c8,0x2f6f0034,0x00ccdffc
+	.long	0x000000b8,0x60ffffff,0xf576f22e,0xf040ff6c
+	.long	0xf22ed0c0,0xffdcf22e,0x9c00ff60,0x4cee0303
+	.long	0xff9c3d7c,0x30c4000a,0x3d7ce001,0xff6ef36e
+	.long	0xff6c4e5e,0x60ffffff,0xf55c02ae,0x00ff00ff
+	.long	0xff64f23c,0x90000000,0x0000f23c,0x88000000
+	.long	0x000061ff,0x0000bdba,0x41eeff6c,0x61ff0000
+	.long	0xb9621d40,0xff4ee9ee,0x0183ff42,0x082e0005
+	.long	0xff436728,0x0c2e003a,0xff436720,0x61ff0000
+	.long	0xb4a041ee,0xff7861ff,0x0000b938,0x0c000006
+	.long	0x660661ff,0x0000b89c,0x1d40ff4f,0x4280102e
+	.long	0xff63e9ee,0x1047ff43,0x41eeff6c,0x43eeff78
+	.long	0x223b1d30,0x000068aa,0x4ebb1930,0x000068a2
+	.long	0x102eff62,0x6600008a,0x102eff43,0x02000038
+	.long	0x0c000038,0x670ce9ee,0x0183ff42,0x61ff0000
+	.long	0xb4d0082e,0x00050004,0x6600002a,0x206effd8
+	.long	0x4e60f22e,0xd0c0ffdc,0xf22e9c00,0xff604cee
+	.long	0x0303ff9c,0x4e5e0817,0x00076600,0x012660ff
+	.long	0xfffff440,0x082e0002,0xff4a67d6,0xf22ed0c0
+	.long	0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9c4e5e
+	.long	0x2f6f0004,0x00102f6f,0x0000000c,0xdffc0000
+	.long	0x000c0817,0x00076600,0x00ea60ff,0xfffff404
+	.long	0xc02eff66,0xedc00608,0x6618082e,0x0004ff66
+	.long	0x6700ff66,0x082e0001,0xff626700,0xff5c6000
+	.long	0x006e0480,0x00000018,0x0c000006,0x6d14082e
+	.long	0x0003ff66,0x66000060,0x082e0004,0xff666600
+	.long	0x004e082e,0x00050004,0x66000054,0x206effd8
+	.long	0x4e603d7b,0x022aff6e,0xf22ed0c0,0xffdcf22e
+	.long	0x9c00ff60,0x4cee0303,0xff9cf36e,0xff6c4e5e
+	.long	0x08170007,0x6600006c,0x60ffffff,0xf386e000
+	.long	0xe006e004,0xe005e003,0xe002e001,0xe001303c
+	.long	0x00036000,0xffae303c,0x00046000,0xffa6082e
+	.long	0x0002ff4a,0x67ac3d7b,0x02d6ff6e,0xf22ed0c0
+	.long	0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9cf36e
+	.long	0xff6c4e5e,0x2f6f0004,0x00102f6f,0x0000000c
+	.long	0xdffc0000,0x000c0817,0x00076606,0x60ffffff
+	.long	0xf3223f7c,0x20240006,0xf22fa400,0x000860ff
+	.long	0xfffff402,0x02aeffff,0x00ffff64,0xf23c9000
+	.long	0x00000000,0xf23c8800,0x00000000,0xe9ee0183
+	.long	0xff4261ff,0x0000b22a,0x41eeff6c,0x61ff0000
+	.long	0xb7520c00,0x00066606,0x61ff0000,0xb6b61d40
+	.long	0xff4e4280,0x102eff63,0x41eeff6c,0x2d56ffd4
+	.long	0x61ff0000,0xa94e102e,0xff626600,0x00842cae
+	.long	0xffd4082e,0x00050004,0x6628206e,0xffd84e60
+	.long	0xf22ed0c0,0xffdcf22e,0x9c00ff60,0x4cee0303
+	.long	0xff9c4e5e,0x08170007,0x6600ff68,0x60ffffff
+	.long	0xf282082e,0x0003ff4a,0x67d6f22e,0xd0c0ffdc
+	.long	0xf22e9c00,0xff604cee,0x0303ff9c,0x2c562f6f
+	.long	0x00c400b8,0x2f6f00c8,0x00bc2f6f,0x003800c4
+	.long	0x2f6f003c,0x00c82f6f,0x004000cc,0xdffc0000
+	.long	0x00b80817,0x00076600,0xff1a60ff,0xfffff234
+	.long	0xc02eff66,0xedc00608,0x6700ff74,0x2caeffd4
+	.long	0x0c00001a,0x6e0000e8,0x67000072,0x082e0005
+	.long	0x0004660a,0x206effd8,0x4e606000,0xfb8e0c2e
+	.long	0x0008ff4a,0x6600fb84,0xf22ed0c0,0xffdcf22e
+	.long	0x9c00ff60,0x4cee0303,0xff9c3d7c,0x30d8000a
+	.long	0x3d7ce006,0xff6ef36e,0xff6c2c56,0x2f6f00c4
+	.long	0x00b82f6f,0x00c800bc,0x2f6f00cc,0x00c02f6f
+	.long	0x003800c4,0x2f6f003c,0x00c82f6f,0x004000cc
+	.long	0xdffc0000,0x00b860ff,0xfffff22c,0x082e0005
+	.long	0x00046600,0x000c206e,0xffd84e60,0x6000fb46
+	.long	0x0c2e0008,0xff4a6600,0xfb3cf22e,0xd0c0ffdc
+	.long	0xf22e9c00,0xff604cee,0x0303ff9c,0x3d7c30d0
+	.long	0x000a3d7c,0xe004ff6e,0xf36eff6c,0x2c562f6f
+	.long	0x00c400b8,0x2f6f00c8,0x00bc2f6f,0x00cc00c0
+	.long	0x2f6f0038,0x00c42f6f,0x003c00c8,0x2f6f0040
+	.long	0x00ccdffc,0x000000b8,0x60ffffff,0xf1a4082e
+	.long	0x00050004,0x6600000c,0x206effd8,0x4e606000
+	.long	0xfbda0c2e,0x0008ff4a,0x6600fbd0,0xf22ed0c0
+	.long	0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9c3d7c
+	.long	0x30c4000a,0x3d7ce001,0xff6ef36e,0xff6c2c56
+	.long	0x2f6f00c4,0x00b82f6f,0x00c800bc,0x2f6f00cc
+	.long	0x00c02f6f,0x003800c4,0x2f6f003c,0x00c82f6f
+	.long	0x004000cc,0xdffc0000,0x00b860ff,0xfffff106
+	.long	0xe9ee00c3,0xff420c00,0x00016708,0x0c000005
+	.long	0x67344e75,0x302eff6c,0x02407fff,0x67260c40
+	.long	0x3f806e20,0x44400640,0x3f81222e,0xff70e0a9
+	.long	0x08c1001f,0x2d41ff70,0x026e8000,0xff6c006e
+	.long	0x3f80ff6c,0x4e75302e,0xff6c0240,0x7fff673a
+	.long	0x0c403c00,0x6e344a2e,0xff6c5bee,0xff6e3d40
+	.long	0xff6c4280,0x41eeff6c,0x323c3c01,0x61ff0000
+	.long	0xb156303c,0x3c004a2e,0xff6e6704,0x08c0000f
+	.long	0x08ee0007,0xff703d40,0xff6c4e75,0x082e0005
+	.long	0x000467ff,0xfffff176,0x2d680000,0xff782d68
+	.long	0x0004ff7c,0x2d680008,0xff804281,0x4e752f00
+	.long	0x4e7a0808,0x08000001,0x66000460,0x201f4e56
+	.long	0xff4048ee,0x0303ff9c,0xf22ebc00,0xff60f22e
+	.long	0xf0c0ffdc,0x2d6e0006,0xff44206e,0xff4458ae
+	.long	0xff4461ff,0xfffff152,0x2d40ff40,0x4a406b00
+	.long	0x020e02ae,0x00ff00ff,0xff640800,0x000a6618
+	.long	0x206eff44,0x43eeff6c,0x700c61ff,0xfffff0d2
+	.long	0x4a816600,0x04926048,0x206eff44,0x43eeff6c
+	.long	0x700c61ff,0xfffff0ba,0x4a816600,0x047ae9ee
+	.long	0x004fff6c,0x0c407fff,0x6726102e,0xff6f0200
+	.long	0x000f660c,0x4aaeff70,0x66064aae,0xff746710
+	.long	0x41eeff6c,0x61ff0000,0xb88cf22e,0xf080ff6c
+	.long	0x06ae0000,0x000cff44,0x41eeff6c,0x61ff0000
+	.long	0xb3c21d40,0xff4e0c00,0x0006660a,0x61ff0000
+	.long	0xb3221d40,0xff4e422e,0xff53082e,0x0005ff43
+	.long	0x6748082e,0x0004ff43,0x662ce9ee,0x0183ff42
+	.long	0x61ff0000,0xaeec41ee,0xff7861ff,0x0000b384
+	.long	0x1d40ff4f,0x0c000006,0x662061ff,0x0000b2e4
+	.long	0x1d40ff4f,0x6014082e,0x0003ff43,0x670c50ee
+	.long	0xff53082e,0x0001ff43,0x67c04280,0x102eff63
+	.long	0x122eff43,0x0241007f,0xf23c9000,0x00000000
+	.long	0xf23c8800,0x00000000,0x41eeff6c,0x43eeff78
+	.long	0x223b1530,0x000062ca,0x4ebb1930,0x000062c2
+	.long	0x102eff62,0x66404a2e,0xff53660c,0xe9ee0183
+	.long	0xff4261ff,0x0000aefa,0x2d6e0006,0xff682d6e
+	.long	0xff440006,0xf22ed0c0,0xffdcf22e,0x9c00ff60
+	.long	0x4cee0303,0xff9c4e5e,0x08170007,0x66000096
+	.long	0x60ffffff,0xee6ec02e,0xff66edc0,0x06086612
+	.long	0x082e0004,0xff6667ae,0x082e0001,0xff6267ac
+	.long	0x60340480,0x00000018,0x0c000006,0x6610082e
+	.long	0x0004ff66,0x6620082e,0x0003ff66,0x66203d7b
+	.long	0x0206ff6e,0x601ee002,0xe006e004,0xe005e003
+	.long	0xe002e001,0xe0013d7c,0xe005ff6e,0x60063d7c
+	.long	0xe003ff6e,0x2d6e0006,0xff682d6e,0xff440006
+	.long	0xf22ed0c0,0xffdcf22e,0x9c00ff60,0x4cee0303
+	.long	0xff9cf36e,0xff6c4e5e,0x08170007,0x660660ff
+	.long	0xffffede0,0x2f173f6f,0x00080004,0x3f7c2024
+	.long	0x0006f22f,0xa4000008,0x60ffffff,0xeeb80800
+	.long	0x000e6700,0x01c2082e,0x00050004,0x66164e68
+	.long	0x2d48ffd8,0x61ff0000,0x9564206e,0xffd84e60
+	.long	0x600001aa,0x422eff4a,0x41ee000c,0x2d48ffd8
+	.long	0x61ff0000,0x95480c2e,0x0008ff4a,0x67000086
+	.long	0x0c2e0004,0xff4a6600,0x0184082e,0x00070004
+	.long	0x66363dae,0x00040804,0x2daeff44,0x08063dbc
+	.long	0x00f0080a,0x41f60804,0x2d480004,0xf22ed0c0
+	.long	0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9c4e5e
+	.long	0x2e5f60ff,0xffffed3c,0x3dae0004,0x08002dae
+	.long	0xff440802,0x3dbc2024,0x08062dae,0x00060808
+	.long	0x41f60800,0x2d480004,0xf22ed0c0,0xffdcf22e
+	.long	0x9c00ff60,0x4cee0303,0xff9c4e5e,0x2e5f60ff
+	.long	0xffffedf2,0x1d41000a,0x1d40000b,0xf22ed0c0
+	.long	0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9c2f16
+	.long	0x2f002f01,0x2f2eff44,0x4280102e,0x000b4480
+	.long	0x082e0007,0x0004671c,0x3dae0004,0x08002dae
+	.long	0x00060808,0x2d9f0802,0x3dbc2024,0x08064876
+	.long	0x08006014,0x3dae0004,0x08042d9f,0x08063dbc
+	.long	0x00f0080a,0x48760804,0x4281122e,0x000a4a01
+	.long	0x6a0cf236,0xf080080c,0x06800000,0x000ce309
+	.long	0x6a0cf236,0xf040080c,0x06800000,0x000ce309
+	.long	0x6a0cf236,0xf020080c,0x06800000,0x000ce309
+	.long	0x6a0cf236,0xf010080c,0x06800000,0x000ce309
+	.long	0x6a0cf236,0xf008080c,0x06800000,0x000ce309
+	.long	0x6a0cf236,0xf004080c,0x06800000,0x000ce309
+	.long	0x6a0cf236,0xf002080c,0x06800000,0x000ce309
+	.long	0x6a06f236,0xf001080c,0x222f0004,0x202f0008
+	.long	0x2c6f000c,0x2e5f0817,0x000767ff,0xffffec04
+	.long	0x60ffffff,0xecf061ff,0x00009bda,0xf22ed0c0
+	.long	0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9c082e
+	.long	0x00070004,0x660e2d6e,0xff440006,0x4e5e60ff
+	.long	0xffffebd0,0x2c563f6f,0x00c400c0,0x2f6f00c6
+	.long	0x00c82f6f,0x000400c2,0x3f7c2024,0x00c6dffc
+	.long	0x000000c0,0x60ffffff,0xec9c201f,0x4e56ff40
+	.long	0x48ee0303,0xff9c2d6e,0x0006ff44,0x206eff44
+	.long	0x58aeff44,0x61ffffff,0xed002d40,0xff404a40
+	.long	0x6b047010,0x60260800,0x000e6610,0xe9c014c3
+	.long	0x700c0c01,0x00076614,0x58806010,0x428061ff
+	.long	0x0000967c,0x202eff44,0x90ae0006,0x3d40000a
+	.long	0x4cee0303,0xff9c4e5e,0x518f2f00,0x3f6f000c
+	.long	0x00042f6f,0x000e0006,0x4280302f,0x00122f6f
+	.long	0x00060010,0xd1af0006,0x3f7c402c,0x000a201f
+	.long	0x60ffffff,0xebe44e7a,0x08080800,0x0001660c
+	.long	0xf22e9c00,0xff60f22e,0xd0c0ffdc,0x4cee0303
+	.long	0xff9c4e5e,0x514f2eaf,0x00083f6f,0x000c0004
+	.long	0x3f7c4008,0x00062f6f,0x00020008,0x2f7c0942
+	.long	0x8001000c,0x08170005,0x670608ef,0x0002000d
+	.long	0x60ffffff,0xebd64fee,0xff404e7a,0x18080801
+	.long	0x0001660c,0xf22ed0c0,0xffdcf22f,0x9c000020
+	.long	0x2c562f6f,0x00c400bc,0x3f6f00c8,0x00c03f7c
+	.long	0x400800c2,0x2f4800c4,0x3f4000c8,0x3f7c0001
+	.long	0x00ca4cef,0x0303005c,0xdefc00bc,0x60a64e56
+	.long	0xff40f32e,0xff6c48ee,0x0303ff9c,0xf22ebc00
+	.long	0xff60f22e,0xf0c0ffdc,0x2d6eff68,0xff44206e
+	.long	0xff4458ae,0xff4461ff,0xffffebce,0x2d40ff40
+	.long	0x0800000d,0x662841ee,0xff6c61ff,0xfffff1ea
+	.long	0xf22ed0c0,0xffdcf22e,0x9c00ff60,0x4cee0303
+	.long	0xff9cf36e,0xff6c4e5e,0x60ffffff,0xea94322e
+	.long	0xff6c0241,0x7fff0c41,0x7fff661a,0x4aaeff74
+	.long	0x660c222e,0xff700281,0x7fffffff,0x67082d6e
+	.long	0xff70ff54,0x6012223c,0x7fffffff,0x4a2eff6c
+	.long	0x6a025281,0x2d41ff54,0xe9c004c3,0x122eff41
+	.long	0x307b0206,0x4efb8802,0x006c0000,0x0000ff98
+	.long	0x003e0000,0x00100000,0x102eff54,0x0c010007
+	.long	0x6f16206e,0x000c61ff,0xffffeb86,0x4a8166ff
+	.long	0x0000bca8,0x6000ff6a,0x02410007,0x61ff0000
+	.long	0xa8046000,0xff5c302e,0xff540c01,0x00076f16
+	.long	0x206e000c,0x61ffffff,0xeb6e4a81,0x66ff0000
+	.long	0xbc886000,0xff3c0241,0x000761ff,0x0000a79a
+	.long	0x6000ff2e,0x202eff54,0x0c010007,0x6f16206e
+	.long	0x000c61ff,0xffffeb56,0x4a8166ff,0x0000bc68
+	.long	0x6000ff0e,0x02410007,0x61ff0000,0xa7306000
+	.long	0xff004e56,0xff40f32e,0xff6c48ee,0x0303ff9c
+	.long	0xf22ebc00,0xff60f22e,0xf0c0ffdc,0x2d6eff68
+	.long	0xff44206e,0xff4458ae,0xff4461ff,0xffffea8a
+	.long	0x2d40ff40,0x0800000d,0x6600002a,0x41eeff6c
+	.long	0x61ffffff,0xf0a4f22e,0xd0c0ffdc,0xf22e9c00
+	.long	0xff604cee,0x0303ff9c,0xf36eff6c,0x4e5e60ff
+	.long	0xffffe964,0xe9c004c3,0x122eff41,0x307b0206
+	.long	0x4efb8802,0x007400a6,0x015a0000,0x00420104
+	.long	0x00100000,0x102eff70,0x08c00006,0x0c010007
+	.long	0x6f16206e,0x000c61ff,0xffffea76,0x4a8166ff
+	.long	0x0000bb98,0x6000ffa0,0x02410007,0x61ff0000
+	.long	0xa6f46000,0xff92302e,0xff7008c0,0x000e0c01
+	.long	0x00076f16,0x206e000c,0x61ffffff,0xea5a4a81
+	.long	0x66ff0000,0xbb746000,0xff6e0241,0x000761ff
+	.long	0x0000a686,0x6000ff60,0x202eff70,0x08c0001e
+	.long	0x0c010007,0x6f16206e,0x000c61ff,0xffffea3e
+	.long	0x4a8166ff,0x0000bb50,0x6000ff3c,0x02410007
+	.long	0x61ff0000,0xa6186000,0xff2e0c01,0x00076f2e
+	.long	0x202eff6c,0x02808000,0x00000080,0x7fc00000
+	.long	0x222eff70,0xe0898081,0x206e000c,0x61ffffff
+	.long	0xe9fc4a81,0x66ff0000,0xbb0e6000,0xfefa202e
+	.long	0xff6c0280,0x80000000,0x00807fc0,0x00002f01
+	.long	0x222eff70,0xe0898081,0x221f0241,0x000761ff
+	.long	0x0000a5ba,0x6000fed0,0x202eff6c,0x02808000
+	.long	0x00000080,0x7ff80000,0x222eff70,0x2d40ff84
+	.long	0x700be0a9,0x83aeff84,0x222eff70,0x02810000
+	.long	0x07ffe0b9,0x2d41ff88,0x222eff74,0xe0a983ae
+	.long	0xff8841ee,0xff84226e,0x000c7008,0x61ffffff
+	.long	0xe8cc4a81,0x66ff0000,0xba9c6000,0xfe7a422e
+	.long	0xff4a3d6e,0xff6cff84,0x426eff86,0x202eff70
+	.long	0x08c0001e,0x2d40ff88,0x2d6eff74,0xff8c082e
+	.long	0x00050004,0x66384e68,0x2d48ffd8,0x2d56ffd4
+	.long	0x61ff0000,0x98922248,0x2d48000c,0x206effd8
+	.long	0x4e602cae,0xffd441ee,0xff84700c,0x61ffffff
+	.long	0xe86c4a81,0x66ff0000,0xba4a6000,0xfe1a2d56
+	.long	0xffd461ff,0x00009860,0x22482d48,0x000c2cae
+	.long	0xffd40c2e,0x0008ff4a,0x66ccf22e,0xd0c0ffdc
+	.long	0xf22e9c00,0xff604cee,0x0303ff9c,0xf36eff6c
+	.long	0x2c6effd4,0x2f6f00c4,0x00b82f6f,0x00c800bc
+	.long	0x2f6f00cc,0x00c02f6f,0x004400c4,0x2f6f0048
+	.long	0x00c82f6f,0x004c00cc,0xdffc0000,0x00b860ff
+	.long	0xffffe734,0x4e56ff40,0xf32eff6c,0x48ee0303
+	.long	0xff9cf22e,0xbc00ff60,0xf22ef0c0,0xffdc2d6e
+	.long	0xff68ff44,0x206eff44,0x58aeff44,0x61ffffff
+	.long	0xe7f82d40,0xff400800,0x000d6600,0x0106e9c0
+	.long	0x04c36622,0x0c6e401e,0xff6c661a,0xf23c9000
+	.long	0x00000000,0xf22e4000,0xff70f22e,0x6800ff6c
+	.long	0x3d7ce001,0xff6e41ee,0xff6c61ff,0xffffedea
+	.long	0x02ae00ff,0x01ffff64,0xf23c9000,0x00000000
+	.long	0xf23c8800,0x00000000,0xe9ee1006,0xff420c01
+	.long	0x00176700,0x009641ee,0xff6c61ff,0x0000aa84
+	.long	0x1d40ff4e,0x082e0005,0xff43672e,0x082e0004
+	.long	0xff436626,0xe9ee0183,0xff4261ff,0x0000a5c2
+	.long	0x41eeff78,0x61ff0000,0xaa5a0c00,0x00066606
+	.long	0x61ff0000,0xa9be1d40,0xff4f4280,0x102eff63
+	.long	0x122eff43,0x0241007f,0x41eeff6c,0x43eeff78
+	.long	0x223b1530,0x000059ca,0x4ebb1930,0x000059c2
+	.long	0xe9ee0183,0xff4261ff,0x0000a606,0xf22ed0c0
+	.long	0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9cf36e
+	.long	0xff6c4e5e,0x60ffffff,0xe5cc4280,0x102eff63
+	.long	0x122eff43,0x02810000,0x007f61ff,0x000043ce
+	.long	0x60be1d7c,0x0000ff4e,0x4280102e,0xff6302ae
+	.long	0xffff00ff,0xff6441ee,0xff6c61ff,0x00009be4
+	.long	0x60aa4e56,0xff40f32e,0xff6c48ee,0x0303ff9c
+	.long	0xf22ebc00,0xff60f22e,0xf0c0ffdc,0x2d6eff68
+	.long	0xff44206e,0xff4458ae,0xff4461ff,0xffffe69a
+	.long	0x2d40ff40,0x41eeff6c,0x61ffffff,0xecbcf22e
+	.long	0xd0c0ffdc,0xf22e9c00,0xff604cee,0x0303ff9c
+	.long	0xf36eff6c,0x4e5e60ff,0xffffe592,0x0c6f202c
+	.long	0x000667ff,0x000000aa,0x0c6f402c,0x000667ff
+	.long	0xffffe5a6,0x4e56ff40,0x48ee0303,0xff9c2d6e
+	.long	0x0006ff44,0x206eff44,0x58aeff44,0x61ffffff
+	.long	0xe638e9c0,0x100a0c41,0x03c86664,0xe9c01406
+	.long	0x0c010017,0x665a4e7a,0x08080800,0x0001672a
+	.long	0x4cee0303,0xff9c4e5e,0x518f3eaf,0x00082f6f
+	.long	0x000a0002,0x3f7c402c,0x00062f6f,0x0002000c
+	.long	0x58af0002,0x60ffffff,0xe5404cee,0x0303ff9c
+	.long	0x4e5ef22f,0x84000002,0x58af0002,0x2f172f6f
+	.long	0x00080004,0x1f7c0020,0x000660ff,0x00000012
+	.long	0x4cee0303,0xff9c4e5e,0x60ffffff,0xe4f64e56
+	.long	0xff4048ee,0x0303ff9c,0xf22ebc00,0xff60f22e
+	.long	0xf0c0ffdc,0x082e0005,0x00046608,0x4e682d48
+	.long	0xffd8600c,0x41ee0010,0x2d48ffd8,0x2d48ffd4
+	.long	0x2d6eff68,0xff44206e,0xff4458ae,0xff4461ff
+	.long	0xffffe576,0x2d40ff40,0xf23c9000,0x00000000
+	.long	0xf23c8800,0x00000000,0x422eff4a,0x08000016
+	.long	0x66000182,0x422eff53,0x02ae00ff,0x00ffff64
+	.long	0xe9c01406,0x0c010017,0x670000be,0x61ff0000
+	.long	0x95fc4280,0x102eff63,0x122eff43,0x0241003f
+	.long	0xe749822e,0xff4e43ee,0xff7841ee,0xff6c323b
+	.long	0x132002b2,0x4ebb1120,0x02ac102e,0xff626600
+	.long	0x00a2e9ee,0x0183ff42,0x61ff0000,0xa3e4f22e
+	.long	0xd0c0ffdc,0xf22e9c00,0xff604cee,0x0303ff9c
+	.long	0x0c2e0004,0xff4a672a,0x0c2e0008,0xff4a6722
+	.long	0x4e5e0817,0x000767ff,0xffffe358,0xf327f22f
+	.long	0xa4000014,0xf35f3f7c,0x20240006,0x60ffffff
+	.long	0xe434082e,0x00050004,0x660c2f08,0x206effd8
+	.long	0x4e60205f,0x60ca2f00,0x202effd8,0x90aeffd4
+	.long	0x2dae0008,0x08082dae,0x00040804,0x3d400004
+	.long	0x201f4e5e,0xded760aa,0x4280102e,0xff63122e
+	.long	0xff430281,0x0000007f,0x61ff0000,0x41506000
+	.long	0xff5ac02e,0xff66edc0,0x06086616,0x082e0004
+	.long	0xff666700,0xff4e082e,0x0001ff62,0x6700ff44
+	.long	0x603e0480,0x00000018,0x0c000006,0x6610082e
+	.long	0x0004ff66,0x662a082e,0x0003ff66,0x66302f00
+	.long	0x61ffffff,0xf1ee201f,0x3d7b0206,0xff6e602a
+	.long	0xe002e006,0xe004e005,0xe003e002,0xe001e001
+	.long	0x61ffffff,0xf1ce3d7c,0xe005ff6e,0x600c61ff
+	.long	0xfffff1c0,0x3d7ce003,0xff6ef22e,0xd0c0ffdc
+	.long	0xf22e9c00,0xff604cee,0x0303ff9c,0xf36eff6c
+	.long	0x6000feee,0xe9c01283,0x0c010001,0x67000056
+	.long	0x0c010007,0x66000078,0xe9c01343,0x0c010002
+	.long	0x6d00006c,0x61ff0000,0x82780c2e,0x0002ff4a
+	.long	0x670000d2,0x0c2e0001,0xff4a6600,0x01002d6e
+	.long	0xff68000c,0x3d7c201c,0x000af22e,0xd0c0ffdc
+	.long	0xf22e9c00,0xff604cee,0x0303ff9c,0x4e5e60ff
+	.long	0xffffe2dc,0x206eff44,0x54aeff44,0x61ffffff
+	.long	0xe3524a81,0x6600047c,0x48c061ff,0x00007e60
+	.long	0x0c2e0002,0xff4a6700,0x007c6000,0x00b061ff
+	.long	0x00008562,0x0c2e0002,0xff4a6700,0x0068082e
+	.long	0x00050004,0x660a206e,0xffd84e60,0x6000008e
+	.long	0x0c2e0008,0xff4a6600,0x0084f22e,0xd0c0ffdc
+	.long	0xf22e9c00,0xff604cee,0x0303ff9c,0x4e5e0817
+	.long	0x00076612,0x558f2eaf,0x00022f6f,0x00060004
+	.long	0x60ffffff,0xe17e558f,0x2eaf0002,0x3f6f0006
+	.long	0x00043f7c,0x20240006,0xf22fa400,0x000860ff
+	.long	0xffffe252,0x3d7c00c0,0x000e2d6e,0xff68000a
+	.long	0x3d6e0004,0x00083d7c,0xe000ff6e,0xf22ed0c0
+	.long	0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9cf36e
+	.long	0xff6c4e5e,0x588f60ff,0xffffe180,0xf22ed0c0
+	.long	0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9c4e5e
+	.long	0x08170007,0x660660ff,0xffffe108,0xf22fa400
+	.long	0x00081f7c,0x00240007,0x60ffffff,0xe1e84afc
+	.long	0x01c00000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x000028a4,0x4b1e4b4c,0x4f4c2982,0x4f3c0000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x000035c6,0x4b1e4b82,0x4f4c371a,0x4f3c0000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x000024b0,0x4b1e4b8c,0x4f4c2766,0x4f3c0000
+	.long	0x00002988,0x4b1e4b94,0x4f4c2af0,0x4f3c0000
+	.long	0x00001ab8,0x4b1e4bd0,0x4f4c1cf6,0x4f3c0000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00001cfc,0x4b1e4744,0x4f4c1daa,0x4f3c0000
+	.long	0x00003720,0x4b1e4744,0x4f4c37a2,0x4f3c0000
+	.long	0x00000468,0x4b1e4744,0x4f4c064c,0x4f3c0000
+	.long	0x00000f2a,0x4b1e4744,0x4f4c108e,0x4f3c0000
+	.long	0x000022e0,0x4b9a4b7a,0x4f4c248c,0x4f3c0000
+	.long	0x00003d02,0x4b9a4b7a,0x4f4c3ddc,0x4f3c0000
+	.long	0x00003dfa,0x4b9a4b7a,0x4f4c3f2a,0x4f3c0000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00003386,0x47324b82,0x4f4c3538,0x4f3c0000
+	.long	0x000037c8,0x47324b82,0x4f4c37f8,0x4f3c0000
+	.long	0x00003818,0x47324b82,0x4f4c3872,0x4f3c0000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x000027e6,0x4b9a4b52,0x4f4c288a,0x4f3c0000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00001db0,0x4bd64744,0x4f4c1e40,0x4f3c0000
+	.long	0x00000472,0x4b9a4744,0x4f4c0652,0x4f3c0000
+	.long	0x0000276c,0x4b1e4744,0x4f4c2788,0x4f3c0000
+	.long	0x000027a0,0x4b1e4744,0x4f4c27ce,0x4f3c0000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00004ca4,0x4cda4d12,0x4ee24ca4,0x4ef40000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00004dac,0x4de24e1a,0x4ee24dac,0x4ef40000
+	.long	0x00004e4e,0x4e864ebe,0x4ee24e4e,0x4ef40000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+	.long	0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+	.long	0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+	.long	0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+	.long	0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+	.long	0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+	.long	0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+	.long	0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+	.long	0x00004cee,0x0303ff9c,0xf22e9c00,0xff60f22e
+	.long	0xd0c0ffdc,0x2d6eff68,0x00064e5e,0x2f173f6f
+	.long	0x00080004,0x3f7c4008,0x00062f6f,0x00020008
+	.long	0x2f7c0942,0x8001000c,0x08170005,0x670608ef
+	.long	0x0002000d,0x60ffffff,0xde32bd6a,0xaa77ccc9
+	.long	0x94f53de6,0x12097aae,0x8da1be5a,0xe6452a11
+	.long	0x8ae43ec7,0x1de3a534,0x1531bf2a,0x01a01a01
+	.long	0x8b590000,0x00000000,0x00003ff8,0x00008888
+	.long	0x88888888,0x59af0000,0x0000bffc,0x0000aaaa
+	.long	0xaaaaaaaa,0xaa990000,0x00003d2a,0xc4d0d601
+	.long	0x1ee3bda9,0x396f9f45,0xac193e21,0xeed90612
+	.long	0xc972be92,0x7e4fb79d,0x9fcf3efa,0x01a01a01
+	.long	0xd4230000,0x00000000,0x0000bff5,0x0000b60b
+	.long	0x60b60b61,0xd4380000,0x00003ffa,0x0000aaaa
+	.long	0xaaaaaaaa,0xab5ebf00,0x00002d7c,0x00000000
+	.long	0xff5c6008,0x2d7c0000,0x0001ff5c,0xf2104800
+	.long	0xf22e6800,0xff842210,0x32280004,0x02817fff
+	.long	0xffff0c81,0x3fd78000,0x6c046000,0x01780c81
+	.long	0x4004bc7e,0x6d046000,0x0468f200,0x0080f23a
+	.long	0x54a3de7e,0x43fb0170,0x00000866,0xf22e6080
+	.long	0xff58222e,0xff58e981,0xd3c1f219,0x4828f211
+	.long	0x4428222e,0xff58d2ae,0xff5ce299,0x0c810000
+	.long	0x00006d00,0x0088f227,0xe00cf22e,0x6800ff84
+	.long	0xf2000023,0xf23a5580,0xfed2f23a,0x5500fed4
+	.long	0xf2000080,0xf20004a3,0xe2990281,0x80000000
+	.long	0xb3aeff84,0xf20005a3,0xf2000523,0xf23a55a2
+	.long	0xfebaf23a,0x5522febc,0xf20005a3,0xf2000523
+	.long	0xf23a55a2,0xfeb6f23a,0x4922fec0,0xf2000ca3
+	.long	0xf2000123,0xf23a48a2,0xfec2f22e,0x4823ff84
+	.long	0xf20008a2,0xf2000423,0xf21fd030,0xf2009000
+	.long	0xf22e4822,0xff8460ff,0x00004364,0xf227e00c
+	.long	0xf2000023,0xf23a5500,0xfea2f23a,0x5580fea4
+	.long	0xf2000080,0xf20004a3,0xf22e6800,0xff84e299
+	.long	0x02818000,0x0000f200,0x0523b3ae,0xff840281
+	.long	0x80000000,0xf20005a3,0x00813f80,0x00002d41
+	.long	0xff54f23a,0x5522fe74,0xf23a55a2,0xfe76f200
+	.long	0x0523f200,0x05a3f23a,0x5522fe70,0xf23a49a2
+	.long	0xfe7af200,0x0523f200,0x0ca3f23a,0x4922fe7c
+	.long	0xf23a44a2,0xfe82f200,0x0823f200,0x0422f22e
+	.long	0x4823ff84,0xf21fd030,0xf2009000,0xf22e4422
+	.long	0xff5460ff,0x000042c8,0x0c813fff,0x80006eff
+	.long	0x00000300,0x222eff5c,0x0c810000,0x00006e14
+	.long	0xf2009000,0x123c0003,0xf22e4800,0xff8460ff
+	.long	0x0000428e,0xf23c4400,0x3f800000,0xf2009000
+	.long	0xf23c4422,0x80800000,0x60ff0000,0x428a60ff
+	.long	0x00004110,0xf23c4400,0x3f800000,0x60ff0000
+	.long	0x42762d7c,0x00000004,0xff5cf210,0x4800f22e
+	.long	0x6800ff84,0x22103228,0x00040281,0x7fffffff
+	.long	0x0c813fd7,0x80006c04,0x60000240,0x0c814004
+	.long	0xbc7e6d04,0x6000027a,0xf2000080,0xf23a54a3
+	.long	0xdc9043fb,0x01700000,0x0678f22e,0x6080ff58
+	.long	0x222eff58,0xe981d3c1,0xf2194828,0xf2114428
+	.long	0x222eff58,0xe2990c81,0x00000000,0x6c000106
+	.long	0xf227e004,0xf22e6800,0xff84f200,0x0023f23a
+	.long	0x5480fce8,0xf23a5500,0xfd32f200,0x00a3f200
+	.long	0x01232f02,0x2401e29a,0x02828000,0x0000b382
+	.long	0x02828000,0x0000f23a,0x54a2fcc8,0xf23a5522
+	.long	0xfd12f200,0x00a3b5ae,0xff84241f,0xf2000123
+	.long	0xe2990281,0x80000000,0x2d7c3f80,0x0000ff54
+	.long	0xb3aeff54,0xf23a54a2,0xfca2f23a,0x5522fcec
+	.long	0xf20000a3,0xf2000123,0xf22e6800,0xff90f23a
+	.long	0x54a2fc90,0xb3aeff90,0xf23a5522,0xfcd6f200
+	.long	0x00a3f200,0x0123f23a,0x54a2fc80,0xf23a5522
+	.long	0xfccaf200,0x00a3f200,0x0123f23a,0x48a2fc7c
+	.long	0xf23a4922,0xfcc6f200,0x00a3f200,0x0123f23a
+	.long	0x48a2fc78,0xf23a4922,0xfcc2f200,0x00a3f200
+	.long	0x0823f22e,0x48a3ff84,0xf23a4422,0xfcbaf22e
+	.long	0x4823ff90,0xf21fd020,0xf2009000,0xf22e48a2
+	.long	0xff8461ff,0x0000448e,0xf22e4422,0xff5460ff
+	.long	0x000040fc,0xf227e004,0xf22e6800,0xff84f200
+	.long	0x0023f23a,0x5480fc34,0xf23a5500,0xfbdef200
+	.long	0x00a3f22e,0x6800ff90,0xf2000123,0xe2990281
+	.long	0x80000000,0xf23a54a2,0xfc1af23a,0x5522fbc4
+	.long	0xb3aeff84,0xb3aeff90,0xf20000a3,0x00813f80
+	.long	0x00002d41,0xff54f200,0x0123f23a,0x54a2fbfc
+	.long	0xf23a5522,0xfba6f200,0x00a3f200,0x0123f23a
+	.long	0x54a2fbf0,0xf23a5522,0xfb9af200,0x00a3f200
+	.long	0x0123f23a,0x54a2fbe4,0xf23a5522,0xfb8ef200
+	.long	0x00a3f200,0x0123f23a,0x48a2fbe0,0xf23a4922
+	.long	0xfb8af200,0x00a3f200,0x0123f23a,0x48a2fbdc
+	.long	0xf23a4922,0xfb86f200,0x00a3f200,0x0823f23a
+	.long	0x44a2fbd4,0xf22e4823,0xff84f22e,0x48a3ff90
+	.long	0xf21fd020,0xf2009000,0xf22e44a2,0xff5461ff
+	.long	0x000043a2,0xf22e4822,0xff8460ff,0x00004010
+	.long	0x0c813fff,0x80006e00,0x0048f23c,0x44803f80
+	.long	0x0000f200,0x9000f23c,0x44a80080,0x000061ff
+	.long	0x00004372,0xf200b000,0x123c0003,0xf22e4800
+	.long	0xff8460ff,0x00003fca,0x2f00f23c,0x44803f80
+	.long	0x000061ff,0x0000434e,0x201f60ff,0x00003e54
+	.long	0xf227e03c,0x2f02f23c,0x44800000,0x00000c81
+	.long	0x7ffeffff,0x66523d7c,0x7ffeff84,0x2d7cc90f
+	.long	0xdaa2ff88,0x42aeff8c,0x3d7c7fdc,0xff902d7c
+	.long	0x85a308d3,0xff9442ae,0xff98f200,0x003af294
+	.long	0x000e002e,0x0080ff84,0x002e0080,0xff90f22e
+	.long	0x4822ff84,0xf2000080,0xf22e4822,0xff90f200
+	.long	0x00a8f22e,0x48a2ff90,0xf22e6800,0xff84322e
+	.long	0xff842241,0x02810000,0x7fff0481,0x00003fff
+	.long	0x0c810000,0x001c6f0e,0x04810000,0x001b1d7c
+	.long	0x0000ff58,0x60084281,0x1d7c0001,0xff58243c
+	.long	0x00003ffe,0x94812d7c,0xa2f9836e,0xff882d7c
+	.long	0x4e44152a,0xff8c3d42,0xff84f200,0x0100f22e
+	.long	0x4923ff84,0x24094842,0x02828000,0x00000082
+	.long	0x5f000000,0x2d42ff54,0xf22e4522,0xff54f22e
+	.long	0x4528ff54,0x24010682,0x00003fff,0x3d42ff84
+	.long	0x2d7cc90f,0xdaa2ff88,0x42aeff8c,0x06810000
+	.long	0x3fdd3d41,0xff902d7c,0x85a308d3,0xff9442ae
+	.long	0xff98122e,0xff58f200,0x0a00f22e,0x4a23ff84
+	.long	0xf2000a80,0xf22e4aa3,0xff90f200,0x1180f200
+	.long	0x15a2f200,0x0e28f200,0x0c28f200,0x1622f200
+	.long	0x0180f200,0x10a8f200,0x04220c01,0x00006e00
+	.long	0x000ef200,0x01a8f200,0x0ca26000,0xff0cf22e
+	.long	0x6100ff58,0x241ff21f,0xd03c222e,0xff5c0c81
+	.long	0x00000004,0x6d00fa4c,0x6000fc36,0x3ea0b759
+	.long	0xf50f8688,0xbef2baa5,0xa8924f04,0xbf346f59
+	.long	0xb39ba65f,0x00000000,0x00000000,0x3ff60000
+	.long	0xe073d3fc,0x199c4a00,0x00000000,0x3ff90000
+	.long	0xd23cd684,0x15d95fa1,0x00000000,0xbffc0000
+	.long	0x8895a6c5,0xfb423bca,0x00000000,0xbffd0000
+	.long	0xeef57e0d,0xa84bc8ce,0x00000000,0x3ffc0000
+	.long	0xa2f9836e,0x4e44152a,0x00000000,0x40010000
+	.long	0xc90fdaa2,0x00000000,0x00000000,0x3fdf0000
+	.long	0x85a308d4,0x00000000,0x00000000,0xc0040000
+	.long	0xc90fdaa2,0x2168c235,0x21800000,0xc0040000
+	.long	0xc2c75bcd,0x105d7c23,0xa0d00000,0xc0040000
+	.long	0xbc7edcf7,0xff523611,0xa1e80000,0xc0040000
+	.long	0xb6365e22,0xee46f000,0x21480000,0xc0040000
+	.long	0xafeddf4d,0xdd3ba9ee,0xa1200000,0xc0040000
+	.long	0xa9a56078,0xcc3063dd,0x21fc0000,0xc0040000
+	.long	0xa35ce1a3,0xbb251dcb,0x21100000,0xc0040000
+	.long	0x9d1462ce,0xaa19d7b9,0xa1580000,0xc0040000
+	.long	0x96cbe3f9,0x990e91a8,0x21e00000,0xc0040000
+	.long	0x90836524,0x88034b96,0x20b00000,0xc0040000
+	.long	0x8a3ae64f,0x76f80584,0xa1880000,0xc0040000
+	.long	0x83f2677a,0x65ecbf73,0x21c40000,0xc0030000
+	.long	0xfb53d14a,0xa9c2f2c2,0x20000000,0xc0030000
+	.long	0xeec2d3a0,0x87ac669f,0x21380000,0xc0030000
+	.long	0xe231d5f6,0x6595da7b,0xa1300000,0xc0030000
+	.long	0xd5a0d84c,0x437f4e58,0x9fc00000,0xc0030000
+	.long	0xc90fdaa2,0x2168c235,0x21000000,0xc0030000
+	.long	0xbc7edcf7,0xff523611,0xa1680000,0xc0030000
+	.long	0xafeddf4d,0xdd3ba9ee,0xa0a00000,0xc0030000
+	.long	0xa35ce1a3,0xbb251dcb,0x20900000,0xc0030000
+	.long	0x96cbe3f9,0x990e91a8,0x21600000,0xc0030000
+	.long	0x8a3ae64f,0x76f80584,0xa1080000,0xc0020000
+	.long	0xfb53d14a,0xa9c2f2c2,0x1f800000,0xc0020000
+	.long	0xe231d5f6,0x6595da7b,0xa0b00000,0xc0020000
+	.long	0xc90fdaa2,0x2168c235,0x20800000,0xc0020000
+	.long	0xafeddf4d,0xdd3ba9ee,0xa0200000,0xc0020000
+	.long	0x96cbe3f9,0x990e91a8,0x20e00000,0xc0010000
+	.long	0xfb53d14a,0xa9c2f2c2,0x1f000000,0xc0010000
+	.long	0xc90fdaa2,0x2168c235,0x20000000,0xc0010000
+	.long	0x96cbe3f9,0x990e91a8,0x20600000,0xc0000000
+	.long	0xc90fdaa2,0x2168c235,0x1f800000,0xbfff0000
+	.long	0xc90fdaa2,0x2168c235,0x1f000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x3fff0000
+	.long	0xc90fdaa2,0x2168c235,0x9f000000,0x40000000
+	.long	0xc90fdaa2,0x2168c235,0x9f800000,0x40010000
+	.long	0x96cbe3f9,0x990e91a8,0xa0600000,0x40010000
+	.long	0xc90fdaa2,0x2168c235,0xa0000000,0x40010000
+	.long	0xfb53d14a,0xa9c2f2c2,0x9f000000,0x40020000
+	.long	0x96cbe3f9,0x990e91a8,0xa0e00000,0x40020000
+	.long	0xafeddf4d,0xdd3ba9ee,0x20200000,0x40020000
+	.long	0xc90fdaa2,0x2168c235,0xa0800000,0x40020000
+	.long	0xe231d5f6,0x6595da7b,0x20b00000,0x40020000
+	.long	0xfb53d14a,0xa9c2f2c2,0x9f800000,0x40030000
+	.long	0x8a3ae64f,0x76f80584,0x21080000,0x40030000
+	.long	0x96cbe3f9,0x990e91a8,0xa1600000,0x40030000
+	.long	0xa35ce1a3,0xbb251dcb,0xa0900000,0x40030000
+	.long	0xafeddf4d,0xdd3ba9ee,0x20a00000,0x40030000
+	.long	0xbc7edcf7,0xff523611,0x21680000,0x40030000
+	.long	0xc90fdaa2,0x2168c235,0xa1000000,0x40030000
+	.long	0xd5a0d84c,0x437f4e58,0x1fc00000,0x40030000
+	.long	0xe231d5f6,0x6595da7b,0x21300000,0x40030000
+	.long	0xeec2d3a0,0x87ac669f,0xa1380000,0x40030000
+	.long	0xfb53d14a,0xa9c2f2c2,0xa0000000,0x40040000
+	.long	0x83f2677a,0x65ecbf73,0xa1c40000,0x40040000
+	.long	0x8a3ae64f,0x76f80584,0x21880000,0x40040000
+	.long	0x90836524,0x88034b96,0xa0b00000,0x40040000
+	.long	0x96cbe3f9,0x990e91a8,0xa1e00000,0x40040000
+	.long	0x9d1462ce,0xaa19d7b9,0x21580000,0x40040000
+	.long	0xa35ce1a3,0xbb251dcb,0xa1100000,0x40040000
+	.long	0xa9a56078,0xcc3063dd,0xa1fc0000,0x40040000
+	.long	0xafeddf4d,0xdd3ba9ee,0x21200000,0x40040000
+	.long	0xb6365e22,0xee46f000,0xa1480000,0x40040000
+	.long	0xbc7edcf7,0xff523611,0x21e80000,0x40040000
+	.long	0xc2c75bcd,0x105d7c23,0x20d00000,0x40040000
+	.long	0xc90fdaa2,0x2168c235,0xa1800000,0xf2104800
+	.long	0x22103228,0x00040281,0x7fffffff,0x0c813fd7
+	.long	0x80006c04,0x60000134,0x0c814004,0xbc7e6d04
+	.long	0x60000144,0xf2000080,0xf23a54a3,0xd3d443fa
+	.long	0xfdbcf201,0x6080e981,0xd3c1f219,0x4828f211
+	.long	0x4428ea99,0x02818000,0x0000f227,0xe00c0c81
+	.long	0x00000000,0x6d000072,0xf2000080,0xf20004a3
+	.long	0xf23a5580,0xfaf8f23a,0x5500fafa,0xf20005a3
+	.long	0xf2000523,0xf23a55a2,0xfaf4f23a,0x4922fafe
+	.long	0xf20005a3,0xf2000523,0xf23a49a2,0xfb00f23a
+	.long	0x4922fb0a,0xf20005a3,0xf2000523,0xf23a49a2
+	.long	0xfb0cf200,0x0123f200,0x0ca3f200,0x0822f23c
+	.long	0x44a23f80,0x0000f21f,0xd030f200,0x9000f200
+	.long	0x042060ff,0x000038d8,0xf2000080,0xf2000023
+	.long	0xf23a5580,0xfa88f23a,0x5500fa8a,0xf20001a3
+	.long	0xf2000123,0xf23a55a2,0xfa84f23a,0x4922fa8e
+	.long	0xf20001a3,0xf2000123,0xf23a49a2,0xfa90f23a
+	.long	0x4922fa9a,0xf20001a3,0xf2000123,0xf23a49a2
+	.long	0xfa9cf200,0x0523f200,0x0c23f200,0x08a2f23c
+	.long	0x44223f80,0x0000f21f,0xd030f227,0x68800a97
+	.long	0x80000000,0xf2009000,0xf21f4820,0x60ff0000
+	.long	0x385e0c81,0x3fff8000,0x6e1cf227,0x6800f200
+	.long	0x9000123c,0x0003f21f,0x480060ff,0x00003832
+	.long	0x60ff0000,0x36cef227,0xe03c2f02,0xf23c4480
+	.long	0x00000000,0x0c817ffe,0xffff6652,0x3d7c7ffe
+	.long	0xff842d7c,0xc90fdaa2,0xff8842ae,0xff8c3d7c
+	.long	0x7fdcff90,0x2d7c85a3,0x08d3ff94,0x42aeff98
+	.long	0xf200003a,0xf294000e,0x002e0080,0xff84002e
+	.long	0x0080ff90,0xf22e4822,0xff84f200,0x0080f22e
+	.long	0x4822ff90,0xf20000a8,0xf22e48a2,0xff90f22e
+	.long	0x6800ff84,0x322eff84,0x22410281,0x00007fff
+	.long	0x04810000,0x3fff0c81,0x0000001c,0x6f0e0481
+	.long	0x0000001b,0x1d7c0000,0xff586008,0x42811d7c
+	.long	0x0001ff58,0x243c0000,0x3ffe9481,0x2d7ca2f9
+	.long	0x836eff88,0x2d7c4e44,0x152aff8c,0x3d42ff84
+	.long	0xf2000100,0xf22e4923,0xff842409,0x48420282
+	.long	0x80000000,0x00825f00,0x00002d42,0xff54f22e
+	.long	0x4522ff54,0xf22e4528,0xff542401,0x06820000
+	.long	0x3fff3d42,0xff842d7c,0xc90fdaa2,0xff8842ae
+	.long	0xff8c0681,0x00003fdd,0x3d41ff90,0x2d7c85a3
+	.long	0x08d3ff94,0x42aeff98,0x122eff58,0xf2000a00
+	.long	0xf22e4a23,0xff84f200,0x0a80f22e,0x4aa3ff90
+	.long	0xf2001180,0xf20015a2,0xf2000e28,0xf2000c28
+	.long	0xf2001622,0xf2000180,0xf20010a8,0xf2000422
+	.long	0x0c010000,0x6e00000e,0xf20001a8,0xf2000ca2
+	.long	0x6000ff0c,0xf22e6100,0xff54241f,0xf21fd03c
+	.long	0x222eff54,0xe2996000,0xfd72bff6,0x687e3149
+	.long	0x87d84002,0xac6934a2,0x6db3bfc2,0x476f4e1d
+	.long	0xa28e3fb3,0x44447f87,0x6989bfb7,0x44ee7faf
+	.long	0x45db3fbc,0x71c64694,0x0220bfc2,0x49249218
+	.long	0x72f93fc9,0x99999999,0x8fa9bfd5,0x55555555
+	.long	0x5555bfb7,0x0bf39853,0x9e6a3fbc,0x7187962d
+	.long	0x1d7dbfc2,0x49248271,0x07b83fc9,0x99999996
+	.long	0x263ebfd5,0x55555555,0x55363fff,0x0000c90f
+	.long	0xdaa22168,0xc2350000,0x0000bfff,0x0000c90f
+	.long	0xdaa22168,0xc2350000,0x00000001,0x00008000
+	.long	0x00000000,0x00000000,0x00008001,0x00008000
+	.long	0x00000000,0x00000000,0x00003ffb,0x000083d1
+	.long	0x52c5060b,0x7a510000,0x00003ffb,0x00008bc8
+	.long	0x54456549,0x8b8b0000,0x00003ffb,0x000093be
+	.long	0x40601762,0x6b0d0000,0x00003ffb,0x00009bb3
+	.long	0x078d35ae,0xc2020000,0x00003ffb,0x0000a3a6
+	.long	0x9a525ddc,0xe7de0000,0x00003ffb,0x0000ab98
+	.long	0xe9436276,0x56190000,0x00003ffb,0x0000b389
+	.long	0xe502f9c5,0x98620000,0x00003ffb,0x0000bb79
+	.long	0x7e436b09,0xe6fb0000,0x00003ffb,0x0000c367
+	.long	0xa5c739e5,0xf4460000,0x00003ffb,0x0000cb54
+	.long	0x4c61cff7,0xd5c60000,0x00003ffb,0x0000d33f
+	.long	0x62f82488,0x533e0000,0x00003ffb,0x0000db28
+	.long	0xda816240,0x4c770000,0x00003ffb,0x0000e310
+	.long	0xa4078ad3,0x4f180000,0x00003ffb,0x0000eaf6
+	.long	0xb0a8188e,0xe1eb0000,0x00003ffb,0x0000f2da
+	.long	0xf1949dbe,0x79d50000,0x00003ffb,0x0000fabd
+	.long	0x581361d4,0x7e3e0000,0x00003ffc,0x00008346
+	.long	0xac210959,0xecc40000,0x00003ffc,0x00008b23
+	.long	0x2a083042,0x82d80000,0x00003ffc,0x000092fb
+	.long	0x70b8d29a,0xe2f90000,0x00003ffc,0x00009acf
+	.long	0x476f5ccd,0x1cb40000,0x00003ffc,0x0000a29e
+	.long	0x76304954,0xf23f0000,0x00003ffc,0x0000aa68
+	.long	0xc5d08ab8,0x52300000,0x00003ffc,0x0000b22d
+	.long	0xfffd9d53,0x9f830000,0x00003ffc,0x0000b9ed
+	.long	0xef453e90,0x0ea50000,0x00003ffc,0x0000c1a8
+	.long	0x5f1cc75e,0x3ea50000,0x00003ffc,0x0000c95d
+	.long	0x1be82813,0x8de60000,0x00003ffc,0x0000d10b
+	.long	0xf300840d,0x2de40000,0x00003ffc,0x0000d8b4
+	.long	0xb2ba6bc0,0x5e7a0000,0x00003ffc,0x0000e057
+	.long	0x2a6bb423,0x35f60000,0x00003ffc,0x0000e7f3
+	.long	0x2a70ea9c,0xaa8f0000,0x00003ffc,0x0000ef88
+	.long	0x843264ec,0xefaa0000,0x00003ffc,0x0000f717
+	.long	0x0a28ecc0,0x66660000,0x00003ffd,0x0000812f
+	.long	0xd288332d,0xad320000,0x00003ffd,0x000088a8
+	.long	0xd1b1218e,0x4d640000,0x00003ffd,0x00009012
+	.long	0xab3f23e4,0xaee80000,0x00003ffd,0x0000976c
+	.long	0xc3d411e7,0xf1b90000,0x00003ffd,0x00009eb6
+	.long	0x89493889,0xa2270000,0x00003ffd,0x0000a5ef
+	.long	0x72c34487,0x361b0000,0x00003ffd,0x0000ad17
+	.long	0x00baf07a,0x72270000,0x00003ffd,0x0000b42c
+	.long	0xbcfafd37,0xefb70000,0x00003ffd,0x0000bb30
+	.long	0x3a940ba8,0x0f890000,0x00003ffd,0x0000c221
+	.long	0x15c6fcae,0xbbaf0000,0x00003ffd,0x0000c8fe
+	.long	0xf3e68633,0x12210000,0x00003ffd,0x0000cfc9
+	.long	0x8330b400,0x0c700000,0x00003ffd,0x0000d680
+	.long	0x7aa1102c,0x5bf90000,0x00003ffd,0x0000dd23
+	.long	0x99bc3125,0x2aa30000,0x00003ffd,0x0000e3b2
+	.long	0xa8556b8f,0xc5170000,0x00003ffd,0x0000ea2d
+	.long	0x764f6431,0x59890000,0x00003ffd,0x0000f3bf
+	.long	0x5bf8bad1,0xa21d0000,0x00003ffe,0x0000801c
+	.long	0xe39e0d20,0x5c9a0000,0x00003ffe,0x00008630
+	.long	0xa2dada1e,0xd0660000,0x00003ffe,0x00008c1a
+	.long	0xd445f3e0,0x9b8c0000,0x00003ffe,0x000091db
+	.long	0x8f1664f3,0x50e20000,0x00003ffe,0x00009773
+	.long	0x1420365e,0x538c0000,0x00003ffe,0x00009ce1
+	.long	0xc8e6a0b8,0xcdba0000,0x00003ffe,0x0000a228
+	.long	0x32dbcada,0xae090000,0x00003ffe,0x0000a746
+	.long	0xf2ddb760,0x22940000,0x00003ffe,0x0000ac3e
+	.long	0xc0fb997d,0xd6a20000,0x00003ffe,0x0000b110
+	.long	0x688aebdc,0x6f6a0000,0x00003ffe,0x0000b5bc
+	.long	0xc49059ec,0xc4b00000,0x00003ffe,0x0000ba44
+	.long	0xbc7dd470,0x782f0000,0x00003ffe,0x0000bea9
+	.long	0x4144fd04,0x9aac0000,0x00003ffe,0x0000c2eb
+	.long	0x4abb6616,0x28b60000,0x00003ffe,0x0000c70b
+	.long	0xd54ce602,0xee140000,0x00003ffe,0x0000cd00
+	.long	0x0549adec,0x71590000,0x00003ffe,0x0000d484
+	.long	0x57d2d8ea,0x4ea30000,0x00003ffe,0x0000db94
+	.long	0x8da712de,0xce3b0000,0x00003ffe,0x0000e238
+	.long	0x55f969e8,0x096a0000,0x00003ffe,0x0000e877
+	.long	0x1129c435,0x32590000,0x00003ffe,0x0000ee57
+	.long	0xc16e0d37,0x9c0d0000,0x00003ffe,0x0000f3e1
+	.long	0x0211a87c,0x37790000,0x00003ffe,0x0000f919
+	.long	0x039d758b,0x8d410000,0x00003ffe,0x0000fe05
+	.long	0x8b8f6493,0x5fb30000,0x00003fff,0x00008155
+	.long	0xfb497b68,0x5d040000,0x00003fff,0x00008388
+	.long	0x9e3549d1,0x08e10000,0x00003fff,0x0000859c
+	.long	0xfa76511d,0x724b0000,0x00003fff,0x00008795
+	.long	0x2ecfff81,0x31e70000,0x00003fff,0x00008973
+	.long	0x2fd19557,0x641b0000,0x00003fff,0x00008b38
+	.long	0xcad10193,0x2a350000,0x00003fff,0x00008ce7
+	.long	0xa8d8301e,0xe6b50000,0x00003fff,0x00008f46
+	.long	0xa39e2eae,0x52810000,0x00003fff,0x0000922d
+	.long	0xa7d79188,0x84870000,0x00003fff,0x000094d1
+	.long	0x9fcbdedf,0x52410000,0x00003fff,0x0000973a
+	.long	0xb94419d2,0xa08b0000,0x00003fff,0x0000996f
+	.long	0xf00e08e1,0x0b960000,0x00003fff,0x00009b77
+	.long	0x3f951232,0x1da70000,0x00003fff,0x00009d55
+	.long	0xcc320f93,0x56240000,0x00003fff,0x00009f10
+	.long	0x0575006c,0xc5710000,0x00003fff,0x0000a0a9
+	.long	0xc290d97c,0xc06c0000,0x00003fff,0x0000a226
+	.long	0x59ebebc0,0x630a0000,0x00003fff,0x0000a388
+	.long	0xb4aff6ef,0x0ec90000,0x00003fff,0x0000a4d3
+	.long	0x5f1061d2,0x92c40000,0x00003fff,0x0000a608
+	.long	0x95dcfbe3,0x187e0000,0x00003fff,0x0000a72a
+	.long	0x51dc7367,0xbeac0000,0x00003fff,0x0000a83a
+	.long	0x51530956,0x168f0000,0x00003fff,0x0000a93a
+	.long	0x20077539,0x546e0000,0x00003fff,0x0000aa9e
+	.long	0x7245023b,0x26050000,0x00003fff,0x0000ac4c
+	.long	0x84ba6fe4,0xd58f0000,0x00003fff,0x0000adce
+	.long	0x4a4a606b,0x97120000,0x00003fff,0x0000af2a
+	.long	0x2dcd8d26,0x3c9c0000,0x00003fff,0x0000b065
+	.long	0x6f81f222,0x65c70000,0x00003fff,0x0000b184
+	.long	0x65150f71,0x496a0000,0x00003fff,0x0000b28a
+	.long	0xaa156f9a,0xda350000,0x00003fff,0x0000b37b
+	.long	0x44ff3766,0xb8950000,0x00003fff,0x0000b458
+	.long	0xc3dce963,0x04330000,0x00003fff,0x0000b525
+	.long	0x529d5622,0x46bd0000,0x00003fff,0x0000b5e2
+	.long	0xcca95f9d,0x88cc0000,0x00003fff,0x0000b692
+	.long	0xcada7aca,0x1ada0000,0x00003fff,0x0000b736
+	.long	0xaea7a692,0x58380000,0x00003fff,0x0000b7cf
+	.long	0xab287e9f,0x7b360000,0x00003fff,0x0000b85e
+	.long	0xcc66cb21,0x98350000,0x00003fff,0x0000b8e4
+	.long	0xfd5a20a5,0x93da0000,0x00003fff,0x0000b99f
+	.long	0x41f64aff,0x9bb50000,0x00003fff,0x0000ba7f
+	.long	0x1e17842b,0xbe7b0000,0x00003fff,0x0000bb47
+	.long	0x12857637,0xe17d0000,0x00003fff,0x0000bbfa
+	.long	0xbe8a4788,0xdf6f0000,0x00003fff,0x0000bc9d
+	.long	0x0fad2b68,0x9d790000,0x00003fff,0x0000bd30
+	.long	0x6a39471e,0xcd860000,0x00003fff,0x0000bdb6
+	.long	0xc731856a,0xf18a0000,0x00003fff,0x0000be31
+	.long	0xcac502e8,0x0d700000,0x00003fff,0x0000bea2
+	.long	0xd55ce331,0x94e20000,0x00003fff,0x0000bf0b
+	.long	0x10b7c031,0x28f00000,0x00003fff,0x0000bf6b
+	.long	0x7a18dacb,0x778d0000,0x00003fff,0x0000bfc4
+	.long	0xea4663fa,0x18f60000,0x00003fff,0x0000c018
+	.long	0x1bde8b89,0xa4540000,0x00003fff,0x0000c065
+	.long	0xb066cfbf,0x64390000,0x00003fff,0x0000c0ae
+	.long	0x345f5634,0x0ae60000,0x00003fff,0x0000c0f2
+	.long	0x22919cb9,0xe6a70000,0x0000f210,0x48002210
+	.long	0x32280004,0xf22e6800,0xff840281,0x7fffffff
+	.long	0x0c813ffb,0x80006c04,0x600000d0,0x0c814002
+	.long	0xffff6f04,0x6000014c,0x02aef800,0x0000ff88
+	.long	0x00ae0400,0x0000ff88,0x2d7c0000,0x0000ff8c
+	.long	0xf2000080,0xf22e48a3,0xff84f22e,0x4828ff84
+	.long	0xf23c44a2,0x3f800000,0xf2000420,0x2f022401
+	.long	0x02810000,0x78000282,0x7fff0000,0x04823ffb
+	.long	0x0000e282,0xd282ee81,0x43faf780,0xd3c12d59
+	.long	0xff902d59,0xff942d59,0xff98222e,0xff840281
+	.long	0x80000000,0x83aeff90,0x241ff227,0xe004f200
+	.long	0x0080f200,0x04a3f23a,0x5500f6a0,0xf2000522
+	.long	0xf2000523,0xf20000a3,0xf23a5522,0xf696f23a
+	.long	0x54a3f698,0xf20008a3,0xf2000422,0xf21fd020
+	.long	0xf2009000,0xf22e4822,0xff9060ff,0x00002d30
+	.long	0x0c813fff,0x80006e00,0x008a0c81,0x3fd78000
+	.long	0x6d00006c,0xf227e00c,0xf2000023,0xf2000080
+	.long	0xf20004a3,0xf23a5500,0xf65af23a,0x5580f65c
+	.long	0xf2000523,0xf20005a3,0xf23a5522,0xf656f23a
+	.long	0x55a2f658,0xf2000523,0xf2000ca3,0xf23a5522
+	.long	0xf652f23a,0x54a2f654,0xf2000123,0xf22e4823
+	.long	0xff84f200,0x08a2f200,0x0423f21f,0xd030f200
+	.long	0x9000f22e,0x4822ff84,0x60ff0000,0x2cb2f200
+	.long	0x9000123c,0x0003f22e,0x4800ff84,0x60ff0000
+	.long	0x2c900c81,0x40638000,0x6e00008e,0xf227e00c
+	.long	0xf23c4480,0xbf800000,0xf20000a0,0xf2000400
+	.long	0xf2000023,0xf22e6880,0xff84f200,0x0080f200
+	.long	0x04a3f23a,0x5580f5ec,0xf23a5500,0xf5eef200
+	.long	0x05a3f200,0x0523f23a,0x55a2f5e8,0xf23a5522
+	.long	0xf5eaf200,0x0ca3f200,0x0123f23a,0x54a2f5e4
+	.long	0xf22e4823,0xff84f200,0x08a2f200,0x0423f22e
+	.long	0x4822ff84,0xf21fd030,0xf2009000,0x4a106a0c
+	.long	0xf23a4822,0xf5d660ff,0x00002c24,0xf23a4822
+	.long	0xf5ba60ff,0x00002c10,0x4a106a16,0xf23a4800
+	.long	0xf5baf200,0x9000f23a,0x4822f5c0,0x60ff0000
+	.long	0x2bfef23a,0x4800f594,0xf2009000,0xf23a4822
+	.long	0xf5ba60ff,0x00002be0,0x60ff0000,0x2a66f210
+	.long	0x48002210,0x32280004,0x02817fff,0xffff0c81
+	.long	0x3fff8000,0x6c4e0c81,0x3fd78000,0x6d00007c
+	.long	0xf23c4480,0x3f800000,0xf20000a8,0xf227e004
+	.long	0xf23c4500,0x3f800000,0xf2000122,0xf20008a3
+	.long	0xf21fd020,0xf2000484,0xf2000420,0xf227e001
+	.long	0x41d761ff,0xfffffd66,0xdffc0000,0x000c60ff
+	.long	0x00002b6c,0xf2000018,0xf23c4438,0x3f800000
+	.long	0xf2d20000,0x29d4f23a,0x4800c5a6,0x22100281
+	.long	0x80000000,0x00813f80,0x00002f01,0xf2009000
+	.long	0xf21f4423,0x60ff0000,0x2b36f200,0x9000123c
+	.long	0x0003f210,0x480060ff,0x00002b16,0x60ff0000
+	.long	0x29b2f210,0x48002210,0x32280004,0x02817fff
+	.long	0xffff0c81,0x3fff8000,0x6c44f23c,0x44803f80
+	.long	0x0000f200,0x00a2f200,0x001af23c,0x44223f80
+	.long	0x0000f200,0x0420f200,0x00042f00,0x4280f227
+	.long	0xe00141d7,0x61ffffff,0xfcc4dffc,0x0000000c
+	.long	0xf21f9000,0xf2000022,0x60ff0000,0x2acaf200
+	.long	0x0018f23c,0x44383f80,0x0000f2d2,0x0000292a
+	.long	0x4a106a18,0xf23a4800,0xc4e8f200,0x9000f23c
+	.long	0x44220080,0x000060ff,0x00002a9c,0x60ff0000
+	.long	0x2ce8f200,0x9000f23a,0x4800c4d6,0x60ff0000
+	.long	0x2a863fdc,0x000082e3,0x08654361,0xc4c60000
+	.long	0x00003fa5,0x55555555,0x4cc13fc5,0x55555555
+	.long	0x4a543f81,0x11111117,0x43853fa5,0x55555555
+	.long	0x4f5a3fc5,0x55555555,0x55550000,0x00000000
+	.long	0x00003ec7,0x1de3a577,0x46823efa,0x01a019d7
+	.long	0xcb683f2a,0x01a01a01,0x9df33f56,0xc16c16c1
+	.long	0x70e23f81,0x11111111,0x11113fa5,0x55555555
+	.long	0x55553ffc,0x0000aaaa,0xaaaaaaaa,0xaaab0000
+	.long	0x000048b0,0x00000000,0x00003730,0x00000000
+	.long	0x00003fff,0x00008000,0x00000000,0x00000000
+	.long	0x00003fff,0x00008164,0xd1f3bc03,0x07749f84
+	.long	0x1a9b3fff,0x000082cd,0x8698ac2b,0xa1d89fc1
+	.long	0xd5b93fff,0x0000843a,0x28c3acde,0x4048a072
+	.long	0x83693fff,0x000085aa,0xc367cc48,0x7b141fc5
+	.long	0xc95c3fff,0x0000871f,0x61969e8d,0x10101ee8
+	.long	0x5c9f3fff,0x00008898,0x0e8092da,0x85289fa2
+	.long	0x07293fff,0x00008a14,0xd575496e,0xfd9ca07b
+	.long	0xf9af3fff,0x00008b95,0xc1e3ea8b,0xd6e8a002
+	.long	0x0dcf3fff,0x00008d1a,0xdf5b7e5b,0xa9e4205a
+	.long	0x63da3fff,0x00008ea4,0x398b45cd,0x53c01eb7
+	.long	0x00513fff,0x00009031,0xdc431466,0xb1dc1f6e
+	.long	0xb0293fff,0x000091c3,0xd373ab11,0xc338a078
+	.long	0x14943fff,0x0000935a,0x2b2f13e6,0xe92c9eb3
+	.long	0x19b03fff,0x000094f4,0xefa8fef7,0x09602017
+	.long	0x457d3fff,0x00009694,0x2d372018,0x5a001f11
+	.long	0xd5373fff,0x00009837,0xf0518db8,0xa9709fb9
+	.long	0x52dd3fff,0x000099e0,0x459320b7,0xfa641fe4
+	.long	0x30873fff,0x00009b8d,0x39b9d54e,0x55381fa2
+	.long	0xa8183fff,0x00009d3e,0xd9a72cff,0xb7501fde
+	.long	0x494d3fff,0x00009ef5,0x326091a1,0x11ac2050
+	.long	0x48903fff,0x0000a0b0,0x510fb971,0x4fc4a073
+	.long	0x691c3fff,0x0000a270,0x43030c49,0x68181f9b
+	.long	0x7a053fff,0x0000a435,0x15ae09e6,0x80a0a079
+	.long	0x71263fff,0x0000a5fe,0xd6a9b151,0x38eca071
+	.long	0xa1403fff,0x0000a7cd,0x93b4e965,0x3568204f
+	.long	0x62da3fff,0x0000a9a1,0x5ab4ea7c,0x0ef81f28
+	.long	0x3c4a3fff,0x0000ab7a,0x39b5a93e,0xd3389f9a
+	.long	0x7fdc3fff,0x0000ad58,0x3eea42a1,0x4ac8a05b
+	.long	0x3fac3fff,0x0000af3b,0x78ad690a,0x43741fdf
+	.long	0x26103fff,0x0000b123,0xf581d2ac,0x25909f70
+	.long	0x5f903fff,0x0000b311,0xc412a911,0x2488201f
+	.long	0x678a3fff,0x0000b504,0xf333f9de,0x64841f32
+	.long	0xfb133fff,0x0000b6fd,0x91e328d1,0x77902003
+	.long	0x8b303fff,0x0000b8fb,0xaf4762fb,0x9ee8200d
+	.long	0xc3cc3fff,0x0000baff,0x5ab2133e,0x45fc9f8b
+	.long	0x2ae63fff,0x0000bd08,0xa39f580c,0x36c0a02b
+	.long	0xbf703fff,0x0000bf17,0x99b67a73,0x1084a00b
+	.long	0xf5183fff,0x0000c12c,0x4cca6670,0x9458a041
+	.long	0xdd413fff,0x0000c346,0xccda2497,0x64089fdf
+	.long	0x137b3fff,0x0000c567,0x2a115506,0xdadc201f
+	.long	0x15683fff,0x0000c78d,0x74c8abb9,0xb15c1fc1
+	.long	0x3a2e3fff,0x0000c9b9,0xbd866e2f,0x27a4a03f
+	.long	0x8f033fff,0x0000cbec,0x14fef272,0x7c5c1ff4
+	.long	0x907d3fff,0x0000ce24,0x8c151f84,0x80e49e6e
+	.long	0x53e43fff,0x0000d063,0x33daef2b,0x25941fd6
+	.long	0xd45c3fff,0x0000d2a8,0x1d91f12a,0xe45ca076
+	.long	0xedb93fff,0x0000d4f3,0x5aabcfed,0xfa209fa6
+	.long	0xde213fff,0x0000d744,0xfccad69d,0x6af41ee6
+	.long	0x9a2f3fff,0x0000d99d,0x15c278af,0xd7b4207f
+	.long	0x439f3fff,0x0000dbfb,0xb797daf2,0x3754201e
+	.long	0xc2073fff,0x0000de60,0xf4825e0e,0x91249e8b
+	.long	0xe1753fff,0x0000e0cc,0xdeec2a94,0xe1102003
+	.long	0x2c4b3fff,0x0000e33f,0x8972be8a,0x5a502004
+	.long	0xdff53fff,0x0000e5b9,0x06e77c83,0x48a81e72
+	.long	0xf47a3fff,0x0000e839,0x6a503c4b,0xdc681f72
+	.long	0x2f223fff,0x0000eac0,0xc6e7dd24,0x3930a017
+	.long	0xe9453fff,0x0000ed4f,0x301ed994,0x2b841f40
+	.long	0x1a5b3fff,0x0000efe4,0xb99bdcda,0xf5cc9fb9
+	.long	0xa9e33fff,0x0000f281,0x773c59ff,0xb1382074
+	.long	0x4c053fff,0x0000f525,0x7d152486,0xcc2c1f77
+	.long	0x3a193fff,0x0000f7d0,0xdf730ad1,0x3bb81ffe
+	.long	0x90d53fff,0x0000fa83,0xb2db722a,0x033ca041
+	.long	0xed223fff,0x0000fd3e,0x0c0cf486,0xc1741f85
+	.long	0x3f3a2210,0x02817fff,0x00000c81,0x3fbe0000
+	.long	0x6c0660ff,0x00000108,0x32280004,0x0c81400c
+	.long	0xb1676d06,0x60ff0000,0x010cf210,0x4800f200
+	.long	0x0080f23c,0x442342b8,0xaa3bf227,0xe00c2d7c
+	.long	0x00000000,0xff58f201,0x600043fa,0xfbb6f201
+	.long	0x40002d41,0xff540281,0x0000003f,0xe989d3c1
+	.long	0x222eff54,0xec810641,0x3fff3d7a,0xfb06ff54
+	.long	0xf2000100,0xf23c4423,0xbc317218,0xf23a4923
+	.long	0xfaf2f200,0x0422f200,0x0822f200,0x0080f200
+	.long	0x04a3f23c,0x45003ab6,0x0b70f200,0x0523f200
+	.long	0x0580f23c,0x45a33c08,0x8895f23a,0x5522fad4
+	.long	0xf23a55a2,0xfad6f200,0x05233d41,0xff842d7c
+	.long	0x80000000,0xff8842ae,0xff8cf200,0x05a3f23c
+	.long	0x45223f00,0x0000f200,0x01a3f200,0x0523f200
+	.long	0x0c22f219,0x4880f200,0x0822f200,0x0423f21f
+	.long	0xd030f211,0x4422f200,0x0422222e,0xff584a81
+	.long	0x6706f22e,0x4823ff90,0xf2009000,0x123c0000
+	.long	0xf22e4823,0xff8460ff,0x000024c6,0xf210d080
+	.long	0xf2009000,0xf23c4422,0x3f800000,0x60ff0000
+	.long	0x24c60c81,0x400cb27c,0x6e66f210,0x4800f200
+	.long	0x0080f23c,0x442342b8,0xaa3bf227,0xe00c2d7c
+	.long	0x00000001,0xff58f201,0x600043fa,0xfaa6f201
+	.long	0x40002d41,0xff540281,0x0000003f,0xe989d3c1
+	.long	0x222eff54,0xec812d41,0xff54e281,0x93aeff54
+	.long	0x06413fff,0x3d41ff90,0x2d7c8000,0x0000ff94
+	.long	0x42aeff98,0x222eff54,0x06413fff,0x6000fed2
+	.long	0x4a106bff,0x00002370,0x60ff0000,0x24122f10
+	.long	0x02978000,0x00000097,0x00800000,0xf23c4400
+	.long	0x3f800000,0xf2009000,0xf21f4422,0x60ff0000
+	.long	0x24262210,0x02817fff,0x00000c81,0x3ffd0000
+	.long	0x6c0660ff,0x0000015e,0x32280004,0x0c814004
+	.long	0xc2156f06,0x60ff0000,0x026cf210,0x4800f200
+	.long	0x0080f23c,0x442342b8,0xaa3bf227,0xe00cf201
+	.long	0x600043fa,0xf9eef201,0x40002d41,0xff540281
+	.long	0x0000003f,0xe989d3c1,0x222eff54,0xec812d41
+	.long	0xff54f200,0x0100f23c,0x4423bc31,0x7218f23a
+	.long	0x4923f930,0xf2000422,0xf2000822,0x06413fff
+	.long	0xf2000080,0xf20004a3,0xf23c4500,0x3950097b
+	.long	0xf2000523,0xf2000580,0xf23c45a3,0x3ab60b6a
+	.long	0xf23a5522,0xf91ef23a,0x55a2f920,0x3d41ff84
+	.long	0x2d7c8000,0x0000ff88,0x42aeff8c,0xf2000523
+	.long	0x222eff54,0x4441f200,0x05a30641,0x3ffff23a
+	.long	0x5522f900,0xf23c45a2,0x3f000000,0xf2000523
+	.long	0x00418000,0x3d41ff90,0x2d7c8000,0x0000ff94
+	.long	0x42aeff98,0xf2000ca3,0xf2000123,0xf2000422
+	.long	0xf2000822,0xf21fd030,0xf2114823,0x222eff54
+	.long	0x0c810000,0x003f6f1a,0xf2294480,0x000cf22e
+	.long	0x48a2ff90,0xf2000422,0xf2114822,0x60ff0000
+	.long	0x00340c81,0xfffffffd,0x6c16f229,0x4422000c
+	.long	0xf2114822,0xf22e4822,0xff9060ff,0x00000016
+	.long	0xf2194880,0xf2114422,0xf22e48a2,0xff90f200
+	.long	0x0422f200,0x9000f22e,0x4823ff84,0x60ff0000
+	.long	0x22ae0c81,0x3fbe0000,0x6c6c0c81,0x00330000
+	.long	0x6d2c2d7c,0x80010000,0xff842d7c,0x80000000
+	.long	0xff8842ae,0xff8cf210,0x4800f200,0x9000123c
+	.long	0x0002f22e,0x4822ff84,0x60ff0000,0x2264f210
+	.long	0x4800f23a,0x5423f86c,0x2d7c8001,0x0000ff84
+	.long	0x2d7c8000,0x0000ff88,0x42aeff8c,0xf22e4822
+	.long	0xff84f200,0x9000123c,0x0000f23a,0x5423f84c
+	.long	0x60ff0000,0x222cf210,0x4800f200,0x0023f227
+	.long	0xe00cf23c,0x44802f30,0xcaa8f200,0x00a3f23c
+	.long	0x4500310f,0x8290f23c,0x44a232d7,0x3220f200
+	.long	0x0123f200,0x00a3f23c,0x45223493,0xf281f23a
+	.long	0x54a2f7c0,0xf2000123,0xf20000a3,0xf23a5522
+	.long	0xf7baf23a,0x54a2f7bc,0xf2000123,0xf20000a3
+	.long	0xf23a5522,0xf7b6f23a,0x54a2f7b8,0xf2000123
+	.long	0xf20000a3,0xf23a5522,0xf7b2f23a,0x48a2f7b4
+	.long	0xf2000123,0xf20000a3,0xf2000123,0xf21048a3
+	.long	0xf23c4423,0x3f000000,0xf20008a2,0xf21fd030
+	.long	0xf2000422,0xf2009000,0xf2104822,0x60ff0000
+	.long	0x218e2210,0x0c810000,0x00006e00,0xfbacf23c
+	.long	0x4400bf80,0x0000f200,0x9000f23c,0x44220080
+	.long	0x000060ff,0x00002178,0x60ff0000,0x1ff63028
+	.long	0x00000880,0x000f0440,0x3ffff200,0x50006d02
+	.long	0x4e751d7c,0x0008ff64,0x4e7561ff,0x00007cfc
+	.long	0x44400440,0x3ffff200,0x50001d7c,0x0008ff64
+	.long	0x4e753028,0x00000040,0x7fff0880,0x000e2d68
+	.long	0x0004ff88,0x2d680008,0xff8c3d40,0xff84f22e
+	.long	0x4800ff84,0x6b024e75,0x1d7c0008,0xff644e75
+	.long	0x61ff0000,0x7cb660ca,0x7ffb0000,0x80000000
+	.long	0x00000000,0x00000000,0xf2104800,0x22103228
+	.long	0x00040281,0x7fffffff,0x0c81400c,0xb1676e42
+	.long	0xf2000018,0x2f004280,0xf227e001,0x41d761ff
+	.long	0xfffffad2,0xdffc0000,0x000cf23c,0x44233f00
+	.long	0x0000201f,0xf23c4480,0x3e800000,0xf20000a0
+	.long	0xf2009000,0x123c0002,0xf2000422,0x60ff0000
+	.long	0x20800c81,0x400cb2b3,0x6e3cf200,0x0018f23a
+	.long	0x5428baae,0xf23a5428,0xbab02f00,0x4280f227
+	.long	0xe00141d7,0x61ffffff,0xfa7cdffc,0x0000000c
+	.long	0x201ff200,0x9000123c,0x0000f23a,0x4823ff5a
+	.long	0x60ff0000,0x203c60ff,0x00002014,0xf23c4400
+	.long	0x3f800000,0xf2009000,0xf23c4422,0x00800000
+	.long	0x60ff0000,0x2032f210,0x48002210,0x32280004
+	.long	0x22410281,0x7fffffff,0x0c81400c,0xb1676e62
+	.long	0xf2000018,0x48e78040,0xf227e001,0x41d74280
+	.long	0x61ffffff,0xfbe0dffc,0x0000000c,0xf23c9000
+	.long	0x00000000,0x4cdf0201,0xf2000080,0xf23c44a2
+	.long	0x3f800000,0xf2276800,0xf2000420,0x22090281
+	.long	0x80000000,0x00813f00,0x0000f21f,0x48222f01
+	.long	0xf2009000,0x123c0000,0xf21f4423,0x60ff0000
+	.long	0x1fa00c81,0x400cb2b3,0x6eff0000,0x1f4cf200
+	.long	0x0018f23a,0x5428b9ca,0x2f3c0000,0x00002f3c
+	.long	0x80000000,0x22090281,0x80000000,0x00817ffb
+	.long	0x00002f01,0xf23a5428,0xb9b02f00,0x4280f227
+	.long	0xe00141d7,0x61ffffff,0xf97cdffc,0x0000000c
+	.long	0x201ff200,0x9000123c,0x0000f21f,0x482360ff
+	.long	0x00001f3e,0x60ff0000,0x1ddaf210,0x4800f22e
+	.long	0x6800ff84,0x22103228,0x00042d41,0xff840281
+	.long	0x7fffffff,0x0c813fd7,0x80006d00,0x00740c81
+	.long	0x3fffddce,0x6e00006a,0x222eff84,0x2d41ff5c
+	.long	0x02817fff,0x00000681,0x00010000,0x2d41ff84
+	.long	0x02ae8000,0x0000ff5c,0xf22e4800,0xff842f00
+	.long	0x4280f227,0xe00141d7,0x61ffffff,0xfac8dffc
+	.long	0x0000000c,0x201ff200,0x0080f23c,0x44a24000
+	.long	0x0000222e,0xff5cf22e,0x6880ff84,0xb3aeff84
+	.long	0xf2009000,0xf22e4820,0xff8460ff,0x00001eb0
+	.long	0x0c813fff,0x80006d00,0x00880c81,0x40048aa1
+	.long	0x6e000092,0x222eff84,0x2d41ff5c,0x02817fff
+	.long	0x00000681,0x00010000,0x2d41ff84,0x02ae8000
+	.long	0x0000ff5c,0x222eff5c,0xf22e4800,0xff842f00
+	.long	0x4280f227,0xe00141d7,0x61ffffff,0xf878dffc
+	.long	0x0000000c,0x201f222e,0xff5cf23c,0x44223f80
+	.long	0x00000a81,0xc0000000,0xf2014480,0xf20000a0
+	.long	0x222eff5c,0x00813f80,0x0000f201,0x4400f200
+	.long	0x9000123c,0x0002f200,0x042260ff,0x00001e20
+	.long	0xf2009000,0x123c0003,0xf22e4800,0xff8460ff
+	.long	0x00001dfe,0x222eff84,0x02818000,0x00000081
+	.long	0x3f800000,0xf2014400,0x02818000,0x00000a81
+	.long	0x80800000,0xf2009000,0xf2014422,0x60ff0000
+	.long	0x1dde60ff,0x00001c6c,0x3ffe0000,0xb17217f7
+	.long	0xd1cf79ac,0x00000000,0x3f800000,0x00000000
+	.long	0x7f800000,0xbf800000,0x3fc2499a,0xb5e4040b
+	.long	0xbfc555b5,0x848cb7db,0x3fc99999,0x987d8730
+	.long	0xbfcfffff,0xff6f7e97,0x3fd55555,0x555555a4
+	.long	0xbfe00000,0x00000008,0x3f175496,0xadd7dad6
+	.long	0x3f3c71c2,0xfe80c7e0,0x3f624924,0x928bccff
+	.long	0x3f899999,0x999995ec,0x3fb55555,0x55555555
+	.long	0x40000000,0x00000000,0x3f990000,0x80000000
+	.long	0x00000000,0x00000000,0x3ffe0000,0xfe03f80f
+	.long	0xe03f80fe,0x00000000,0x3ff70000,0xff015358
+	.long	0x833c47e2,0x00000000,0x3ffe0000,0xfa232cf2
+	.long	0x52138ac0,0x00000000,0x3ff90000,0xbdc8d83e
+	.long	0xad88d549,0x00000000,0x3ffe0000,0xf6603d98
+	.long	0x0f6603da,0x00000000,0x3ffa0000,0x9cf43dcf
+	.long	0xf5eafd48,0x00000000,0x3ffe0000,0xf2b9d648
+	.long	0x0f2b9d65,0x00000000,0x3ffa0000,0xda16eb88
+	.long	0xcb8df614,0x00000000,0x3ffe0000,0xef2eb71f
+	.long	0xc4345238,0x00000000,0x3ffb0000,0x8b29b775
+	.long	0x1bd70743,0x00000000,0x3ffe0000,0xebbdb2a5
+	.long	0xc1619c8c,0x00000000,0x3ffb0000,0xa8d839f8
+	.long	0x30c1fb49,0x00000000,0x3ffe0000,0xe865ac7b
+	.long	0x7603a197,0x00000000,0x3ffb0000,0xc61a2eb1
+	.long	0x8cd907ad,0x00000000,0x3ffe0000,0xe525982a
+	.long	0xf70c880e,0x00000000,0x3ffb0000,0xe2f2a47a
+	.long	0xde3a18af,0x00000000,0x3ffe0000,0xe1fc780e
+	.long	0x1fc780e2,0x00000000,0x3ffb0000,0xff64898e
+	.long	0xdf55d551,0x00000000,0x3ffe0000,0xdee95c4c
+	.long	0xa037ba57,0x00000000,0x3ffc0000,0x8db956a9
+	.long	0x7b3d0148,0x00000000,0x3ffe0000,0xdbeb61ee
+	.long	0xd19c5958,0x00000000,0x3ffc0000,0x9b8fe100
+	.long	0xf47ba1de,0x00000000,0x3ffe0000,0xd901b203
+	.long	0x6406c80e,0x00000000,0x3ffc0000,0xa9372f1d
+	.long	0x0da1bd17,0x00000000,0x3ffe0000,0xd62b80d6
+	.long	0x2b80d62c,0x00000000,0x3ffc0000,0xb6b07f38
+	.long	0xce90e46b,0x00000000,0x3ffe0000,0xd3680d36
+	.long	0x80d3680d,0x00000000,0x3ffc0000,0xc3fd0329
+	.long	0x06488481,0x00000000,0x3ffe0000,0xd0b69fcb
+	.long	0xd2580d0b,0x00000000,0x3ffc0000,0xd11de0ff
+	.long	0x15ab18ca,0x00000000,0x3ffe0000,0xce168a77
+	.long	0x25080ce1,0x00000000,0x3ffc0000,0xde1433a1
+	.long	0x6c66b150,0x00000000,0x3ffe0000,0xcb8727c0
+	.long	0x65c393e0,0x00000000,0x3ffc0000,0xeae10b5a
+	.long	0x7ddc8add,0x00000000,0x3ffe0000,0xc907da4e
+	.long	0x871146ad,0x00000000,0x3ffc0000,0xf7856e5e
+	.long	0xe2c9b291,0x00000000,0x3ffe0000,0xc6980c69
+	.long	0x80c6980c,0x00000000,0x3ffd0000,0x82012ca5
+	.long	0xa68206d7,0x00000000,0x3ffe0000,0xc4372f85
+	.long	0x5d824ca6,0x00000000,0x3ffd0000,0x882c5fcd
+	.long	0x7256a8c5,0x00000000,0x3ffe0000,0xc1e4bbd5
+	.long	0x95f6e947,0x00000000,0x3ffd0000,0x8e44c60b
+	.long	0x4ccfd7de,0x00000000,0x3ffe0000,0xbfa02fe8
+	.long	0x0bfa02ff,0x00000000,0x3ffd0000,0x944ad09e
+	.long	0xf4351af6,0x00000000,0x3ffe0000,0xbd691047
+	.long	0x07661aa3,0x00000000,0x3ffd0000,0x9a3eecd4
+	.long	0xc3eaa6b2,0x00000000,0x3ffe0000,0xbb3ee721
+	.long	0xa54d880c,0x00000000,0x3ffd0000,0xa0218434
+	.long	0x353f1de8,0x00000000,0x3ffe0000,0xb92143fa
+	.long	0x36f5e02e,0x00000000,0x3ffd0000,0xa5f2fcab
+	.long	0xbbc506da,0x00000000,0x3ffe0000,0xb70fbb5a
+	.long	0x19be3659,0x00000000,0x3ffd0000,0xabb3b8ba
+	.long	0x2ad362a5,0x00000000,0x3ffe0000,0xb509e68a
+	.long	0x9b94821f,0x00000000,0x3ffd0000,0xb1641795
+	.long	0xce3ca97b,0x00000000,0x3ffe0000,0xb30f6352
+	.long	0x8917c80b,0x00000000,0x3ffd0000,0xb7047551
+	.long	0x5d0f1c61,0x00000000,0x3ffe0000,0xb11fd3b8
+	.long	0x0b11fd3c,0x00000000,0x3ffd0000,0xbc952afe
+	.long	0xea3d13e1,0x00000000,0x3ffe0000,0xaf3addc6
+	.long	0x80af3ade,0x00000000,0x3ffd0000,0xc2168ed0
+	.long	0xf458ba4a,0x00000000,0x3ffe0000,0xad602b58
+	.long	0x0ad602b6,0x00000000,0x3ffd0000,0xc788f439
+	.long	0xb3163bf1,0x00000000,0x3ffe0000,0xab8f69e2
+	.long	0x8359cd11,0x00000000,0x3ffd0000,0xccecac08
+	.long	0xbf04565d,0x00000000,0x3ffe0000,0xa9c84a47
+	.long	0xa07f5638,0x00000000,0x3ffd0000,0xd2420487
+	.long	0x2dd85160,0x00000000,0x3ffe0000,0xa80a80a8
+	.long	0x0a80a80b,0x00000000,0x3ffd0000,0xd7894992
+	.long	0x3bc3588a,0x00000000,0x3ffe0000,0xa655c439
+	.long	0x2d7b73a8,0x00000000,0x3ffd0000,0xdcc2c4b4
+	.long	0x9887dacc,0x00000000,0x3ffe0000,0xa4a9cf1d
+	.long	0x96833751,0x00000000,0x3ffd0000,0xe1eebd3e
+	.long	0x6d6a6b9e,0x00000000,0x3ffe0000,0xa3065e3f
+	.long	0xae7cd0e0,0x00000000,0x3ffd0000,0xe70d785c
+	.long	0x2f9f5bdc,0x00000000,0x3ffe0000,0xa16b312e
+	.long	0xa8fc377d,0x00000000,0x3ffd0000,0xec1f392c
+	.long	0x5179f283,0x00000000,0x3ffe0000,0x9fd809fd
+	.long	0x809fd80a,0x00000000,0x3ffd0000,0xf12440d3
+	.long	0xe36130e6,0x00000000,0x3ffe0000,0x9e4cad23
+	.long	0xdd5f3a20,0x00000000,0x3ffd0000,0xf61cce92
+	.long	0x346600bb,0x00000000,0x3ffe0000,0x9cc8e160
+	.long	0xc3fb19b9,0x00000000,0x3ffd0000,0xfb091fd3
+	.long	0x8145630a,0x00000000,0x3ffe0000,0x9b4c6f9e
+	.long	0xf03a3caa,0x00000000,0x3ffd0000,0xffe97042
+	.long	0xbfa4c2ad,0x00000000,0x3ffe0000,0x99d722da
+	.long	0xbde58f06,0x00000000,0x3ffe0000,0x825efced
+	.long	0x49369330,0x00000000,0x3ffe0000,0x9868c809
+	.long	0x868c8098,0x00000000,0x3ffe0000,0x84c37a7a
+	.long	0xb9a905c9,0x00000000,0x3ffe0000,0x97012e02
+	.long	0x5c04b809,0x00000000,0x3ffe0000,0x87224c2e
+	.long	0x8e645fb7,0x00000000,0x3ffe0000,0x95a02568
+	.long	0x095a0257,0x00000000,0x3ffe0000,0x897b8cac
+	.long	0x9f7de298,0x00000000,0x3ffe0000,0x94458094
+	.long	0x45809446,0x00000000,0x3ffe0000,0x8bcf55de
+	.long	0xc4cd05fe,0x00000000,0x3ffe0000,0x92f11384
+	.long	0x0497889c,0x00000000,0x3ffe0000,0x8e1dc0fb
+	.long	0x89e125e5,0x00000000,0x3ffe0000,0x91a2b3c4
+	.long	0xd5e6f809,0x00000000,0x3ffe0000,0x9066e68c
+	.long	0x955b6c9b,0x00000000,0x3ffe0000,0x905a3863
+	.long	0x3e06c43b,0x00000000,0x3ffe0000,0x92aade74
+	.long	0xc7be59e0,0x00000000,0x3ffe0000,0x8f1779d9
+	.long	0xfdc3a219,0x00000000,0x3ffe0000,0x94e9bff6
+	.long	0x15845643,0x00000000,0x3ffe0000,0x8dda5202
+	.long	0x37694809,0x00000000,0x3ffe0000,0x9723a1b7
+	.long	0x20134203,0x00000000,0x3ffe0000,0x8ca29c04
+	.long	0x6514e023,0x00000000,0x3ffe0000,0x995899c8
+	.long	0x90eb8990,0x00000000,0x3ffe0000,0x8b70344a
+	.long	0x139bc75a,0x00000000,0x3ffe0000,0x9b88bdaa
+	.long	0x3a3dae2f,0x00000000,0x3ffe0000,0x8a42f870
+	.long	0x5669db46,0x00000000,0x3ffe0000,0x9db4224f
+	.long	0xffe1157c,0x00000000,0x3ffe0000,0x891ac73a
+	.long	0xe9819b50,0x00000000,0x3ffe0000,0x9fdadc26
+	.long	0x8b7a12da,0x00000000,0x3ffe0000,0x87f78087
+	.long	0xf78087f8,0x00000000,0x3ffe0000,0xa1fcff17
+	.long	0xce733bd4,0x00000000,0x3ffe0000,0x86d90544
+	.long	0x7a34acc6,0x00000000,0x3ffe0000,0xa41a9e8f
+	.long	0x5446fb9f,0x00000000,0x3ffe0000,0x85bf3761
+	.long	0x2cee3c9b,0x00000000,0x3ffe0000,0xa633cd7e
+	.long	0x6771cd8b,0x00000000,0x3ffe0000,0x84a9f9c8
+	.long	0x084a9f9d,0x00000000,0x3ffe0000,0xa8489e60
+	.long	0x0b435a5e,0x00000000,0x3ffe0000,0x83993052
+	.long	0x3fbe3368,0x00000000,0x3ffe0000,0xaa59233c
+	.long	0xcca4bd49,0x00000000,0x3ffe0000,0x828cbfbe
+	.long	0xb9a020a3,0x00000000,0x3ffe0000,0xac656dae
+	.long	0x6bcc4985,0x00000000,0x3ffe0000,0x81848da8
+	.long	0xfaf0d277,0x00000000,0x3ffe0000,0xae6d8ee3
+	.long	0x60bb2468,0x00000000,0x3ffe0000,0x80808080
+	.long	0x80808081,0x00000000,0x3ffe0000,0xb07197a2
+	.long	0x3c46c654,0x00000000,0xf2104800,0x2d7c0000
+	.long	0x0000ff54,0x22103228,0x00042d50,0xff842d68
+	.long	0x0004ff88,0x2d680008,0xff8c0c81,0x00000000
+	.long	0x6d000182,0x0c813ffe,0xf07d6d0a,0x0c813fff
+	.long	0x88416f00,0x00e2e081,0xe0810481,0x00003fff
+	.long	0xd2aeff54,0x41faf7b2,0xf2014080,0x2d7c3fff
+	.long	0x0000ff84,0x2d6eff88,0xff9402ae,0xfe000000
+	.long	0xff9400ae,0x01000000,0xff94222e,0xff940281
+	.long	0x7e000000,0xe081e081,0xe881d1c1,0xf22e4800
+	.long	0xff842d7c,0x3fff0000,0xff9042ae,0xff98f22e
+	.long	0x4828ff90,0xf227e00c,0xf2104823,0xf23a48a3
+	.long	0xf6c8f200,0x0100f200,0x0923f22e,0x6880ff84
+	.long	0xf2000980,0xf2000880,0xf23a54a3,0xf6ccf23a
+	.long	0x5523f6ce,0xf23a54a2,0xf6d0f23a,0x5522f6d2
+	.long	0xf2000ca3,0xf2000d23,0xf23a54a2,0xf6ccf23a
+	.long	0x5522f6ce,0xf2000ca3,0xd1fc0000,0x0010f200
+	.long	0x0d23f200,0x00a3f200,0x0822f210,0x48a2f21f
+	.long	0xd030f200,0x0422f200,0x9000f22e,0x4822ff84
+	.long	0x60ff0000,0x142af23c,0x58380001,0xf2c10000
+	.long	0x1678f200,0x0080f23a,0x44a8f64e,0xf23a4422
+	.long	0xf648f200,0x04a2f200,0x00a0f227,0xe00cf200
+	.long	0x0400f200,0x0023f22e,0x6880ff84,0xf2000080
+	.long	0xf20004a3,0xf23a5580,0xf660f23a,0x5500f662
+	.long	0xf20005a3,0xf2000523,0xf23a55a2,0xf65cf23a
+	.long	0x5522f65e,0xf2000ca3,0xf2000123,0xf23a54a2
+	.long	0xf658f22e,0x4823ff84,0xf20008a2,0xf21fd030
+	.long	0xf2000423,0xf2009000,0xf22e4822,0xff8460ff
+	.long	0x0000139c,0x60ff0000,0x12102d7c,0xffffff9c
+	.long	0xff5448e7,0x3f002610,0x28280004,0x2a280008
+	.long	0x42824a84,0x66342805,0x42857420,0x4286edc4
+	.long	0x6000edac,0xd4862d43,0xff842d44,0xff882d45
+	.long	0xff8c4482,0x2d42ff54,0xf22e4800,0xff844cdf
+	.long	0x00fc41ee,0xff846000,0xfe0c4286,0xedc46000
+	.long	0x2406edac,0x2e05edad,0x44860686,0x00000020
+	.long	0xecaf8887,0x2d43ff84,0x2d44ff88,0x2d45ff8c
+	.long	0x44822d42,0xff54f22e,0x4800ff84,0x4cdf00fc
+	.long	0x41eeff84,0x6000fdce,0xf2104800,0xf2000018
+	.long	0xf23a4838,0xf5a4f292,0x0014f200,0x9000123c
+	.long	0x0003f210,0x480060ff,0x000012d6,0xf2104800
+	.long	0x2d7c0000,0x0000ff54,0xf2000080,0xf23a4422
+	.long	0xf508f22e,0x6800ff84,0x3d6eff88,0xff86222e
+	.long	0xff840c81,0x00000000,0x6f0000da,0x0c813ffe
+	.long	0x80006d00,0xfda20c81,0x3fffc000,0x6e00fd98
+	.long	0x0c813ffe,0xf07d6d00,0x001a0c81,0x3fff8841
+	.long	0x6e000010,0xf20004a2,0xf23a4422,0xf4bc6000
+	.long	0xfe762d6e,0xff88ff94,0x02aefe00,0x0000ff94
+	.long	0x00ae0100,0x0000ff94,0x0c813fff,0x80006c44
+	.long	0xf23a4400,0xf4fc2d7c,0x3fff0000,0xff9042ae
+	.long	0xff98f22e,0x4828ff90,0x222eff94,0x02817e00
+	.long	0x0000e081,0xe081e881,0xf20004a2,0xf227e00c
+	.long	0xf2000422,0x41faf4e2,0xd1c1f23a,0x4480f466
+	.long	0x6000fd76,0xf23a4400,0xf4502d7c,0x3fff0000
+	.long	0xff9042ae,0xff98f22e,0x4828ff90,0x222eff94
+	.long	0x02817e00,0x0000e081,0xe081e881,0xf2000422
+	.long	0xf227e00c,0x41faf4a2,0xd1c1f23a,0x4480f41e
+	.long	0x6000fd36,0x0c810000,0x00006d10,0xf23a4400
+	.long	0xf414f200,0x900060ff,0x00001014,0xf23a4400
+	.long	0xf3fcf200,0x900060ff,0x0000102e,0x60ff0000
+	.long	0x10422210,0x32280004,0x02817fff,0xffff0c81
+	.long	0x3fff8000,0x6c56f210,0x4818f200,0x0080f200
+	.long	0x049af200,0x0022f23c,0x44a23f80,0x0000f200
+	.long	0x04202210,0x02818000,0x00000081,0x3f000000
+	.long	0x2f012f00,0x4280f227,0xe00141d7,0x61ffffff
+	.long	0xfe5adffc,0x0000000c,0x201ff200,0x9000123c
+	.long	0x0000f21f,0x442360ff,0x00001136,0xf2104818
+	.long	0xf23c4438,0x3f800000,0xf2d20000,0x0fac60ff
+	.long	0x00000f7c,0x60ff0000,0x0fba3ffd,0x0000de5b
+	.long	0xd8a93728,0x71950000,0x00003fff,0x0000b8aa
+	.long	0x3b295c17,0xf0bc0000,0x0000f23c,0x58000001
+	.long	0xf2104838,0xf2c10000,0x13502210,0x6d000090
+	.long	0x2f004280,0x61ffffff,0xfba2f21f,0x9000f23a
+	.long	0x4823ffb8,0x60ff0000,0x10d62210,0x6d000070
+	.long	0x2f004280,0x61ffffff,0xfd34f21f,0x9000f23a
+	.long	0x4823ff98,0x60ff0000,0x10c62210,0x6d000050
+	.long	0x22280008,0x662e2228,0x00040281,0x7fffffff
+	.long	0x66223210,0x02810000,0x7fff0481,0x00003fff
+	.long	0x67ff0000,0x12e4f200,0x9000f201,0x400060ff
+	.long	0x0000107c,0x2f004280,0x61ffffff,0xfb2ef21f
+	.long	0x9000f23a,0x4823ff54,0x60ff0000,0x106260ff
+	.long	0x00000ed6,0x22106d00,0xfff62f00,0x428061ff
+	.long	0xfffffcba,0xf21f9000,0xf23a4823,0xff2e60ff
+	.long	0x0000104c,0x406a934f,0x0979a371,0x3f734413
+	.long	0x509f8000,0xbfcd0000,0xc0219dc1,0xda994fd2
+	.long	0x00000000,0x40000000,0x935d8ddd,0xaaa8ac17
+	.long	0x00000000,0x3ffe0000,0xb17217f7,0xd1cf79ac
+	.long	0x00000000,0x3f56c16d,0x6f7bd0b2,0x3f811112
+	.long	0x302c712c,0x3fa55555,0x55554cc1,0x3fc55555
+	.long	0x55554a54,0x3fe00000,0x00000000,0x00000000
+	.long	0x00000000,0x3fff0000,0x80000000,0x00000000
+	.long	0x3f738000,0x3fff0000,0x8164d1f3,0xbc030773
+	.long	0x3fbef7ca,0x3fff0000,0x82cd8698,0xac2ba1d7
+	.long	0x3fbdf8a9,0x3fff0000,0x843a28c3,0xacde4046
+	.long	0x3fbcd7c9,0x3fff0000,0x85aac367,0xcc487b15
+	.long	0xbfbde8da,0x3fff0000,0x871f6196,0x9e8d1010
+	.long	0x3fbde85c,0x3fff0000,0x88980e80,0x92da8527
+	.long	0x3fbebbf1,0x3fff0000,0x8a14d575,0x496efd9a
+	.long	0x3fbb80ca,0x3fff0000,0x8b95c1e3,0xea8bd6e7
+	.long	0xbfba8373,0x3fff0000,0x8d1adf5b,0x7e5ba9e6
+	.long	0xbfbe9670,0x3fff0000,0x8ea4398b,0x45cd53c0
+	.long	0x3fbdb700,0x3fff0000,0x9031dc43,0x1466b1dc
+	.long	0x3fbeeeb0,0x3fff0000,0x91c3d373,0xab11c336
+	.long	0x3fbbfd6d,0x3fff0000,0x935a2b2f,0x13e6e92c
+	.long	0xbfbdb319,0x3fff0000,0x94f4efa8,0xfef70961
+	.long	0x3fbdba2b,0x3fff0000,0x96942d37,0x20185a00
+	.long	0x3fbe91d5,0x3fff0000,0x9837f051,0x8db8a96f
+	.long	0x3fbe8d5a,0x3fff0000,0x99e04593,0x20b7fa65
+	.long	0xbfbcde7b,0x3fff0000,0x9b8d39b9,0xd54e5539
+	.long	0xbfbebaaf,0x3fff0000,0x9d3ed9a7,0x2cffb751
+	.long	0xbfbd86da,0x3fff0000,0x9ef53260,0x91a111ae
+	.long	0xbfbebedd,0x3fff0000,0xa0b0510f,0xb9714fc2
+	.long	0x3fbcc96e,0x3fff0000,0xa2704303,0x0c496819
+	.long	0xbfbec90b,0x3fff0000,0xa43515ae,0x09e6809e
+	.long	0x3fbbd1db,0x3fff0000,0xa5fed6a9,0xb15138ea
+	.long	0x3fbce5eb,0x3fff0000,0xa7cd93b4,0xe965356a
+	.long	0xbfbec274,0x3fff0000,0xa9a15ab4,0xea7c0ef8
+	.long	0x3fbea83c,0x3fff0000,0xab7a39b5,0xa93ed337
+	.long	0x3fbecb00,0x3fff0000,0xad583eea,0x42a14ac6
+	.long	0x3fbe9301,0x3fff0000,0xaf3b78ad,0x690a4375
+	.long	0xbfbd8367,0x3fff0000,0xb123f581,0xd2ac2590
+	.long	0xbfbef05f,0x3fff0000,0xb311c412,0xa9112489
+	.long	0x3fbdfb3c,0x3fff0000,0xb504f333,0xf9de6484
+	.long	0x3fbeb2fb,0x3fff0000,0xb6fd91e3,0x28d17791
+	.long	0x3fbae2cb,0x3fff0000,0xb8fbaf47,0x62fb9ee9
+	.long	0x3fbcdc3c,0x3fff0000,0xbaff5ab2,0x133e45fb
+	.long	0x3fbee9aa,0x3fff0000,0xbd08a39f,0x580c36bf
+	.long	0xbfbeaefd,0x3fff0000,0xbf1799b6,0x7a731083
+	.long	0xbfbcbf51,0x3fff0000,0xc12c4cca,0x66709456
+	.long	0x3fbef88a,0x3fff0000,0xc346ccda,0x24976407
+	.long	0x3fbd83b2,0x3fff0000,0xc5672a11,0x5506dadd
+	.long	0x3fbdf8ab,0x3fff0000,0xc78d74c8,0xabb9b15d
+	.long	0xbfbdfb17,0x3fff0000,0xc9b9bd86,0x6e2f27a3
+	.long	0xbfbefe3c,0x3fff0000,0xcbec14fe,0xf2727c5d
+	.long	0xbfbbb6f8,0x3fff0000,0xce248c15,0x1f8480e4
+	.long	0xbfbcee53,0x3fff0000,0xd06333da,0xef2b2595
+	.long	0xbfbda4ae,0x3fff0000,0xd2a81d91,0xf12ae45a
+	.long	0x3fbc9124,0x3fff0000,0xd4f35aab,0xcfedfa1f
+	.long	0x3fbeb243,0x3fff0000,0xd744fcca,0xd69d6af4
+	.long	0x3fbde69a,0x3fff0000,0xd99d15c2,0x78afd7b6
+	.long	0xbfb8bc61,0x3fff0000,0xdbfbb797,0xdaf23755
+	.long	0x3fbdf610,0x3fff0000,0xde60f482,0x5e0e9124
+	.long	0xbfbd8be1,0x3fff0000,0xe0ccdeec,0x2a94e111
+	.long	0x3fbacb12,0x3fff0000,0xe33f8972,0xbe8a5a51
+	.long	0x3fbb9bfe,0x3fff0000,0xe5b906e7,0x7c8348a8
+	.long	0x3fbcf2f4,0x3fff0000,0xe8396a50,0x3c4bdc68
+	.long	0x3fbef22f,0x3fff0000,0xeac0c6e7,0xdd24392f
+	.long	0xbfbdbf4a,0x3fff0000,0xed4f301e,0xd9942b84
+	.long	0x3fbec01a,0x3fff0000,0xefe4b99b,0xdcdaf5cb
+	.long	0x3fbe8cac,0x3fff0000,0xf281773c,0x59ffb13a
+	.long	0xbfbcbb3f,0x3fff0000,0xf5257d15,0x2486cc2c
+	.long	0x3fbef73a,0x3fff0000,0xf7d0df73,0x0ad13bb9
+	.long	0xbfb8b795,0x3fff0000,0xfa83b2db,0x722a033a
+	.long	0x3fbef84b,0x3fff0000,0xfd3e0c0c,0xf486c175
+	.long	0xbfbef581,0xf210d080,0x22103228,0x0004f22e
+	.long	0x6800ff84,0x02817fff,0xffff0c81,0x3fb98000
+	.long	0x6c046000,0x00880c81,0x400d80c0,0x6f046000
+	.long	0x007cf200,0x0080f23c,0x44a34280,0x0000f22e
+	.long	0x6080ff54,0x2f0243fa,0xfbbcf22e,0x4080ff54
+	.long	0x222eff54,0x24010281,0x0000003f,0xe981d3c1
+	.long	0xec822202,0xe2819481,0x06820000,0x3ffff227
+	.long	0xe00cf23c,0x44a33c80,0x00002d59,0xff842d59
+	.long	0xff882d59,0xff8c3d59,0xff90f200,0x04283d59
+	.long	0xff94426e,0xff9642ae,0xff98d36e,0xff84f23a
+	.long	0x4823fb22,0xd36eff90,0x60000100,0x0c813fff
+	.long	0x80006e12,0xf2009000,0xf23c4422,0x3f800000
+	.long	0x60ff0000,0x0b12222e,0xff840c81,0x00000000
+	.long	0x6d0660ff,0x00000ac8,0x60ff0000,0x0a1af200
+	.long	0x9000f23c,0x44003f80,0x00002210,0x00810080
+	.long	0x0001f201,0x442260ff,0x00000adc,0xf210d080
+	.long	0x22103228,0x0004f22e,0x6800ff84,0x02817fff
+	.long	0xffff0c81,0x3fb98000,0x6c046000,0xff900c81
+	.long	0x400b9b07,0x6f046000,0xff84f200,0x0080f23a
+	.long	0x54a3fa62,0xf22e6080,0xff542f02,0x43fafac6
+	.long	0xf22e4080,0xff54222e,0xff542401,0x02810000
+	.long	0x003fe981,0xd3c1ec82,0x2202e281,0x94810682
+	.long	0x00003fff,0xf227e00c,0xf2000500,0xf23a54a3
+	.long	0xfa2c2d59,0xff84f23a,0x4923fa2a,0x2d59ff88
+	.long	0x2d59ff8c,0xf2000428,0x3d59ff90,0xf2000828
+	.long	0x3d59ff94,0x426eff96,0x42aeff98,0xf23a4823
+	.long	0xfa14d36e,0xff84d36e,0xff90f200,0x0080f200
+	.long	0x04a3f23a,0x5500fa1e,0xf23a5580,0xfa20f200
+	.long	0x0523f200,0x05a3f23a,0x5522fa1a,0xf23a55a2
+	.long	0xfa1cf200,0x0523f200,0x05a3f23a,0x5522fa16
+	.long	0xf20001a3,0xf2000523,0xf2000c22,0xf2000822
+	.long	0xf21fd030,0xf22e4823,0xff84f22e,0x4822ff90
+	.long	0xf22e4822,0xff84f200,0x90003d42,0xff84241f
+	.long	0x2d7c8000,0x0000ff88,0x42aeff8c,0x123c0000
+	.long	0xf22e4823,0xff8460ff,0x00000996,0xf2009000
+	.long	0xf23c4400,0x3f800000,0x22100081,0x00800001
+	.long	0xf2014422,0x60ff0000,0x098e2f01,0xe8082200
+	.long	0x02410003,0x0240000c,0x48403001,0x221f4a01
+	.long	0x671e0c01,0x000a6f12,0x0c01000e,0x6f3c0c01
+	.long	0x002f6f06,0x0c01003f,0x6f6260ff,0x00000baa
+	.long	0x4a00660c,0x41fb0170,0x000000d6,0x60000086
+	.long	0x0c000003,0x670a41fb,0x01700000,0x00d06074
+	.long	0x41fb0170,0x000000d2,0x606a0401,0x000b4a00
+	.long	0x661041fb,0x01700000,0x00cc0c01,0x00026f54
+	.long	0x605a0c00,0x0003670a,0x41fb0170,0x000000f2
+	.long	0x60e841fb,0x01700000,0x012460de,0x04010030
+	.long	0x4a006616,0x41fb0170,0x0000014e,0x0c010001
+	.long	0x6f220c01,0x00076f24,0x601a0c00,0x0003670a
+	.long	0x41fb0170,0x000001f2,0x60e241fb,0x01700000
+	.long	0x02a860d8,0x00ae0000,0x0208ff64,0xc2fc000c
+	.long	0x48404a00,0x6608f230,0xd0801000,0x4e754840
+	.long	0x3d701000,0xff902d70,0x1004ff94,0x2d701008
+	.long	0xff982200,0x428041ee,0xff904268,0x000261ff
+	.long	0x000062c6,0xf210d080,0x4e7551fc,0x40000000
+	.long	0xc90fdaa2,0x2168c235,0x40000000,0xc90fdaa2
+	.long	0x2168c234,0x40000000,0xc90fdaa2,0x2168c235
+	.long	0x3ffd0000,0x9a209a84,0xfbcff798,0x40000000
+	.long	0xadf85458,0xa2bb4a9a,0x3fff0000,0xb8aa3b29
+	.long	0x5c17f0bc,0x3ffd0000,0xde5bd8a9,0x37287195
+	.long	0x00000000,0x00000000,0x00000000,0x3ffd0000
+	.long	0x9a209a84,0xfbcff798,0x40000000,0xadf85458
+	.long	0xa2bb4a9a,0x3fff0000,0xb8aa3b29,0x5c17f0bb
+	.long	0x3ffd0000,0xde5bd8a9,0x37287195,0x00000000
+	.long	0x00000000,0x00000000,0x3ffd0000,0x9a209a84
+	.long	0xfbcff799,0x40000000,0xadf85458,0xa2bb4a9b
+	.long	0x3fff0000,0xb8aa3b29,0x5c17f0bc,0x3ffd0000
+	.long	0xde5bd8a9,0x37287195,0x00000000,0x00000000
+	.long	0x00000000,0x3ffe0000,0xb17217f7,0xd1cf79ac
+	.long	0x40000000,0x935d8ddd,0xaaa8ac17,0x3fff0000
+	.long	0x80000000,0x00000000,0x40020000,0xa0000000
+	.long	0x00000000,0x40050000,0xc8000000,0x00000000
+	.long	0x400c0000,0x9c400000,0x00000000,0x40190000
+	.long	0xbebc2000,0x00000000,0x40340000,0x8e1bc9bf
+	.long	0x04000000,0x40690000,0x9dc5ada8,0x2b70b59e
+	.long	0x40d30000,0xc2781f49,0xffcfa6d5,0x41a80000
+	.long	0x93ba47c9,0x80e98ce0,0x43510000,0xaa7eebfb
+	.long	0x9df9de8e,0x46a30000,0xe319a0ae,0xa60e91c7
+	.long	0x4d480000,0xc9767586,0x81750c17,0x5a920000
+	.long	0x9e8b3b5d,0xc53d5de5,0x75250000,0xc4605202
+	.long	0x8a20979b,0x3ffe0000,0xb17217f7,0xd1cf79ab
+	.long	0x40000000,0x935d8ddd,0xaaa8ac16,0x3fff0000
+	.long	0x80000000,0x00000000,0x40020000,0xa0000000
+	.long	0x00000000,0x40050000,0xc8000000,0x00000000
+	.long	0x400c0000,0x9c400000,0x00000000,0x40190000
+	.long	0xbebc2000,0x00000000,0x40340000,0x8e1bc9bf
+	.long	0x04000000,0x40690000,0x9dc5ada8,0x2b70b59d
+	.long	0x40d30000,0xc2781f49,0xffcfa6d5,0x41a80000
+	.long	0x93ba47c9,0x80e98cdf,0x43510000,0xaa7eebfb
+	.long	0x9df9de8d,0x46a30000,0xe319a0ae,0xa60e91c6
+	.long	0x4d480000,0xc9767586,0x81750c17,0x5a920000
+	.long	0x9e8b3b5d,0xc53d5de4,0x75250000,0xc4605202
+	.long	0x8a20979a,0x3ffe0000,0xb17217f7,0xd1cf79ac
+	.long	0x40000000,0x935d8ddd,0xaaa8ac17,0x3fff0000
+	.long	0x80000000,0x00000000,0x40020000,0xa0000000
+	.long	0x00000000,0x40050000,0xc8000000,0x00000000
+	.long	0x400c0000,0x9c400000,0x00000000,0x40190000
+	.long	0xbebc2000,0x00000000,0x40340000,0x8e1bc9bf
+	.long	0x04000000,0x40690000,0x9dc5ada8,0x2b70b59e
+	.long	0x40d30000,0xc2781f49,0xffcfa6d6,0x41a80000
+	.long	0x93ba47c9,0x80e98ce0,0x43510000,0xaa7eebfb
+	.long	0x9df9de8e,0x46a30000,0xe319a0ae,0xa60e91c7
+	.long	0x4d480000,0xc9767586,0x81750c18,0x5a920000
+	.long	0x9e8b3b5d,0xc53d5de5,0x75250000,0xc4605202
+	.long	0x8a20979b,0x2f003229,0x00005bee,0xff540281
+	.long	0x00007fff,0x30280000,0x02407fff,0x0c403fff
+	.long	0x6d0000c0,0x0c40400c,0x6e0000a4,0xf2284803
+	.long	0x0000f200,0x6000f23c,0x88000000,0x00004a29
+	.long	0x00046b5e,0x2f003d69,0x0000ff84,0x2d690004
+	.long	0xff882d69,0x0008ff8c,0x41eeff84,0x61ff0000
+	.long	0x60ba4480,0xd09ff22e,0xd080ff84,0x0c40c001
+	.long	0x6c36f21f,0x9000223c,0x80000000,0x0480ffff
+	.long	0xc0014480,0x0c000020,0x6c0ae0a9,0x42a72f01
+	.long	0x42a76028,0x04000020,0xe0a92f01,0x42a742a7
+	.long	0x601af229,0xd0800000,0xf21f9000,0x06403fff
+	.long	0x484042a7,0x2f3c8000,0x00002f00,0xf200b000
+	.long	0x123c0000,0xf21f4823,0x60ff0000,0x04ce201f
+	.long	0xc1494a29,0x00006bff,0x0000038c,0x60ff0000
+	.long	0x03c44a29,0x00046a16,0x201ff200,0x9000123c
+	.long	0x0003f229,0x48000000,0x60ff0000,0x049e201f
+	.long	0x204960ff,0x000002e2,0x00010000,0x80000000
+	.long	0x00000000,0x00000000,0x422eff65,0x2f00422e
+	.long	0xff5c600c,0x422eff65,0x2f001d7c,0x0001ff5c
+	.long	0x48e73f00,0x36280000,0x3d43ff58,0x02830000
+	.long	0x7fff2828,0x00042a28,0x00084a83,0x663c263c
+	.long	0x00003ffe,0x4a846616,0x28054285,0x04830000
+	.long	0x00204286,0xedc46000,0xedac9686,0x60224286
+	.long	0xedc46000,0x9686edac,0x2e05edad,0x44860686
+	.long	0x00000020,0xecaf8887,0x60060683,0x00003ffe
+	.long	0x30290000,0x3d40ff5a,0x322eff58,0xb1810281
+	.long	0x00008000,0x3d41ff5e,0x02800000,0x7fff2229
+	.long	0x00042429,0x00084a80,0x663c203c,0x00003ffe
+	.long	0x4a816616,0x22024282,0x04800000,0x00204286
+	.long	0xedc16000,0xeda99086,0x60224286,0xedc16000
+	.long	0x9086eda9,0x2e02edaa,0x44860686,0x00000020
+	.long	0xecaf8287,0x60060680,0x00003ffe,0x2d43ff54
+	.long	0x2f009083,0x42864283,0x227c0000,0x00004a80
+	.long	0x6c06201f,0x6000006a,0x588f4a86,0x6e0eb284
+	.long	0x6608b485,0x66046000,0x01366508,0x94859384
+	.long	0x42865283,0x4a80670e,0xd683d482,0xe39155c6
+	.long	0x52895380,0x60d4202e,0xff544a81,0x66162202
+	.long	0x42820480,0x00000020,0x4286edc1,0x6000eda9
+	.long	0x9086601c,0x4286edc1,0x60006b14,0x9086eda9
+	.long	0x2e02edaa,0x44860686,0x00000020,0xecaf8287
+	.long	0x0c800000,0x41fe6c2a,0x3d40ff90,0x2d41ff94
+	.long	0x2d42ff98,0x2c2eff54,0x3d46ff84,0x2d44ff88
+	.long	0x2d45ff8c,0xf22e4800,0xff901d7c,0x0001ff5d
+	.long	0x60362d41,0xff942d42,0xff980480,0x00003ffe
+	.long	0x3d40ff90,0x2c2eff54,0x04860000,0x3ffe2d46
+	.long	0xff54f22e,0x4800ff90,0x3d46ff84,0x2d44ff88
+	.long	0x2d45ff8c,0x422eff5d,0x4a2eff5c,0x67222c2e
+	.long	0xff545386,0xb0866d18,0x6e0eb284,0x6608b485
+	.long	0x66046000,0x007a6508,0xf22e4828,0xff845283
+	.long	0x3c2eff5a,0x6c04f200,0x001a4286,0x3c2eff5e
+	.long	0x7e08eeae,0x02830000,0x007f8686,0x1d43ff65
+	.long	0x4cdf00fc,0x201ff200,0x90004a2e,0xff5d6710
+	.long	0x123c0000,0xf23a4823,0xfdc060ff,0x0000024c
+	.long	0x123c0003,0xf2000000,0x60ff0000,0x023e5283
+	.long	0x0c800000,0x00086c04,0xe1ab6002,0x4283f23c
+	.long	0x44000000,0x0000422e,0xff5d6000,0xff942c03
+	.long	0x02860000,0x00014a86,0x6700ff86,0x52833c2e
+	.long	0xff5a0a86,0x00008000,0x3d46ff5a,0x6000ff72
+	.long	0x7fff0000,0xffffffff,0xffffffff,0x4a280000
+	.long	0x6b12f23c,0x44007f80,0x000000ae,0x02000410
+	.long	0xff644e75,0xf23c4400,0xff800000,0x00ae0a00
+	.long	0x0410ff64,0x4e7500ae,0x01002080,0xff64f23a
+	.long	0xd080ffbe,0x4e7500ae,0x00000800,0xff646008
+	.long	0x00ae0000,0x0a28ff64,0x22482200,0x020100c0
+	.long	0x660e4a28,0x00006a18,0x08ee0003,0xff646010
+	.long	0x2f094a28,0x00005bc1,0x61ff0000,0x0196225f
+	.long	0xf210d080,0x102eff62,0x0200000a,0x66024e75
+	.long	0x3d690000,0xff842d69,0x0004ff88,0x2d690008
+	.long	0xff8c41ee,0xff8461ff,0x00005cd0,0x06800000
+	.long	0x6000026e,0x8000ff84,0x816eff84,0xf22ed040
+	.long	0xff844e75,0x00ae0000,0x0a28ff64,0x4a105bc1
+	.long	0x61ff0000,0x013ef210,0xd080f23c,0x44800000
+	.long	0x00004e75,0x00ae0000,0x0a28ff64,0x51c161ff
+	.long	0x00000120,0xf210d080,0xf23c4480,0x00000000
+	.long	0x4e7500ae,0x00001048,0xff641200,0x020100c0
+	.long	0x675c4a28,0x00046b24,0x3d680000,0xff842d68
+	.long	0x0004ff88,0x2d680008,0xff8c41ee,0xff8448e7
+	.long	0xc08061ff,0x00005c44,0x4cdf0103,0x0c010040
+	.long	0x660e4aa8,0x00086614,0x4a280007,0x660e601e
+	.long	0x22280008,0x02810000,0x07ff6712,0x00ae0000
+	.long	0x0200ff64,0x600800ae,0x00001248,0xff644a28
+	.long	0x00005bc1,0x61ff0000,0x5f261d40,0xff64f210
+	.long	0xd080f23c,0x44800000,0x00004e75,0x00ae0000
+	.long	0x1248ff64,0x51c161ff,0x00005f04,0x1d40ff64
+	.long	0xf210d080,0xf23c4480,0x00000000,0x4e75f327
+	.long	0x4a2f0002,0x6b2edffc,0x0000000c,0xf294000e
+	.long	0xf2810014,0x006e0208,0xff664e75,0x00ae0800
+	.long	0x0208ff64,0x4e751d7c,0x0004ff64,0x006e0208
+	.long	0xff664e75,0x006e0208,0xff6661ff,0x00000bae
+	.long	0xdffc0000,0x000c4e75,0xf3274a2f,0x00026bea
+	.long	0xdffc0000,0x000cf200,0xa80081ae,0xff644e75
+	.long	0x00ae0000,0x0a28ff64,0x02410010,0xe8080200
+	.long	0x000f8001,0x2200e309,0x1d7b000a,0xff6441fb
+	.long	0x16204e75,0x04040400,0x04040400,0x04040400
+	.long	0x00000000,0x0c0c080c,0x0c0c080c,0x0c0c080c
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000001,0x00000000
+	.long	0x3f810000,0x00000000,0x00000000,0x00000000
+	.long	0x3f810000,0x00000000,0x00000000,0x00000000
+	.long	0x3f810000,0x00000000,0x00000000,0x00000000
+	.long	0x3f810000,0x00000100,0x00000000,0x00000000
+	.long	0x3c010000,0x00000000,0x00000000,0x00000000
+	.long	0x3c010000,0x00000000,0x00000000,0x00000000
+	.long	0x3c010000,0x00000000,0x00000000,0x00000000
+	.long	0x3c010000,0x00000000,0x00000800,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x80000000,0x00000000,0x00000000,0x00000000
+	.long	0x80000000,0x00000000,0x00000000,0x00000000
+	.long	0x80000000,0x00000000,0x00000001,0x00000000
+	.long	0x80000000,0x00000000,0x00000000,0x00000000
+	.long	0xbf810000,0x00000000,0x00000000,0x00000000
+	.long	0xbf810000,0x00000000,0x00000000,0x00000000
+	.long	0xbf810000,0x00000100,0x00000000,0x00000000
+	.long	0xbf810000,0x00000000,0x00000000,0x00000000
+	.long	0xbc010000,0x00000000,0x00000000,0x00000000
+	.long	0xbc010000,0x00000000,0x00000000,0x00000000
+	.long	0xbc010000,0x00000000,0x00000800,0x00000000
+	.long	0xbc010000,0x00000000,0x00000000,0x00000000
+	.long	0x4a280000,0x6b10f23c,0x44000000,0x00001d7c
+	.long	0x0004ff64,0x4e75f23c,0x44008000,0x00001d7c
+	.long	0x000cff64,0x4e754a29,0x00006bea,0x60d84a28
+	.long	0x00006b10,0xf23c4400,0x7f800000,0x1d7c0002
+	.long	0xff644e75,0xf23c4400,0xff800000,0x1d7c000a
+	.long	0xff644e75,0x4a290000,0x6bea60d8,0x4a280000
+	.long	0x6ba460d0,0x4a280000,0x6b00fbbc,0x60c64a28
+	.long	0x00006b16,0x60be4a28,0x00006b0e,0xf23c4400
+	.long	0x3f800000,0x422eff64,0x4e75f23c,0x4400bf80
+	.long	0x00001d7c,0x0008ff64,0x4e753fff,0x0000c90f
+	.long	0xdaa22168,0xc235bfff,0x0000c90f,0xdaa22168
+	.long	0xc2354a28,0x00006b0e,0xf2009000,0xf23a4800
+	.long	0xffda6000,0xfcf0f200,0x9000f23a,0x4800ffd8
+	.long	0x6000fcea,0xf23c4480,0x3f800000,0x4a280000
+	.long	0x6a10f23c,0x44008000,0x00001d7c,0x000cff64
+	.long	0x6040f23c,0x44000000,0x00001d7c,0x0004ff64
+	.long	0x6030f23a,0x4880faea,0x61ff0000,0x00286000
+	.long	0xfb16f228,0x48800000,0x61ff0000,0x00186000
+	.long	0x030ef228,0x48800000,0x61ff0000,0x00086000
+	.long	0x02ee102e,0xff430240,0x0007303b,0x02064efb
+	.long	0x00020010,0x00180020,0x0026002c,0x00320038
+	.long	0x003ef22e,0xf040ffdc,0x4e75f22e,0xf040ffe8
+	.long	0x4e75f200,0x05004e75,0xf2000580,0x4e75f200
+	.long	0x06004e75,0xf2000680,0x4e75f200,0x07004e75
+	.long	0xf2000780,0x4e75122e,0xff4f67ff,0xfffff7dc
+	.long	0x0c010001,0x67000096,0x0c010002,0x67ffffff
+	.long	0xfa880c01,0x000467ff,0xfffff7c0,0x0c010005
+	.long	0x67ff0000,0x024060ff,0x0000024a,0x122eff4f
+	.long	0x67ffffff,0xfa640c01,0x000167ff,0xfffffa5a
+	.long	0x0c010002,0x67ffffff,0xfa500c01,0x000467ff
+	.long	0xfffffa46,0x0c010003,0x67ff0000,0x021860ff
+	.long	0x00000202,0x122eff4f,0x67ff0000,0x004e0c01
+	.long	0x000167ff,0x00000028,0x0c010002,0x67ffffff
+	.long	0xfa180c01,0x000467ff,0x00000030,0x0c010003
+	.long	0x67ff0000,0x01e060ff,0x000001ca,0x12280000
+	.long	0x10290000,0xb1010201,0x00801d41,0xff654a00
+	.long	0x6a00fdc4,0x6000fdd0,0x422eff65,0x2f001228
+	.long	0x00001029,0x0000b101,0x02010080,0x1d41ff65
+	.long	0x0c2e0004,0xff4f660c,0x41e90000,0x201f60ff
+	.long	0xfffff9c6,0xf21f9000,0xf2294800,0x00004a29
+	.long	0x00006b02,0x4e751d7c,0x0008ff64,0x4e75122e
+	.long	0xff4f67ff,0xfffff6e0,0x0c010001,0x6700ff8e
+	.long	0x0c010002,0x67ffffff,0xf9800c01,0x000467ff
+	.long	0xfffff6c4,0x0c010003,0x67ff0000,0x014860ff
+	.long	0x00000132,0x122eff4f,0x67ffffff,0xf95c0c01
+	.long	0x000167ff,0xfffff952,0x0c010002,0x67ffffff
+	.long	0xf9480c01,0x000467ff,0xfffff93e,0x0c010003
+	.long	0x67ff0000,0x011060ff,0x000000fa,0x122eff4f
+	.long	0x6700ff46,0x0c010001,0x6700ff22,0x0c010002
+	.long	0x67ffffff,0xf9140c01,0x000467ff,0xffffff2c
+	.long	0x0c010003,0x67ff0000,0x00dc60ff,0x000000c6
+	.long	0x122eff4f,0x67ffffff,0xf51e0c01,0x000167ff
+	.long	0xfffffce6,0x0c010002,0x67ffffff,0xfd0a0c01
+	.long	0x000467ff,0xfffff500,0x0c010003,0x67ff0000
+	.long	0x00a460ff,0x0000008e,0x122eff4f,0x67ffffff
+	.long	0xf4e60c01,0x000167ff,0xfffffcae,0x0c010002
+	.long	0x67ffffff,0xfcd20c01,0x000467ff,0xfffff4c8
+	.long	0x0c010003,0x67ff0000,0x006c60ff,0x00000056
+	.long	0x122eff4f,0x67ffffff,0xf8800c01,0x000367ff
+	.long	0x00000052,0x0c010005,0x67ff0000,0x003860ff
+	.long	0xfffff866,0x122eff4f,0x0c010003,0x67340c01
+	.long	0x0005671e,0x6058122e,0xff4f0c01,0x00036708
+	.long	0x0c010005,0x670c6036,0x00ae0100,0x4080ff64
+	.long	0x6010f229,0x48000000,0xf200a800,0x81aeff64
+	.long	0x4e75f229,0x48000000,0x4a290000,0x6b081d7c
+	.long	0x0001ff64,0x4e751d7c,0x0009ff64,0x4e75f228
+	.long	0x48000000,0xf200a800,0x81aeff64,0x4e75f228
+	.long	0x48000000,0x4a280000,0x6bdc1d7c,0x0001ff64
+	.long	0x4e751d7c,0x0009ff64,0x4e75122e,0xff4e67ff
+	.long	0xffffd936,0x0c010001,0x67ffffff,0xfba60c01
+	.long	0x000267ff,0xfffffbca,0x0c010004,0x67ffffff
+	.long	0xd9f60c01,0x000367ff,0xffffffb6,0x60ffffff
+	.long	0xffa0122e,0xff4e67ff,0xffffe620,0x0c010001
+	.long	0x67ffffff,0xfb6e0c01,0x000267ff,0xfffffbc8
+	.long	0x0c010004,0x67ffffff,0xe7560c01,0x000367ff
+	.long	0xffffff7e,0x60ffffff,0xff68122e,0xff4e67ff
+	.long	0xffffd4d2,0x0c010001,0x67ffffff,0xfb360c01
+	.long	0x000267ff,0xfffffb9a,0x0c010004,0x67ffffff
+	.long	0xd76a0c01,0x000367ff,0xffffff46,0x60ffffff
+	.long	0xff30122e,0xff4e67ff,0xffffd972,0x0c010001
+	.long	0x67ffffff,0xfafe0c01,0x000267ff,0xfffffb6a
+	.long	0x0c010004,0x67ffffff,0xdabc0c01,0x000367ff
+	.long	0xffffff0e,0x60ffffff,0xfef8122e,0xff4e67ff
+	.long	0xffffca6a,0x0c010001,0x67ffffff,0xfac60c01
+	.long	0x000267ff,0xfffffb6e,0x0c010004,0x67ffffff
+	.long	0xcc8a0c01,0x000367ff,0xfffffed6,0x60ffffff
+	.long	0xfec0122e,0xff4e67ff,0xffffcc76,0x0c010001
+	.long	0x67ffffff,0xfa8e0c01,0x000267ff,0xfffff6aa
+	.long	0x0c010004,0x67ffffff,0xcd060c01,0x000367ff
+	.long	0xfffffe9e,0x60ffffff,0xfe88122e,0xff4e67ff
+	.long	0xffffe662,0x0c010001,0x67ffffff,0xfa560c01
+	.long	0x000267ff,0xfffff672,0x0c010004,0x67ffffff
+	.long	0xe6c60c01,0x000367ff,0xfffffe66,0x60ffffff
+	.long	0xfe50122e,0xff4e67ff,0xffffb372,0x0c010001
+	.long	0x67ffffff,0xfa1e0c01,0x000267ff,0xfffff63a
+	.long	0x0c010004,0x67ffffff,0xb5380c01,0x000367ff
+	.long	0xfffffe2e,0x60ffffff,0xfe18122e,0xff4e67ff
+	.long	0xffffbdfc,0x0c010001,0x67ffffff,0xf9e60c01
+	.long	0x000267ff,0xfffff602,0x0c010004,0x67ffffff
+	.long	0xbf420c01,0x000367ff,0xfffffdf6,0x60ffffff
+	.long	0xfde0122e,0xff4e67ff,0xffffd17a,0x0c010001
+	.long	0x67ffffff,0xfa2a0c01,0x000267ff,0xfffffa00
+	.long	0x0c010004,0x67ffffff,0xd3080c01,0x000367ff
+	.long	0xfffffdbe,0x60ffffff,0xfda8122e,0xff4e67ff
+	.long	0xffffeb64,0x0c010001,0x67ffffff,0xf9f20c01
+	.long	0x000267ff,0xfffff9c8,0x0c010004,0x67ffffff
+	.long	0xec200c01,0x000367ff,0xfffffd86,0x60ffffff
+	.long	0xfd70122e,0xff4e67ff,0xffffec24,0x0c010001
+	.long	0x67ffffff,0xf9ba0c01,0x000267ff,0xfffff990
+	.long	0x0c010004,0x67ffffff,0xed360c01,0x000367ff
+	.long	0xfffffd4e,0x60ffffff,0xfd38122e,0xff4e67ff
+	.long	0xffffe178,0x0c010001,0x67ffffff,0xf51a0c01
+	.long	0x000267ff,0xfffff960,0x0c010004,0x67ffffff
+	.long	0xe30c0c01,0x000367ff,0xfffffd16,0x60ffffff
+	.long	0xfd00122e,0xff4e67ff,0xffffe582,0x0c010001
+	.long	0x67ffffff,0xf4e20c01,0x000267ff,0xfffff928
+	.long	0x0c010004,0x67ffffff,0xe5940c01,0x000367ff
+	.long	0xfffffcde,0x60ffffff,0xfcc8122e,0xff4e67ff
+	.long	0xffffe59a,0x0c010001,0x67ffffff,0xf4aa0c01
+	.long	0x000267ff,0xfffff8f0,0x0c010004,0x67ffffff
+	.long	0xe5d60c01,0x000367ff,0xfffffca6,0x60ffffff
+	.long	0xfc90122e,0xff4e67ff,0xffffd530,0x0c010001
+	.long	0x67ffffff,0xf8da0c01,0x000267ff,0xfffff888
+	.long	0x0c010004,0x67ffffff,0xd5b60c01,0x000367ff
+	.long	0xfffffc6e,0x60ffffff,0xfc58122e,0xff4e67ff
+	.long	0xffffcac2,0x0c010001,0x67ffffff,0xf8de0c01
+	.long	0x000267ff,0xfffff442,0x0c010004,0x67ffffff
+	.long	0xcb340c01,0x000367ff,0xfffffc36,0x60ffffff
+	.long	0xfc20122e,0xff4e67ff,0xffffb14c,0x0c010001
+	.long	0x67ffffff,0xf86a0c01,0x000267ff,0xfffff40a
+	.long	0x0c010004,0x67ffffff,0xb30e0c01,0x000367ff
+	.long	0xfffffbfe,0x60ffffff,0xfbe8122e,0xff4e67ff
+	.long	0xffffd40e,0x0c010001,0x67ffffff,0xf7b60c01
+	.long	0x000267ff,0xfffff3d2,0x0c010004,0x67ffffff
+	.long	0xd40c0c01,0x000367ff,0xfffffbc6,0x60ffffff
+	.long	0xfbb0122e,0xff4e67ff,0xffffd40a,0x0c010001
+	.long	0x67ffffff,0xf77e0c01,0x000267ff,0xfffff39a
+	.long	0x0c010004,0x67ffffff,0xd41a0c01,0x000367ff
+	.long	0xfffffb8e,0x60ffffff,0xfb78122e,0xff4e67ff
+	.long	0xffffb292,0x0c010001,0x67ffffff,0xf81a0c01
+	.long	0x000267ff,0xfffff83e,0x0c010004,0x67ffffff
+	.long	0xb50a0c01,0x000367ff,0xfffff83a,0x60ffffff
+	.long	0xf844122e,0xff4e67ff,0xfffff89e,0x0c010001
+	.long	0x67ffffff,0xf8ca0c01,0x000267ff,0xfffff8f8
+	.long	0x0c010004,0x67ffffff,0xf8800c01,0x000367ff
+	.long	0xfffffab4,0x60ffffff,0xfac0122e,0xff4e67ff
+	.long	0xfffff96e,0x0c010001,0x67ffffff,0xf99a0c01
+	.long	0x000267ff,0xfffff9c8,0x0c010004,0x67ffffff
+	.long	0xf9500c01,0x000367ff,0xfffffa7c,0x60ffffff
+	.long	0xfa88122e,0xff4e67ff,0xfffff9d8,0x0c010001
+	.long	0x67ffffff,0xfa060c01,0x000267ff,0xfffffa34
+	.long	0x0c010004,0x67ffffff,0xf9ba0c01,0x000367ff
+	.long	0xfffffa44,0x60ffffff,0xfa500c2f,0x00070003
+	.long	0x673e1d7c,0x0000ff4e,0x1d7c0000,0xff4ff22e
+	.long	0xf080ff78,0x41ef0004,0x43eeff78,0x0c010003
+	.long	0x67160c01,0x00026708,0x61ff0000,0x02004e75
+	.long	0x61ff0000,0x1b9e4e75,0x61ff0000,0x05e44e75
+	.long	0x1d7c0004,0xff4e60c0,0x4afc006d,0x000005d2
+	.long	0x00000fc8,0xfffffa6e,0x0000106c,0x00002314
+	.long	0x00000000,0xfffffaa6,0x00000000,0xfffffade
+	.long	0xfffffb16,0xfffffb4e,0x00000000,0xfffffb86
+	.long	0xfffffbbe,0xfffffbf6,0xfffffc2e,0xfffffc66
+	.long	0xfffffc9e,0xfffffcd6,0x00000000,0xfffffd0e
+	.long	0xfffffd46,0xfffffd7e,0x00000000,0x00001112
+	.long	0xfffffdb6,0x00000ca8,0x00000000,0xfffffdee
+	.long	0xfffffe26,0xfffffe5e,0xfffffe96,0x0000089e
+	.long	0xffffff06,0x00001b84,0x000001de,0x00001854
+	.long	0xffffff3e,0xffffff76,0x00001512,0x00001f4c
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0xfffffece
+	.long	0xfffffece,0xfffffece,0xfffffece,0xfffffece
+	.long	0xfffffece,0xfffffece,0xfffffece,0x000013b0
+	.long	0x00000000,0x00000f56,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x000005c0
+	.long	0x00002302,0x00000000,0x00000000,0x000005ca
+	.long	0x0000230c,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00001100
+	.long	0x00000000,0x00000c96,0x00000000,0x0000110a
+	.long	0x00000000,0x00000ca0,0x00000000,0x0000088c
+	.long	0x00000000,0x00001b72,0x000001cc,0x00000896
+	.long	0x00000000,0x00001b7c,0x000001d6,0x00001f3a
+	.long	0x00000000,0x00000000,0x00000000,0x00001f44
+	.long	0xffffc001,0xffffff81,0xfffffc01,0x00004000
+	.long	0x0000007f,0x000003ff,0x02000030,0x00000040
+	.long	0x60080200,0x00300000,0x00802d40,0xff5c4241
+	.long	0x122eff4f,0xe709822e,0xff4e6600,0x02e43d69
+	.long	0x0000ff90,0x2d690004,0xff942d69,0x0008ff98
+	.long	0x3d680000,0xff842d68,0x0004ff88,0x2d680008
+	.long	0xff8c61ff,0x000024ce,0x2f0061ff,0x00002572
+	.long	0xd197322e,0xff5eec09,0x201fb0bb,0x14846700
+	.long	0x011e6d00,0x0062b0bb,0x14846700,0x021a6e00
+	.long	0x014af22e,0xd080ff90,0xf22e9000,0xff5cf23c
+	.long	0x88000000,0x0000f22e,0x4823ff84,0xf201a800
+	.long	0xf23c9000,0x00000000,0x83aeff64,0xf22ef080
+	.long	0xff842f02,0x322eff84,0x24010281,0x00007fff
+	.long	0x02428000,0x92808242,0x3d41ff84,0x241ff22e
+	.long	0xd080ff84,0x4e75f22e,0xd080ff90,0xf22e9000
+	.long	0xff5cf23c,0x88000000,0x0000f22e,0x4823ff84
+	.long	0xf201a800,0xf23c9000,0x00000000,0x83aeff64
+	.long	0x00ae0000,0x1048ff64,0x122eff62,0x02010013
+	.long	0x661c082e,0x0003ff64,0x56c1202e,0xff5c61ff
+	.long	0x00004fcc,0x812eff64,0xf210d080,0x4e75222e
+	.long	0xff5c0201,0x00c06634,0xf22ef080,0xff842f02
+	.long	0x322eff84,0x34010281,0x00007fff,0x92800481
+	.long	0x00006000,0x02417fff,0x02428000,0x82423d41
+	.long	0xff84241f,0xf22ed040,0xff8460a6,0xf22ed080
+	.long	0xff90222e,0xff5c0201,0x0030f201,0x9000f22e
+	.long	0x4823ff84,0xf23c9000,0x00000000,0x60aaf22e
+	.long	0xd080ff90,0xf22e9000,0xff5cf23c,0x88000000
+	.long	0x0000f22e,0x4823ff84,0xf201a800,0xf23c9000
+	.long	0x00000000,0x83aeff64,0xf2000098,0xf23c58b8
+	.long	0x0002f293,0xff3c6000,0xfee408ee,0x0003ff66
+	.long	0xf22ed080,0xff90f23c,0x90000000,0x0010f23c
+	.long	0x88000000,0x0000f22e,0x4823ff84,0xf201a800
+	.long	0xf23c9000,0x00000000,0x83aeff64,0x122eff62
+	.long	0x0201000b,0x6620f22e,0xf080ff84,0x41eeff84
+	.long	0x222eff5c,0x61ff0000,0x4dd8812e,0xff64f22e
+	.long	0xd080ff84,0x4e75f22e,0xd040ff90,0x222eff5c
+	.long	0x020100c0,0x6652f22e,0x9000ff5c,0xf23c8800
+	.long	0x00000000,0xf22e48a3,0xff84f23c,0x90000000
+	.long	0x0000f22e,0xf040ff84,0x2f02322e,0xff842401
+	.long	0x02810000,0x7fff0242,0x80009280,0x06810000
+	.long	0x60000241,0x7fff8242,0x3d41ff84,0x241ff22e
+	.long	0xd040ff84,0x6000ff80,0x222eff5c,0x02010030
+	.long	0xf2019000,0x60a6f22e,0xd080ff90,0xf22e9000
+	.long	0xff5cf23c,0x88000000,0x0000f22e,0x4823ff84
+	.long	0xf201a800,0xf23c9000,0x00000000,0x83aeff64
+	.long	0xf2000098,0xf23c58b8,0x0002f292,0xfde0f294
+	.long	0xfefaf22e,0xd040ff90,0x222eff5c,0x020100c0
+	.long	0x00010010,0xf2019000,0xf23c8800,0x00000000
+	.long	0xf22e48a3,0xff84f23c,0x90000000,0x0000f200
+	.long	0x0498f23c,0x58b80002,0xf293fda2,0x6000febc
+	.long	0x323b120a,0x4efb1006,0x4afc0030,0xfd120072
+	.long	0x00cc006c,0xfd120066,0x00000000,0x00720072
+	.long	0x0060006c,0x00720066,0x00000000,0x009e0060
+	.long	0x009e006c,0x009e0066,0x00000000,0x006c006c
+	.long	0x006c006c,0x006c0066,0x00000000,0xfd120072
+	.long	0x00cc006c,0xfd120066,0x00000000,0x00660066
+	.long	0x00660066,0x00660066,0x00000000,0x60ff0000
+	.long	0x230e60ff,0x00002284,0x60ff0000,0x227e1028
+	.long	0x00001229,0x0000b101,0x6a10f23c,0x44008000
+	.long	0x00001d7c,0x000cff64,0x4e75f23c,0x44000000
+	.long	0x00001d7c,0x0004ff64,0x4e75f229,0xd0800000
+	.long	0x10280000,0x12290000,0xb1016a10,0xf2000018
+	.long	0xf200001a,0x1d7c000a,0xff644e75,0xf2000018
+	.long	0x1d7c0002,0xff644e75,0xf228d080,0x00001028
+	.long	0x00001229,0x0000b101,0x6ae260d0,0x02000030
+	.long	0x00000040,0x60080200,0x00300000,0x00802d40
+	.long	0xff5c122e,0xff4e6600,0x02620200,0x00c06600
+	.long	0x007c4a28,0x00006a06,0x08ee0003,0xff64f228
+	.long	0xd0800000,0x4e750200,0x00c06600,0x006008ee
+	.long	0x0003ff66,0x4a280000,0x6a0608ee,0x0003ff64
+	.long	0xf228d080,0x0000082e,0x0003ff62,0x66024e75
+	.long	0x3d680000,0xff842d68,0x0004ff88,0x2d680008
+	.long	0xff8c41ee,0xff8461ff,0x00004950,0x44400640
+	.long	0x6000322e,0xff840241,0x80000240,0x7fff8041
+	.long	0x3d40ff84,0xf22ed040,0xff844e75,0x0c000040
+	.long	0x667e3d68,0x0000ff84,0x2d680004,0xff882d68
+	.long	0x0008ff8c,0x61ff0000,0x206c0c80,0x0000007f
+	.long	0x6c000092,0x0c80ffff,0xff816700,0x01786d00
+	.long	0x00f4f23c,0x88000000,0x0000f22e,0x9000ff5c
+	.long	0xf22e4800,0xff84f201,0xa800f23c,0x90000000
+	.long	0x000083ae,0xff642f02,0xf22ef080,0xff84322e
+	.long	0xff843401,0x02810000,0x7fff9280,0x02428000
+	.long	0x84413d42,0xff84241f,0xf22ed080,0xff844e75
+	.long	0x3d680000,0xff842d68,0x0004ff88,0x2d680008
+	.long	0xff8c61ff,0x00001fee,0x0c800000,0x03ff6c00
+	.long	0x00140c80,0xfffffc01,0x670000fa,0x6d000076
+	.long	0x6000ff80,0x08ee0003,0xff664a2e,0xff846a06
+	.long	0x08ee0003,0xff64122e,0xff620201,0x000b661a
+	.long	0x41eeff84,0x222eff5c,0x61ff0000,0x4a74812e
+	.long	0xff64f22e,0xd080ff84,0x4e752d6e,0xff88ff94
+	.long	0x2d6eff8c,0xff98322e,0xff842f02,0x34010281
+	.long	0x00007fff,0x92800242,0x80000681,0x00006000
+	.long	0x02417fff,0x84413d42,0xff90f22e,0xd040ff90
+	.long	0x241f60ac,0xf23c8800,0x00000000,0xf22e9000
+	.long	0xff5cf22e,0x4800ff84,0xf23c9000,0x00000000
+	.long	0xf201a800,0x83aeff64,0x00ae0000,0x1048ff64
+	.long	0x122eff62,0x02010013,0x661c082e,0x0003ff64
+	.long	0x56c1202e,0xff5c61ff,0x00004ae4,0x812eff64
+	.long	0xf210d080,0x4e752f02,0x322eff84,0x24010281
+	.long	0x00007fff,0x02428000,0x92800481,0x00006000
+	.long	0x02417fff,0x82423d41,0xff84241f,0xf22ed040
+	.long	0xff8460b6,0xf23c8800,0x00000000,0xf22e9000
+	.long	0xff5cf22e,0x4800ff84,0xf201a800,0xf23c9000
+	.long	0x00000000,0x83aeff64,0xf2000098,0xf23c58b8
+	.long	0x0002f293,0xff746000,0xfe7e0c01,0x00046700
+	.long	0xfdb60c01,0x000567ff,0x00001f98,0x0c010003
+	.long	0x67ff0000,0x1fa2f228,0x48000000,0xf200a800
+	.long	0xe1981d40,0xff644e75,0x51fc51fc,0x51fc51fc
+	.long	0x00003fff,0x0000007e,0x000003fe,0xffffc001
+	.long	0xffffff81,0xfffffc01,0x02000030,0x00000040
+	.long	0x60080200,0x00300000,0x00802d40,0xff5c4241
+	.long	0x122eff4f,0xe709822e,0xff4e6600,0x02d63d69
+	.long	0x0000ff90,0x2d690004,0xff942d69,0x0008ff98
+	.long	0x3d680000,0xff842d68,0x0004ff88,0x2d680008
+	.long	0xff8c61ff,0x00001e0e,0x2f0061ff,0x00001eb2
+	.long	0x4497d197,0x322eff5e,0xec09201f,0xb0bb148e
+	.long	0x6f000074,0xb0bb1520,0xff7a6700,0x020c6e00
+	.long	0x013cf22e,0xd080ff90,0xf22e9000,0xff5cf23c
+	.long	0x88000000,0x0000f22e,0x4820ff84,0xf201a800
+	.long	0xf23c9000,0x00000000,0x83aeff64,0xf22ef080
+	.long	0xff842f02,0x322eff84,0x24010281,0x00007fff
+	.long	0x02428000,0x92808242,0x3d41ff84,0x241ff22e
+	.long	0xd080ff84,0x4e750000,0x7fff0000,0x407f0000
+	.long	0x43ff201f,0x60c62f00,0xf22ed080,0xff90f22e
+	.long	0x9000ff5c,0xf23c8800,0x00000000,0xf22e4820
+	.long	0xff84f200,0xa800f23c,0x90000000,0x000081ae
+	.long	0xff64f227,0xe0013017,0xdffc0000,0x000c0280
+	.long	0x00007fff,0x9097b0bb,0x14ae6db6,0x201f00ae
+	.long	0x00001048,0xff64122e,0xff620201,0x0013661c
+	.long	0x082e0003,0xff6456c1,0x202eff5c,0x61ff0000
+	.long	0x48de812e,0xff64f210,0xd0804e75,0x222eff5c
+	.long	0x020100c0,0x6634f22e,0xf080ff84,0x2f02322e
+	.long	0xff843401,0x02810000,0x7fff9280,0x04810000
+	.long	0x60000241,0x7fff0242,0x80008242,0x3d41ff84
+	.long	0x241ff22e,0xd040ff84,0x60a6f22e,0xd080ff90
+	.long	0x222eff5c,0x02010030,0xf2019000,0xf22e4820
+	.long	0xff84f23c,0x90000000,0x000060aa,0x08ee0003
+	.long	0xff66f22e,0xd080ff90,0xf23c9000,0x00000010
+	.long	0xf23c8800,0x00000000,0xf22e4820,0xff84f201
+	.long	0xa800f23c,0x90000000,0x000083ae,0xff64122e
+	.long	0xff620201,0x000b6620,0xf22ef080,0xff8441ee
+	.long	0xff84222e,0xff5c61ff,0x00004726,0x812eff64
+	.long	0xf22ed080,0xff844e75,0xf22ed040,0xff90222e
+	.long	0xff5c0201,0x00c06652,0xf22e9000,0xff5cf23c
+	.long	0x88000000,0x0000f22e,0x48a0ff84,0xf23c9000
+	.long	0x00000000,0xf22ef040,0xff842f02,0x322eff84
+	.long	0x24010281,0x00007fff,0x02428000,0x92800681
+	.long	0x00006000,0x02417fff,0x82423d41,0xff84241f
+	.long	0xf22ed040,0xff846000,0xff80222e,0xff5c0201
+	.long	0x0030f201,0x900060a6,0xf22ed080,0xff90f22e
+	.long	0x9000ff5c,0xf23c8800,0x00000000,0xf22e4820
+	.long	0xff84f201,0xa800f23c,0x90000000,0x000083ae
+	.long	0xff64f200,0x0098f23c,0x58b80001,0xf292fdee
+	.long	0xf294fefa,0xf22ed040,0xff90222e,0xff5c0201
+	.long	0x00c00001,0x0010f201,0x9000f23c,0x88000000
+	.long	0x0000f22e,0x48a0ff84,0xf23c9000,0x00000000
+	.long	0xf2000498,0xf23c58b8,0x0001f293,0xfdb06000
+	.long	0xfebc323b,0x120a4efb,0x10064afc,0x0030fd20
+	.long	0x009e0072,0x0060fd20,0x00660000,0x00000072
+	.long	0x006c0072,0x00600072,0x00660000,0x000000d0
+	.long	0x00d0006c,0x006000d0,0x00660000,0x00000060
+	.long	0x00600060,0x00600060,0x00660000,0x0000fd20
+	.long	0x009e0072,0x0060fd20,0x00660000,0x00000066
+	.long	0x00660066,0x00660066,0x00660000,0x000060ff
+	.long	0x00001bd8,0x60ff0000,0x1bd260ff,0x00001c50
+	.long	0x10280000,0x12290000,0xb1016a10,0xf23c4400
+	.long	0x80000000,0x1d7c000c,0xff644e75,0xf23c4400
+	.long	0x00000000,0x1d7c0004,0xff644e75,0x006e0410
+	.long	0xff661028,0x00001229,0x0000b101,0x6a10f23c
+	.long	0x4400ff80,0x00001d7c,0x000aff64,0x4e75f23c
+	.long	0x44007f80,0x00001d7c,0x0002ff64,0x4e751029
+	.long	0x00001228,0x0000b101,0x6a16f229,0xd0800000
+	.long	0xf2000018,0xf200001a,0x1d7c000a,0xff644e75
+	.long	0xf229d080,0x0000f200,0x00181d7c,0x0002ff64
+	.long	0x4e750200,0x00300000,0x00406008,0x02000030
+	.long	0x00000080,0x2d40ff5c,0x122eff4e,0x66000276
+	.long	0x020000c0,0x66000090,0x2d680004,0xff882d68
+	.long	0x0008ff8c,0x30280000,0x0a408000,0x6a061d7c
+	.long	0x0008ff64,0x3d40ff84,0xf22ed080,0xff844e75
+	.long	0x020000c0,0x666008ee,0x0003ff66,0x2d680004
+	.long	0xff882d68,0x0008ff8c,0x30280000,0x0a408000
+	.long	0x6a061d7c,0x0008ff64,0x3d40ff84,0xf22ed080
+	.long	0xff84082e,0x0003ff62,0x66024e75,0x41eeff84
+	.long	0x61ff0000,0x42664440,0x06406000,0x322eff84
+	.long	0x02418000,0x02407fff,0x80413d40,0xff84f22e
+	.long	0xd040ff84,0x4e750c00,0x0040667e,0x3d680000
+	.long	0xff842d68,0x0004ff88,0x2d680008,0xff8c61ff
+	.long	0x00001982,0x0c800000,0x007f6c00,0x00900c80
+	.long	0xffffff81,0x67000178,0x6d0000f4,0xf23c8800
+	.long	0x00000000,0xf22e9000,0xff5cf22e,0x481aff84
+	.long	0xf201a800,0xf23c9000,0x00000000,0x83aeff64
+	.long	0x2f02f22e,0xf080ff84,0x322eff84,0x34010281
+	.long	0x00007fff,0x92800242,0x80008441,0x3d42ff84
+	.long	0x241ff22e,0xd080ff84,0x4e753d68,0x0000ff84
+	.long	0x2d680004,0xff882d68,0x0008ff8c,0x61ff0000
+	.long	0x19040c80,0x000003ff,0x6c120c80,0xfffffc01
+	.long	0x670000fc,0x6d000078,0x6000ff82,0x08ee0003
+	.long	0xff660a2e,0x0080ff84,0x6a0608ee,0x0003ff64
+	.long	0x122eff62,0x0201000b,0x661a41ee,0xff84222e
+	.long	0xff5c61ff,0x0000438a,0x812eff64,0xf22ed080
+	.long	0xff844e75,0x2d6eff88,0xff942d6e,0xff8cff98
+	.long	0x322eff84,0x2f022401,0x02810000,0x7fff0242
+	.long	0x80009280,0x06810000,0x60000241,0x7fff8242
+	.long	0x3d41ff90,0xf22ed040,0xff90241f,0x60acf23c
+	.long	0x88000000,0x0000f22e,0x9000ff5c,0xf22e481a
+	.long	0xff84f23c,0x90000000,0x0000f201,0xa80083ae
+	.long	0xff6400ae,0x00001048,0xff64122e,0xff620201
+	.long	0x0013661c,0x082e0003,0xff6456c1,0x202eff5c
+	.long	0x61ff0000,0x43fa812e,0xff64f210,0xd0804e75
+	.long	0x2f02322e,0xff842401,0x02810000,0x7fff0242
+	.long	0x80009280,0x04810000,0x60000241,0x7fff8242
+	.long	0x3d41ff84,0xf22ed040,0xff84241f,0x60b6f23c
+	.long	0x88000000,0x0000f22e,0x9000ff5c,0xf22e481a
+	.long	0xff84f201,0xa800f23c,0x90000000,0x000083ae
+	.long	0xff64f200,0x0098f23c,0x58b80002,0xf293ff74
+	.long	0x6000fe7e,0x0c010004,0x6700fdb6,0x0c010005
+	.long	0x67ff0000,0x18ae0c01,0x000367ff,0x000018b8
+	.long	0xf228481a,0x0000f200,0xa800e198,0x1d40ff64
+	.long	0x4e75122e,0xff4e6610,0x4a280000,0x6b024e75
+	.long	0x1d7c0008,0xff644e75,0x0c010001,0x67400c01
+	.long	0x00026724,0x0c010005,0x67ff0000,0x18660c01
+	.long	0x000367ff,0x00001870,0x4a280000,0x6b024e75
+	.long	0x1d7c0008,0xff644e75,0x4a280000,0x6b081d7c
+	.long	0x0002ff64,0x4e751d7c,0x000aff64,0x4e754a28
+	.long	0x00006b08,0x1d7c0004,0xff644e75,0x1d7c000c
+	.long	0xff644e75,0x122eff4e,0x66280200,0x0030f200
+	.long	0x9000f23c,0x88000000,0x0000f228,0x48010000
+	.long	0xf23c9000,0x00000000,0xf200a800,0x81aeff64
+	.long	0x4e750c01,0x0001672e,0x0c010002,0x674e0c01
+	.long	0x00046710,0x0c010005,0x67ff0000,0x17d660ff
+	.long	0x000017e4,0x3d680000,0xff841d7c,0x0080ff88
+	.long	0x41eeff84,0x60a44a28,0x00006b10,0xf23c4400
+	.long	0x00000000,0x1d7c0004,0xff644e75,0xf23c4400
+	.long	0x80000000,0x1d7c000c,0xff644e75,0xf228d080
+	.long	0x00004a28,0x00006b08,0x1d7c0002,0xff644e75
+	.long	0x1d7c000a,0xff644e75,0x122eff4e,0x6618f23c
+	.long	0x88000000,0x0000f228,0x48030000,0xf200a800
+	.long	0x81aeff64,0x4e750c01,0x0001672e,0x0c010002
+	.long	0x674e0c01,0x00046710,0x0c010005,0x67ff0000
+	.long	0x174260ff,0x00001750,0x3d680000,0xff841d7c
+	.long	0x0080ff88,0x41eeff84,0x60b44a28,0x00006b10
+	.long	0xf23c4400,0x00000000,0x1d7c0004,0xff644e75
+	.long	0xf23c4400,0x80000000,0x1d7c000c,0xff644e75
+	.long	0xf228d080,0x00004a28,0x00006b08,0x1d7c0002
+	.long	0xff644e75,0x1d7c000a,0xff644e75,0x02000030
+	.long	0x00000040,0x60080200,0x00300000,0x00802d40
+	.long	0xff5c122e,0xff4e6600,0x025c0200,0x00c0667e
+	.long	0x2d680004,0xff882d68,0x0008ff8c,0x32280000
+	.long	0x0881000f,0x3d41ff84,0xf22ed080,0xff844e75
+	.long	0x020000c0,0x665808ee,0x0003ff66,0x2d680004
+	.long	0xff882d68,0x0008ff8c,0x30280000,0x0880000f
+	.long	0x3d40ff84,0xf22ed080,0xff84082e,0x0003ff62
+	.long	0x66024e75,0x41eeff84,0x61ff0000,0x3e0e4440
+	.long	0x06406000,0x322eff84,0x02418000,0x02407fff
+	.long	0x80413d40,0xff84f22e,0xd040ff84,0x4e750c00
+	.long	0x0040667e,0x3d680000,0xff842d68,0x0004ff88
+	.long	0x2d680008,0xff8c61ff,0x0000152a,0x0c800000
+	.long	0x007f6c00,0x00900c80,0xffffff81,0x67000170
+	.long	0x6d0000ec,0xf23c8800,0x00000000,0xf22e9000
+	.long	0xff5cf22e,0x4818ff84,0xf201a800,0xf23c9000
+	.long	0x00000000,0x83aeff64,0x2f02f22e,0xf080ff84
+	.long	0x322eff84,0x24010281,0x00007fff,0x92800242
+	.long	0x80008441,0x3d42ff84,0x241ff22e,0xd080ff84
+	.long	0x4e753d68,0x0000ff84,0x2d680004,0xff882d68
+	.long	0x0008ff8c,0x61ff0000,0x14ac0c80,0x000003ff
+	.long	0x6c120c80,0xfffffc01,0x670000f4,0x6d000070
+	.long	0x6000ff82,0x08ee0003,0xff6608ae,0x0007ff84
+	.long	0x122eff62,0x0201000b,0x661a41ee,0xff84222e
+	.long	0xff5c61ff,0x00003f3a,0x812eff64,0xf22ed080
+	.long	0xff844e75,0x2d6eff88,0xff942d6e,0xff8cff98
+	.long	0x322eff84,0x2f022401,0x02810000,0x7fff0242
+	.long	0x80009280,0x06810000,0x60000241,0x7fff8242
+	.long	0x3d41ff90,0xf22ed040,0xff90241f,0x60acf23c
+	.long	0x88000000,0x0000f22e,0x9000ff5c,0xf22e4818
+	.long	0xff84f23c,0x90000000,0x0000f201,0xa80083ae
+	.long	0xff6400ae,0x00001048,0xff64122e,0xff620201
+	.long	0x0013661c,0x082e0003,0xff6456c1,0x202eff5c
+	.long	0x61ff0000,0x3faa812e,0xff64f210,0xd0804e75
+	.long	0x2f02322e,0xff842401,0x02810000,0x7fff0242
+	.long	0x80009280,0x04810000,0x60000241,0x7fff8242
+	.long	0x3d41ff84,0xf22ed040,0xff84241f,0x60b6f23c
+	.long	0x88000000,0x0000f22e,0x9000ff5c,0xf22e4818
+	.long	0xff84f201,0xa800f23c,0x90000000,0x000083ae
+	.long	0xff64f200,0x0098f23c,0x58b80002,0xf293ff74
+	.long	0x6000fe86,0x0c010004,0x6700fdc6,0x0c010005
+	.long	0x67ff0000,0x145e0c01,0x000367ff,0x00001468
+	.long	0xf2284818,0x00000c01,0x00026708,0x1d7c0004
+	.long	0xff644e75,0x1d7c0002,0xff644e75,0x4241122e
+	.long	0xff4fe709,0x822eff4e,0x6618f229,0xd0800000
+	.long	0xf2284838,0x0000f200,0xa800e198,0x1d40ff64
+	.long	0x4e75323b,0x120a4efb,0x10064afc,0x0030ffdc
+	.long	0xffdcffdc,0x006000f8,0x006e0000,0x0000ffdc
+	.long	0xffdcffdc,0x0060007c,0x006e0000,0x0000ffdc
+	.long	0xffdcffdc,0x0060007c,0x006e0000,0x00000060
+	.long	0x00600060,0x00600060,0x006e0000,0x00000114
+	.long	0x009c009c,0x006000bc,0x006e0000,0x0000006e
+	.long	0x006e006e,0x006e006e,0x006e0000,0x000061ff
+	.long	0x00001388,0x022e00f7,0xff644e75,0x61ff0000
+	.long	0x137a022e,0x00f7ff64,0x4e753d68,0x0000ff84
+	.long	0x20280004,0x08c0001f,0x2d40ff88,0x2d680008
+	.long	0xff8c41ee,0xff846000,0xff422d69,0x0000ff84
+	.long	0x20290004,0x08c0001f,0x2d40ff88,0x2d690008
+	.long	0xff8c43ee,0xff846000,0xff223d69,0x0000ff90
+	.long	0x3d680000,0xff842029,0x000408c0,0x001f2d40
+	.long	0xff942028,0x000408c0,0x001f2d40,0xff882d69
+	.long	0x0008ff98,0x2d680008,0xff8c43ee,0xff9041ee
+	.long	0xff846000,0xfee61028,0x00001229,0x0000b101
+	.long	0x6b00ff78,0x4a006b02,0x4e751d7c,0x0008ff64
+	.long	0x4e751028,0x00001229,0x0000b101,0x6b00ff7c
+	.long	0x4a006a02,0x4e751d7c,0x0008ff64,0x4e752d40
+	.long	0xff5c4241,0x122eff4f,0xe709822e,0xff4e6600
+	.long	0x02a03d69,0x0000ff90,0x2d690004,0xff942d69
+	.long	0x0008ff98,0x3d680000,0xff842d68,0x0004ff88
+	.long	0x2d680008,0xff8c61ff,0x0000119a,0x2f0061ff
+	.long	0x0000123e,0xd09f0c80,0xffffc001,0x670000f8
+	.long	0x6d000064,0x0c800000,0x40006700,0x01da6e00
+	.long	0x0122f22e,0xd080ff90,0xf22e9000,0xff5cf23c
+	.long	0x88000000,0x0000f22e,0x4827ff84,0xf201a800
+	.long	0xf23c9000,0x00000000,0x83aeff64,0xf22ef080
+	.long	0xff842f02,0x322eff84,0x24010281,0x00007fff
+	.long	0x02428000,0x92808242,0x3d41ff84,0x241ff22e
+	.long	0xd080ff84,0x4e75f22e,0xd080ff90,0xf22e9000
+	.long	0xff5cf23c,0x88000000,0x0000f22e,0x4827ff84
+	.long	0xf201a800,0xf23c9000,0x00000000,0x83aeff64
+	.long	0x00ae0000,0x1048ff64,0x122eff62,0x02010013
+	.long	0x6620082e,0x0003ff64,0x56c1202e,0xff5c0200
+	.long	0x003061ff,0x00003c98,0x812eff64,0xf210d080
+	.long	0x4e75f22e,0xf080ff84,0x2f02322e,0xff842401
+	.long	0x02810000,0x7fff9280,0x04810000,0x60000241
+	.long	0x7fff0242,0x80008242,0x3d41ff84,0x241ff22e
+	.long	0xd040ff84,0x60acf22e,0xd080ff90,0xf22e9000
+	.long	0xff5cf23c,0x88000000,0x0000f22e,0x4827ff84
+	.long	0xf201a800,0xf23c9000,0x00000000,0x83aeff64
+	.long	0xf2000098,0xf23c58b8,0x0002f293,0xff646000
+	.long	0xff0c08ee,0x0003ff66,0xf22ed080,0xff90f23c
+	.long	0x90000000,0x0010f23c,0x88000000,0x0000f22e
+	.long	0x4827ff84,0xf201a800,0xf23c9000,0x00000000
+	.long	0x83aeff64,0x122eff62,0x0201000b,0x6620f22e
+	.long	0xf080ff84,0x41eeff84,0x222eff5c,0x61ff0000
+	.long	0x3b56812e,0xff64f22e,0xd080ff84,0x4e75f22e
+	.long	0xd040ff90,0xf22e9000,0xff5cf23c,0x88000000
+	.long	0x0000f22e,0x48a7ff84,0xf23c9000,0x00000000
+	.long	0xf22ef040,0xff842f02,0x322eff84,0x24010281
+	.long	0x00007fff,0x02428000,0x92800681,0x00006000
+	.long	0x02417fff,0x82423d41,0xff84241f,0xf22ed040
+	.long	0xff846000,0xff8af22e,0xd080ff90,0xf22e9000
+	.long	0xff5cf23c,0x88000000,0x0000f22e,0x4827ff84
+	.long	0xf201a800,0xf23c9000,0x00000000,0x83aeff64
+	.long	0xf2000098,0xf23c58b8,0x0002f292,0xfe20f294
+	.long	0xff12f22e,0xd040ff90,0x222eff5c,0x020100c0
+	.long	0x00010010,0xf2019000,0xf23c8800,0x00000000
+	.long	0xf22e48a7,0xff84f23c,0x90000000,0x0000f200
+	.long	0x0498f23c,0x58b80002,0xf293fde2,0x6000fed4
+	.long	0x323b120a,0x4efb1006,0x4afc0030,0xfd560072
+	.long	0x0078006c,0xfd560066,0x00000000,0x00720072
+	.long	0x0060006c,0x00720066,0x00000000,0x007e0060
+	.long	0x007e006c,0x007e0066,0x00000000,0x006c006c
+	.long	0x006c006c,0x006c0066,0x00000000,0xfd560072
+	.long	0x0078006c,0xfd560066,0x00000000,0x00660066
+	.long	0x00660066,0x00660066,0x00000000,0x60ff0000
+	.long	0x101e60ff,0x00000f94,0x60ff0000,0x0f8e60ff
+	.long	0xffffed0e,0x60ffffff,0xed6260ff,0xffffed2e
+	.long	0x2d40ff5c,0x4241122e,0xff4fe709,0x822eff4e
+	.long	0x6600027c,0x3d690000,0xff902d69,0x0004ff94
+	.long	0x2d690008,0xff983d68,0x0000ff84,0x2d680004
+	.long	0xff882d68,0x0008ff8c,0x61ff0000,0x0e582f00
+	.long	0x61ff0000,0x0efc4497,0xd197322e,0xff5eec09
+	.long	0x201f0c80,0xffffc001,0x6f000064,0x0c800000
+	.long	0x3fff6700,0x01b66e00,0x0100f22e,0xd080ff90
+	.long	0xf22e9000,0xff5cf23c,0x88000000,0x0000f22e
+	.long	0x4824ff84,0xf201a800,0xf23c9000,0x00000000
+	.long	0x83aeff64,0xf22ef080,0xff842f02,0x322eff84
+	.long	0x24010281,0x00007fff,0x02428000,0x92808242
+	.long	0x3d41ff84,0x241ff22e,0xd080ff84,0x4e75f22e
+	.long	0xd080ff90,0xf22e9000,0xff5cf23c,0x88000000
+	.long	0x0000f22e,0x4824ff84,0xf201a800,0xf23c9000
+	.long	0x00000000,0x83aeff64,0xf227e001,0x3217dffc
+	.long	0x0000000c,0x02810000,0x7fff9280,0x0c810000
+	.long	0x7fff6d90,0x006e1048,0xff66122e,0xff620201
+	.long	0x00136620,0x082e0003,0xff6456c1,0x202eff5c
+	.long	0x02000030,0x61ff0000,0x3936812e,0xff64f210
+	.long	0xd0804e75,0xf22ef080,0xff842f02,0x322eff84
+	.long	0x24010281,0x00007fff,0x02428000,0x92800481
+	.long	0x00006000,0x02417fff,0x82423d41,0xff84241f
+	.long	0xf22ed040,0xff8460ac,0x08ee0003,0xff66f22e
+	.long	0xd080ff90,0xf23c9000,0x00000010,0xf23c8800
+	.long	0x00000000,0xf22e4824,0xff84f201,0xa800f23c
+	.long	0x90000000,0x000083ae,0xff64122e,0xff620201
+	.long	0x000b6620,0xf22ef080,0xff8441ee,0xff84222e
+	.long	0xff5c61ff,0x00003830,0x812eff64,0xf22ed080
+	.long	0xff844e75,0xf22ed040,0xff90f22e,0x9000ff5c
+	.long	0xf23c8800,0x00000000,0xf22e48a4,0xff84f23c
+	.long	0x90000000,0x0000f22e,0xf040ff84,0x2f02322e
+	.long	0xff842401,0x02810000,0x7fff0242,0x80009280
+	.long	0x06810000,0x60000241,0x7fff8242,0x3d41ff84
+	.long	0x241ff22e,0xd040ff84,0x608af22e,0xd080ff90
+	.long	0xf22e9000,0xff5cf23c,0x88000000,0x0000f22e
+	.long	0x4824ff84,0xf201a800,0xf23c9000,0x00000000
+	.long	0x83aeff64,0xf2000098,0xf23c58b8,0x0001f292
+	.long	0xfe44f294,0xff14f22e,0xd040ff90,0x42810001
+	.long	0x0010f201,0x9000f23c,0x88000000,0x0000f22e
+	.long	0x48a4ff84,0xf23c9000,0x00000000,0xf2000498
+	.long	0xf23c58b8,0x0001f293,0xfe0c6000,0xfedc323b
+	.long	0x120a4efb,0x10064afc,0x0030fd7a,0x00720078
+	.long	0x0060fd7a,0x00660000,0x00000078,0x006c0078
+	.long	0x00600078,0x00660000,0x0000007e,0x007e006c
+	.long	0x0060007e,0x00660000,0x00000060,0x00600060
+	.long	0x00600060,0x00660000,0x0000fd7a,0x00720078
+	.long	0x0060fd7a,0x00660000,0x00000066,0x00660066
+	.long	0x00660066,0x00660000,0x000060ff,0x00000c7c
+	.long	0x60ff0000,0x0c7660ff,0x00000cf4,0x60ffffff
+	.long	0xf0ce60ff,0xfffff09c,0x60ffffff,0xf0f40200
+	.long	0x00300000,0x00406008,0x02000030,0x00000080
+	.long	0x2d40ff5c,0x4241122e,0xff4fe709,0x822eff4e
+	.long	0x6600024c,0x61ff0000,0x0a5cf22e,0xd080ff90
+	.long	0xf23c8800,0x00000000,0xf22e9000,0xff5cf22e
+	.long	0x4822ff84,0xf23c9000,0x00000000,0xf201a800
+	.long	0x83aeff64,0xf281003c,0x2f02f227,0xe001322e
+	.long	0xff5eec09,0x34170282,0x00007fff,0x9480b4bb
+	.long	0x14246c38,0xb4bb142a,0x6d0000b8,0x67000184
+	.long	0x32170241,0x80008242,0x3e81f21f,0xd080241f
+	.long	0x4e754e75,0x00007fff,0x0000407f,0x000043ff
+	.long	0x00000000,0x00003f81,0x00003c01,0x00ae0000
+	.long	0x1048ff64,0x122eff62,0x02010013,0x6624dffc
+	.long	0x0000000c,0x082e0003,0xff6456c1,0x202eff5c
+	.long	0x61ff0000,0x366a812e,0xff64f210,0xd080241f
+	.long	0x4e75122e,0xff5c0201,0x00c0661a,0x32170241
+	.long	0x80000482,0x00006000,0x02427fff,0x82423e81
+	.long	0xf21fd040,0x60bef22e,0xd080ff90,0x222eff5c
+	.long	0x02010030,0xf2019000,0xf22e4822,0xff84f23c
+	.long	0x90000000,0x0000dffc,0x0000000c,0xf227e001
+	.long	0x60ba08ee,0x0003ff66,0xdffc0000,0x000cf22e
+	.long	0xd080ff90,0xf23c9000,0x00000010,0xf23c8800
+	.long	0x00000000,0xf22e4822,0xff84f23c,0x90000000
+	.long	0x0000f201,0xa80083ae,0xff64122e,0xff620201
+	.long	0x000b6622,0xf22ef080,0xff8441ee,0xff84222e
+	.long	0xff5c61ff,0x000034ba,0x812eff64,0xf22ed080
+	.long	0xff84241f,0x4e75f22e,0xd040ff90,0x222eff5c
+	.long	0x020100c0,0x664ef22e,0x9000ff5c,0xf23c8800
+	.long	0x00000000,0xf22e48a2,0xff84f23c,0x90000000
+	.long	0x0000f22e,0xf040ff84,0x322eff84,0x24010281
+	.long	0x00007fff,0x02428000,0x92800681,0x00006000
+	.long	0x02417fff,0x82423d41,0xff84f22e,0xd040ff84
+	.long	0x6000ff82,0x222eff5c,0x02010030,0xf2019000
+	.long	0x60aa222e,0xff5c0201,0x00c06700,0xfe74222f
+	.long	0x00040c81,0x80000000,0x6600fe66,0x4aaf0008
+	.long	0x6600fe5e,0x082e0001,0xff666700,0xfe54f22e
+	.long	0xd040ff90,0x222eff5c,0x020100c0,0x00010010
+	.long	0xf2019000,0xf23c8800,0x00000000,0xf22e48a2
+	.long	0xff84f23c,0x90000000,0x0000f200,0x0018f200
+	.long	0x0498f200,0x0438f292,0xfeca6000,0xfe14323b
+	.long	0x120a4efb,0x10064afc,0x0030fdaa,0x00e4011c
+	.long	0x0060fdaa,0x00660000,0x000000bc,0x006c011c
+	.long	0x006000bc,0x00660000,0x00000130,0x0130010c
+	.long	0x00600130,0x00660000,0x00000060,0x00600060
+	.long	0x00600060,0x00660000,0x0000fdaa,0x00e4011c
+	.long	0x0060fdaa,0x00660000,0x00000066,0x00660066
+	.long	0x00660066,0x00660000,0x000060ff,0x0000097c
+	.long	0x60ff0000,0x09761028,0x00001229,0x0000b101
+	.long	0x6b000016,0x4a006b2e,0xf23c4400,0x00000000
+	.long	0x1d7c0004,0xff644e75,0x122eff5f,0x02010030
+	.long	0x0c010020,0x6710f23c,0x44000000,0x00001d7c
+	.long	0x0004ff64,0x4e75f23c,0x44008000,0x00001d7c
+	.long	0x000cff64,0x4e753d68,0x0000ff84,0x2d680004
+	.long	0xff882d68,0x0008ff8c,0x61ff0000,0x0828426e
+	.long	0xff9042ae,0xff9442ae,0xff986000,0xfcce3d69
+	.long	0x0000ff90,0x2d690004,0xff942d69,0x0008ff98
+	.long	0x61ff0000,0x08ac426e,0xff8442ae,0xff8842ae
+	.long	0xff8c6000,0xfca61028,0x00001229,0x0000b300
+	.long	0x6bff0000,0x094af228,0xd0800000,0x4a280000
+	.long	0x6a1c1d7c,0x000aff64,0x4e75f229,0xd0800000
+	.long	0x4a290000,0x6a081d7c,0x000aff64,0x4e751d7c
+	.long	0x0002ff64,0x4e750200,0x00300000,0x00406008
+	.long	0x02000030,0x00000080,0x2d40ff5c,0x4241122e
+	.long	0xff4fe709,0x822eff4e,0x6600024c,0x61ff0000
+	.long	0x0694f22e,0xd080ff90,0xf23c8800,0x00000000
+	.long	0xf22e9000,0xff5cf22e,0x4828ff84,0xf23c9000
+	.long	0x00000000,0xf201a800,0x83aeff64,0xf281003c
+	.long	0x2f02f227,0xe001322e,0xff5eec09,0x34170282
+	.long	0x00007fff,0x9480b4bb,0x14246c38,0xb4bb142a
+	.long	0x6d0000b8,0x67000184,0x32170241,0x80008242
+	.long	0x3e81f21f,0xd080241f,0x4e754e75,0x00007fff
+	.long	0x0000407f,0x000043ff,0x00000000,0x00003f81
+	.long	0x00003c01,0x00ae0000,0x1048ff64,0x122eff62
+	.long	0x02010013,0x6624dffc,0x0000000c,0x082e0003
+	.long	0xff6456c1,0x202eff5c,0x61ff0000,0x32a2812e
+	.long	0xff64f210,0xd080241f,0x4e75122e,0xff5c0201
+	.long	0x00c0661a,0x32170241,0x80000482,0x00006000
+	.long	0x02427fff,0x82423e81,0xf21fd040,0x60bef22e
+	.long	0xd080ff90,0x222eff5c,0x02010030,0xf2019000
+	.long	0xf22e4828,0xff84f23c,0x90000000,0x0000dffc
+	.long	0x0000000c,0xf227e001,0x60ba08ee,0x0003ff66
+	.long	0xdffc0000,0x000cf22e,0xd080ff90,0xf23c9000
+	.long	0x00000010,0xf23c8800,0x00000000,0xf22e4828
+	.long	0xff84f23c,0x90000000,0x0000f201,0xa80083ae
+	.long	0xff64122e,0xff620201,0x000b6622,0xf22ef080
+	.long	0xff8441ee,0xff84222e,0xff5c61ff,0x000030f2
+	.long	0x812eff64,0xf22ed080,0xff84241f,0x4e75f22e
+	.long	0xd040ff90,0x222eff5c,0x020100c0,0x664ef22e
+	.long	0x9000ff5c,0xf23c8800,0x00000000,0xf22e48a8
+	.long	0xff84f23c,0x90000000,0x0000f22e,0xf040ff84
+	.long	0x322eff84,0x24010281,0x00007fff,0x02428000
+	.long	0x92800681,0x00006000,0x02417fff,0x82423d41
+	.long	0xff84f22e,0xd040ff84,0x6000ff82,0x222eff5c
+	.long	0x02010030,0xf2019000,0x60aa222e,0xff5c0201
+	.long	0x00c06700,0xfe74222f,0x00040c81,0x80000000
+	.long	0x6600fe66,0x4aaf0008,0x6600fe5e,0x082e0001
+	.long	0xff666700,0xfe54f22e,0xd040ff90,0x222eff5c
+	.long	0x020100c0,0x00010010,0xf2019000,0xf23c8800
+	.long	0x00000000,0xf22e48a8,0xff84f23c,0x90000000
+	.long	0x0000f200,0x0018f200,0x0498f200,0x0438f292
+	.long	0xfeca6000,0xfe14323b,0x120a4efb,0x10064afc
+	.long	0x0030fdaa,0x00e2011a,0x0060fdaa,0x00660000
+	.long	0x000000ba,0x006c011a,0x006000ba,0x00660000
+	.long	0x00000130,0x0130010a,0x00600130,0x00660000
+	.long	0x00000060,0x00600060,0x00600060,0x00660000
+	.long	0x0000fdaa,0x00e2011a,0x0060fdaa,0x00660000
+	.long	0x00000066,0x00660066,0x00660066,0x00660000
+	.long	0x000060ff,0x000005b4,0x60ff0000,0x05ae1028
+	.long	0x00001229,0x0000b300,0x6a144a00,0x6b2ef23c
+	.long	0x44000000,0x00001d7c,0x0004ff64,0x4e75122e
+	.long	0xff5f0201,0x00300c01,0x00206710,0xf23c4400
+	.long	0x00000000,0x1d7c0004,0xff644e75,0xf23c4400
+	.long	0x80000000,0x1d7c000c,0xff644e75,0x3d680000
+	.long	0xff842d68,0x0004ff88,0x2d680008,0xff8c61ff
+	.long	0x00000462,0x426eff90,0x42aeff94,0x42aeff98
+	.long	0x6000fcd0,0x3d690000,0xff902d69,0x0004ff94
+	.long	0x2d690008,0xff9861ff,0x000004e6,0x426eff84
+	.long	0x42aeff88,0x42aeff8c,0x6000fca8,0x10280000
+	.long	0x12290000,0xb3006aff,0x00000584,0xf228d080
+	.long	0x0000f200,0x001af293,0x001e1d7c,0x000aff64
+	.long	0x4e75f229,0xd0800000,0x4a290000,0x6a081d7c
+	.long	0x000aff64,0x4e751d7c,0x0002ff64,0x4e750200
+	.long	0x00300000,0x00406008,0x02000030,0x00000080
+	.long	0x2d40ff5c,0x4241122e,0xff4e6600,0x02744a28
+	.long	0x00006bff,0x00000528,0x020000c0,0x6648f22e
+	.long	0x9000ff5c,0xf23c8800,0x00000000,0xf2104804
+	.long	0xf201a800,0x83aeff64,0x4e754a28,0x00006bff
+	.long	0x000004fc,0x020000c0,0x661c3d68,0x0000ff84
+	.long	0x2d680004,0xff882d68,0x0008ff8c,0x61ff0000
+	.long	0x03ae6000,0x003e0c00,0x00406600,0x00843d68
+	.long	0x0000ff84,0x2d680004,0xff882d68,0x0008ff8c
+	.long	0x61ff0000,0x038a0c80,0x0000007e,0x67000098
+	.long	0x6e00009e,0x0c80ffff,0xff806700,0x01a46d00
+	.long	0x0120f23c,0x88000000,0x0000f22e,0x9000ff5c
+	.long	0xf22e4804,0xff84f201,0xa800f23c,0x90000000
+	.long	0x000083ae,0xff642f02,0xf22ef080,0xff84322e
+	.long	0xff842401,0x02810000,0x7fff9280,0x02428000
+	.long	0x84413d42,0xff84241f,0xf22ed080,0xff844e75
+	.long	0x3d680000,0xff842d68,0x0004ff88,0x2d680008
+	.long	0xff8c61ff,0x00000308,0x0c800000,0x03fe6700
+	.long	0x00166e1c,0x0c80ffff,0xfc006700,0x01246d00
+	.long	0x00a06000,0xff7e082e,0x0000ff85,0x6600ff74
+	.long	0x08ee0003,0xff66f23c,0x90000000,0x0010f23c
+	.long	0x88000000,0x0000f22e,0x4804ff84,0xf201a800
+	.long	0xf23c9000,0x00000000,0x83aeff64,0x122eff62
+	.long	0x0201000b,0x6620f22e,0xf080ff84,0x41eeff84
+	.long	0x222eff5c,0x61ff0000,0x2d28812e,0xff64f22e
+	.long	0xd080ff84,0x4e752d6e,0xff88ff94,0x2d6eff8c
+	.long	0xff98322e,0xff842f02,0x24010281,0x00007fff
+	.long	0x02428000,0x92800681,0x00006000,0x02417fff
+	.long	0x82423d41,0xff90f22e,0xd040ff90,0x241f60a6
+	.long	0xf23c8800,0x00000000,0xf22e9000,0xff5cf22e
+	.long	0x4804ff84,0xf23c9000,0x00000000,0xf201a800
+	.long	0x83aeff64,0x00ae0000,0x1048ff64,0x122eff62
+	.long	0x02010013,0x661c082e,0x0003ff64,0x56c1202e
+	.long	0xff5c61ff,0x00002d98,0x812eff64,0xf210d080
+	.long	0x4e752f02,0x322eff84,0x24010281,0x00007fff
+	.long	0x02428000,0x92800481,0x00006000,0x02417fff
+	.long	0x82423d41,0xff84f22e,0xd040ff84,0x241f60b6
+	.long	0x082e0000,0xff856600,0xff78f23c,0x88000000
+	.long	0x0000f22e,0x9000ff5c,0xf22e4804,0xff84f201
+	.long	0xa800f23c,0x90000000,0x000083ae,0xff64f200
+	.long	0x0080f23c,0x58b80001,0xf293ff6a,0x6000fe48
+	.long	0x0c010004,0x6700fdb4,0x0c010001,0x67160c01
+	.long	0x00026736,0x0c010005,0x67ff0000,0x023660ff
+	.long	0x00000244,0x4a280000,0x6b10f23c,0x44000000
+	.long	0x00001d7c,0x0004ff64,0x4e75f23c,0x44008000
+	.long	0x00001d7c,0x000cff64,0x4e754a28,0x00006bff
+	.long	0x0000026c,0xf228d080,0x00001d7c,0x0002ff64
+	.long	0x4e752d68,0x0004ff88,0x2d690004,0xff942d68
+	.long	0x0008ff8c,0x2d690008,0xff983028,0x00003229
+	.long	0x00003d40,0xff843d41,0xff900240,0x7fff0241
+	.long	0x7fff3d40,0xff543d41,0xff56b041,0x6cff0000
+	.long	0x005c61ff,0x0000015a,0x2f000c2e,0x0004ff4e
+	.long	0x661041ee,0xff8461ff,0x00002940,0x44403d40
+	.long	0xff54302e,0xff560440,0x0042b06e,0xff546c1a
+	.long	0x302eff54,0xd06f0002,0x322eff84,0x02418000
+	.long	0x80413d40,0xff84201f,0x4e75026e,0x8000ff84
+	.long	0x08ee0000,0xff85201f,0x4e7561ff,0x00000056
+	.long	0x2f000c2e,0x0004ff4f,0x661041ee,0xff9061ff
+	.long	0x000028e8,0x44403d40,0xff56302e,0xff540440
+	.long	0x0042b06e,0xff566c1a,0x302eff56,0xd06f0002
+	.long	0x322eff90,0x02418000,0x80413d40,0xff90201f
+	.long	0x4e75026e,0x8000ff90,0x08ee0000,0xff91201f
+	.long	0x4e75322e,0xff843001,0x02810000,0x7fff0240
+	.long	0x80000040,0x3fff3d40,0xff840c2e,0x0004ff4e
+	.long	0x670a203c,0x00003fff,0x90814e75,0x41eeff84
+	.long	0x61ff0000,0x28764480,0x220060e6,0x0c2e0004
+	.long	0xff4e673a,0x322eff84,0x02810000,0x7fff026e
+	.long	0x8000ff84,0x08010000,0x6712006e,0x3fffff84
+	.long	0x203c0000,0x3fff9081,0xe2804e75,0x006e3ffe
+	.long	0xff84203c,0x00003ffe,0x9081e280,0x4e7541ee
+	.long	0xff8461ff,0x00002824,0x08000000,0x6710006e
+	.long	0x3fffff84,0x06800000,0x3fffe280,0x4e75006e
+	.long	0x3ffeff84,0x06800000,0x3ffee280,0x4e75322e
+	.long	0xff903001,0x02810000,0x7fff0240,0x80000040
+	.long	0x3fff3d40,0xff900c2e,0x0004ff4f,0x670a203c
+	.long	0x00003fff,0x90814e75,0x41eeff90,0x61ff0000
+	.long	0x27ca4480,0x220060e6,0x0c2e0005,0xff4f6732
+	.long	0x0c2e0003,0xff4f673e,0x0c2e0003,0xff4e6714
+	.long	0x08ee0006,0xff7000ae,0x01004080,0xff6441ee
+	.long	0xff6c6042,0x00ae0100,0x0000ff64,0x41eeff6c
+	.long	0x603400ae,0x01004080,0xff6408ee,0x0006ff7c
+	.long	0x41eeff78,0x602041ee,0xff780c2e,0x0005ff4e
+	.long	0x66ff0000,0x000c00ae,0x00004080,0xff6400ae
+	.long	0x01000000,0xff640828,0x00070000,0x670800ae
+	.long	0x08000000,0xff64f210,0xd0804e75,0x00ae0100
+	.long	0x2080ff64,0xf23bd080,0x01700000,0x00084e75
+	.long	0x7fff0000,0xffffffff,0xffffffff,0x2d40ff54
+	.long	0x302eff42,0x4281122e,0xff64e099,0xf2018800
+	.long	0x323b0206,0x4efb1002,0x02340040,0x02f8030c
+	.long	0x03200334,0x0348035c,0x03660352,0x033e032a
+	.long	0x03160302,0x004a0238,0x023a0276,0x0054009e
+	.long	0x0102014c,0x01b201fc,0x021801d8,0x018c0128
+	.long	0x00de007a,0x02b6025a,0xf2810006,0x6000032a
+	.long	0x4e75f28e,0x00066000,0x03204e75,0xf2920022
+	.long	0x082e0000,0xff646700,0x031000ae,0x00008080
+	.long	0xff64082e,0x0007ff62,0x6600032c,0x600002fa
+	.long	0x4e75f29d,0x00066000,0x02f0082e,0x0000ff64
+	.long	0x671200ae,0x00008080,0xff64082e,0x0007ff62
+	.long	0x66000304,0x4e75f293,0x0022082e,0x0000ff64
+	.long	0x670002c6,0x00ae0000,0x8080ff64,0x082e0007
+	.long	0xff626600,0x02e26000,0x02b0082e,0x0000ff64
+	.long	0x671200ae,0x00008080,0xff64082e,0x0007ff62
+	.long	0x660002c4,0x4e75f29c,0x00066000,0x028c082e
+	.long	0x0000ff64,0x671200ae,0x00008080,0xff64082e
+	.long	0x0007ff62,0x660002a0,0x4e75f294,0x0022082e
+	.long	0x0000ff64,0x67000262,0x00ae0000,0x8080ff64
+	.long	0x082e0007,0xff626600,0x027e6000,0x024c4e75
+	.long	0xf29b0006,0x60000242,0x082e0000,0xff646712
+	.long	0x00ae0000,0x8080ff64,0x082e0007,0xff626600
+	.long	0x02564e75,0xf2950022,0x082e0000,0xff646700
+	.long	0x021800ae,0x00008080,0xff64082e,0x0007ff62
+	.long	0x66000234,0x60000202,0x082e0000,0xff646712
+	.long	0x00ae0000,0x8080ff64,0x082e0007,0xff626600
+	.long	0x02164e75,0xf29a0006,0x600001de,0x082e0000
+	.long	0xff646700,0x001400ae,0x00008080,0xff64082e
+	.long	0x0007ff62,0x660001f0,0x4e75f296,0x0022082e
+	.long	0x0000ff64,0x670001b2,0x00ae0000,0x8080ff64
+	.long	0x082e0007,0xff626600,0x01ce6000,0x019c4e75
+	.long	0xf2990006,0x60000192,0x082e0000,0xff646712
+	.long	0x00ae0000,0x8080ff64,0x082e0007,0xff626600
+	.long	0x01a64e75,0xf2970018,0x00ae0000,0x8080ff64
+	.long	0x082e0007,0xff626600,0x018e6000,0x015c4e75
+	.long	0xf2980006,0x60000152,0x00ae0000,0x8080ff64
+	.long	0x082e0007,0xff626600,0x016e4e75,0x6000013a
+	.long	0x4e75082e,0x0000ff64,0x6700012e,0x00ae0000
+	.long	0x8080ff64,0x082e0007,0xff626600,0x014a6000
+	.long	0x0118082e,0x0000ff64,0x671200ae,0x00008080
+	.long	0xff64082e,0x0007ff62,0x6600012c,0x4e75f291
+	.long	0x0022082e,0x0000ff64,0x670000ee,0x00ae0000
+	.long	0x8080ff64,0x082e0007,0xff626600,0x010a6000
+	.long	0x00d8082e,0x0000ff64,0x671200ae,0x00008080
+	.long	0xff64082e,0x0007ff62,0x660000ec,0x4e75f29e
+	.long	0x0022082e,0x0000ff64,0x670000ae,0x00ae0000
+	.long	0x8080ff64,0x082e0007,0xff626600,0x00ca6000
+	.long	0x0098082e,0x0000ff64,0x67000014,0x00ae0000
+	.long	0x8080ff64,0x082e0007,0xff626600,0x00aa4e75
+	.long	0xf2820006,0x60000072,0x4e75f28d,0x00066000
+	.long	0x00684e75,0xf2830006,0x6000005e,0x4e75f28c
+	.long	0x00066000,0x00544e75,0xf2840006,0x6000004a
+	.long	0x4e75f28b,0x00066000,0x00404e75,0xf2850006
+	.long	0x60000036,0x4e75f28a,0x00066000,0x002c4e75
+	.long	0xf2860006,0x60000022,0x4e75f289,0x00066000
+	.long	0x00184e75,0xf2870006,0x6000000e,0x4e75f288
+	.long	0x00066000,0x00044e75,0x122eff41,0x02410007
+	.long	0x61ff0000,0x1d665340,0x61ff0000,0x1dd00c40
+	.long	0xffff6602,0x4e75202e,0xff54d0ae,0xff685880
+	.long	0x2d400006,0x4e751d7c,0x0002ff4a,0x4e75302e
+	.long	0xff424281,0x122eff64,0xe099f201,0x8800323b
+	.long	0x02064efb,0x1002021e,0x004002e4,0x02f002fc
+	.long	0x03080314,0x03200326,0x031a030e,0x030202f6
+	.long	0x02ea0046,0x02200224,0x0260004c,0x009200f8
+	.long	0x013e01a4,0x01ea0202,0x01c4017e,0x011800d2
+	.long	0x006c02a2,0x0240f281,0x02ea4e75,0xf28e02e4
+	.long	0x4e75f292,0x02de082e,0x0000ff64,0x671200ae
+	.long	0x00008080,0xff64082e,0x0007ff62,0x660002cc
+	.long	0x4e75f29d,0x00044e75,0x082e0000,0xff646700
+	.long	0x02b200ae,0x00008080,0xff64082e,0x0007ff62
+	.long	0x660002a8,0x6000029c,0xf293001e,0x082e0000
+	.long	0xff646712,0x00ae0000,0x8080ff64,0x082e0007
+	.long	0xff626600,0x02864e75,0x082e0000,0xff646700
+	.long	0x027200ae,0x00008080,0xff64082e,0x0007ff62
+	.long	0x66000268,0x6000025c,0xf29c0004,0x4e75082e
+	.long	0x0000ff64,0x6700024c,0x00ae0000,0x8080ff64
+	.long	0x082e0007,0xff626600,0x02426000,0x0236f294
+	.long	0x0232082e,0x0000ff64,0x671200ae,0x00008080
+	.long	0xff64082e,0x0007ff62,0x66000220,0x4e75f29b
+	.long	0x00044e75,0x082e0000,0xff646700,0x020600ae
+	.long	0x00008080,0xff64082e,0x0007ff62,0x660001fc
+	.long	0x600001f0,0xf295001e,0x082e0000,0xff646712
+	.long	0x00ae0000,0x8080ff64,0x082e0007,0xff626600
+	.long	0x01da4e75,0x082e0000,0xff646700,0x01c600ae
+	.long	0x00008080,0xff64082e,0x0007ff62,0x660001bc
+	.long	0x600001b0,0xf29a0004,0x4e75082e,0x0000ff64
+	.long	0x670001a0,0x00ae0000,0x8080ff64,0x082e0007
+	.long	0xff626600,0x01966000,0x018af296,0x0186082e
+	.long	0x0000ff64,0x671200ae,0x00008080,0xff64082e
+	.long	0x0007ff62,0x66000174,0x4e75f299,0x00044e75
+	.long	0x082e0000,0xff646700,0x015a00ae,0x00008080
+	.long	0xff64082e,0x0007ff62,0x66000150,0x60000144
+	.long	0xf2970140,0x00ae0000,0x8080ff64,0x082e0007
+	.long	0xff626600,0x01364e75,0xf2980004,0x4e7500ae
+	.long	0x00008080,0xff64082e,0x0007ff62,0x6600011c
+	.long	0x60000110,0x4e756000,0x010a082e,0x0000ff64
+	.long	0x671200ae,0x00008080,0xff64082e,0x0007ff62
+	.long	0x660000f8,0x4e75082e,0x0000ff64,0x670000e4
+	.long	0x00ae0000,0x8080ff64,0x082e0007,0xff626600
+	.long	0x00da6000,0x00cef291,0x0020082e,0x0000ff64
+	.long	0x67000014,0x00ae0000,0x8080ff64,0x082e0007
+	.long	0xff626600,0x00b64e75,0x082e0000,0xff646700
+	.long	0x00a200ae,0x00008080,0xff64082e,0x0007ff62
+	.long	0x66000098,0x6000008c,0xf29e0020,0x082e0000
+	.long	0xff646700,0x001400ae,0x00008080,0xff64082e
+	.long	0x0007ff62,0x66000074,0x4e75082e,0x0000ff64
+	.long	0x67000060,0x00ae0000,0x8080ff64,0x082e0007
+	.long	0xff626600,0x00566000,0x004af282,0x00464e75
+	.long	0xf28d0040,0x4e75f283,0x003a4e75,0xf28c0034
+	.long	0x4e75f284,0x002e4e75,0xf28b0028,0x4e75f285
+	.long	0x00224e75,0xf28a001c,0x4e75f286,0x00164e75
+	.long	0xf2890010,0x4e75f287,0x000a4e75,0xf2880004
+	.long	0x4e751d7c,0x0001ff4a,0x4e751d7c,0x0002ff4a
+	.long	0x4e75302e,0xff424281,0x122eff64,0xe099f201
+	.long	0x8800323b,0x02064efb,0x10020208,0x004002ac
+	.long	0x02cc02ec,0x030c032c,0x034c035c,0x033c031c
+	.long	0x02fc02dc,0x02bc0050,0x020e0214,0x02440060
+	.long	0x00a400fa,0x013e0194,0x01d801f0,0x01b60172
+	.long	0x011c00d8,0x00820278,0x022cf281,0x00084200
+	.long	0x6000032e,0x50c06000,0x0328f28e,0x00084200
+	.long	0x6000031e,0x50c06000,0x0318f292,0x001a4200
+	.long	0x082e0000,0xff646700,0x030800ae,0x00008080
+	.long	0xff646000,0x02f250c0,0x600002f6,0xf29d0008
+	.long	0x42006000,0x02ec50c0,0x082e0000,0xff646700
+	.long	0x02e000ae,0x00008080,0xff646000,0x02caf293
+	.long	0x001a4200,0x082e0000,0xff646700,0x02c400ae
+	.long	0x00008080,0xff646000,0x02ae50c0,0x082e0000
+	.long	0xff646700,0x02ac00ae,0x00008080,0xff646000
+	.long	0x0296f29c,0x00084200,0x60000296,0x50c0082e
+	.long	0x0000ff64,0x6700028a,0x00ae0000,0x8080ff64
+	.long	0x60000274,0xf294001a,0x4200082e,0x0000ff64
+	.long	0x6700026e,0x00ae0000,0x8080ff64,0x60000258
+	.long	0x50c06000,0x025cf29b,0x00084200,0x60000252
+	.long	0x50c0082e,0x0000ff64,0x67000246,0x00ae0000
+	.long	0x8080ff64,0x60000230,0xf295001a,0x4200082e
+	.long	0x0000ff64,0x6700022a,0x00ae0000,0x8080ff64
+	.long	0x60000214,0x50c0082e,0x0000ff64,0x67000212
+	.long	0x00ae0000,0x8080ff64,0x600001fc,0xf29a0008
+	.long	0x42006000,0x01fc50c0,0x082e0000,0xff646700
+	.long	0x01f000ae,0x00008080,0xff646000,0x01daf296
+	.long	0x001a4200,0x082e0000,0xff646700,0x01d400ae
+	.long	0x00008080,0xff646000,0x01be50c0,0x600001c2
+	.long	0xf2990008,0x42006000,0x01b850c0,0x082e0000
+	.long	0xff646700,0x01ac00ae,0x00008080,0xff646000
+	.long	0x0196f297,0x00104200,0x00ae0000,0x8080ff64
+	.long	0x60000184,0x50c06000,0x0188f298,0x00084200
+	.long	0x6000017e,0x50c000ae,0x00008080,0xff646000
+	.long	0x01664200,0x6000016a,0x50c06000,0x01644200
+	.long	0x082e0000,0xff646700,0x015800ae,0x00008080
+	.long	0xff646000,0x014250c0,0x082e0000,0xff646700
+	.long	0x014000ae,0x00008080,0xff646000,0x012af291
+	.long	0x001a4200,0x082e0000,0xff646700,0x012400ae
+	.long	0x00008080,0xff646000,0x010e50c0,0x082e0000
+	.long	0xff646700,0x010c00ae,0x00008080,0xff646000
+	.long	0x00f6f29e,0x001a4200,0x082e0000,0xff646700
+	.long	0x00f000ae,0x00008080,0xff646000,0x00da50c0
+	.long	0x082e0000,0xff646700,0x00d800ae,0x00008080
+	.long	0xff646000,0x00c2f282,0x00084200,0x600000c2
+	.long	0x50c06000,0x00bcf28d,0x00084200,0x600000b2
+	.long	0x50c06000,0x00acf283,0x00084200,0x600000a2
+	.long	0x50c06000,0x009cf28c,0x00084200,0x60000092
+	.long	0x50c06000,0x008cf284,0x00084200,0x60000082
+	.long	0x50c06000,0x007cf28b,0x00084200,0x60000072
+	.long	0x50c06000,0x006cf285,0x00084200,0x60000062
+	.long	0x50c06000,0x005cf28a,0x00084200,0x60000052
+	.long	0x50c06000,0x004cf286,0x00084200,0x60000042
+	.long	0x50c06000,0x003cf289,0x00084200,0x60000032
+	.long	0x50c06000,0x002cf287,0x00084200,0x60000022
+	.long	0x50c06000,0x001cf288,0x00084200,0x60000012
+	.long	0x50c06000,0x000c082e,0x0007ff62,0x66000088
+	.long	0x2040122e,0xff412001,0x02010038,0x66102200
+	.long	0x02410007,0x200861ff,0x0000172a,0x4e750c01
+	.long	0x0018671a,0x0c010020,0x67382008,0x206e000c
+	.long	0x61ffffff,0x5a7c4a81,0x66000054,0x4e752008
+	.long	0x206e000c,0x61ffffff,0x5a684a81,0x66000040
+	.long	0x122eff41,0x02410007,0x700161ff,0x00001722
+	.long	0x4e752008,0x206e000c,0x61ffffff,0x5a444a81
+	.long	0x6600001c,0x122eff41,0x02410007,0x700161ff
+	.long	0x0000174e,0x4e751d7c,0x0002ff4a,0x4e753d7c
+	.long	0x00a1000a,0x60ff0000,0x2b86122e,0xff430241
+	.long	0x0070e809,0x61ff0000,0x15b20280,0x000000ff
+	.long	0x2f00103b,0x09200148,0x2f0061ff,0x00000340
+	.long	0x201f221f,0x67000134,0x082e0005,0xff426700
+	.long	0x00b8082e,0x0004ff42,0x6600001a,0x123b1120
+	.long	0x021e082e,0x00050004,0x670a0c2e,0x0008ff4a
+	.long	0x66024e75,0x22489fc0,0x41d74a01,0x6a0c20ee
+	.long	0xffdc20ee,0xffe020ee,0xffe4e309,0x6a0c20ee
+	.long	0xffe820ee,0xffec20ee,0xfff0e309,0x6a0af210
+	.long	0xf020d1fc,0x0000000c,0xe3096a0a,0xf210f010
+	.long	0xd1fc0000,0x000ce309,0x6a0af210,0xf008d1fc
+	.long	0x0000000c,0xe3096a0a,0xf210f004,0xd1fc0000
+	.long	0x000ce309,0x6a0af210,0xf002d1fc,0x0000000c
+	.long	0xe3096a0a,0xf210f001,0xd1fc0000,0x000c2d49
+	.long	0xff5441d7,0x2f0061ff,0xffff58b2,0x201fdfc0
+	.long	0x4a816600,0x071e4e75,0x2d48ff54,0x9fc043d7
+	.long	0x2f012f00,0x61ffffff,0x587e201f,0x4a816600
+	.long	0x070e221f,0x41d74a01,0x6a0c2d58,0xffdc2d58
+	.long	0xffe02d58,0xffe4e309,0x6a0c2d58,0xffe82d58
+	.long	0xffec2d58,0xfff0e309,0x6a04f218,0xd020e309
+	.long	0x6a04f218,0xd010e309,0x6a04f218,0xd008e309
+	.long	0x6a04f218,0xd004e309,0x6a04f218,0xd002e309
+	.long	0x6a04f218,0xd001dfc0,0x4e754e75,0x000c0c18
+	.long	0x0c181824,0x0c181824,0x18242430,0x0c181824
+	.long	0x18242430,0x18242430,0x2430303c,0x0c181824
+	.long	0x18242430,0x18242430,0x2430303c,0x18242430
+	.long	0x2430303c,0x2430303c,0x303c3c48,0x0c181824
+	.long	0x18242430,0x18242430,0x2430303c,0x18242430
+	.long	0x2430303c,0x2430303c,0x303c3c48,0x18242430
+	.long	0x2430303c,0x2430303c,0x303c3c48,0x2430303c
+	.long	0x303c3c48,0x303c3c48,0x3c484854,0x0c181824
+	.long	0x18242430,0x18242430,0x2430303c,0x18242430
+	.long	0x2430303c,0x2430303c,0x303c3c48,0x18242430
+	.long	0x2430303c,0x2430303c,0x303c3c48,0x2430303c
+	.long	0x303c3c48,0x303c3c48,0x3c484854,0x18242430
+	.long	0x2430303c,0x2430303c,0x303c3c48,0x2430303c
+	.long	0x303c3c48,0x303c3c48,0x3c484854,0x2430303c
+	.long	0x303c3c48,0x303c3c48,0x3c484854,0x303c3c48
+	.long	0x3c484854,0x3c484854,0x48545460,0x008040c0
+	.long	0x20a060e0,0x109050d0,0x30b070f0,0x088848c8
+	.long	0x28a868e8,0x189858d8,0x38b878f8,0x048444c4
+	.long	0x24a464e4,0x149454d4,0x34b474f4,0x0c8c4ccc
+	.long	0x2cac6cec,0x1c9c5cdc,0x3cbc7cfc,0x028242c2
+	.long	0x22a262e2,0x129252d2,0x32b272f2,0x0a8a4aca
+	.long	0x2aaa6aea,0x1a9a5ada,0x3aba7afa,0x068646c6
+	.long	0x26a666e6,0x169656d6,0x36b676f6,0x0e8e4ece
+	.long	0x2eae6eee,0x1e9e5ede,0x3ebe7efe,0x018141c1
+	.long	0x21a161e1,0x119151d1,0x31b171f1,0x098949c9
+	.long	0x29a969e9,0x199959d9,0x39b979f9,0x058545c5
+	.long	0x25a565e5,0x159555d5,0x35b575f5,0x0d8d4dcd
+	.long	0x2dad6ded,0x1d9d5ddd,0x3dbd7dfd,0x038343c3
+	.long	0x23a363e3,0x139353d3,0x33b373f3,0x0b8b4bcb
+	.long	0x2bab6beb,0x1b9b5bdb,0x3bbb7bfb,0x078747c7
+	.long	0x27a767e7,0x179757d7,0x37b777f7,0x0f8f4fcf
+	.long	0x2faf6fef,0x1f9f5fdf,0x3fbf7fff,0x2040302e
+	.long	0xff403200,0x0240003f,0x02810000,0x0007303b
+	.long	0x020a4efb,0x00064afc,0x00400000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000080,0x0086008c
+	.long	0x00900094,0x0098009c,0x00a000a6,0x00b600c6
+	.long	0x00d200de,0x00ea00f6,0x01020118,0x01260134
+	.long	0x013e0148,0x0152015c,0x0166017a,0x019801b6
+	.long	0x01d201ee,0x020a0226,0x02420260,0x02600260
+	.long	0x02600260,0x02600260,0x026002c0,0x02da02f4
+	.long	0x03140000,0x00000000,0x0000206e,0xffa44e75
+	.long	0x206effa8,0x4e75204a,0x4e75204b,0x4e75204c
+	.long	0x4e75204d,0x4e752056,0x4e75206e,0xffd84e75
+	.long	0x202effa4,0x2200d288,0x2d41ffa4,0x20404e75
+	.long	0x202effa8,0x2200d288,0x2d41ffa8,0x20404e75
+	.long	0x200a2200,0xd2882441,0x20404e75,0x200b2200
+	.long	0xd2882641,0x20404e75,0x200c2200,0xd2882841
+	.long	0x20404e75,0x200d2200,0xd2882a41,0x20404e75
+	.long	0x20162200,0xd2882c81,0x20404e75,0x1d7c0004
+	.long	0xff4a202e,0xffd82200,0xd2882d41,0xffd82040
+	.long	0x4e75202e,0xffa49088,0x2d40ffa4,0x20404e75
+	.long	0x202effa8,0x90882d40,0xffa82040,0x4e75200a
+	.long	0x90882440,0x20404e75,0x200b9088,0x26402040
+	.long	0x4e75200c,0x90882840,0x20404e75,0x200d9088
+	.long	0x2a402040,0x4e752016,0x90882c80,0x20404e75
+	.long	0x1d7c0008,0xff4a202e,0xffd89088,0x2d40ffd8
+	.long	0x20404e75,0x206eff44,0x54aeff44,0x61ffffff
+	.long	0x54a24a81,0x66ffffff,0x68203040,0xd1eeffa4
+	.long	0x4e75206e,0xff4454ae,0xff4461ff,0xffff5484
+	.long	0x4a8166ff,0xffff6802,0x3040d1ee,0xffa84e75
+	.long	0x206eff44,0x54aeff44,0x61ffffff,0x54664a81
+	.long	0x66ffffff,0x67e43040,0xd1ca4e75,0x206eff44
+	.long	0x54aeff44,0x61ffffff,0x544a4a81,0x66ffffff
+	.long	0x67c83040,0xd1cb4e75,0x206eff44,0x54aeff44
+	.long	0x61ffffff,0x542e4a81,0x66ffffff,0x67ac3040
+	.long	0xd1cc4e75,0x206eff44,0x54aeff44,0x61ffffff
+	.long	0x54124a81,0x66ffffff,0x67903040,0xd1cd4e75
+	.long	0x206eff44,0x54aeff44,0x61ffffff,0x53f64a81
+	.long	0x66ffffff,0x67743040,0xd1d64e75,0x206eff44
+	.long	0x54aeff44,0x61ffffff,0x53da4a81,0x66ffffff
+	.long	0x67583040,0xd1eeffd8,0x4e755081,0x61ff0000
+	.long	0x0fda2f00,0x206eff44,0x54aeff44,0x61ffffff
+	.long	0x53b24a81,0x66ffffff,0x6730205f,0x08000008
+	.long	0x660000e6,0x2d40ff54,0x2200e959,0x0241000f
+	.long	0x61ff0000,0x0fa62f02,0x242eff54,0x0802000b
+	.long	0x660248c0,0x2202ef59,0x02810000,0x0003e3a8
+	.long	0x49c2d082,0xd1c0241f,0x4e75206e,0xff4454ae
+	.long	0xff4461ff,0xffff535c,0x4a8166ff,0xffff66da
+	.long	0x30404e75,0x206eff44,0x58aeff44,0x61ffffff
+	.long	0x53584a81,0x66ffffff,0x66c02040,0x4e75206e
+	.long	0xff4454ae,0xff4461ff,0xffff5328,0x4a8166ff
+	.long	0xffff66a6,0x3040d1ee,0xff445588,0x4e75206e
+	.long	0xff4454ae,0xff4461ff,0xffff5308,0x4a8166ff
+	.long	0xffff6686,0x206eff44,0x55880800,0x00086600
+	.long	0x00382d40,0xff542200,0xe9590241,0x000f61ff
+	.long	0x00000ef8,0x2f02242e,0xff540802,0x000b6602
+	.long	0x48c02202,0xef590281,0x00000003,0xe3a849c2
+	.long	0xd082d1c0,0x241f4e75,0x08000006,0x670c48e7
+	.long	0x3c002a00,0x26084282,0x60282d40,0xff54e9c0
+	.long	0x140461ff,0x00000eb4,0x48e73c00,0x24002a2e
+	.long	0xff542608,0x0805000b,0x660248c2,0xe9c50542
+	.long	0xe1aa0805,0x00076702,0x4283e9c5,0x06820c00
+	.long	0x00026d34,0x6718206e,0xff4458ae,0xff4461ff
+	.long	0xffff5276,0x4a8166ff,0x000000b0,0x6018206e
+	.long	0xff4454ae,0xff4461ff,0xffff5248,0x4a8166ff
+	.long	0x00000098,0x48c0d680,0xe9c50782,0x6700006e
+	.long	0x0c000002,0x6d346718,0x206eff44,0x58aeff44
+	.long	0x61ffffff,0x52344a81,0x66ff0000,0x006e601c
+	.long	0x206eff44,0x54aeff44,0x61ffffff,0x52064a81
+	.long	0x66ff0000,0x005648c0,0x60024280,0x28000805
+	.long	0x00026714,0x204361ff,0xffff5240,0x4a816600
+	.long	0x0028d082,0xd0846018,0xd6822043,0x61ffffff
+	.long	0x522a4a81,0x66000012,0xd0846004,0xd6822003
+	.long	0x20404cdf,0x003c4e75,0x20434cdf,0x003c303c
+	.long	0x010160ff,0xffff6582,0x4cdf003c,0x60ffffff
+	.long	0x652861ff,0x000023c6,0x303c00e1,0x600a61ff
+	.long	0x000023ba,0x303c0161,0x206eff54,0x60ffffff
+	.long	0x6558102e,0xff420c00,0x009c6700,0x00b20c00
+	.long	0x00986700,0x00740c00,0x00946736,0x206eff44
+	.long	0x58aeff44,0x61ffffff,0x51704a81,0x66ffffff
+	.long	0x64d82d40,0xff64206e,0xff4458ae,0xff4461ff
+	.long	0xffff5156,0x4a8166ff,0xffff64be,0x2d40ff68
+	.long	0x4e75206e,0xff4458ae,0xff4461ff,0xffff513a
+	.long	0x4a8166ff,0xffff64a2,0x2d40ff60,0x206eff44
+	.long	0x58aeff44,0x61ffffff,0x51204a81,0x66ffffff
+	.long	0x64882d40,0xff684e75,0x206eff44,0x58aeff44
+	.long	0x61ffffff,0x51044a81,0x66ffffff,0x646c2d40
+	.long	0xff60206e,0xff4458ae,0xff4461ff,0xffff50ea
+	.long	0x4a8166ff,0xffff6452,0x2d40ff64,0x4e75206e
+	.long	0xff4458ae,0xff4461ff,0xffff50ce,0x4a8166ff
+	.long	0xffff6436,0x2d40ff60,0x206eff44,0x58aeff44
+	.long	0x61ffffff,0x50b44a81,0x66ffffff,0x641c2d40
+	.long	0xff64206e,0xff4458ae,0xff4461ff,0xffff509a
+	.long	0x4a8166ff,0xffff6402,0x2d40ff68,0x4e752040
+	.long	0x102eff41,0x22000240,0x00380281,0x00000007
+	.long	0x0c000018,0x67240c00,0x0020672c,0x80410c00
+	.long	0x003c6706,0x206e000c,0x4e751d7c,0x0080ff4a
+	.long	0x41f60162,0xff680004,0x4e752008,0x61ff0000
+	.long	0x0d70206e,0x000c4e75,0x200861ff,0x00000db2
+	.long	0x206e000c,0x0c00000c,0x67024e75,0x51882d48
+	.long	0x000c4e75,0x102eff41,0x22000240,0x00380281
+	.long	0x00000007,0x0c000018,0x670e0c00,0x00206700
+	.long	0x0076206e,0x000c4e75,0x323b120e,0x206e000c
+	.long	0x4efb1006,0x4afc0008,0x0010001a,0x0024002c
+	.long	0x0034003c,0x0044004e,0x06ae0000,0x000cffa4
+	.long	0x4e7506ae,0x0000000c,0xffa84e75,0xd5fc0000
+	.long	0x000c4e75,0xd7fc0000,0x000c4e75,0xd9fc0000
+	.long	0x000c4e75,0xdbfc0000,0x000c4e75,0x06ae0000
+	.long	0x000cffd4,0x4e751d7c,0x0004ff4a,0x06ae0000
+	.long	0x000cffd8,0x4e75323b,0x1214206e,0x000c5188
+	.long	0x51ae000c,0x4efb1006,0x4afc0008,0x00100016
+	.long	0x001c0020,0x00240028,0x002c0032,0x2d48ffa4
+	.long	0x4e752d48,0xffa84e75,0x24484e75,0x26484e75
+	.long	0x28484e75,0x2a484e75,0x2d48ffd4,0x4e752d48
+	.long	0xffd81d7c,0x0008ff4a,0x4e75082e,0x0006ff42
+	.long	0x6664102e,0xff430800,0x0005672c,0x08000004
+	.long	0x670a0240,0x007f0c40,0x0038661c,0xe9ee0183
+	.long	0xff4261ff,0x00000d6a,0x61ff0000,0x12060c00
+	.long	0x00066722,0x1d40ff4f,0xe9ee00c3,0xff4261ff
+	.long	0x00000cbe,0x61ff0000,0x11ea0c00,0x0006670e
+	.long	0x1d40ff4e,0x4e7561ff,0x00001148,0x60d661ff
+	.long	0x00001140,0x60ea302e,0xff420800,0x0005672c
+	.long	0x08000004,0x670a0240,0x007f0c40,0x0038661c
+	.long	0xe9ee0183,0xff4261ff,0x00000d06,0x61ff0000
+	.long	0x11a20c00,0x00066726,0x1d40ff4f,0xe9ee00c3
+	.long	0xff42e9ee,0x1283ff40,0x660000be,0x422eff4e
+	.long	0xe9ee1343,0xff40303b,0x02124efb,0x000e61ff
+	.long	0x000010e0,0x60d24afc,0x00080010,0x006a0000
+	.long	0x0000002e,0x0000004c,0x000061ff,0x00000a5c
+	.long	0xf2004000,0xf22ef080,0xff6cf281,0x00044e75
+	.long	0x1d7c0001,0xff4e4e75,0x61ff0000,0x0a3ef200
+	.long	0x5000f22e,0xf080ff6c,0xf2810004,0x4e751d7c
+	.long	0x0001ff4e,0x4e7561ff,0x00000a20,0xf2005800
+	.long	0xf22ef080,0xff6cf281,0x00044e75,0x1d7c0001
+	.long	0xff4e4e75,0x61ff0000,0x0a022d40,0xff5441ee
+	.long	0xff5461ff,0x000011de,0x1d40ff4e,0x0c000005
+	.long	0x670001a4,0x0c000004,0x6700015e,0xf2104400
+	.long	0xf22ef080,0xff6c4e75,0x422eff4e,0x303b020a
+	.long	0x4efb0006,0x4afc0008,0x001000e2,0x027202b0
+	.long	0x005601a0,0x009c0000,0x700461ff,0xfffffd22
+	.long	0x0c2e0080,0xff4a6726,0x61ffffff,0x4dde4a81
+	.long	0x66ff0000,0x1eecf200,0x4000f22e,0xf080ff6c
+	.long	0xf2810004,0x4e751d7c,0x0001ff4e,0x4e7561ff
+	.long	0xffff4d76,0x4a8166ff,0xffff6e8a,0x60d87002
+	.long	0x61ffffff,0xfcdc0c2e,0x0080ff4a,0x672661ff
+	.long	0xffff4d82,0x4a8166ff,0x00001e98,0xf2005000
+	.long	0xf22ef080,0xff6cf281,0x00044e75,0x1d7c0001
+	.long	0xff4e4e75,0x61ffffff,0x4d1a4a81,0x66ffffff
+	.long	0x6e4460d8,0x700161ff,0xfffffc96,0x0c2e0080
+	.long	0xff4a6726,0x61ffffff,0x4d264a81,0x66ff0000
+	.long	0x1e42f200,0x5800f22e,0xf080ff6c,0xf2810004
+	.long	0x4e751d7c,0x0001ff4e,0x4e7561ff,0xffff4cd4
+	.long	0x4a8166ff,0xffff6dfe,0x60d87004,0x61ffffff
+	.long	0xfc500c2e,0x0080ff4a,0x673e61ff,0xffff4d0c
+	.long	0x2d40ff54,0x4a8166ff,0x00001e16,0x41eeff54
+	.long	0x61ff0000,0x10a01d40,0xff4e0c00,0x00046700
+	.long	0x00280c00,0x00056700,0x005ef22e,0x4400ff54
+	.long	0xf22ef080,0xff6c4e75,0x61ffffff,0x4c8c4a81
+	.long	0x66ffffff,0x6da060c4,0x426eff6c,0xe9d00257
+	.long	0xe1882d40,0xff7042ae,0xff74426e,0xff6c0810
+	.long	0x00076706,0x08ee0007,0xff6c41ee,0xff6c61ff
+	.long	0x00000e78,0x323c3f81,0x9240836e,0xff6c1d7c
+	.long	0x0000ff4e,0x4e753d7c,0x7fffff6c,0xe9d00257
+	.long	0xe1882d40,0xff7042ae,0xff740810,0x00076706
+	.long	0x08ee0007,0xff6c4e75,0x700861ff,0xfffffb92
+	.long	0x0c2e0080,0xff4a6740,0x43eeff54,0x700861ff
+	.long	0xffff4bc4,0x4a8166ff,0x00001d64,0x41eeff54
+	.long	0x61ff0000,0x0f701d40,0xff4e0c00,0x00046700
+	.long	0x002e0c00,0x00056700,0x0068f22e,0x5400ff54
+	.long	0xf22ef080,0xff6c4e75,0x43eeff54,0x700861ff
+	.long	0xffff4b6e,0x4a8166ff,0xffff6cda,0x60be426e
+	.long	0xff6ce9d0,0x031f2d40,0xff70e9e8,0x02d50004
+	.long	0x720be3a8,0x2d40ff74,0x08100007,0x670608ee
+	.long	0x0007ff6c,0x41eeff6c,0x61ff0000,0x0dae323c
+	.long	0x3c019240,0x836eff6c,0x1d7c0000,0xff4e4e75
+	.long	0x3d7c7fff,0xff6ce9d0,0x031f2d40,0xff70e9e8
+	.long	0x02d50004,0x720be3a8,0x2d40ff74,0x08100007
+	.long	0x670608ee,0x0007ff6c,0x4e75700c,0x61ffffff
+	.long	0xfac043ee,0xff6c700c,0x61ffffff,0x4afa4a81
+	.long	0x66ff0000,0x1ca841ee,0xff6c61ff,0x00000e24
+	.long	0x0c000006,0x67061d40,0xff4e4e75,0x61ff0000
+	.long	0x0d821d40,0xff4e4e75,0x61ff0000,0x125441ee
+	.long	0xff6c61ff,0x00000dfc,0x0c000006,0x67061d40
+	.long	0xff4e4e75,0x61ff0000,0x0d5a1d40,0xff4e4e75
+	.long	0xe9ee10c3,0xff42327b,0x120a4efb,0x98064afc
+	.long	0x000800e0,0x01e00148,0x06200078,0x041a0010
+	.long	0x06204a2e,0xff4e664c,0xf228d080,0x0000f200
+	.long	0x9000f200,0x7800f23c,0x90000000,0x0000f201
+	.long	0xa800836e,0xff66122e,0xff410201,0x00386714
+	.long	0x206e000c,0x61ffffff,0x4ae84a81,0x66ff0000
+	.long	0x1c0a4e75,0x122eff41,0x02410007,0x61ff0000
+	.long	0x07644e75,0x22280000,0x02818000,0x00000081
+	.long	0x00800000,0xf2014400,0x60a44a2e,0xff4e664c
+	.long	0xf228d080,0x0000f200,0x9000f200,0x7000f23c
+	.long	0x90000000,0x0000f201,0xa800836e,0xff66122e
+	.long	0xff410201,0x00386714,0x206e000c,0x61ffffff
+	.long	0x4a964a81,0x66ff0000,0x1bb04e75,0x122eff41
+	.long	0x02410007,0x61ff0000,0x06c04e75,0x22280000
+	.long	0x02818000,0x00000081,0x00800000,0xf2014400
+	.long	0x60a44a2e,0xff4e664c,0xf228d080,0x0000f200
+	.long	0x9000f200,0x6000f23c,0x90000000,0x0000f201
+	.long	0xa800836e,0xff66122e,0xff410201,0x00386714
+	.long	0x206e000c,0x61ffffff,0x4a444a81,0x66ff0000
+	.long	0x1b564e75,0x122eff41,0x02410007,0x61ff0000
+	.long	0x061c4e75,0x22280000,0x02818000,0x00000081
+	.long	0x00800000,0xf2014400,0x60a43d68,0x0000ff84
+	.long	0x426eff86,0x2d680004,0xff882d68,0x0008ff8c
+	.long	0xf228d080,0x000061ff,0xfffff94c,0x224841ee
+	.long	0xff84700c,0x0c2e0008,0xff4a6726,0x61ffffff
+	.long	0x492c4a81,0x66000052,0x4a2eff4e,0x66024e75
+	.long	0x08ee0003,0xff66102e,0xff620200,0x000a6616
+	.long	0x4e7561ff,0xffff5788,0x4a816600,0x002c4a2e
+	.long	0xff4e66dc,0x4e7541ee,0xff8461ff,0x00000b3c
+	.long	0x44400240,0x7fff026e,0x8000ff84,0x816eff84
+	.long	0xf22ed040,0xff844e75,0x2caeffd4,0x60ff0000
+	.long	0x1ab20200,0x00300000,0x00402d40,0xff5c3028
+	.long	0x00000240,0x7fff0c40,0x407e6e00,0x00e66700
+	.long	0x01520c40,0x3f816d00,0x0058f228,0xd0800000
+	.long	0xf22e9000,0xff5cf23c,0x88000000,0x0000f200
+	.long	0x6400f23c,0x90000000,0x0000f201,0xa800836e
+	.long	0xff66122e,0xff410201,0x00386714,0x206e000c
+	.long	0x61ffffff,0x49184a81,0x66ff0000,0x1a2a4e75
+	.long	0x122eff41,0x02410007,0x61ff0000,0x04f04e75
+	.long	0x08ee0003,0xff663d68,0x0000ff84,0x2d680004
+	.long	0xff882d68,0x0008ff8c,0x2f084280,0x0c2e0004
+	.long	0xff4e660a,0x41eeff84,0x61ff0000,0x0a6e41ee
+	.long	0xff84222e,0xff5c61ff,0x00000c86,0x41eeff84
+	.long	0x61ff0000,0x034c122e,0xff410201,0x00386714
+	.long	0x206e000c,0x61ffffff,0x48a44a81,0x66ff0000
+	.long	0x19b6600e,0x122eff41,0x02410007,0x61ff0000
+	.long	0x047c122e,0xff620201,0x000a6600,0x00b8588f
+	.long	0x4e754a28,0x0007660e,0x4aa80008,0x6608006e
+	.long	0x1048ff66,0x6006006e,0x1248ff66,0x2f084a28
+	.long	0x00005bc1,0x202eff5c,0x61ff0000,0x0d12f210
+	.long	0xd080f200,0x6400122e,0xff410201,0x00386714
+	.long	0x206e000c,0x61ffffff,0x48344a81,0x66ff0000
+	.long	0x1946600e,0x122eff41,0x02410007,0x61ff0000
+	.long	0x040c122e,0xff620201,0x000a6600,0x007c588f
+	.long	0x4e753228,0x00000241,0x80000041,0x3fff3d41
+	.long	0xff842d68,0x0004ff88,0x2d680008,0xff8cf22e
+	.long	0x9000ff5c,0xf22e4800,0xff84f23c,0x90000000
+	.long	0x0000f200,0x0018f23c,0x58380002,0xf294fe7c
+	.long	0x6000ff50,0x205f3d68,0x0000ff84,0x2d680004
+	.long	0xff882d68,0x0008ff8c,0x0c2e0004,0xff4e662c
+	.long	0x41eeff84,0x61ff0000,0x09424480,0x02407fff
+	.long	0xefee004f,0xff846014,0x205f3d68,0x0000ff84
+	.long	0x2d680004,0xff882d68,0x0008ff8c,0x08ae0007
+	.long	0xff8456ee,0xff8641ee,0xff84122e,0xff5fe809
+	.long	0x0241000c,0x4841122e,0xff5fe809,0x02410003
+	.long	0x428061ff,0x00000782,0x4a2eff86,0x670608ee
+	.long	0x0007ff84,0xf22ed040,0xff844e75,0x02000030
+	.long	0x00000080,0x2d40ff5c,0x30280000,0x02407fff
+	.long	0x0c4043fe,0x6e0000c8,0x67000120,0x0c403c01
+	.long	0x6d000046,0xf228d080,0x0000f22e,0x9000ff5c
+	.long	0xf23c8800,0x00000000,0xf22e7400,0xff54f23c
+	.long	0x90000000,0x0000f200,0xa800816e,0xff66226e
+	.long	0x000c41ee,0xff547008,0x61ffffff,0x46304a81
+	.long	0x66ff0000,0x18004e75,0x08ee0003,0xff663d68
+	.long	0x0000ff84,0x2d680004,0xff882d68,0x0008ff8c
+	.long	0x2f084280,0x0c2e0004,0xff4e660a,0x41eeff84
+	.long	0x61ff0000,0x084641ee,0xff84222e,0xff5c61ff
+	.long	0x00000a5e,0x41eeff84,0x61ff0000,0x00d22d40
+	.long	0xff542d41,0xff58226e,0x000c41ee,0xff547008
+	.long	0x61ffffff,0x45c84a81,0x66ff0000,0x1798122e
+	.long	0xff620201,0x000a6600,0xfe9c588f,0x4e753028
+	.long	0x000a0240,0x07ff6608,0x006e1048,0xff666006
+	.long	0x006e1248,0xff662f08,0x4a280000,0x5bc1202e
+	.long	0xff5c61ff,0x00000af8,0xf210d080,0xf22e7400
+	.long	0xff54226e,0x000c41ee,0xff547008,0x61ffffff
+	.long	0x456c4a81,0x66ff0000,0x173c122e,0xff620201
+	.long	0x000a6600,0xfe74588f,0x4e753228,0x00000241
+	.long	0x80000041,0x3fff3d41,0xff842d68,0x0004ff88
+	.long	0x2d680008,0xff8cf22e,0x9000ff5c,0xf22e4800
+	.long	0xff84f23c,0x90000000,0x0000f200,0x0018f23c
+	.long	0x58380002,0xf294feae,0x6000ff64,0x42803028
+	.long	0x00000440,0x3fff0640,0x03ff4a28,0x00046b02
+	.long	0x53404840,0xe9884a28,0x00006a04,0x08c0001f
+	.long	0x22280004,0xe9c11054,0x80812d40,0xff542228
+	.long	0x00047015,0xe1a92d41,0xff582228,0x0008e9c1
+	.long	0x0015222e,0xff588280,0x202eff54,0x4e754280
+	.long	0x30280000,0x04403fff,0x0640007f,0x4a280004
+	.long	0x6b025340,0x4840ef88,0x4a280000,0x6a0408c0
+	.long	0x001f2228,0x00040281,0x7fffff00,0xe0898081
+	.long	0x4e7561ff,0xfffff490,0x2f08102e,0xff4e6600
+	.long	0x0082082e,0x0004ff42,0x6712122e,0xff43e809
+	.long	0x02410007,0x61ff0000,0x00926004,0x102eff43
+	.long	0xebc00647,0x2f0041ee,0xff6c61ff,0x00000ed0
+	.long	0x02aecfff,0xf00fff84,0x201f4a2e,0xff876616
+	.long	0x4aaeff88,0x66104aae,0xff8c660a,0x4a806606
+	.long	0x026ef000,0xff8441ee,0xff84225f,0x700c0c2e
+	.long	0x0008ff4a,0x670e61ff,0xffff4412,0x4a816600
+	.long	0xfb384e75,0x61ffffff,0x52864a81,0x6600fb2a
+	.long	0x4e750c00,0x00046700,0xff7a41ee,0xff6c426e
+	.long	0xff6e0c00,0x00056702,0x60c0006e,0x4080ff66
+	.long	0x08ee0006,0xff7060b2,0x303b1206,0x4efb0002
+	.long	0x00200026,0x002c0030,0x00340038,0x003c0040
+	.long	0x0044004a,0x00500054,0x0058005c,0x00600064
+	.long	0x202eff9c,0x4e75202e,0xffa04e75,0x20024e75
+	.long	0x20034e75,0x20044e75,0x20054e75,0x20064e75
+	.long	0x20074e75,0x202effa4,0x4e75202e,0xffa84e75
+	.long	0x200a4e75,0x200b4e75,0x200c4e75,0x200d4e75
+	.long	0x20164e75,0x202effd8,0x4e75323b,0x12064efb
+	.long	0x10020010,0x0016001c,0x00200024,0x0028002c
+	.long	0x00302d40,0xff9c4e75,0x2d40ffa0,0x4e752400
+	.long	0x4e752600,0x4e752800,0x4e752a00,0x4e752c00
+	.long	0x4e752e00,0x4e75323b,0x12064efb,0x10020010
+	.long	0x0016001c,0x00200024,0x0028002c,0x00303d40
+	.long	0xff9e4e75,0x3d40ffa2,0x4e753400,0x4e753600
+	.long	0x4e753800,0x4e753a00,0x4e753c00,0x4e753e00
+	.long	0x4e75323b,0x12064efb,0x10020010,0x0016001c
+	.long	0x00200024,0x0028002c,0x00301d40,0xff9f4e75
+	.long	0x1d40ffa3,0x4e751400,0x4e751600,0x4e751800
+	.long	0x4e751a00,0x4e751c00,0x4e751e00,0x4e75323b
+	.long	0x12064efb,0x10020010,0x0016001c,0x00200024
+	.long	0x0028002c,0x0030d1ae,0xffa44e75,0xd1aeffa8
+	.long	0x4e75d5c0,0x4e75d7c0,0x4e75d9c0,0x4e75dbc0
+	.long	0x4e75d196,0x4e751d7c,0x0004ff4a,0x0c000001
+	.long	0x6706d1ae,0xffd84e75,0x54aeffd8,0x4e75323b
+	.long	0x12064efb,0x10020010,0x0016001c,0x00200024
+	.long	0x0028002c,0x003091ae,0xffa44e75,0x91aeffa8
+	.long	0x4e7595c0,0x4e7597c0,0x4e7599c0,0x4e759bc0
+	.long	0x4e759196,0x4e751d7c,0x0008ff4a,0x0c000001
+	.long	0x670691ae,0xffd84e75,0x55aeffd8,0x4e75303b
+	.long	0x02064efb,0x00020010,0x00280040,0x004c0058
+	.long	0x00640070,0x007c2d6e,0xffdcff6c,0x2d6effe0
+	.long	0xff702d6e,0xffe4ff74,0x41eeff6c,0x4e752d6e
+	.long	0xffe8ff6c,0x2d6effec,0xff702d6e,0xfff0ff74
+	.long	0x41eeff6c,0x4e75f22e,0xf020ff6c,0x41eeff6c
+	.long	0x4e75f22e,0xf010ff6c,0x41eeff6c,0x4e75f22e
+	.long	0xf008ff6c,0x41eeff6c,0x4e75f22e,0xf004ff6c
+	.long	0x41eeff6c,0x4e75f22e,0xf002ff6c,0x41eeff6c
+	.long	0x4e75f22e,0xf001ff6c,0x41eeff6c,0x4e75303b
+	.long	0x02064efb,0x00020010,0x00280040,0x004c0058
+	.long	0x00640070,0x007c2d6e,0xffdcff78,0x2d6effe0
+	.long	0xff7c2d6e,0xffe4ff80,0x41eeff78,0x4e752d6e
+	.long	0xffe8ff78,0x2d6effec,0xff7c2d6e,0xfff0ff80
+	.long	0x41eeff78,0x4e75f22e,0xf020ff78,0x41eeff78
+	.long	0x4e75f22e,0xf010ff78,0x41eeff78,0x4e75f22e
+	.long	0xf008ff78,0x41eeff78,0x4e75f22e,0xf004ff78
+	.long	0x41eeff78,0x4e75f22e,0xf002ff78,0x41eeff78
+	.long	0x4e75f22e,0xf001ff78,0x41eeff78,0x4e75303b
+	.long	0x02064efb,0x00020010,0x00180020,0x002a0034
+	.long	0x003e0048,0x0052f22e,0xf080ffdc,0x4e75f22e
+	.long	0xf080ffe8,0x4e75f227,0xe001f21f,0xd0204e75
+	.long	0xf227e001,0xf21fd010,0x4e75f227,0xe001f21f
+	.long	0xd0084e75,0xf227e001,0xf21fd004,0x4e75f227
+	.long	0xe001f21f,0xd0024e75,0xf227e001,0xf21fd001
+	.long	0x4e750000,0x3f813c01,0xe408323b,0x02f63001
+	.long	0x90680000,0x0c400042,0x6a164280,0x082e0001
+	.long	0xff666704,0x08c0001d,0x61ff0000,0x001a4e75
+	.long	0x203c2000,0x00003141,0x000042a8,0x000442a8
+	.long	0x00084e75,0x2d680008,0xff542d40,0xff582001
+	.long	0x92680000,0x6f100c41,0x00206d10,0x0c410040
+	.long	0x6d506000,0x009a202e,0xff584e75,0x2f023140
+	.long	0x00007020,0x90410c41,0x001d6d08,0x142eff58
+	.long	0x852eff57,0xe9e82020,0x0004e9e8,0x18000004
+	.long	0xe9ee0800,0xff542142,0x00042141,0x0008e8c0
+	.long	0x009e6704,0x08c0001d,0x0280e000,0x0000241f
+	.long	0x4e752f02,0x31400000,0x04410020,0x70209041
+	.long	0x142eff58,0x852eff57,0xe9e82020,0x0004e9e8
+	.long	0x18000004,0xe8c1009e,0x660ce8ee,0x081fff54
+	.long	0x66042001,0x60062001,0x08c0001d,0x42a80004
+	.long	0x21420008,0x0280e000,0x0000241f,0x4e753140
+	.long	0x00000c41,0x00416d12,0x672442a8,0x000442a8
+	.long	0x0008203c,0x20000000,0x4e752028,0x00042200
+	.long	0x0280c000,0x00000281,0x3fffffff,0x60122028
+	.long	0x00040280,0x80000000,0xe2880281,0x7fffffff
+	.long	0x66164aa8,0x00086610,0x4a2eff58,0x660a42a8
+	.long	0x000442a8,0x00084e75,0x08c0001d,0x42a80004
+	.long	0x42a80008,0x4e7561ff,0x00000110,0x4a806700
+	.long	0x00fa006e,0x0208ff66,0x327b1206,0x4efb9802
+	.long	0x004000ea,0x00240008,0x4a280002,0x6b0000dc
+	.long	0x70ff4841,0x0c010004,0x6700003e,0x6e000094
+	.long	0x60000064,0x4a280002,0x6a0000c0,0x70ff4841
+	.long	0x0c010004,0x67000022,0x6e000078,0x60000048
+	.long	0xe3806400,0x00a64841,0x0c010004,0x6700000a
+	.long	0x6e000060,0x60000030,0x06a80000,0x01000004
+	.long	0x640ce4e8,0x0004e4e8,0x00065268,0x00004a80
+	.long	0x66060268,0xfe000006,0x02a8ffff,0xff000004
+	.long	0x42a80008,0x4e7552a8,0x0008641a,0x52a80004
+	.long	0x6414e4e8,0x0004e4e8,0x0006e4e8,0x0008e4e8
+	.long	0x000a5268,0x00004a80,0x66060228,0x00fe000b
+	.long	0x4e7506a8,0x00000800,0x0008641a,0x52a80004
+	.long	0x6414e4e8,0x0004e4e8,0x0006e4e8,0x0008e4e8
+	.long	0x000a5268,0x00004a80,0x66060268,0xf000000a
+	.long	0x02a8ffff,0xf8000008,0x4e754841,0x0c010004
+	.long	0x6700ff86,0x6eea4e75,0x48414a01,0x66044841
+	.long	0x4e7548e7,0x30000c01,0x00046622,0xe9e83602
+	.long	0x0004741e,0xe5ab2428,0x00040282,0x0000003f
+	.long	0x66284aa8,0x00086622,0x4a80661e,0x6020e9e8
+	.long	0x35420008,0x741ee5ab,0x24280008,0x02820000
+	.long	0x01ff6606,0x4a806602,0x600408c3,0x001d2003
+	.long	0x4cdf000c,0x48414e75,0x2f022f03,0x20280004
+	.long	0x22280008,0xedc02000,0x671ae5a8,0xe9c13022
+	.long	0x8083e5a9,0x21400004,0x21410008,0x2002261f
+	.long	0x241f4e75,0xedc12000,0xe5a90682,0x00000020
+	.long	0x21410004,0x42a80008,0x2002261f,0x241f4e75
+	.long	0xede80000,0x0004660e,0xede80000,0x00086700
+	.long	0x00740640,0x00204281,0x32280000,0x02417fff
+	.long	0xb0416e1c,0x92403028,0x00000240,0x80008240
+	.long	0x31410000,0x61ffffff,0xff82103c,0x00004e75
+	.long	0x0c010020,0x6e20e9e8,0x08400004,0x21400004
+	.long	0x20280008,0xe3a82140,0x00080268,0x80000000
+	.long	0x103c0004,0x4e750441,0x00202028,0x0008e3a8
+	.long	0x21400004,0x42a80008,0x02688000,0x0000103c
+	.long	0x00044e75,0x02688000,0x0000103c,0x00014e75
+	.long	0x30280000,0x02407fff,0x0c407fff,0x67480828
+	.long	0x00070004,0x6706103c,0x00004e75,0x4a406618
+	.long	0x4aa80004,0x660c4aa8,0x00086606,0x103c0001
+	.long	0x4e75103c,0x00044e75,0x4aa80004,0x66124aa8
+	.long	0x0008660c,0x02688000,0x0000103c,0x00014e75
+	.long	0x103c0006,0x4e754aa8,0x00086612,0x20280004
+	.long	0x02807fff,0xffff6606,0x103c0002,0x4e750828
+	.long	0x00060004,0x6706103c,0x00034e75,0x103c0005
+	.long	0x4e752028,0x00002200,0x02807ff0,0x0000670e
+	.long	0x0c807ff0,0x00006728,0x103c0000,0x4e750281
+	.long	0x000fffff,0x66ff0000,0x00144aa8,0x000466ff
+	.long	0x0000000a,0x103c0001,0x4e75103c,0x00044e75
+	.long	0x0281000f,0xffff66ff,0x00000014,0x4aa80004
+	.long	0x66ff0000,0x000a103c,0x00024e75,0x08010013
+	.long	0x66ff0000,0x000a103c,0x00054e75,0x103c0003
+	.long	0x4e752028,0x00002200,0x02807f80,0x0000670e
+	.long	0x0c807f80,0x0000671e,0x103c0000,0x4e750281
+	.long	0x007fffff,0x66ff0000,0x000a103c,0x00014e75
+	.long	0x103c0004,0x4e750281,0x007fffff,0x66ff0000
+	.long	0x000a103c,0x00024e75,0x08010016,0x66ff0000
+	.long	0x000a103c,0x00054e75,0x103c0003,0x4e752f01
+	.long	0x08280007,0x000056e8,0x00023228,0x00000241
+	.long	0x7fff9240,0x31410000,0x2f08202f,0x00040240
+	.long	0x00c0e848,0x61ffffff,0xfae22057,0x322f0006
+	.long	0x024100c0,0xe8494841,0x322f0006,0x02410030
+	.long	0xe84961ff,0xfffffc22,0x205f08a8,0x00070000
+	.long	0x4a280002,0x670a08e8,0x00070000,0x42280002
+	.long	0x42804aa8,0x0004660a,0x4aa80008,0x660408c0
+	.long	0x0002082e,0x0001ff66,0x670608ee,0x0005ff67
+	.long	0x588f4e75,0x2f010828,0x00070000,0x56e80002
+	.long	0x32280000,0x02417fff,0x92403141,0x00002f08
+	.long	0x428061ff,0xfffffa64,0x2057323c,0x00044841
+	.long	0x322f0006,0x02410030,0xe84961ff,0xfffffbaa
+	.long	0x205f08a8,0x00070000,0x4a280002,0x670a08e8
+	.long	0x00070000,0x42280002,0x42804aa8,0x0004660a
+	.long	0x4aa80008,0x660408c0,0x0002082e,0x0001ff66
+	.long	0x670608ee,0x0005ff67,0x588f4e75,0x02410010
+	.long	0xe8088200,0x3001e309,0x600e0241,0x00108200
+	.long	0x48408200,0x3001e309,0x103b0008,0x41fb1620
+	.long	0x4e750200,0x00020200,0x00020200,0x00020000
+	.long	0x00000a08,0x0a080a08,0x0a080a08,0x0a087fff
+	.long	0x00000000,0x00000000,0x00000000,0x00007ffe
+	.long	0x0000ffff,0xffffffff,0xffff0000,0x00007ffe
+	.long	0x0000ffff,0xffffffff,0xffff0000,0x00007fff
+	.long	0x00000000,0x00000000,0x00000000,0x00007fff
+	.long	0x00000000,0x00000000,0x00000000,0x0000407e
+	.long	0x0000ffff,0xff000000,0x00000000,0x0000407e
+	.long	0x0000ffff,0xff000000,0x00000000,0x00007fff
+	.long	0x00000000,0x00000000,0x00000000,0x00007fff
+	.long	0x00000000,0x00000000,0x00000000,0x000043fe
+	.long	0x0000ffff,0xffffffff,0xf8000000,0x000043fe
+	.long	0x0000ffff,0xffffffff,0xf8000000,0x00007fff
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x0000ffff
+	.long	0x00000000,0x00000000,0x00000000,0x0000fffe
+	.long	0x0000ffff,0xffffffff,0xffff0000,0x0000ffff
+	.long	0x00000000,0x00000000,0x00000000,0x0000fffe
+	.long	0x0000ffff,0xffffffff,0xffff0000,0x0000ffff
+	.long	0x00000000,0x00000000,0x00000000,0x0000c07e
+	.long	0x0000ffff,0xff000000,0x00000000,0x0000ffff
+	.long	0x00000000,0x00000000,0x00000000,0x0000c07e
+	.long	0x0000ffff,0xff000000,0x00000000,0x0000ffff
+	.long	0x00000000,0x00000000,0x00000000,0x0000c3fe
+	.long	0x0000ffff,0xffffffff,0xf8000000,0x0000ffff
+	.long	0x00000000,0x00000000,0x00000000,0x0000c3fe
+	.long	0x0000ffff,0xffffffff,0xf8000000,0x0000700c
+	.long	0x61ffffff,0xe82c43ee,0xff6c700c,0x61ffffff
+	.long	0x38664a81,0x66ff0000,0x0a14e9ee,0x004fff6c
+	.long	0x0c407fff,0x66024e75,0x102eff6f,0x0200000f
+	.long	0x660e4aae,0xff706608,0x4aaeff74,0x66024e75
+	.long	0x41eeff6c,0x61ff0000,0x001cf22e,0xf080ff6c
+	.long	0x4e750000,0x00000203,0x02030203,0x03020302
+	.long	0x02032d68,0x0000ff84,0x2d680004,0xff882d68
+	.long	0x0008ff8c,0x41eeff84,0x48e73c00,0xf227e001
+	.long	0x74027604,0x28104281,0x4c3c1001,0x0000000a
+	.long	0xe9c408c4,0xd2805803,0x51caffee,0x0804001e
+	.long	0x67024481,0x04810000,0x00106c0e,0x44810084
+	.long	0x40000000,0x00904000,0x00002f01,0x7201f23c
+	.long	0x44000000,0x0000e9d0,0x0704f200,0x58222830
+	.long	0x1c007600,0x7407f23c,0x44234120,0x0000e9c4
+	.long	0x08c4f200,0x58225803,0x51caffec,0x52810c81
+	.long	0x00000002,0x6fd80810,0x001f6704,0xf200001a
+	.long	0x22170c81,0x0000001b,0x6f0000e4,0x0810001e
+	.long	0x66744281,0x2810e9c4,0x07046624,0x52817a01
+	.long	0x28305c00,0x66085081,0x52852830,0x5c004283
+	.long	0x7407e9c4,0x08c46608,0x58835281,0x51cafff4
+	.long	0x20012217,0x92806c10,0x44812810,0x00844000
+	.long	0x00000090,0x40000000,0x43fb0170,0x00000666
+	.long	0x4283f23c,0x44803f80,0x00007403,0xe2806406
+	.long	0xf23148a3,0x38000683,0x0000000c,0x4a8066ec
+	.long	0xf2000423,0x60684281,0x7a022830,0x5c006608
+	.long	0x53855081,0x28305c00,0x761c7407,0xe9c408c4
+	.long	0x66085983,0x528151ca,0xfff42001,0x22179280
+	.long	0x6e104481,0x28100284,0xbfffffff,0x0290bfff
+	.long	0xffff43fb,0x01700000,0x05fc4283,0xf23c4480
+	.long	0x3f800000,0x7403e280,0x6406f231,0x48a33800
+	.long	0x06830000,0x000c4a80,0x66ecf200,0x0420262e
+	.long	0xff60e9c3,0x26822810,0xe582e9c4,0x0002d480
+	.long	0x43fafe50,0x10312800,0x4283efc3,0x0682f203
+	.long	0x9000e280,0x640a43fb,0x01700000,0x06446016
+	.long	0xe280640a,0x43fb0170,0x000006d2,0x600843fb
+	.long	0x01700000,0x05902001,0x6a084480,0x00904000
+	.long	0x00004283,0xf23c4480,0x3f800000,0xe2806406
+	.long	0xf23148a3,0x38000683,0x0000000c,0x4a8066ec
+	.long	0x0810001e,0x6706f200,0x04206004,0xf2000423
+	.long	0xf200a800,0x08800009,0x6706006e,0x0108ff66
+	.long	0x588ff21f,0xd0404cdf,0x003cf23c,0x90000000
+	.long	0x0000f23c,0x88000000,0x00004e75,0x3ffd0000
+	.long	0x9a209a84,0xfbcff798,0x00000000,0x3ffd0000
+	.long	0x9a209a84,0xfbcff799,0x00000000,0x3f800000
+	.long	0x00000000,0x00000000,0x00000000,0x40000000
+	.long	0x00000000,0x00000000,0x00000000,0x41200000
+	.long	0x00000000,0x00000000,0x00000000,0x459a2800
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x03030202,0x03020203,0x02030302,0x48e73f20
+	.long	0xf227e007,0xf23c9000,0x00000020,0x2d50ff58
+	.long	0x2e00422e,0xff500c2e,0x0004ff4e,0x66000030
+	.long	0x30100240,0x7fff2228,0x00042428,0x00085340
+	.long	0xe38ae391,0x4a816cf6,0x4a406e04,0x50eeff50
+	.long	0x02407fff,0x30802141,0x00042142,0x00082d50
+	.long	0xff902d68,0x0004ff94,0x2d680008,0xff9802ae
+	.long	0x7fffffff,0xff904a2e,0xff506708,0x2c3cffff
+	.long	0xecbb6038,0x302eff90,0x3d7c3fff,0xff90f22e
+	.long	0x4800ff90,0x04403fff,0xf2005022,0xf23a4428
+	.long	0xff1cf293,0x000ef23a,0x4823ff02,0xf2066000
+	.long	0x600af23a,0x4823fee6,0xf2066000,0xf23c8800
+	.long	0x00000000,0x42454a87,0x6f042807,0x60062806
+	.long	0x98875284,0x4a846f18,0x0c840000,0x00116f12
+	.long	0x78114a87,0x6f0c00ae,0x00002080,0xff646002
+	.long	0x78014a87,0x6e06be86,0x6d022c07,0x20065280
+	.long	0x90844845,0x42454242,0x4a806c14,0x52450c80
+	.long	0xffffecd4,0x6e080680,0x00000018,0x74184480
+	.long	0xf23a4480,0xfe98e9ee,0x1682ff60,0xe349d245
+	.long	0xe3494aae,0xff586c02,0x528145fa,0xfec01632
+	.long	0x1800e98b,0xf2039000,0xe88b4a03,0x660a43fb
+	.long	0x01700000,0x03706016,0xe20b640a,0x43fb0170
+	.long	0x000003fe,0x600843fb,0x01700000,0x04904283
+	.long	0xe2886406,0xf23148a3,0x38000683,0x0000000c
+	.long	0x4a8066ec,0xf23c8800,0x00000000,0xf23c9000
+	.long	0x00000010,0xf2104800,0xf2000018,0x4a456608
+	.long	0xf2000420,0x6000008e,0x4a2eff50,0x67000072
+	.long	0xf227e002,0x36170243,0x7fff0050,0x8000d650
+	.long	0x04433fff,0xd6690024,0x04433fff,0xd6690030
+	.long	0x04433fff,0x6b000048,0x02578000,0x87570250
+	.long	0x7fff2f28,0x00082f28,0x00042f3c,0x3fff0000
+	.long	0xf21fd080,0xf21f4823,0x2f29002c,0x2f290028
+	.long	0x2f3c3fff,0x00002f29,0x00382f29,0x00342f3c
+	.long	0x3fff0000,0xf21f4823,0xf21f4823,0x601660fe
+	.long	0x4a42670c,0xf2294823,0x0024f229,0x48230030
+	.long	0xf2000423,0xf200a800,0xf22e6800,0xff9045ee
+	.long	0xff900800,0x0009670e,0x00aa0000,0x00010008
+	.long	0xf22e4800,0xff902d6e,0xff60ff54,0x02ae0000
+	.long	0x0030ff60,0x48e7c0c0,0x2f2eff54,0x2f2eff58
+	.long	0x41eeff90,0xf2106800,0x4aaeff58,0x6c060090
+	.long	0x80000000,0x2f2eff64,0xf22e9000,0xff60f23c
+	.long	0x88000000,0x0000f22e,0x4801ff90,0xf200a800
+	.long	0x816eff66,0x1d57ff64,0x588f2d5f,0xff582d5f
+	.long	0xff544cdf,0x03032d6e,0xff58ff90,0x2d6eff54
+	.long	0xff604845,0x4a4566ff,0x00000086,0xf23a4500
+	.long	0xfcec2004,0x53804283,0xe2886406,0xf2314923
+	.long	0x38000683,0x0000000c,0x4a8066ec,0x4a2eff50
+	.long	0x670af200,0x001860ff,0x00000028,0xf2000018
+	.long	0xf2000838,0xf293001a,0x53863a3c,0x0001f23c
+	.long	0x90000000,0x0020f23a,0x4523fcc2,0x6000fda8
+	.long	0xf23a4523,0xfcb8f200,0x0838f294,0x005cf292
+	.long	0x000cf23a,0x4420fca6,0x5286604c,0x52863a3c
+	.long	0x0001f23c,0x90000000,0x00206000,0xfd7af23a
+	.long	0x4500fc6a,0x20044283,0xe2886406,0xf2314923
+	.long	0x38000683,0x0000000c,0x4a8066ec,0xf2000018
+	.long	0xf2000838,0xf28e0012,0xf23a4420,0xfc605286
+	.long	0x5284f23a,0x4523fc56,0xf23c9000,0x00000010
+	.long	0xf2000820,0x41eeff84,0xf2106800,0x24280004
+	.long	0x26280008,0x42a80004,0x42a80008,0x20104840
+	.long	0x67140480,0x00003ffd,0x4a806e0a,0x4480e28a
+	.long	0xe29351c8,0xfffa4a82,0x66044a83,0x67104281
+	.long	0x06830000,0x0080d581,0x0283ffff,0xff802004
+	.long	0x568861ff,0x000002b0,0x4a2eff50,0x6728f200
+	.long	0x003af281,0x000cf206,0x4000f200,0x0018602e
+	.long	0x4a876d08,0xf23a4400,0xfbe46022,0xf2064000
+	.long	0xf2000018,0x6018f200,0x003af28e,0x000af23a
+	.long	0x4400fb9a,0x6008f206,0x4000f200,0x0018f229
+	.long	0x48200018,0xf22e6800,0xff90242a,0x0004262a
+	.long	0x00083012,0x670e0440,0x3ffd4440,0xe28ae293
+	.long	0x51c8fffa,0x42810683,0x00000080,0xd5810283
+	.long	0xffffff80,0x700441ee,0xff5461ff,0x00000228
+	.long	0x202eff54,0x720ce2a8,0xefee010c,0xff84e2a8
+	.long	0xefee0404,0xff844a00,0x670800ae,0x00002080
+	.long	0xff644280,0x022e000f,0xff844aae,0xff586c02
+	.long	0x70024a86,0x6c025280,0xefee0002,0xff84f23c
+	.long	0x88000000,0x0000f21f,0xd0e04cdf,0x04fc4e75
+	.long	0x40020000,0xa0000000,0x00000000,0x40050000
+	.long	0xc8000000,0x00000000,0x400c0000,0x9c400000
+	.long	0x00000000,0x40190000,0xbebc2000,0x00000000
+	.long	0x40340000,0x8e1bc9bf,0x04000000,0x40690000
+	.long	0x9dc5ada8,0x2b70b59e,0x40d30000,0xc2781f49
+	.long	0xffcfa6d5,0x41a80000,0x93ba47c9,0x80e98ce0
+	.long	0x43510000,0xaa7eebfb,0x9df9de8e,0x46a30000
+	.long	0xe319a0ae,0xa60e91c7,0x4d480000,0xc9767586
+	.long	0x81750c17,0x5a920000,0x9e8b3b5d,0xc53d5de5
+	.long	0x75250000,0xc4605202,0x8a20979b,0x40020000
+	.long	0xa0000000,0x00000000,0x40050000,0xc8000000
+	.long	0x00000000,0x400c0000,0x9c400000,0x00000000
+	.long	0x40190000,0xbebc2000,0x00000000,0x40340000
+	.long	0x8e1bc9bf,0x04000000,0x40690000,0x9dc5ada8
+	.long	0x2b70b59e,0x40d30000,0xc2781f49,0xffcfa6d6
+	.long	0x41a80000,0x93ba47c9,0x80e98ce0,0x43510000
+	.long	0xaa7eebfb,0x9df9de8e,0x46a30000,0xe319a0ae
+	.long	0xa60e91c7,0x4d480000,0xc9767586,0x81750c18
+	.long	0x5a920000,0x9e8b3b5d,0xc53d5de5,0x75250000
+	.long	0xc4605202,0x8a20979b,0x40020000,0xa0000000
+	.long	0x00000000,0x40050000,0xc8000000,0x00000000
+	.long	0x400c0000,0x9c400000,0x00000000,0x40190000
+	.long	0xbebc2000,0x00000000,0x40340000,0x8e1bc9bf
+	.long	0x04000000,0x40690000,0x9dc5ada8,0x2b70b59d
+	.long	0x40d30000,0xc2781f49,0xffcfa6d5,0x41a80000
+	.long	0x93ba47c9,0x80e98cdf,0x43510000,0xaa7eebfb
+	.long	0x9df9de8d,0x46a30000,0xe319a0ae,0xa60e91c6
+	.long	0x4d480000,0xc9767586,0x81750c17,0x5a920000
+	.long	0x9e8b3b5d,0xc53d5de4,0x75250000,0xc4605202
+	.long	0x8a20979a,0x48e7ff00,0x7e015380,0x28022a03
+	.long	0xe9c21003,0xe782e9c3,0x6003e783,0x8486e385
+	.long	0xe3944846,0xd346d685,0x4e71d584,0x4e71d346
+	.long	0x48464a47,0x67124847,0xe947de41,0x10c74847
+	.long	0x424751c8,0xffc86012,0x48473e01,0x48475247
+	.long	0x51c8ffba,0x4847e94f,0x10c74cdf,0x00ff4e75
+	.long	0x70016100,0x00d63d7c,0x0121000a,0x6000007e
+	.long	0x70026100,0x00c63d7c,0x0141000a,0x606e7004
+	.long	0x610000b8,0x3d7c0101,0x000a6060,0x70086100
+	.long	0x00aa3d7c,0x0161000a,0x6052700c,0x6100009c
+	.long	0x3d7c0161,0x000a6044,0x70016100,0x008e3d7c
+	.long	0x00a1000a,0x60367002,0x61000080,0x3d7c00c1
+	.long	0x000a6028,0x70046100,0x00723d7c,0x0081000a
+	.long	0x601a7008,0x61000064,0x3d7c00e1,0x000a600c
+	.long	0x700c6100,0x00563d7c,0x00e1000a,0x2d6eff68
+	.long	0x0006f22e,0xd0c0ffdc,0xf22e9c00,0xff604cee
+	.long	0x0303ff9c,0x4e5e2f17,0x2f6f0008,0x00042f6f
+	.long	0x000c0008,0x2f7c0000,0x0001000c,0x3f6f0006
+	.long	0x000c3f7c,0x40080006,0x08170005,0x670608ef
+	.long	0x0002000d,0x60ffffff,0x2d82122e,0xff410201
+	.long	0x00380c01,0x00186700,0x000c0c01,0x00206700
+	.long	0x00604e75,0x122eff41,0x02410007,0x323b1206
+	.long	0x4efb1002,0x00100016,0x001c0020,0x00240028
+	.long	0x002c0030,0x91aeffa4,0x4e7591ae,0xffa84e75
+	.long	0x95c04e75,0x97c04e75,0x99c04e75,0x9bc04e75
+	.long	0x91964e75,0x0c2e0030,0x000a6612,0x082e0005
+	.long	0x0004660a,0x4e7a8800,0x91c04e7b,0x88004e75
+	.long	0x448060a0,0x00000000,0x00000000,0x00000000
diff --git a/arch/m68k/ifpsp060/fskeleton.S b/arch/m68k/ifpsp060/fskeleton.S
new file mode 100644
index 0000000..a45a4ff
--- /dev/null
+++ b/arch/m68k/ifpsp060/fskeleton.S
@@ -0,0 +1,342 @@
+|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+|MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+|M68000 Hi-Performance Microprocessor Division
+|M68060 Software Package
+|Production Release P1.00 -- October 10, 1994
+|
+|M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+|
+|THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+|To the maximum extent permitted by applicable law,
+|MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+|INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+|and any warranty against infringement with regard to the SOFTWARE
+|(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+|
+|To the maximum extent permitted by applicable law,
+|IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+|(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+|BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+|ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+|Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+|
+|You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+|so long as this entire notice is retained without alteration in any modified and/or
+|redistributed versions, and that such modified versions are clearly identified as such.
+|No licenses are granted by implication, estoppel or otherwise under any patents
+|or trademarks of Motorola, Inc.
+|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+| fskeleton.s
+|
+| This file contains:
+|	(1) example "Call-out"s
+|	(2) example package entry code
+|	(3) example "Call-out" table
+|
+
+#include <linux/linkage.h>
+
+|################################
+| (1) EXAMPLE CALL-OUTS		#
+|				#
+| _060_fpsp_done()		#
+| _060_real_ovfl()		#
+| _060_real_unfl()		#
+| _060_real_operr()		#
+| _060_real_snan()		#
+| _060_real_dz()		#
+| _060_real_inex()		#
+| _060_real_bsun()		#
+| _060_real_fline()		#
+| _060_real_fpu_disabled()	#
+| _060_real_trap()		#
+|################################
+
+|
+| _060_fpsp_done():
+|
+| This is the main exit point for the 68060 Floating-Point
+| Software Package. For a normal exit, all 060FPSP routines call this
+| routine. The operating system can do system dependent clean-up or
+| simply execute an "rte" as with the sample code below.
+|
+	.global		_060_fpsp_done
+_060_fpsp_done:
+	bral	 _060_isp_done	| do the same as isp_done
+
+|
+| _060_real_ovfl():
+|
+| This is the exit point for the 060FPSP when an enabled overflow exception
+| is present. The routine below should point to the operating system handler
+| for enabled overflow conditions. The exception stack frame is an overflow
+| stack frame. The FP state frame holds the EXCEPTIONAL OPERAND.
+|
+| The sample routine below simply clears the exception status bit and
+| does an "rte".
+|
+	.global		_060_real_ovfl
+_060_real_ovfl:
+	fsave		-(%sp)
+	move.w		#0x6000,0x2(%sp)
+	frestore	(%sp)+
+	bral		trap	| jump to trap handler
+
+
+|
+| _060_real_unfl():
+|
+| This is the exit point for the 060FPSP when an enabled underflow exception
+| is present. The routine below should point to the operating system handler
+| for enabled underflow conditions. The exception stack frame is an underflow
+| stack frame. The FP state frame holds the EXCEPTIONAL OPERAND.
+|
+| The sample routine below simply clears the exception status bit and
+| does an "rte".
+|
+	.global		_060_real_unfl
+_060_real_unfl:
+	fsave		-(%sp)
+	move.w		#0x6000,0x2(%sp)
+	frestore	(%sp)+
+	bral		trap	| jump to trap handler
+
+|
+| _060_real_operr():
+|
+| This is the exit point for the 060FPSP when an enabled operand error exception
+| is present. The routine below should point to the operating system handler
+| for enabled operand error exceptions. The exception stack frame is an operand error
+| stack frame. The FP state frame holds the source operand of the faulting
+| instruction.
+|
+| The sample routine below simply clears the exception status bit and
+| does an "rte".
+|
+	.global		_060_real_operr
+_060_real_operr:
+	fsave		-(%sp)
+	move.w		#0x6000,0x2(%sp)
+	frestore	(%sp)+
+	bral		trap	| jump to trap handler
+
+|
+| _060_real_snan():
+|
+| This is the exit point for the 060FPSP when an enabled signalling NaN exception
+| is present. The routine below should point to the operating system handler
+| for enabled signalling NaN exceptions. The exception stack frame is a signalling NaN
+| stack frame. The FP state frame holds the source operand of the faulting
+| instruction.
+|
+| The sample routine below simply clears the exception status bit and
+| does an "rte".
+|
+	.global		_060_real_snan
+_060_real_snan:
+	fsave		-(%sp)
+	move.w		#0x6000,0x2(%sp)
+	frestore	(%sp)+
+	bral		trap	| jump to trap handler
+
+|
+| _060_real_dz():
+|
+| This is the exit point for the 060FPSP when an enabled divide-by-zero exception
+| is present. The routine below should point to the operating system handler
+| for enabled divide-by-zero exceptions. The exception stack frame is a divide-by-zero
+| stack frame. The FP state frame holds the source operand of the faulting
+| instruction.
+|
+| The sample routine below simply clears the exception status bit and
+| does an "rte".
+|
+	.global		_060_real_dz
+_060_real_dz:
+	fsave		-(%sp)
+	move.w		#0x6000,0x2(%sp)
+	frestore	(%sp)+
+	bral		trap	| jump to trap handler
+
+|
+| _060_real_inex():
+|
+| This is the exit point for the 060FPSP when an enabled inexact exception
+| is present. The routine below should point to the operating system handler
+| for enabled inexact exceptions. The exception stack frame is an inexact
+| stack frame. The FP state frame holds the source operand of the faulting
+| instruction.
+|
+| The sample routine below simply clears the exception status bit and
+| does an "rte".
+|
+	.global		_060_real_inex
+_060_real_inex:
+	fsave		-(%sp)
+	move.w		#0x6000,0x2(%sp)
+	frestore	(%sp)+
+	bral		trap	| jump to trap handler
+
+|
+| _060_real_bsun():
+|
+| This is the exit point for the 060FPSP when an enabled bsun exception
+| is present. The routine below should point to the operating system handler
+| for enabled bsun exceptions. The exception stack frame is a bsun
+| stack frame.
+|
+| The sample routine below clears the exception status bit, clears the NaN
+| bit in the FPSR, and does an "rte". The instruction that caused the
+| bsun will now be re-executed but with the NaN FPSR bit cleared.
+|
+	.global		_060_real_bsun
+_060_real_bsun:
+|	fsave		-(%sp)
+
+	fmove.l		%fpsr,-(%sp)
+	andi.b		#0xfe,(%sp)
+	fmove.l		(%sp)+,%fpsr
+
+	bral		trap	| jump to trap handler
+
+|
+| _060_real_fline():
+|
+| This is the exit point for the 060FPSP when an F-Line Illegal exception is
+| encountered. Three different types of exceptions can enter the F-Line exception
+| vector number 11: FP Unimplemented Instructions, FP implemented instructions when
+| the FPU is disabled, and F-Line Illegal instructions. The 060FPSP module
+| _fpsp_fline() distinguishes between the three and acts appropriately. F-Line
+| Illegals branch here.
+|
+	.global		_060_real_fline
+_060_real_fline:
+	bral		trap	| jump to trap handler
+
+|
+| _060_real_fpu_disabled():
+|
+| This is the exit point for the 060FPSP when an FPU disabled exception is
+| encountered. Three different types of exceptions can enter the F-Line exception
+| vector number 11: FP Unimplemented Instructions, FP implemented instructions when
+| the FPU is disabled, and F-Line Illegal instructions. The 060FPSP module
+| _fpsp_fline() distinguishes between the three and acts appropriately. FPU disabled
+| exceptions branch here.
+|
+| The sample code below enables the FPU, sets the PC field in the exception stack
+| frame to the PC of the instruction causing the exception, and does an "rte".
+| The execution of the instruction then proceeds with an enabled floating-point
+| unit.
+|
+	.global		_060_real_fpu_disabled
+_060_real_fpu_disabled:
+	move.l		%d0,-(%sp)		| enable the fpu
+	.long	0x4E7A0808			|movec		pcr,%d0
+	bclr		#0x1,%d0
+	.long	0x4E7B0808			|movec		%d0,pcr
+	move.l		(%sp)+,%d0
+
+	move.l		0xc(%sp),0x2(%sp)	| set "Current PC"
+	rte
+
+|
+| _060_real_trap():
+|
+| This is the exit point for the 060FPSP when an emulated "ftrapcc" instruction
+| discovers that the trap condition is true, and so it must branch to the operating
+| system handler for trap exception vector number 7.
+|
+| The sample code below simply executes an "rte".
+|
+	.global		_060_real_trap
+_060_real_trap:
+	bral		trap	| jump to trap handler
+
+|############################################################################
+
+|#################################
+| (2) EXAMPLE PACKAGE ENTRY CODE #
+|#################################
+
+	.global		_060_fpsp_snan
+_060_fpsp_snan:
+	bra.l		_FP_CALL_TOP+0x80+0x00
+
+	.global		_060_fpsp_operr
+_060_fpsp_operr:
+	bra.l		_FP_CALL_TOP+0x80+0x08
+
+	.global		_060_fpsp_ovfl
+_060_fpsp_ovfl:
+	bra.l		_FP_CALL_TOP+0x80+0x10
+
+	.global		_060_fpsp_unfl
+_060_fpsp_unfl:
+	bra.l		_FP_CALL_TOP+0x80+0x18
+
+	.global		_060_fpsp_dz
+_060_fpsp_dz:
+	bra.l		_FP_CALL_TOP+0x80+0x20
+
+	.global		_060_fpsp_inex
+_060_fpsp_inex:
+	bra.l		_FP_CALL_TOP+0x80+0x28
+
+	.global		_060_fpsp_fline
+_060_fpsp_fline:
+	bra.l		_FP_CALL_TOP+0x80+0x30
+
+	.global		_060_fpsp_unsupp
+_060_fpsp_unsupp:
+	bra.l		_FP_CALL_TOP+0x80+0x38
+
+	.global		_060_fpsp_effadd
+_060_fpsp_effadd:
+	bra.l		_FP_CALL_TOP+0x80+0x40
+
+|############################################################################
+
+|###############################
+| (3) EXAMPLE CALL-OUT SECTION #
+|###############################
+
+| The size of this section MUST be 128 bytes!!!
+
+_FP_CALL_TOP:
+	.long	_060_real_bsun		- _FP_CALL_TOP
+	.long	_060_real_snan		- _FP_CALL_TOP
+	.long	_060_real_operr		- _FP_CALL_TOP
+	.long	_060_real_ovfl		- _FP_CALL_TOP
+	.long	_060_real_unfl		- _FP_CALL_TOP
+	.long	_060_real_dz		- _FP_CALL_TOP
+	.long	_060_real_inex		- _FP_CALL_TOP
+	.long	_060_real_fline		- _FP_CALL_TOP
+	.long	_060_real_fpu_disabled	- _FP_CALL_TOP
+	.long	_060_real_trap		- _FP_CALL_TOP
+	.long	_060_real_trace		- _FP_CALL_TOP
+	.long	_060_real_access	- _FP_CALL_TOP
+	.long	_060_fpsp_done		- _FP_CALL_TOP
+
+	.long	0x00000000, 0x00000000, 0x00000000
+
+	.long	_060_imem_read		- _FP_CALL_TOP
+	.long	_060_dmem_read		- _FP_CALL_TOP
+	.long	_060_dmem_write		- _FP_CALL_TOP
+	.long	_060_imem_read_word	- _FP_CALL_TOP
+	.long	_060_imem_read_long	- _FP_CALL_TOP
+	.long	_060_dmem_read_byte	- _FP_CALL_TOP
+	.long	_060_dmem_read_word	- _FP_CALL_TOP
+	.long	_060_dmem_read_long	- _FP_CALL_TOP
+	.long	_060_dmem_write_byte	- _FP_CALL_TOP
+	.long	_060_dmem_write_word	- _FP_CALL_TOP
+	.long	_060_dmem_write_long	- _FP_CALL_TOP
+
+	.long	0x00000000
+
+	.long	0x00000000, 0x00000000, 0x00000000, 0x00000000
+
+|############################################################################
+
+| 060 FPSP KERNEL PACKAGE NEEDS TO GO HERE!!!
+
+#include "fpsp.sa"
diff --git a/arch/m68k/ifpsp060/ftest.sa b/arch/m68k/ifpsp060/ftest.sa
new file mode 100644
index 0000000..b365bc2
--- /dev/null
+++ b/arch/m68k/ifpsp060/ftest.sa
@@ -0,0 +1,371 @@
+	dc.l	$60ff0000,$00d40000,$60ff0000,$016c0000
+	dc.l	$60ff0000,$01a80000,$54657374,$696e6720
+	dc.l	$36383036,$30204650,$53502073,$74617274
+	dc.l	$65643a0a,$00546573,$74696e67,$20363830
+	dc.l	$36302046,$50535020,$756e696d,$706c656d
+	dc.l	$656e7465,$6420696e,$73747275,$6374696f
+	dc.l	$6e207374,$61727465,$643a0a00,$54657374
+	dc.l	$696e6720,$36383036,$30204650,$53502065
+	dc.l	$78636570,$74696f6e,$20656e61,$626c6564
+	dc.l	$20737461,$72746564,$3a0a0070,$61737365
+	dc.l	$640a0020,$6661696c,$65640a00,$4a80660e
+	dc.l	$487affe9,$61ff0000,$1642588f,$4e752f01
+	dc.l	$61ff0000,$164c588f,$487affd9,$61ff0000
+	dc.l	$162a588f,$4e754e56,$fe8048e7,$3f3cf227
+	dc.l	$e0ff487a,$ff3461ff,$00001610,$588f42ae
+	dc.l	$fea0487b,$01700000,$058061ff,$000015fc
+	dc.l	$588f61ff,$00000588,$61ffffff,$ffa242ae
+	dc.l	$fea0487b,$01700000,$126c61ff,$000015dc
+	dc.l	$588f61ff,$00001280,$61ffffff,$ff8242ae
+	dc.l	$fea0487b,$01700000,$0b6461ff,$000015bc
+	dc.l	$61ff0000,$0b7261ff,$ffffff64,$42aefea0
+	dc.l	$487b0170,$00000de2,$61ff0000,$159e61ff
+	dc.l	$00000df0,$61ffffff,$ff464cdf,$3cfcf21f
+	dc.l	$d0ff4e5e,$4e754e56,$fe8048e7,$3f3cf227
+	dc.l	$e0ff487a,$feb161ff,$00001570,$588f42ae
+	dc.l	$fea0487b,$01700000,$00fe61ff,$0000155c
+	dc.l	$588f61ff,$00000110,$61ffffff,$ff024cdf
+	dc.l	$3cfcf21f,$d0ff4e5e,$4e754e56,$fe8048e7
+	dc.l	$3f3cf227,$e0ff487a,$fea461ff,$0000152c
+	dc.l	$588f42ae,$fea0487b,$01700000,$0f1461ff
+	dc.l	$00001518,$61ff0000,$0f1a61ff,$fffffec0
+	dc.l	$42aefea0,$487b0170,$00000fd2,$61ff0000
+	dc.l	$14fa61ff,$00000fd8,$61ffffff,$fea242ae
+	dc.l	$fea0487b,$01700000,$0b6061ff,$000014dc
+	dc.l	$61ff0000,$0b6a61ff,$fffffe84,$42aefea0
+	dc.l	$487b0170,$00000c22,$61ff0000,$14be61ff
+	dc.l	$00000c2c,$61ffffff,$fe6642ae,$fea0487b
+	dc.l	$01700000,$105661ff,$000014a0,$61ff0000
+	dc.l	$105a61ff,$fffffe48,$42aefea0,$487b0170
+	dc.l	$00000da2,$61ff0000,$148261ff,$00000da8
+	dc.l	$61ffffff,$fe2a4cdf,$3cfcf21f,$d0ff4e5e
+	dc.l	$4e750955,$6e696d70,$6c656d65,$6e746564
+	dc.l	$20465020,$696e7374,$72756374,$696f6e73
+	dc.l	$2e2e2e00,$52aefea0,$4cfb3fff,$01700000
+	dc.l	$1390f23b,$d0ff0170,$000013c6,$f23b9c00
+	dc.l	$01700000,$141c3d7c,$0000fea6,$48ee7fff
+	dc.l	$ff80f22e,$f0ffff20,$f22ebc00,$feb42d7c
+	dc.l	$40000000,$fe802d7c,$c90fdaa2,$fe842d7c
+	dc.l	$2168c235,$fe8844fc,$0000f22e,$480efe80
+	dc.l	$42eefea4,$48ee7fff,$ffc0f22e,$f0fffec0
+	dc.l	$f22ebc00,$fea82d7c,$bfbf0000,$ff202d7c
+	dc.l	$80000000,$ff242d7c,$00000000,$ff282d7c
+	dc.l	$08000208,$feb841fa,$ffc22d48,$febc61ff
+	dc.l	$00001288,$4a0066ff,$000012ae,$61ff0000
+	dc.l	$12b04a00,$66ff0000,$12a052ae,$fea04cfb
+	dc.l	$3fff0170,$000012da,$f23bd0ff,$01700000
+	dc.l	$1310f23b,$9c000170,$00001366,$3d7c0000
+	dc.l	$fea648ee,$7fffff80,$f22ef0ff,$ff20f22e
+	dc.l	$bc00feb4,$2d7c3ffe,$0000fe80,$2d7cc90f
+	dc.l	$daa2fe84,$2d7c2168,$c235fe88,$44fc0000
+	dc.l	$f22e480f,$fe8042ee,$fea448ee,$7fffffc0
+	dc.l	$f22ef0ff,$fec0f22e,$bc00fea8,$2d7c3fff
+	dc.l	$0000ff20,$2d7c8000,$0000ff24,$2d7c0000
+	dc.l	$0000ff28,$2d7c0000,$0208feb8,$41faffc2
+	dc.l	$2d48febc,$61ff0000,$11d24a00,$66ff0000
+	dc.l	$11f861ff,$000011fa,$4a0066ff,$000011ea
+	dc.l	$52aefea0,$4cfb3fff,$01700000,$1224f23b
+	dc.l	$d0ff0170,$0000125a,$f23b9c00,$01700000
+	dc.l	$12b03d7c,$0000fea6,$48ee7fff,$ff80f22e
+	dc.l	$f0ffff20,$f22ebc00,$feb444fc,$0000f200
+	dc.l	$5c3142ee,$fea448ee,$7fffffc0,$f22ef0ff
+	dc.l	$fec0f22e,$bc00fea8,$2d7c4000,$0000ff20
+	dc.l	$2d7c935d,$8dddff24,$2d7caaa8,$ac17ff28
+	dc.l	$2d7c0000,$0208feb8,$41faffc4,$2d48febc
+	dc.l	$61ff0000,$11364a00,$66ff0000,$115c61ff
+	dc.l	$0000115e,$4a0066ff,$0000114e,$52aefea0
+	dc.l	$4cfb3fff,$01700000,$1188f23b,$d0ff0170
+	dc.l	$000011be,$f23b9c00,$01700000,$1214f23c
+	dc.l	$88000f00,$00007e00,$3d7c0000,$fea648ee
+	dc.l	$7fffff80,$f22ef0ff,$ff20f22e,$bc00feb4
+	dc.l	$44fc0000,$f2470012,$42eefea4,$48ee7fff
+	dc.l	$ffc0f22e,$f0fffec0,$f22ebc00,$fea82d7c
+	dc.l	$0f008080,$feb841fa,$ffdc2d48,$febc61ff
+	dc.l	$000010a8,$4a0066ff,$000010ce,$61ff0000
+	dc.l	$10d04a00,$66ff0000,$10c052ae,$fea04cfb
+	dc.l	$3fff0170,$000010fa,$f23bd0ff,$01700000
+	dc.l	$1130f23b,$9c000170,$00001186,$f23c8800
+	dc.l	$0f000000,$7e023d7c,$0000fea6,$48ee7fff
+	dc.l	$ff80f22e,$f0ffff20,$f22ebc00,$feb444fc
+	dc.l	$0000f24f,$0012fffc,$42eefea4,$48ee7fff
+	dc.l	$ffc0f22e,$f0fffec0,$f22ebc00,$fea83d7c
+	dc.l	$ffffff9e,$2d7c0f00,$8080feb8,$41faffd4
+	dc.l	$2d48febc,$61ff0000,$10124a00,$66ff0000
+	dc.l	$103861ff,$0000103a,$4a0066ff,$0000102a
+	dc.l	$52aefea0,$4cfb3fff,$01700000,$1064f23b
+	dc.l	$d0ff0170,$0000109a,$f23b9c00,$01700000
+	dc.l	$10f0f23c,$88000f00,$00003d7c,$0000fea6
+	dc.l	$48ee7fff,$ff80f22e,$f0ffff20,$f22ebc00
+	dc.l	$feb444fc,$0000f27b,$0012abcd,$ef0142ee
+	dc.l	$fea448ee,$7fffffc0,$f22ef0ff,$fec0f22e
+	dc.l	$bc00fea8,$2d7c0f00,$8080feb8,$41faffd8
+	dc.l	$2d48febc,$61ff0000,$0f824a00,$66ff0000
+	dc.l	$0fa861ff,$00000faa,$4a0066ff,$00000f9a
+	dc.l	$42804e75,$09556e69,$6d706c65,$6d656e74
+	dc.l	$6564203c,$65613e2e,$2e2e0000,$52aefea0
+	dc.l	$4cfb3fff,$01700000,$0fb8f23b,$d0ff0170
+	dc.l	$00000fee,$f23b9c00,$01700000,$10443d7c
+	dc.l	$0000fea6,$48ee7fff,$ff80f22e,$f0ffff20
+	dc.l	$f22ebc00,$feb4f23c,$58000002,$44fc0000
+	dc.l	$f23c4823,$c0000000,$80000000,$00000000
+	dc.l	$42eefea4,$48ee7fff,$ffc0f22e,$f0fffec0
+	dc.l	$f22ebc00,$fea82d7c,$c0010000,$ff202d7c
+	dc.l	$80000000,$ff242d7c,$00000000,$ff282d7c
+	dc.l	$08000000,$feb841fa,$ffb82d48,$febc61ff
+	dc.l	$00000eb8,$4a0066ff,$00000ede,$61ff0000
+	dc.l	$0ee04a00,$66ff0000,$0ed052ae,$fea04cfb
+	dc.l	$3fff0170,$00000f0a,$f23bd0ff,$01700000
+	dc.l	$0f40f23b,$9c000170,$00000f96,$3d7c0000
+	dc.l	$fea648ee,$7fffff80,$f22ef0ff,$ff20f22e
+	dc.l	$bc00feb4,$44fc0000,$f23c4c18,$c1230001
+	dc.l	$23456789,$12345678,$42eefea4,$48ee7fff
+	dc.l	$ffc0f22e,$f0fffec0,$f22ebc00,$fea82d7c
+	dc.l	$3e660000,$ff202d7c,$d0ed23e8,$ff242d7c
+	dc.l	$d14035bc,$ff282d7c,$00000108,$feb841fa
+	dc.l	$ffb82d48,$febc61ff,$00000e10,$4a0066ff
+	dc.l	$00000e36,$61ff0000,$0e384a00,$66ff0000
+	dc.l	$0e2852ae,$fea04cfb,$3fff0170,$00000e62
+	dc.l	$f23bd0ff,$01700000,$0e98f23b,$9c000170
+	dc.l	$00000eee,$3d7c0000,$fea644fc,$000048ee
+	dc.l	$7fffff80,$f22ef0ff,$ff20f22e,$bc00feb4
+	dc.l	$f23c9800,$ffffffff,$ffffffff,$42eefea4
+	dc.l	$48ee7fff,$ffc0f22e,$f0fffec0,$f22ebc00
+	dc.l	$fea82d7c,$0000fff0,$feb42d7c,$0ffffff8
+	dc.l	$feb861ff,$00000d84,$4a0066ff,$00000daa
+	dc.l	$61ff0000,$0dac4a00,$66ff0000,$0d9c52ae
+	dc.l	$fea04cfb,$3fff0170,$00000dd6,$f23bd0ff
+	dc.l	$01700000,$0e0cf23b,$9c000170,$00000e62
+	dc.l	$3d7c0000,$fea644fc,$000048ee,$7fffff80
+	dc.l	$f22ef0ff,$ff20f22e,$bc00feb4,$f23c9400
+	dc.l	$ffffffff,$ffffffff,$42eefea4,$48ee7fff
+	dc.l	$ffc0f22e,$f0fffec0,$f22ebc00,$fea82d7c
+	dc.l	$0000fff0,$feb42d7c,$ffffffff,$febc61ff
+	dc.l	$00000cf8,$4a0066ff,$00000d1e,$61ff0000
+	dc.l	$0d204a00,$66ff0000,$0d1052ae,$fea04cfb
+	dc.l	$3fff0170,$00000d4a,$f23bd0ff,$01700000
+	dc.l	$0d80f23b,$9c000170,$00000dd6,$3d7c0000
+	dc.l	$fea644fc,$000048ee,$7fffff80,$f22ef0ff
+	dc.l	$ff20f22e,$bc00feb4,$f23c8c00,$ffffffff
+	dc.l	$ffffffff,$42eefea4,$48ee7fff,$ffc0f22e
+	dc.l	$f0fffec0,$f22ebc00,$fea82d7c,$0ffffff8
+	dc.l	$feb82d7c,$ffffffff,$febc61ff,$00000c6c
+	dc.l	$4a0066ff,$00000c92,$61ff0000,$0c944a00
+	dc.l	$66ff0000,$0c8452ae,$fea04cfb,$3fff0170
+	dc.l	$00000cbe,$f23bd0ff,$01700000,$0cf4f23b
+	dc.l	$9c000170,$00000d4a,$3d7c0000,$fea644fc
+	dc.l	$000048ee,$7fffff80,$f22ef0ff,$ff20f22e
+	dc.l	$bc00feb4,$f23c9c00,$ffffffff,$ffffffff
+	dc.l	$ffffffff,$42eefea4,$48ee7fff,$ffc0f22e
+	dc.l	$f0fffec0,$f22ebc00,$fea82d7c,$0000fff0
+	dc.l	$feb42d7c,$0ffffff8,$feb82d7c,$ffffffff
+	dc.l	$febc61ff,$00000bd4,$4a0066ff,$00000bfa
+	dc.l	$61ff0000,$0bfc4a00,$66ff0000,$0bec52ae
+	dc.l	$fea04cfb,$3fff0170,$00000c26,$f23bd0ff
+	dc.l	$01700000,$0c5cf23b,$9c000170,$00000cb2
+	dc.l	$f23c5800,$0001f23c,$58800002,$f23c5900
+	dc.l	$0003f23c,$59800004,$f23c5a00,$0005f23c
+	dc.l	$5a800006,$f23c5b00,$0007f23c,$5b800008
+	dc.l	$f23c8400,$00000000,$70aa3d7c,$0000fea6
+	dc.l	$48eeffff,$ff80f22e,$bc00feb4,$f22ef0ff
+	dc.l	$ff2044fc,$0000f227,$e80042ee,$fea4f22e
+	dc.l	$bc00fea8,$f23c4480,$7f800000,$f23c4580
+	dc.l	$7f800000,$f23c4680,$7f800000,$f23c4780
+	dc.l	$7f800000,$f21f4880,$f21f4980,$f21f4a80
+	dc.l	$f21f4b80,$48eeffff,$ffc0f22e,$f0fffec0
+	dc.l	$61ff0000,$0af64a00,$66ff0000,$0b1c61ff
+	dc.l	$00000b1e,$4a0066ff,$00000b0e,$52aefea0
+	dc.l	$4cfb3fff,$01700000,$0b48f23b,$d0ff0170
+	dc.l	$00000b7e,$f23b9c00,$01700000,$0bd4f23c
+	dc.l	$58000001,$f23c5880,$0002f23c,$59000003
+	dc.l	$f23c5980,$0004f23c,$5a000005,$f23c5a80
+	dc.l	$0006f23c,$5b000007,$f23c5b80,$0008f227
+	dc.l	$6b00f227,$6a00f227,$6900f227,$6800f22e
+	dc.l	$f0ffff20,$f23c4700,$7f800000,$f23c4600
+	dc.l	$7f800000,$f23c4500,$7f800000,$f23c4400
+	dc.l	$7f800000,$f23c8400,$00000000,$f23c8800
+	dc.l	$00000000,$70aa3d7c,$0000fea6,$48eeffff
+	dc.l	$ff80f22e,$bc00feb4,$44fc0000,$f21fd800
+	dc.l	$42eefea4,$f22ebc00,$fea848ee,$ffffffc0
+	dc.l	$f22ef0ff,$fec061ff,$00000a10,$4a0066ff
+	dc.l	$00000a36,$61ff0000,$0a384a00,$66ff0000
+	dc.l	$0a2852ae,$fea04cfb,$3fff0170,$00000a62
+	dc.l	$f23bd0ff,$01700000,$0a98f23b,$9c000170
+	dc.l	$00000aee,$f23c5800,$0001f23c,$58800002
+	dc.l	$f23c5900,$0003f23c,$59800004,$f23c5a00
+	dc.l	$0005f23c,$5a800006,$f23c5b00,$0007f23c
+	dc.l	$5b800008,$f23c8400,$00000000,$203cffff
+	dc.l	$ff003d7c,$0000fea6,$48eeffff,$ff80f22e
+	dc.l	$bc00feb4,$f22ef0ff,$ff2044fc,$0000f227
+	dc.l	$e80042ee,$fea4f22e,$bc00fea8,$48eeffff
+	dc.l	$ffc0f22e,$f0fffec0,$61ff0000,$095e4a00
+	dc.l	$66ff0000,$098461ff,$00000986,$4a0066ff
+	dc.l	$00000976,$42804e75,$094e6f6e,$2d6d6173
+	dc.l	$6b61626c,$65206f76,$6572666c,$6f772e2e
+	dc.l	$2e0051fc,$52aefea0,$4cfb3fff,$01700000
+	dc.l	$0990f23b,$d0ff0170,$000009c6,$f23b9c00
+	dc.l	$01700000,$0a1c3d7c,$0000fea6,$48ee7fff
+	dc.l	$ff80f22e,$f0ffff20,$f22ebc00,$feb4f23c
+	dc.l	$58000002,$2d7c7ffe,$0000fe80,$2d7c8000
+	dc.l	$0000fe84,$2d7c0000,$0000fe88,$44fc0000
+	dc.l	$f22e4823,$fe8042ee,$fea448ee,$7fffffc0
+	dc.l	$f22ef0ff,$fec0f22e,$bc00fea8,$2d7c7fff
+	dc.l	$0000ff20,$2d7c0000,$0000ff24,$2d7c0000
+	dc.l	$0000ff28,$2d7c0200,$1048feb8,$41faffc2
+	dc.l	$2d48febc,$61ff0000,$08824a00,$66ff0000
+	dc.l	$08a861ff,$000008aa,$4a0066ff,$0000089a
+	dc.l	$42804e75,$09456e61,$626c6564,$206f7665
+	dc.l	$72666c6f,$772e2e2e,$000051fc,$52aefea0
+	dc.l	$4cfb3fff,$01700000,$08b8f23b,$d0ff0170
+	dc.l	$000008ee,$f23b9c00,$01700000,$09443d7c
+	dc.l	$0000fea6,$48ee7fff,$ff80f22e,$f0ffff20
+	dc.l	$f23c9000,$00001000,$f22ebc00,$feb4f23c
+	dc.l	$58000002,$2d7c7ffe,$0000fe80,$2d7c8000
+	dc.l	$0000fe84,$2d7c0000,$0000fe88,$44fc0000
+	dc.l	$f22e4823,$fe8042ee,$fea448ee,$7fffffc0
+	dc.l	$f22ef0ff,$fec0f22e,$bc00fea8,$2d7c7fff
+	dc.l	$0000ff20,$2d7c0000,$0000ff24,$2d7c0000
+	dc.l	$0000ff28,$2d7c0200,$1048feb8,$41faffc2
+	dc.l	$2d48febc,$61ff0000,$07a24a00,$66ff0000
+	dc.l	$07c861ff,$000007ca,$4a0066ff,$000007ba
+	dc.l	$42804e75,$09456e61,$626c6564,$20756e64
+	dc.l	$6572666c,$6f772e2e,$2e0051fc,$52aefea0
+	dc.l	$4cfb3fff,$01700000,$07d8f23b,$d0ff0170
+	dc.l	$0000080e,$f23b9c00,$01700000,$08643d7c
+	dc.l	$0000fea6,$48ee7fff,$ff80f22e,$f0ffff20
+	dc.l	$f23c9000,$00000800,$f22ebc00,$feb42d7c
+	dc.l	$00000000,$fe802d7c,$80000000,$fe842d7c
+	dc.l	$00000000,$fe88f22e,$d080fe80,$44fc0000
+	dc.l	$f23c5820,$000242ee,$fea448ee,$7fffffc0
+	dc.l	$f22ef0ff,$fec0f22e,$bc00fea8,$2d7c0000
+	dc.l	$0000ff20,$2d7c4000,$0000ff24,$2d7c0000
+	dc.l	$0000ff28,$2d7c0000,$0800feb8,$41faffc2
+	dc.l	$2d48febc,$61ff0000,$06c24a00,$66ff0000
+	dc.l	$06e861ff,$000006ea,$4a0066ff,$000006da
+	dc.l	$42804e75,$094e6f6e,$2d6d6173,$6b61626c
+	dc.l	$6520756e,$64657266,$6c6f772e,$2e2e0000
+	dc.l	$52aefea0,$4cfb3fff,$01700000,$06f4f23b
+	dc.l	$d0ff0170,$0000072a,$f23b9c00,$01700000
+	dc.l	$07803d7c,$0000fea6,$48ee7fff,$ff80f22e
+	dc.l	$f0ffff20,$f22ebc00,$feb42d7c,$00000000
+	dc.l	$fe802d7c,$80000000,$fe842d7c,$00000000
+	dc.l	$fe88f22e,$d080fe80,$44fc0000,$f23c5820
+	dc.l	$000242ee,$fea448ee,$7fffffc0,$f22ef0ff
+	dc.l	$fec0f22e,$bc00fea8,$2d7c0000,$0000ff20
+	dc.l	$2d7c4000,$0000ff24,$2d7c0000,$0000ff28
+	dc.l	$2d7c0000,$0800feb8,$41faffc2,$2d48febc
+	dc.l	$61ff0000,$05e64a00,$66ff0000,$060c61ff
+	dc.l	$0000060e,$4a0066ff,$000005fe,$42804e75
+	dc.l	$09456e61,$626c6564,$20696e65,$78616374
+	dc.l	$2e2e2e00,$52aefea0,$4cfb3fff,$01700000
+	dc.l	$0620f23b,$d0ff0170,$00000656,$f23b9c00
+	dc.l	$01700000,$06ac3d7c,$0000fea6,$48ee7fff
+	dc.l	$ff80f22e,$f0ffff20,$f23c9000,$00000200
+	dc.l	$f22ebc00,$feb42d7c,$50000000,$fe802d7c
+	dc.l	$80000000,$fe842d7c,$00000000,$fe88f22e
+	dc.l	$d080fe80,$44fc0000,$f23c5822,$000242ee
+	dc.l	$fea448ee,$7fffffc0,$f22ef0ff,$fec0f22e
+	dc.l	$bc00fea8,$2d7c5000,$0000ff20,$2d7c8000
+	dc.l	$0000ff24,$2d7c0000,$0000ff28,$2d7c0000
+	dc.l	$0208feb8,$41faffc2,$2d48febc,$61ff0000
+	dc.l	$050a4a00,$66ff0000,$053061ff,$00000532
+	dc.l	$4a0066ff,$00000522,$42804e75,$09456e61
+	dc.l	$626c6564,$20534e41,$4e2e2e2e,$000051fc
+	dc.l	$52aefea0,$4cfb3fff,$01700000,$0544f23b
+	dc.l	$d0ff0170,$0000057a,$f23b9c00,$01700000
+	dc.l	$05d03d7c,$0000fea6,$48ee7fff,$ff80f22e
+	dc.l	$f0ffff20,$f23c9000,$00004000,$f22ebc00
+	dc.l	$feb42d7c,$ffff0000,$fe802d7c,$00000000
+	dc.l	$fe842d7c,$00000001,$fe88f22e,$d080fe80
+	dc.l	$44fc0000,$f23c5822,$000242ee,$fea448ee
+	dc.l	$7fffffc0,$f22ef0ff,$fec0f22e,$bc00fea8
+	dc.l	$2d7cffff,$0000ff20,$2d7c0000,$0000ff24
+	dc.l	$2d7c0000,$0001ff28,$2d7c0900,$4080feb8
+	dc.l	$41faffc2,$2d48febc,$61ff0000,$042e4a00
+	dc.l	$66ff0000,$045461ff,$00000456,$4a0066ff
+	dc.l	$00000446,$42804e75,$09456e61,$626c6564
+	dc.l	$204f5045,$52522e2e,$2e0051fc,$52aefea0
+	dc.l	$4cfb3fff,$01700000,$0468f23b,$d0ff0170
+	dc.l	$0000049e,$f23b9c00,$01700000,$04f43d7c
+	dc.l	$0000fea6,$48ee7fff,$ff80f22e,$f0ffff20
+	dc.l	$f23c9000,$00002000,$f22ebc00,$feb42d7c
+	dc.l	$ffff0000,$fe802d7c,$00000000,$fe842d7c
+	dc.l	$00000000,$fe88f22e,$d080fe80,$44fc0000
+	dc.l	$f23c4422,$7f800000,$42eefea4,$48ee7fff
+	dc.l	$ffc0f22e,$f0fffec0,$f22ebc00,$fea82d7c
+	dc.l	$ffff0000,$ff202d7c,$00000000,$ff242d7c
+	dc.l	$00000000,$ff282d7c,$01002080,$feb841fa
+	dc.l	$ffc02d48,$febc61ff,$00000350,$4a0066ff
+	dc.l	$00000376,$61ff0000,$03784a00,$66ff0000
+	dc.l	$03684280,$4e750945,$6e61626c,$65642044
+	dc.l	$5a2e2e2e,$000051fc,$52aefea0,$4cfb3fff
+	dc.l	$01700000,$038cf23b,$d0ff0170,$000003c2
+	dc.l	$f23b9c00,$01700000,$04183d7c,$0000fea6
+	dc.l	$48ee7fff,$ff80f22e,$f0ffff20,$f23c9000
+	dc.l	$00000400,$f22ebc00,$feb42d7c,$40000000
+	dc.l	$fe802d7c,$80000000,$fe842d7c,$00000000
+	dc.l	$fe88f22e,$d080fe80,$44fc0000,$f23c5820
+	dc.l	$000042ee,$fea448ee,$7fffffc0,$f22ef0ff
+	dc.l	$fec0f22e,$bc00fea8,$2d7c4000,$0000ff20
+	dc.l	$2d7c8000,$0000ff24,$2d7c0000,$0000ff28
+	dc.l	$2d7c0200,$0410feb8,$41faffc2,$2d48febc
+	dc.l	$61ff0000,$02764a00,$66ff0000,$029c61ff
+	dc.l	$0000029e,$4a0066ff,$0000028e,$42804e75
+	dc.l	$09556e69,$6d706c65,$6d656e74,$65642064
+	dc.l	$61746120,$74797065,$2f666f72,$6d61742e
+	dc.l	$2e2e0000,$52aefea0,$4cfb3fff,$01700000
+	dc.l	$02a0f23b,$d0ff0170,$000002d6,$f23b9c00
+	dc.l	$01700000,$032c3d7c,$0000fea6,$48ee7fff
+	dc.l	$ff80f22e,$f0ffff20,$f22ebc00,$feb42d7c
+	dc.l	$c03f0000,$fe802d7c,$00000000,$fe842d7c
+	dc.l	$00000001,$fe88f23c,$58000002,$44fc0000
+	dc.l	$f22e4823,$fe8042ee,$fea448ee,$7fffffc0
+	dc.l	$f22ef0ff,$fec0f22e,$bc00fea8,$2d7cc001
+	dc.l	$0000ff20,$2d7c8000,$0000ff24,$2d7c0000
+	dc.l	$0000ff28,$2d7c0800,$0000feb8,$41faffc2
+	dc.l	$2d48febc,$61ff0000,$01924a00,$66ff0000
+	dc.l	$01b861ff,$000001ba,$4a0066ff,$000001aa
+	dc.l	$52aefea0,$4cfb3fff,$01700000,$01e4f23b
+	dc.l	$d0ff0170,$0000021a,$f23b9c00,$01700000
+	dc.l	$02703d7c,$0000fea6,$48ee7fff,$ff80f22e
+	dc.l	$f0ffff20,$f22ebc00,$feb42d7c,$80000000
+	dc.l	$fe802d7c,$01000000,$fe842d7c,$00000000
+	dc.l	$fe88f23c,$40007fff,$ffff44fc,$0000f22e
+	dc.l	$4823fe80,$42eefea4,$48ee7fff,$ffc0f22e
+	dc.l	$f0fffec0,$f22ebc00,$fea82d7c,$80170000
+	dc.l	$ff202d7c,$fffffffe,$ff242d7c,$00000000
+	dc.l	$ff282d7c,$08000000,$feb841fa,$ffc22d48
+	dc.l	$febc61ff,$000000d4,$4a0066ff,$000000fa
+	dc.l	$61ff0000,$00fc4a00,$66ff0000,$00ec52ae
+	dc.l	$fea04cfb,$3fff0170,$00000126,$f23bd0ff
+	dc.l	$01700000,$015cf23b,$9c000170,$000001b2
+	dc.l	$3d7c0000,$fea648ee,$7fffff80,$f22ef0ff
+	dc.l	$ff20f22e,$bc00feb4,$2d7cc123,$0001fe80
+	dc.l	$2d7c2345,$6789fe84,$2d7c1234,$5678fe88
+	dc.l	$44fc0000,$f22e4c18,$fe8042ee,$fea448ee
+	dc.l	$7fffffc0,$f22ef0ff,$fec0f22e,$bc00fea8
+	dc.l	$2d7c3e66,$0000ff20,$2d7cd0ed,$23e8ff24
+	dc.l	$2d7cd140,$35bcff28,$2d7c0000,$0108feb8
+	dc.l	$41faffc2,$2d48febc,$61ff0000,$001e4a00
+	dc.l	$66ff0000,$004461ff,$00000046,$4a0066ff
+	dc.l	$00000036,$42804e75,$41eeff80,$43eeffc0
+	dc.l	$700eb189,$66ff0000,$001c51c8,$fff6302e
+	dc.l	$fea6322e,$fea4b041,$66ff0000,$00084280
+	dc.l	$4e757001,$4e75222e,$fea07001,$4e7541ee
+	dc.l	$ff2043ee,$fec07017,$b18966ff,$0000002c
+	dc.l	$51c8fff6,$41eefeb4,$43eefea8,$b18966ff
+	dc.l	$00000018,$b18966ff,$00000010,$b18966ff
+	dc.l	$00000008,$42804e75,$70014e75,$acacacac
+	dc.l	$acacacac,$acacacac,$acacacac,$acacacac
+	dc.l	$acacacac,$acacacac,$acacacac,$acacacac
+	dc.l	$acacacac,$acacacac,$acacacac,$acacacac
+	dc.l	$acacacac,$acacacac,$acacacac,$7fff0000
+	dc.l	$ffffffff,$ffffffff,$7fff0000,$ffffffff
+	dc.l	$ffffffff,$7fff0000,$ffffffff,$ffffffff
+	dc.l	$7fff0000,$ffffffff,$ffffffff,$7fff0000
+	dc.l	$ffffffff,$ffffffff,$7fff0000,$ffffffff
+	dc.l	$ffffffff,$7fff0000,$ffffffff,$ffffffff
+	dc.l	$7fff0000,$ffffffff,$ffffffff,$00000000
+	dc.l	$00000000,$00000000,$2f00203a,$e884487b
+	dc.l	$0930ffff,$e880202f,$00044e74,$00042f00
+	dc.l	$203ae872,$487b0930,$ffffe86a,$202f0004
+	dc.l	$4e740004,$00000000,$00000000,$00000000
diff --git a/arch/m68k/ifpsp060/ilsp.doc b/arch/m68k/ifpsp060/ilsp.doc
new file mode 100644
index 0000000..f6fae6d
--- /dev/null
+++ b/arch/m68k/ifpsp060/ilsp.doc
@@ -0,0 +1,150 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+68060 INTEGER SOFTWARE PACKAGE (Library version)
+-------------------------------------------------
+
+The file ilsp.s contains the "Library version" of the
+68060 Integer Software Package. Routines included in this
+module can be used to emulate 64-bit divide and multiply,
+and the "cmp2" instruction. These instructions are not
+implemented in hardware on the 68060 and normally take
+exception vector #61 "Unimplemented Integer Instruction".
+
+By re-compiling a program that uses these instructions, and
+making subroutine calls in place of the unimplemented
+instructions, a program can avoid the overhead associated with
+taking the exception.
+
+Release file format:
+--------------------
+The file ilsp.sa is essentially a hexadecimal image of the
+release package. This is the ONLY format which will be supported.
+The hex image was created by assembling the source code and
+then converting the resulting binary output image into an
+ASCII text file. The hexadecimal numbers are listed
+using the Motorola Assembly Syntax assembler directive "dc.l"
+(define constant longword). The file can be converted to other
+assembly syntaxes by using any word processor with a global
+search and replace function.
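+
+For example, a line of the image such as
+
+	dc.l	$60ff0000,$01fe0000,$60ff0000,$02080000
+
+would, for the GNU assembler, become
+
+	.long	0x60ff0000,0x01fe0000,0x60ff0000,0x02080000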
+
+To assist in assembling and linking this module with other modules,
+the installer should add a symbolic label to the top of the file.
+This will allow calling routines to access the entry points
+of this package.
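+
+For instance, assuming ilsp.sa has first been converted to GNU assembler
+syntax as described above, a small wrapper file could define the label and
+then pull in the image (a sketch only; the label name follows the entry-point
+table at the end of this file, and the wrapper itself is not part of the
+release):
+
+	.global	_060ILSP_TOP	| entry-point label added by the installer
+_060ILSP_TOP:
+	| converted hex image of the package follows immediately
+#include "ilsp.sa"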
+
+The source code ilsp.s has also been included but only for
+documentation purposes.
+
+Release file structure:
+-----------------------
+The file ilsp.sa contains an "Entry-Point" section and a
+code section. The ILSP has no "Call-Out" section. The first section
+is the "Entry-Point" section. In order to access a function in the
+package, a program must "bsr" or "jsr" to the location listed
+below in "68060ILSP Entry Points" that corresponds to the desired
+function. A branch instruction located at the selected entry point
+within the package will then enter the correct emulation code routine.
+
+The entry point addresses at the beginning of the package will remain
+fixed so that a program calling the routines will not have to be
+re-compiled with every new 68060ILSP release.
+
+For example, to use a 64-bit multiply instruction,
+do a "bsr" or "jsr" to the entry point defined by
+the 060ILSP entry table. A compiler generated code sequence
+for unsigned multiply could look like:
+
+# mulu.l <ea>,Dh:Dl
+# mulu.l _multiplier,%d1:%d0
+
+	subq.l	&0x8,%sp	# make room for result on stack
+	pea	(%sp)		# pass: result addr on stack
+	mov.l	%d0,-(%sp)	# pass: multiplicand on stack
+	mov.l	_multiplier,-(%sp) # pass: multiplier on stack
+	bsr.l	_060ILSP_TOP+0x18 # branch to multiply routine
+	add.l	&0xc,%sp	# clear arguments from stack
+	mov.l	(%sp)+,%d1	# load result[63:32]
+	mov.l	(%sp)+,%d0	# load result[31:0]
+
+For a divide:
+
+# divu.l <ea>,Dr:Dq
+# divu.l _divisor,%d1:%d0
+
+	subq.l	&0x8,%sp	# make room for result on stack
+	pea	(%sp)		# pass: result addr on stack
+	mov.l	%d0,-(%sp)	# pass: dividend lo on stack
+	mov.l	%d1,-(%sp)	# pass: dividend hi on stack
+	mov.l	_divisor,-(%sp) # pass: divisor on stack
+	bsr.l	_060ILSP_TOP+0x08 # branch to divide routine
+	add.l	&0xc,%sp	# clear arguments from stack
+	mov.l	(%sp)+,%d1	# load remainder
+	mov.l	(%sp)+,%d0	# load quotient
+
+The library routines also return the correct condition code
+register value. If this is important, then the caller of the library
+routine must make sure that the value isn't lost while popping
+other items off of the stack.
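+
+As a minimal sketch of that point, applied to the divide sequence above (the
+use of %d2 as a scratch register here is arbitrary), the CCR would have to be
+captured right after the "bsr.l", because the "add.l" that pops the arguments
+sets the condition codes itself:
+
+	bsr.l	_060ILSP_TOP+0x08 # branch to divide routine
+	mov.w	%cc,%d2		# save CCR before the add.l below changes it
+	add.l	&0xc,%sp	# clear arguments from stack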
+
+An example of using the "cmp2" instruction is as follows:
+
+# cmp2.l <ea>,Rn
+# cmp2.l _bounds,%d0
+
+	pea	_bounds		# pass ptr to bounds
+	mov.l	%d0,-(%sp)	# pass Rn
+	bsr.l	_060ILSP_TOP+0x48 # branch to "cmp2" routine
+	mov.w	%cc,_tmp	# save off condition codes
+	addq.l	&0x8,%sp	# clear arguments from stack
+
+Exception reporting:
+--------------------
+If the instruction being emulated is a divide and the source
+operand is a zero, then the library routine, as its last
+instruction, executes an implemented divide using a zero
+source operand so that an "Integer Divide-by-Zero" exception
+will be taken. Although the exception stack frame will not
+point to the correct instruction, the user will at least be able
+to record that such an event occurred if desired.
+
+68060ILSP entry points:
+-----------------------
+_060ILSP_TOP:
+0x000:	_060LSP__idivs64_
+0x008:	_060LSP__idivu64_
+
+0x010:	_060LSP__imuls64_
+0x018:	_060LSP__imulu64_
+
+0x020:	_060LSP__cmp2_Ab_
+0x028:	_060LSP__cmp2_Aw_
+0x030:	_060LSP__cmp2_Al_
+0x038:	_060LSP__cmp2_Db_
+0x040:	_060LSP__cmp2_Dw_
+0x048:	_060LSP__cmp2_Dl_
diff --git a/arch/m68k/ifpsp060/ilsp.sa b/arch/m68k/ifpsp060/ilsp.sa
new file mode 100644
index 0000000..2757d50
--- /dev/null
+++ b/arch/m68k/ifpsp060/ilsp.sa
@@ -0,0 +1,101 @@
+	dc.l	$60ff0000,$01fe0000,$60ff0000,$02080000
+	dc.l	$60ff0000,$04900000,$60ff0000,$04080000
+	dc.l	$60ff0000,$051e0000,$60ff0000,$053c0000
+	dc.l	$60ff0000,$055a0000,$60ff0000,$05740000
+	dc.l	$60ff0000,$05940000,$60ff0000,$05b40000
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$4e56fff0,$48e73f00,$42eefff0,$50eeffff
+	dc.l	$60104e56,$fff048e7,$3f0042ee,$fff051ee
+	dc.l	$ffff2e2e,$00086700,$00ae2a2e,$000c2c2e
+	dc.l	$00104a2e,$ffff671a,$4a875dee,$fffe6a02
+	dc.l	$44874a85,$5deefffd,$6a0844fc,$00004086
+	dc.l	$40854a85,$66164a86,$67000046,$be866306
+	dc.l	$cb466000,$00124c47,$6005600a,$be85634c
+	dc.l	$61ff0000,$00864a2e,$ffff6724,$4a2efffd
+	dc.l	$67024485,$102efffe,$b12efffd,$670c0c86
+	dc.l	$80000000,$62264486,$60060806,$001f661c
+	dc.l	$026e0010,$fff044ee,$fff04a86,$48f60060
+	dc.l	$01610014,$4cdf00fc,$4e5e4e75,$2a2e000c
+	dc.l	$2c2e0010,$026e001c,$fff0006e,$0002fff0
+	dc.l	$44eefff0,$60d62dae,$000c0161,$00142dae
+	dc.l	$00100162,$00140004,$44eefff0,$4cdf00fc
+	dc.l	$4e5e80fc,$00004e75,$0c870000,$ffff621e
+	dc.l	$42814845,$48463a06,$8ac73205,$48463a06
+	dc.l	$8ac74841,$32054245,$48452c01,$4e7542ae
+	dc.l	$fff8422e,$fffc4281,$0807001f,$660e52ae
+	dc.l	$fff8e38f,$e38ee395,$6000ffee,$26072405
+	dc.l	$48424843,$b4436606,$323cffff,$600a2205
+	dc.l	$82c30281,$0000ffff,$2f064246,$48462607
+	dc.l	$2401c4c7,$4843c6c1,$28059883,$48443004
+	dc.l	$38064a40,$6600000a,$b4846304,$538160de
+	dc.l	$2f052c01,$48462a07,$61ff0000,$006a2405
+	dc.l	$26062a1f,$2c1f9c83,$9b8264ff,$0000001a
+	dc.l	$53814282,$26074843,$4243dc83,$db822607
+	dc.l	$42434843,$da834a2e,$fffc6616,$3d41fff4
+	dc.l	$42814845,$48463a06,$424650ee,$fffc6000
+	dc.l	$ff6c3d41,$fff63c05,$48464845,$2e2efff8
+	dc.l	$670a5387,$e28de296,$51cffffa,$2a062c2e
+	dc.l	$fff44e75,$24062606,$28054843,$4844ccc5
+	dc.l	$cac3c4c4,$c6c44284,$4846dc45,$d744dc42
+	dc.l	$d7444846,$42454242,$48454842,$da82da83
+	dc.l	$4e754e56,$fffc48e7,$380042ee,$fffc202e
+	dc.l	$00086700,$005a222e,$000c6700,$00522400
+	dc.l	$26002801,$48434844,$c0c1c2c3,$c4c4c6c4
+	dc.l	$42844840,$d041d784,$d042d784,$48404241
+	dc.l	$42424841,$4842d282,$d283382e,$fffc0204
+	dc.l	$00104a81,$6a040004,$000844c4,$c34048f6
+	dc.l	$00030161,$00104cdf,$001c4e5e,$4e754280
+	dc.l	$4281382e,$fffc0204,$00100004,$000444c4
+	dc.l	$60da4e56,$fffc48e7,$3c0042ee,$fffc202e
+	dc.l	$000867da,$222e000c,$67d44205,$4a806c06
+	dc.l	$44800005,$00014a81,$6c064481,$0a050001
+	dc.l	$24002600,$28014843,$4844c0c1,$c2c3c4c4
+	dc.l	$c6c44284,$4840d041,$d784d042,$d7844840
+	dc.l	$42414242,$48414842,$d282d283,$4a056708
+	dc.l	$46804681,$5280d384,$382efffc,$02040010
+	dc.l	$4a816a04,$00040008,$44c4c340,$48f60003
+	dc.l	$01610010,$4cdf003c,$4e5e4e75,$42804281
+	dc.l	$382efffc,$02040010,$00040004,$44c460da
+	dc.l	$4e56fffc,$48e73800,$42eefffc,$242e0008
+	dc.l	$10360161,$000c1236,$0162000c,$000149c0
+	dc.l	$49c16000,$00b84e56,$fffc48e7,$380042ee
+	dc.l	$fffc242e,$00083036,$0161000c,$32360162
+	dc.l	$000c0002,$48c048c1,$60000092,$4e56fffc
+	dc.l	$48e73800,$42eefffc,$242e0008,$20360161
+	dc.l	$000c2236,$0162000c,$00046000,$00704e56
+	dc.l	$fffc48e7,$380042ee,$fffc242e,$00081036
+	dc.l	$0161000c,$12360162,$000c0001,$49c049c1
+	dc.l	$49c26000,$00484e56,$fffc48e7,$380042ee
+	dc.l	$fffc242e,$00083036,$0161000c,$32360162
+	dc.l	$000c0002,$48c048c1,$48c26000,$00204e56
+	dc.l	$fffc48e7,$380042ee,$fffc242e,$00082036
+	dc.l	$0161000c,$22360162,$000c0004,$948042c3
+	dc.l	$02030004,$9280b282,$42c48604,$02030005
+	dc.l	$382efffc,$0204001a,$880344c4,$4cdf001c
+	dc.l	$4e5e4e75,$00000000,$00000000,$00000000
diff --git a/arch/m68k/ifpsp060/iskeleton.S b/arch/m68k/ifpsp060/iskeleton.S
new file mode 100644
index 0000000..803a6ec
--- /dev/null
+++ b/arch/m68k/ifpsp060/iskeleton.S
@@ -0,0 +1,349 @@
+|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+|MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+|M68000 Hi-Performance Microprocessor Division
+|M68060 Software Package
+|Production Release P1.00 -- October 10, 1994
+|
+|M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+|
+|THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+|To the maximum extent permitted by applicable law,
+|MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+|INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+|and any warranty against infringement with regard to the SOFTWARE
+|(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+|
+|To the maximum extent permitted by applicable law,
+|IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+|(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+|BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+|ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+|Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+|
+|You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+|so long as this entire notice is retained without alteration in any modified and/or
+|redistributed versions, and that such modified versions are clearly identified as such.
+|No licenses are granted by implication, estoppel or otherwise under any patents
+|or trademarks of Motorola, Inc.
+|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+| iskeleton.s
+|
+| This file contains:
+|	(1) example "Call-out"s
+|	(2) example package entry code
+|	(3) example "Call-out" table
+|
+
+#include <linux/linkage.h>
+#include <asm/entry.h>
+#include <asm/offsets.h>
+
+
+|################################
+| (1) EXAMPLE CALL-OUTS		#
+|				#
+| _060_isp_done()		#
+| _060_real_chk()		#
+| _060_real_divbyzero()		#
+|				#
+| _060_real_cas()		#
+| _060_real_cas2()		#
+| _060_real_lock_page()		#
+| _060_real_unlock_page()	#
+|################################
+
+|
+| _060_isp_done():
+|
+| This is an example main exit point for the Unimplemented Integer
+| Instruction exception handler. For a normal exit, the
+| _isp_unimp() branches to here so that the operating system
+| can do any clean-up desired. The stack frame is the
+| Unimplemented Integer Instruction stack frame with
+| the PC pointing to the instruction following the instruction
+| just emulated.
+| To simply continue execution at the next instruction, just
+| do an "rte".
+|
+| Linux/68k: If returning to user space, check whether rescheduling or
+| signal delivery is needed.
+
+	.global		_060_isp_done
+_060_isp_done:
+	btst	#0x5,%sp@		| supervisor bit set in saved SR?
+	beq	.Lnotkern
+	rte
+.Lnotkern:
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	tstb	%curptr@(TASK_NEEDRESCHED)
+	jne	ret_from_exception	| deliver signals,
+					| reschedule etc..
+	RESTORE_ALL
+
+|
+| _060_real_chk():
+|
+| This is an alternate exit point for the Unimplemented Integer
+| Instruction exception handler. If the instruction was a "chk2"
+| and the operand was out of bounds, then _isp_unimp() creates
+| a CHK exception stack frame from the Unimplemented Integer Instruction
+| stack frame and branches to this routine.
+|
+| Linux/68k: commented out test for tracing
+
+	.global		_060_real_chk
+_060_real_chk:
+|	tst.b		(%sp)			| is tracing enabled?
+|	bpls		real_chk_end		| no
+
+|
+|	    CHK FRAME		   TRACE FRAME
+|	*****************	*****************
+|	*   Current PC	*	*   Current PC	*
+|	*****************	*****************
+|	* 0x2 *  0x018	*	* 0x2 *  0x024	*
+|	*****************	*****************
+|	*     Next	*	*     Next	*
+|	*      PC	*	*      PC	*
+|	*****************	*****************
+|	*      SR	*	*      SR	*
+|	*****************	*****************
+|
+|	move.b		#0x24,0x7(%sp)		| set trace vecno
+|	bral		_060_real_trace
+
+real_chk_end:
+	bral		trap			| jump to trap handler
+
+|
+| _060_real_divbyzero:
+|
+| This is an alternate exit point for the Unimplemented Integer
+| Instruction exception handler isp_unimp(). If the instruction is a 64-bit
+| integer divide where the source operand is a zero, then the _isp_unimp()
+| creates a Divide-by-zero exception stack frame from the Unimplemented
+| Integer Instruction stack frame and branches to this routine.
+|
+| Remember that a trace exception may be pending. The code below performs
+| no action associated with the divide-by-zero exception itself. If tracing
+| is enabled, then it creates a Trace exception stack frame from the
+| Divide-by-Zero exception stack frame and branches to the _real_trace()
+| entry point.
+|
+| Linux/68k: commented out test for tracing
+
+	.global		_060_real_divbyzero
+_060_real_divbyzero:
+|	tst.b		(%sp)			| is tracing enabled?
+|	bpls		real_divbyzero_end	| no
+
+|
+|	 DIVBYZERO FRAME	   TRACE FRAME
+|	*****************	*****************
+|	*   Current PC	*	*   Current PC	*
+|	*****************	*****************
+|	* 0x2 *  0x014	*	* 0x2 *  0x024	*
+|	*****************	*****************
+|	*     Next	*	*     Next	*
+|	*      PC	*	*      PC	*
+|	*****************	*****************
+|	*      SR	*	*      SR	*
+|	*****************	*****************
+|
+|	move.b		#0x24,0x7(%sp)		| set trace vecno
+|	bral		_060_real_trace
+
+real_divbyzero_end:
+	bral		trap			| jump to trap handler
+
+|##########################
+
+|
+| _060_real_cas():
+|
+| Entry point for the selected cas emulation code implementation.
+| If the implementation provided by the 68060ISP is sufficient,
+| then this routine simply re-enters the package through _isp_cas.
+|
+	.global		_060_real_cas
+_060_real_cas:
+	bral		_I_CALL_TOP+0x80+0x08
+
+|
+| _060_real_cas2():
+|
+| Entry point for the selected cas2 emulation code implementation.
+| If the implementation provided by the 68060ISP is sufficient,
+| then this routine simply re-enters the package through _isp_cas2.
+|
+	.global		_060_real_cas2
+_060_real_cas2:
+	bral		_I_CALL_TOP+0x80+0x10
+
+|
+| _060_real_lock_page():
+|
+| Entry point for the operating system's routine to "lock" a page
+| from being paged out. This routine is needed by the cas/cas2
+| algorithms so that no page faults occur within the "core" code
+| region. Note: the routine must lock two pages if the operand
+| spans two pages.
+| NOTE: THE ROUTINE SHOULD RETURN AN FSLW VALUE IN D0 ON FAILURE
+| SO THAT THE 060SP CAN CREATE A PROPER ACCESS ERROR FRAME.
+| Arguments:
+|	a0 = operand address
+|	d0 = `xxxxxxff -> supervisor; `xxxxxx00 -> user
+|	d1 = `xxxxxxff -> longword; `xxxxxx00 -> word
+| Expected outputs:
+|	d0 = 0 -> success; non-zero -> failure
+|
+| Linux/m68k: Make sure the page is properly paged in, so we use
+| plpaw and handle any exception here. The kernel must not be
+| preempted until _060_real_unlock_page(), so that the page stays mapped.
+|
+	.global		_060_real_lock_page
+_060_real_lock_page:
+	move.l	%d2,-(%sp)
+	| load sfc/dfc
+	tst.b	%d0
+	jne	1f
+	moveq	#1,%d0
+	jra	2f
+1:	moveq	#5,%d0
+2:	movec.l	%dfc,%d2
+	movec.l	%d0,%dfc
+	movec.l	%d0,%sfc
+
+	clr.l	%d0
+	| prefetch address
+	.chip	68060
+	move.l	%a0,%a1
+1:	plpaw	(%a1)
+	addq.w	#1,%a0
+	tst.b	%d1
+	jeq	2f
+	addq.w	#2,%a0
+2:	plpaw	(%a0)
+3:	.chip	68k
+
+	| restore sfc/dfc
+	movec.l	%d2,%dfc
+	movec.l	%d2,%sfc
+	move.l	(%sp)+,%d2
+	rts
+
+.section __ex_table,"a"
+	.align	4
+	.long	1b,11f
+	.long	2b,21f
+.previous
+.section .fixup,"ax"
+	.even
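+| Note on the fixups below: if one of the plpaw probes above faults,
+| the corresponding handler builds a non-zero failure code in %d0
+| (intended as the FSLW value the NOTE in the header asks for) and
+| rejoins the exit path at 3b so that sfc/dfc are restored first.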
+11:	move.l	#0x020003c0,%d0
+	or.l	%d2,%d0
+	swap	%d0
+	jra	3b
+21:	move.l	#0x02000bc0,%d0
+	or.l	%d2,%d0
+	swap	%d0
+	jra	3b
+.previous
+
+|
+| _060_real_unlock_page():
+|
+| Entry point for the operating system's routine to "unlock" a
+| page that has been "locked" previously with _060_real_lock_page().
+| Note: the routine must unlock two pages if the operand spans
+| two pages.
+| Arguments:
+|	a0 = operand address
+|	d0 = `xxxxxxff -> supervisor; `xxxxxx00 -> user
+|	d1 = `xxxxxxff -> longword; `xxxxxx00 -> word
+|
+| Linux/m68k: perhaps reenable preemption here...
+
+	.global		_060_real_unlock_page
+_060_real_unlock_page:
+	clr.l		%d0
+	rts
+
+|###########################################################################
+
+|#################################
+| (2) EXAMPLE PACKAGE ENTRY CODE #
+|#################################
+
+	.global		_060_isp_unimp
+_060_isp_unimp:
+	bral		_I_CALL_TOP+0x80+0x00
+
+	.global		_060_isp_cas
+_060_isp_cas:
+	bral		_I_CALL_TOP+0x80+0x08
+
+	.global		_060_isp_cas2
+_060_isp_cas2:
+	bral		_I_CALL_TOP+0x80+0x10
+
+	.global		_060_isp_cas_finish
+_060_isp_cas_finish:
+	bral		_I_CALL_TOP+0x80+0x18
+
+	.global		_060_isp_cas2_finish
+_060_isp_cas2_finish:
+	bral		_I_CALL_TOP+0x80+0x20
+
+	.global		_060_isp_cas_inrange
+_060_isp_cas_inrange:
+	bral		_I_CALL_TOP+0x80+0x28
+
+	.global		_060_isp_cas_terminate
+_060_isp_cas_terminate:
+	bral		_I_CALL_TOP+0x80+0x30
+
+	.global		_060_isp_cas_restart
+_060_isp_cas_restart:
+	bral		_I_CALL_TOP+0x80+0x38
+
+|###########################################################################
+
+|###############################
+| (3) EXAMPLE CALL-OUT SECTION #
+|###############################
+
+| The size of this section MUST be 128 bytes!!!
+
+_I_CALL_TOP:
+	.long	_060_real_chk		- _I_CALL_TOP
+	.long	_060_real_divbyzero	- _I_CALL_TOP
+	.long	_060_real_trace		- _I_CALL_TOP
+	.long	_060_real_access	- _I_CALL_TOP
+	.long	_060_isp_done		- _I_CALL_TOP
+
+	.long	_060_real_cas		- _I_CALL_TOP
+	.long	_060_real_cas2		- _I_CALL_TOP
+	.long	_060_real_lock_page	- _I_CALL_TOP
+	.long	_060_real_unlock_page	- _I_CALL_TOP
+
+	.long	0x00000000, 0x00000000, 0x00000000, 0x00000000
+	.long	0x00000000, 0x00000000, 0x00000000
+
+	.long	_060_imem_read		- _I_CALL_TOP
+	.long	_060_dmem_read		- _I_CALL_TOP
+	.long	_060_dmem_write		- _I_CALL_TOP
+	.long	_060_imem_read_word	- _I_CALL_TOP
+	.long	_060_imem_read_long	- _I_CALL_TOP
+	.long	_060_dmem_read_byte	- _I_CALL_TOP
+	.long	_060_dmem_read_word	- _I_CALL_TOP
+	.long	_060_dmem_read_long	- _I_CALL_TOP
+	.long	_060_dmem_write_byte	- _I_CALL_TOP
+	.long	_060_dmem_write_word	- _I_CALL_TOP
+	.long	_060_dmem_write_long	- _I_CALL_TOP
+
+	.long	0x00000000
+	.long	0x00000000, 0x00000000, 0x00000000, 0x00000000
+
+|###########################################################################
+
+| 060 INTEGER KERNEL PACKAGE MUST GO HERE!!!
+#include "isp.sa"
diff --git a/arch/m68k/ifpsp060/isp.doc b/arch/m68k/ifpsp060/isp.doc
new file mode 100644
index 0000000..5a90fde
--- /dev/null
+++ b/arch/m68k/ifpsp060/isp.doc
@@ -0,0 +1,218 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+68060 INTEGER SOFTWARE PACKAGE (Kernel version)
+------------------------------------------------
+
+The file isp.sa contains the 68060 Integer Software Package.
+This package is essentially an exception handler that can be
+integrated into an operating system to handle the "Unimplemented
+Integer Instruction" exception vector #61.
+This exception is taken when any of the integer instructions
+not hardware implemented on the 68060 are encountered. The
+isp.sa provides full emulation support for these instructions.
+
+The unimplemented integer instructions are:
+	64-bit divide
+	64-bit multiply
+	movep
+	cmp2
+	chk2
+	cas (w/ a misaligned effective address; see the short illustration below)
+	cas2
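+
+For illustration only (the label _oddbuf is hypothetical), a "cas" whose
+operand does not lie on its natural boundary traps to vector #61 and is
+emulated by this package, while an aligned "cas" executes in hardware:
+
+	lea	_oddbuf+1,%a0	# misaligned effective address
+	cas.w	%d0,%d1,(%a0)	# taken to vector #61 and emulated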
+
+Release file format:
+--------------------
+The file isp.sa is essentially a hexadecimal image of the
+release package. This is the ONLY format which will be supported.
+The hex image was created by assembling the source code and
+then converting the resulting binary output image into an
+ASCII text file. The hexadecimal numbers are listed
+using the Motorola Assembly Syntax assembler directive "dc.l"
+(define constant longword). The file can be converted to other
+assembly syntaxes by using any word processor with a global
+search and replace function.
+
+To assist in assembling and linking this module with other modules,
+the installer should add a symbolic label to the top of the file.
+This will allow calling routines to access the entry points
+of this package.
+
+The source code isp.s has also been included but only for
+documentation purposes.
+
+Release file structure:
+-----------------------
+
+(top of module)
+	-----------------
+	|		| - 128 byte-sized section
+   (1)  |   Call-Out    | - 4 bytes per entry (user fills these in)
+	|		| - example routines in iskeleton.s
+	-----------------
+	|		| - 8 bytes per entry
+   (2)  | Entry Point   | - user does a "bra" or "jmp" to this address
+	|               |
+	-----------------
+	|		| - code section
+   (3)  ~		~
+	|		|
+	-----------------
+(bottom of module)
+
+The first section of this module is the "Call-out" section. This section
+is NOT INCLUDED in isp.sa (an example "Call-out" section is provided at
+the end of the file iskeleton.s). The purpose of this section is to allow
+the ISP routines to reference external functions that must be provided
+by the host operating system. This section MUST be exactly 128 bytes in
+size. There are 32 fields, each 4 bytes in size. Each field corresponds
+to a function required by the ISP (these functions and their location are
+listed in "68060ISP call-outs" below). Each field entry should contain
+the address of the corresponding function RELATIVE to the starting address
+of the "call-out" section. The "Call-out" section must sit adjacent to the
+isp.sa image in memory.
+
+The second section, the "Entry-point" section, is used by external routines
+to access the functions within the ISP. Since the isp.sa hex file contains
+no symbol names, this section contains function entry points that are fixed
+with respect to the top of the package. The currently defined entry-points
+are listed in section "68060 ISP entry points" below. A calling routine
+would simply execute a "bra" or "jmp" that jumped to the selected function
+entry-point.
+
+For example, if the 68060 hardware took an "Unimplemented Integer Instruction"
+exception (vector #61), the operating system should execute something
+similar to:
+
+	bra	_060ISP_TOP+128+0
+
+(_060ISP_TOP is the starting address of the "Call-out" section; the "Call-out"
+section is 128 bytes long; and the Unimplemented Integer ISP handler entry
+point is located 0 bytes from the top of the "Entry-point" section.)
+
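+As a rough sketch only (assuming supervisor mode, a writable vector
+table, and that the integrator has labelled the top of the Call-out
+section _060ISP_TOP), the entry point can be planted directly into
+vector #61:
+
+	movec.l	%vbr,%a0		# vector table base
+	lea	_060ISP_TOP+128+0,%a1	# unimplemented integer entry point
+	mov.l	%a1,(0xf4,%a0)		# vector #61 sits at offset 61*4 = 0xf4
+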
+The third section is the code section. After entering through an "Entry-point",
+the entry code jumps to the appropriate emulation code within the code section.
+
+68060ISP call-outs: (details in iskeleton.s)
+--------------------
+0x000:	_060_real_chk
+0x004:	_060_real_divbyzero
+0x008:	_060_real_trace
+0x00c:	_060_real_access
+0x010:	_060_isp_done
+
+0x014:	_060_real_cas
+0x018:	_060_real_cas2
+0x01c:	_060_real_lock_page
+0x020:	_060_real_unlock_page
+
+0x024:	(Motorola reserved)
+0x028:	(Motorola reserved)
+0x02c:	(Motorola reserved)
+0x030:	(Motorola reserved)
+0x034:	(Motorola reserved)
+0x038:	(Motorola reserved)
+0x03c:	(Motorola reserved)
+
+0x040:	_060_imem_read
+0x044:	_060_dmem_read
+0x048:	_060_dmem_write
+0x04c:	_060_imem_read_word
+0x050:	_060_imem_read_long
+0x054:	_060_dmem_read_byte
+0x058:	_060_dmem_read_word
+0x05c:	_060_dmem_read_long
+0x060:	_060_dmem_write_byte
+0x064:	_060_dmem_write_word
+0x068:	_060_dmem_write_long
+
+0x06c:	(Motorola reserved)
+0x070:	(Motorola reserved)
+0x074:	(Motorola reserved)
+0x078:	(Motorola reserved)
+0x07c:	(Motorola reserved)
+
+68060ISP entry points:
+-----------------------
+0x000:	_060_isp_unimp
+
+0x008:	_060_isp_cas
+0x010:	_060_isp_cas2
+0x018:	_060_isp_cas_finish
+0x020:	_060_isp_cas2_finish
+0x028:	_060_isp_cas_inrange
+0x030:	_060_isp_cas_terminate
+0x038:	_060_isp_cas_restart
+
+Integrating cas/cas2:
+---------------------
+The instructions "cas2" and "cas" (when used with a misaligned effective
+address) take the Unimplemented Integer Instruction exception. When the
+060ISP is installed properly, these instructions will enter through the
+_060_isp_unimp() entry point of the ISP.
+
+After the 060ISP decodes the instruction type and fetches the appropriate
+data registers, and BEFORE the actual emulated transfers occur, the
+package calls either the "Call-out" _060_real_cas() or _060_real_cas2().
+If the emulation code provided by the 060ISP is sufficient for the
+host system (see isp.s source code), then these "Call-out"s should be
+made, by the system integrator, to point directly back into the package
+through the "Entry-point"s _060_isp_cas() or _060_isp_cas2().
+
+One other necessary action by the integrator is to supply the routines
+_060_real_lock_page() and _060_real_unlock_page(). These functions are
+defined further in iskeleton.s and the 68060 Software Package Specification.
+
+If the "core" emulation routines of either "cas" or "cas2" perform some
+actions which are too system-specific, then the system integrator must
+supply new emulation code. This new emulation code should reside within
+the functions _060_real_cas() or _060_real_cas2(). When this new emulation
+code has completed, then it should re-enter the 060ISP package through the
+"Entry-point" _060_isp_cas_finish() or _060_isp_cas2_finish().
+To see what the register state is upon entering _060_real_cas() or
+_060_real_cas2() and what it should be upon return to the package through
+_060_isp_cas_finish() or _060_isp_cas2_finish(), please refer to the
+source code in isp.s.
+
+Miscellaneous:
+--------------
+
+_060_isp_unimp:
+----------------
+- documented in section 2.2 of the spec.
+- Basic flow:
+	exception taken ---> enter _060_isp_unimp   --|
+						      |
+						      |
+            may exit through _060_real_trace     <----|
+						  or  |
+            may exit through _060_real_chk       <----|
+						  or  |
+            may exit through _060_real_divbyzero <----|
+						  or  |
+            may exit through _060_isp_done       <----|
diff --git a/arch/m68k/ifpsp060/isp.sa b/arch/m68k/ifpsp060/isp.sa
new file mode 100644
index 0000000..2f88d2a
--- /dev/null
+++ b/arch/m68k/ifpsp060/isp.sa
@@ -0,0 +1,392 @@
+	.long	0x60ff0000,0x02360000,0x60ff0000,0x16260000
+	.long	0x60ff0000,0x12dc0000,0x60ff0000,0x11ea0000
+	.long	0x60ff0000,0x10de0000,0x60ff0000,0x12a40000
+	.long	0x60ff0000,0x12560000,0x60ff0000,0x122a0000
+	.long	0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+	.long	0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+	.long	0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+	.long	0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+	.long	0x2f00203a,0xfefc487b,0x0930ffff,0xfef8202f
+	.long	0x00044e74,0x00042f00,0x203afeea,0x487b0930
+	.long	0xfffffee2,0x202f0004,0x4e740004,0x2f00203a
+	.long	0xfed8487b,0x0930ffff,0xfecc202f,0x00044e74
+	.long	0x00042f00,0x203afec6,0x487b0930,0xfffffeb6
+	.long	0x202f0004,0x4e740004,0x2f00203a,0xfeb4487b
+	.long	0x0930ffff,0xfea0202f,0x00044e74,0x00042f00
+	.long	0x203afea2,0x487b0930,0xfffffe8a,0x202f0004
+	.long	0x4e740004,0x2f00203a,0xfe90487b,0x0930ffff
+	.long	0xfe74202f,0x00044e74,0x00042f00,0x203afe7e
+	.long	0x487b0930,0xfffffe5e,0x202f0004,0x4e740004
+	.long	0x2f00203a,0xfe6c487b,0x0930ffff,0xfe48202f
+	.long	0x00044e74,0x00042f00,0x203afe76,0x487b0930
+	.long	0xfffffe32,0x202f0004,0x4e740004,0x2f00203a
+	.long	0xfe64487b,0x0930ffff,0xfe1c202f,0x00044e74
+	.long	0x00042f00,0x203afe52,0x487b0930,0xfffffe06
+	.long	0x202f0004,0x4e740004,0x2f00203a,0xfe40487b
+	.long	0x0930ffff,0xfdf0202f,0x00044e74,0x00042f00
+	.long	0x203afe2e,0x487b0930,0xfffffdda,0x202f0004
+	.long	0x4e740004,0x2f00203a,0xfe1c487b,0x0930ffff
+	.long	0xfdc4202f,0x00044e74,0x00042f00,0x203afe0a
+	.long	0x487b0930,0xfffffdae,0x202f0004,0x4e740004
+	.long	0x2f00203a,0xfdf8487b,0x0930ffff,0xfd98202f
+	.long	0x00044e74,0x00042f00,0x203afde6,0x487b0930
+	.long	0xfffffd82,0x202f0004,0x4e740004,0x2f00203a
+	.long	0xfdd4487b,0x0930ffff,0xfd6c202f,0x00044e74
+	.long	0x00042f00,0x203afdc2,0x487b0930,0xfffffd56
+	.long	0x202f0004,0x4e740004,0x4e56ffa0,0x48ee3fff
+	.long	0xffc02d56,0xfff8082e,0x00050004,0x66084e68
+	.long	0x2d48fffc,0x600841ee,0x000c2d48,0xfffc422e
+	.long	0xffaa3d6e,0x0004ffa8,0x2d6e0006,0xffa4206e
+	.long	0xffa458ae,0xffa461ff,0xffffff26,0x2d40ffa0
+	.long	0x0800001e,0x67680800,0x00166628,0x61ff0000
+	.long	0x0cb0082e,0x00050004,0x670000ac,0x082e0002
+	.long	0xffaa6700,0x00a2082e,0x00070004,0x66000186
+	.long	0x600001b0,0x61ff0000,0x0a28082e,0x0002ffaa
+	.long	0x660e082e,0x0005ffaa,0x6600010a,0x60000078
+	.long	0x082e0005,0x000467ea,0x082e0005,0xffaa6600
+	.long	0x01264a2e,0x00046b00,0x014c6000,0x01760800
+	.long	0x0018670a,0x61ff0000,0x07ae6000,0x004a0800
+	.long	0x001b6730,0x48400c00,0x00fc670a,0x61ff0000
+	.long	0x0e926000,0x0032206e,0xffa454ae,0xffa461ff
+	.long	0xfffffe68,0x4a816600,0x019861ff,0x00000d20
+	.long	0x60000014,0x61ff0000,0x08c40c2e,0x0010ffaa
+	.long	0x66000004,0x605c1d6e,0xffa90005,0x082e0005
+	.long	0x00046606,0x206efffc,0x4e604cee,0x3fffffc0
+	.long	0x082e0007,0x00046612,0x2d6effa4,0x00062cae
+	.long	0xfff84e5e,0x60ffffff,0xfd622d6e,0xfff8fffc
+	.long	0x3d6e0004,0x00002d6e,0x00060008,0x2d6effa4
+	.long	0x00023d7c,0x20240006,0x598e4e5e,0x60ffffff
+	.long	0xfd0e1d6e,0xffa90005,0x4cee3fff,0xffc03cae
+	.long	0x00042d6e,0x00060008,0x2d6effa4,0x00023d7c
+	.long	0x20180006,0x2c6efff8,0xdffc0000,0x006060ff
+	.long	0xfffffcb0,0x1d6effa9,0x00054cee,0x3fffffc0
+	.long	0x3cae0004,0x2d6e0006,0x00082d6e,0xffa40002
+	.long	0x3d7c2014,0x00062c6e,0xfff8dffc,0x00000060
+	.long	0x60ffffff,0xfc941d6e,0xffa90005,0x4cee3fff
+	.long	0xffc02d6e,0x0006000c,0x3d7c2014,0x000a2d6e
+	.long	0xffa40006,0x2c6efff8,0xdffc0000,0x006460ff
+	.long	0xfffffc66,0x1d6effa9,0x00054cee,0x3fffffc0
+	.long	0x2d6e0006,0x000c3d7c,0x2024000a,0x2d6effa4
+	.long	0x00062c6e,0xfff8dffc,0x00000064,0x60ffffff
+	.long	0xfc4e1d6e,0xffa90005,0x4cee3fff,0xffc03d7c
+	.long	0x00f4000e,0x2d6effa4,0x000a3d6e,0x00040008
+	.long	0x2c6efff8,0xdffc0000,0x006860ff,0xfffffc4c
+	.long	0x2c882d40,0xfffc4fee,0xffc04cdf,0x7fff2f2f
+	.long	0x000c2f6f,0x00040010,0x2f6f000c,0x00042f6f
+	.long	0x0008000c,0x2f5f0004,0x3f7c4008,0x00066028
+	.long	0x4cee3fff,0xffc04e5e,0x514f2eaf,0x00083f6f
+	.long	0x000c0004,0x3f7c4008,0x00062f6f,0x00020008
+	.long	0x2f7c0942,0x8001000c,0x08170005,0x670608ef
+	.long	0x0002000d,0x60ffffff,0xfbcc0c2e,0x0040ffaa
+	.long	0x660c4280,0x102effab,0x2daeffac,0x0ce04e75
+	.long	0x2040302e,0xffa03200,0x0240003f,0x02810000
+	.long	0x0007303b,0x020a4efb,0x00064afc,0x00400000
+	.long	0x00000000,0x00000000,0x00000000,0x00000000
+	.long	0x00000000,0x00000000,0x00000000,0x00000080
+	.long	0x0086008c,0x00920098,0x009e00a4,0x00aa00b0
+	.long	0x00ce00ec,0x010a0128,0x01460164,0x01820196
+	.long	0x01b401d2,0x01f0020e,0x022c024a,0x0268027c
+	.long	0x029a02b8,0x02d602f4,0x03120330,0x034e036c
+	.long	0x036c036c,0x036c036c,0x036c036c,0x036c03d6
+	.long	0x03f0040a,0x042a03ca,0x00000000,0x0000206e
+	.long	0xffe04e75,0x206effe4,0x4e75206e,0xffe84e75
+	.long	0x206effec,0x4e75206e,0xfff04e75,0x206efff4
+	.long	0x4e75206e,0xfff84e75,0x206efffc,0x4e752008
+	.long	0x206effe0,0xd0882d40,0xffe02d48,0xffac1d7c
+	.long	0x0000ffab,0x1d7c0040,0xffaa4e75,0x2008206e
+	.long	0xffe4d088,0x2d40ffe4,0x2d48ffac,0x1d7c0001
+	.long	0xffab1d7c,0x0040ffaa,0x4e752008,0x206effe8
+	.long	0xd0882d40,0xffe82d48,0xffac1d7c,0x0002ffab
+	.long	0x1d7c0040,0xffaa4e75,0x2008206e,0xffecd088
+	.long	0x2d40ffec,0x2d48ffac,0x1d7c0003,0xffab1d7c
+	.long	0x0040ffaa,0x4e752008,0x206efff0,0xd0882d40
+	.long	0xfff02d48,0xffac1d7c,0x0004ffab,0x1d7c0040
+	.long	0xffaa4e75,0x2008206e,0xfff4d088,0x2d40fff4
+	.long	0x2d48ffac,0x1d7c0005,0xffab1d7c,0x0040ffaa
+	.long	0x4e752008,0x206efff8,0xd0882d40,0xfff82d48
+	.long	0xffac1d7c,0x0006ffab,0x1d7c0040,0xffaa4e75
+	.long	0x1d7c0004,0xffaa2008,0x206efffc,0xd0882d40
+	.long	0xfffc4e75,0x202effe0,0x2d40ffac,0x90882d40
+	.long	0xffe02040,0x1d7c0000,0xffab1d7c,0x0040ffaa
+	.long	0x4e75202e,0xffe42d40,0xffac9088,0x2d40ffe4
+	.long	0x20401d7c,0x0001ffab,0x1d7c0040,0xffaa4e75
+	.long	0x202effe8,0x2d40ffac,0x90882d40,0xffe82040
+	.long	0x1d7c0002,0xffab1d7c,0x0040ffaa,0x4e75202e
+	.long	0xffec2d40,0xffac9088,0x2d40ffec,0x20401d7c
+	.long	0x0003ffab,0x1d7c0040,0xffaa4e75,0x202efff0
+	.long	0x2d40ffac,0x90882d40,0xfff02040,0x1d7c0004
+	.long	0xffab1d7c,0x0040ffaa,0x4e75202e,0xfff42d40
+	.long	0xffac9088,0x2d40fff4,0x20401d7c,0x0005ffab
+	.long	0x1d7c0040,0xffaa4e75,0x202efff8,0x2d40ffac
+	.long	0x90882d40,0xfff82040,0x1d7c0006,0xffab1d7c
+	.long	0x0040ffaa,0x4e751d7c,0x0008ffaa,0x202efffc
+	.long	0x90882d40,0xfffc2040,0x4e75206e,0xffa454ae
+	.long	0xffa461ff,0xfffff9d4,0x4a8166ff,0xfffffd04
+	.long	0x3040d1ee,0xffe04e75,0x206effa4,0x54aeffa4
+	.long	0x61ffffff,0xf9b64a81,0x66ffffff,0xfce63040
+	.long	0xd1eeffe4,0x4e75206e,0xffa454ae,0xffa461ff
+	.long	0xfffff998,0x4a8166ff,0xfffffcc8,0x3040d1ee
+	.long	0xffe84e75,0x206effa4,0x54aeffa4,0x61ffffff
+	.long	0xf97a4a81,0x66ffffff,0xfcaa3040,0xd1eeffec
+	.long	0x4e75206e,0xffa454ae,0xffa461ff,0xfffff95c
+	.long	0x4a8166ff,0xfffffc8c,0x3040d1ee,0xfff04e75
+	.long	0x206effa4,0x54aeffa4,0x61ffffff,0xf93e4a81
+	.long	0x66ffffff,0xfc6e3040,0xd1eefff4,0x4e75206e
+	.long	0xffa454ae,0xffa461ff,0xfffff920,0x4a8166ff
+	.long	0xfffffc50,0x3040d1ee,0xfff84e75,0x206effa4
+	.long	0x54aeffa4,0x61ffffff,0xf9024a81,0x66ffffff
+	.long	0xfc323040,0xd1eefffc,0x4e752f01,0x206effa4
+	.long	0x54aeffa4,0x61ffffff,0xf8e24a81,0x66ffffff
+	.long	0xfc12221f,0x207614e0,0x08000008,0x670e48e7
+	.long	0x3c002a00,0x260860ff,0x000000ec,0x2f022200
+	.long	0xe9590241,0x000f2236,0x14c00800,0x000b6602
+	.long	0x48c12400,0xef5a0282,0x00000003,0xe5a949c0
+	.long	0xd081d1c0,0x241f4e75,0x1d7c0080,0xffaa206e
+	.long	0xffa44e75,0x206effa4,0x54aeffa4,0x61ffffff
+	.long	0xf87a4a81,0x66ffffff,0xfbaa3040,0x4e75206e
+	.long	0xffa458ae,0xffa461ff,0xfffff876,0x4a8166ff
+	.long	0xfffffb90,0x20404e75,0x206effa4,0x54aeffa4
+	.long	0x61ffffff,0xf8464a81,0x66ffffff,0xfb763040
+	.long	0xd1eeffa4,0x55884e75,0x206effa4,0x54aeffa4
+	.long	0x61ffffff,0xf8264a81,0x66ffffff,0xfb56206e
+	.long	0xffa45588,0x08000008,0x670e48e7,0x3c002a00
+	.long	0x260860ff,0x00000030,0x2f022200,0xe9590241
+	.long	0x000f2236,0x14c00800,0x000b6602,0x48c12400
+	.long	0xef5a0282,0x00000003,0xe5a949c0,0xd081d1c0
+	.long	0x241f4e75,0x08050006,0x67044282,0x6016e9c5
+	.long	0x24042436,0x24c00805,0x000b6602,0x48c2e9c5
+	.long	0x0542e1aa,0x08050007,0x67024283,0xe9c50682
+	.long	0x0c000002,0x6d346718,0x206effa4,0x58aeffa4
+	.long	0x61ffffff,0xf7ac4a81,0x66ffffff,0xfac66018
+	.long	0x206effa4,0x54aeffa4,0x61ffffff,0xf77e4a81
+	.long	0x66ffffff,0xfaae48c0,0xd680e9c5,0x07826700
+	.long	0x006a0c00,0x00026d34,0x6718206e,0xffa458ae
+	.long	0xffa461ff,0xfffff76a,0x4a8166ff,0xfffffa84
+	.long	0x601c206e,0xffa454ae,0xffa461ff,0xfffff73c
+	.long	0x4a8166ff,0xfffffa6c,0x48c06002,0x42802800
+	.long	0x08050002,0x67122043,0x61ffffff,0xf7764a81
+	.long	0x6624d082,0xd0846016,0xd6822043,0x61ffffff
+	.long	0xf7624a81,0x6610d084,0x6004d682,0x20032040
+	.long	0x4cdf003c,0x4e752043,0x203c0101,0x000160ff
+	.long	0xfffff9f0,0x322effa0,0x10010240,0x00072076
+	.long	0x04e0d0ee,0xffa20801,0x00076700,0x008c3001
+	.long	0xef580240,0x00072036,0x04c00801,0x00066752
+	.long	0x24002448,0xe19a2002,0x61ffffff,0xf71c4a81
+	.long	0x660000fc,0x544a204a,0xe19a2002,0x61ffffff
+	.long	0xf7084a81,0x660000e8,0x544a204a,0xe19a2002
+	.long	0x61ffffff,0xf6f44a81,0x660000d4,0x544a204a
+	.long	0xe19a2002,0x61ffffff,0xf6e04a81,0x660000c0
+	.long	0x4e752400,0x2448e048,0x61ffffff,0xf6cc4a81
+	.long	0x660000ac,0x544a204a,0x200261ff,0xfffff6ba
+	.long	0x4a816600,0x009a4e75,0x08010006,0x675c2448
+	.long	0x61ffffff,0xf6624a81,0x66000092,0x2400544a
+	.long	0x204a61ff,0xfffff650,0x4a816600,0x0080e14a
+	.long	0x1400544a,0x204a61ff,0xfffff63c,0x4a816600
+	.long	0x006ce18a,0x1400544a,0x204a61ff,0xfffff628
+	.long	0x4a816600,0x0058e18a,0x1400122e,0xffa0e209
+	.long	0x02410007,0x2d8214c0,0x4e752448,0x61ffffff
+	.long	0xf6064a81,0x66000036,0x2400544a,0x204a61ff
+	.long	0xfffff5f4,0x4a816600,0x0024e14a,0x1400122e
+	.long	0xffa0e209,0x02410007,0x3d8214c2,0x4e75204a
+	.long	0x203c00a1,0x000160ff,0xfffff8a8,0x204a203c
+	.long	0x01210001,0x60ffffff,0xf89a61ff,0xfffff914
+	.long	0x102effa2,0xe9180240,0x000f2436,0x04c00c2e
+	.long	0x0002ffa0,0x6d506728,0x244861ff,0xfffff5c4
+	.long	0x4a816600,0x009e2600,0x588a204a,0x61ffffff
+	.long	0xf5b24a81,0x6600008c,0x22002003,0x60000048
+	.long	0x244861ff,0xfffff59c,0x4a816600,0x00763200
+	.long	0x484048c0,0x48c1082e,0x0007ffa2,0x66000028
+	.long	0x48c26000,0x00222448,0x61ffffff,0xf5604a81
+	.long	0x6600005e,0x1200e048,0x49c049c1,0x082e0007
+	.long	0xffa26602,0x49c29480,0x42c30203,0x00049280
+	.long	0xb28242c4,0x86040203,0x0005382e,0xffa80204
+	.long	0x001a8803,0x3d44ffa8,0x082e0003,0xffa26602
+	.long	0x4e750804,0x00006602,0x4e751d7c,0x0010ffaa
+	.long	0x4e75204a,0x203c0101,0x000160ff,0xfffff7c4
+	.long	0x204a203c,0x01410001,0x60ffffff,0xf7b6102e
+	.long	0xffa10200,0x00386600,0x0208102e,0xffa10240
+	.long	0x00072e36,0x04c06700,0x00c0102e,0xffa3122e
+	.long	0xffa20240,0x0007e809,0x02410007,0x3d40ffb2
+	.long	0x3d41ffb4,0x2a3604c0,0x2c3614c0,0x082e0003
+	.long	0xffa2671a,0x4a875dee,0xffb06a02,0x44874a85
+	.long	0x5deeffb1,0x6a0844fc,0x00004086,0x40854a85
+	.long	0x66164a86,0x67000048,0xbe866306,0xcb466000
+	.long	0x00124c47,0x6005600a,0xbe85634e,0x61ff0000
+	.long	0x0068082e,0x0003ffa2,0x67244a2e,0xffb16702
+	.long	0x4485102e,0xffb0b12e,0xffb1670c,0x0c868000
+	.long	0x00006226,0x44866006,0x0806001f,0x661c44ee
+	.long	0xffa84a86,0x42eeffa8,0x302effb2,0x322effb4
+	.long	0x2d8504c0,0x2d8614c0,0x4e7508ee,0x0001ffa9
+	.long	0x08ae0000,0xffa94e75,0x022e001e,0xffa9002e
+	.long	0x0020ffaa,0x4e750c87,0x0000ffff,0x621e4281
+	.long	0x48454846,0x3a068ac7,0x32054846,0x3a068ac7
+	.long	0x48413205,0x42454845,0x2c014e75,0x42aeffbc
+	.long	0x422effb6,0x42810807,0x001f660e,0x52aeffbc
+	.long	0xe38fe38e,0xe3956000,0xffee2607,0x24054842
+	.long	0x4843b443,0x6606323c,0xffff600a,0x220582c3
+	.long	0x02810000,0xffff2f06,0x42464846,0x26072401
+	.long	0xc4c74843,0xc6c12805,0x98834844,0x30043806
+	.long	0x4a406600,0x000ab484,0x63045381,0x60de2f05
+	.long	0x2c014846,0x2a0761ff,0x0000006a,0x24052606
+	.long	0x2a1f2c1f,0x9c839b82,0x64ff0000,0x001a5381
+	.long	0x42822607,0x48434243,0xdc83db82,0x26074243
+	.long	0x4843da83,0x4a2effb6,0x66163d41,0xffb84281
+	.long	0x48454846,0x3a064246,0x50eeffb6,0x6000ff6c
+	.long	0x3d41ffba,0x3c054846,0x48452e2e,0xffbc670a
+	.long	0x5387e28d,0xe29651cf,0xfffa2a06,0x2c2effb8
+	.long	0x4e752406,0x26062805,0x48434844,0xccc5cac3
+	.long	0xc4c4c6c4,0x42844846,0xdc45d744,0xdc42d744
+	.long	0x48464245,0x42424845,0x4842da82,0xda834e75
+	.long	0x700461ff,0xfffff61c,0x0c2e0080,0xffaa6712
+	.long	0x244861ff,0xfffff2dc,0x4a81661e,0x2e006000
+	.long	0xfde658ae,0xffa461ff,0xfffff286,0x4a8166ff
+	.long	0xfffff5a0,0x2e006000,0xfdce61ff,0xfffff5ce
+	.long	0x204a203c,0x01010001,0x60ffffff,0xf556102e
+	.long	0xffa10c00,0x00076e00,0x00b40240,0x00072636
+	.long	0x04c0342e,0xffa24241,0x1202e95a,0x02420007
+	.long	0x283624c0,0x4a846700,0x00884a83,0x67000082
+	.long	0x422effb0,0x082e0003,0xffa26718,0x4a836c08
+	.long	0x4483002e,0x0001ffb0,0x4a846c08,0x44840a2e
+	.long	0x0001ffb0,0x2a032c03,0x2e044846,0x4847c6c4
+	.long	0xc8c6cac7,0xccc74287,0x4843d644,0xdd87d645
+	.long	0xdd874843,0x42444245,0x48444845,0xd885d886
+	.long	0x4a2effb0,0x67084683,0x46845283,0xd9872d83
+	.long	0x24c044fc,0x00002d84,0x14c042c7,0x02070008
+	.long	0x1c2effa9,0x02060010,0x8c071d46,0xffa94e75
+	.long	0x42b624c0,0x42b614c0,0x7e0460e4,0x700461ff
+	.long	0xfffff510,0x0c2e0080,0xffaa6714,0x244861ff
+	.long	0xfffff1d0,0x4a816600,0x00202600,0x6000ff34
+	.long	0x58aeffa4,0x61ffffff,0xf1784a81,0x66ffffff
+	.long	0xf4922600,0x6000ff1c,0x61ffffff,0xf4c0204a
+	.long	0x203c0101,0x000160ff,0xfffff448,0x2d40ffb4
+	.long	0x2200e958,0x0240000f,0x227604c0,0x2d49ffb0
+	.long	0x2001ec49,0x02410007,0x2a3614c0,0x02400007
+	.long	0x263604c0,0x3d40ffba,0x302effa2,0x2200e958
+	.long	0x0240000f,0x207604c0,0x2d48ffbc,0x2001ec49
+	.long	0x02410007,0x283614c0,0x02400007,0x243604c0
+	.long	0x3d40ffb8,0x082e0001,0xffa056c7,0x082e0005
+	.long	0x000456c6,0x24482649,0x22072006,0x61ffffff
+	.long	0xf05c204a,0x4a8066ff,0x000001c8,0x22072006
+	.long	0x204b61ff,0xfffff046,0x204b4a80,0x660a204a
+	.long	0x224b60ff,0xfffff020,0x2f002207,0x2006204a
+	.long	0x61ffffff,0xf03e201f,0x204b60ff,0x00000194
+	.long	0x082e0001,0xffa06648,0x44eeffa8,0xb0426602
+	.long	0xb24342ee,0xffa84a04,0x6610362e,0xffba3d81
+	.long	0x34c2342e,0xffb83d80,0x24c2082e,0x00050004
+	.long	0x56c22002,0x51c1206e,0xffbc61ff,0xffffeff4
+	.long	0x200251c1,0x206effb0,0x61ffffff,0xefe64e75
+	.long	0x44eeffa8,0xb0826602,0xb28342ee,0xffa84a04
+	.long	0x6610362e,0xffba2d81,0x34c0342e,0xffb82d80
+	.long	0x24c0082e,0x00050004,0x56c22002,0x50c1206e
+	.long	0xffbc61ff,0xffffefac,0x200250c1,0x206effb0
+	.long	0x61ffffff,0xef9e4e75,0x202effb4,0x6000feae
+	.long	0x082e0001,0xffa06610,0x700261ff,0xfffff364
+	.long	0x2d48ffb4,0x51c7600e,0x700461ff,0xfffff354
+	.long	0x2d48ffb4,0x50c7302e,0xffa22200,0xec480240
+	.long	0x00072436,0x04c00241,0x00072836,0x14c03d41
+	.long	0xffb8082e,0x00050004,0x56c62448,0x22072006
+	.long	0x61ffffff,0xef284a80,0x66000096,0x204a60ff
+	.long	0xffffeeee,0x082e0001,0xffa0662c,0x44eeffa8
+	.long	0xb04442ee,0xffa84a01,0x6608362e,0xffb83d80
+	.long	0x34c2206e,0xffb451c1,0x082e0005,0x000456c0
+	.long	0x61ffffff,0xeefe4e75,0x44eeffa8,0xb08442ee
+	.long	0xffa84a01,0x6608362e,0xffb82d80,0x34c0206e
+	.long	0xffb450c1,0x082e0005,0x000456c0,0x61ffffff
+	.long	0xeed24e75,0x4e7b6000,0x4e7b6001,0x0c2e00fc
+	.long	0xffa167ff,0xffffff24,0x206effb4,0x082e0001
+	.long	0xffa056c7,0x6000ff40,0x4e7b6000,0x4e7b6001
+	.long	0x24482f00,0x61ffffff,0xf264201f,0x588f518f
+	.long	0x518e721a,0x41ef0008,0x43ef0000,0x22d851c9
+	.long	0xfffc3d7c,0x4008000a,0x2d4a000c,0x2d400010
+	.long	0x4cee3fff,0xffc04e5e,0x60ffffff,0xedf84280
+	.long	0x43fb0170,0x000005ae,0xb3c86d0e,0x43fb0170
+	.long	0x00000010,0xb1c96d02,0x4e7570ff,0x4e754a06
+	.long	0x66047001,0x60027005,0x4a076700,0x01e42448
+	.long	0x26492848,0x2a49568c,0x568d220a,0x40c7007c
+	.long	0x07004e7a,0x60004e7b,0x00004e7b,0x0001f58a
+	.long	0xf58cf58b,0xf58df46a,0xf46cf46b,0xf46d2441
+	.long	0x56812841,0xf5caf5cc,0x247c8000,0x0000267c
+	.long	0xa0000000,0x287c0000,0x00002008,0x02000003
+	.long	0x671c0c00,0x00026700,0x00966000,0x010251fc
+	.long	0x4e7ba008,0x0e911000,0x0e900000,0x6002600e
+	.long	0xb082661c,0xb2836618,0x0e915800,0x6002600e
+	.long	0x4e7bb008,0x0e904800,0x4e7bc008,0x6034600e
+	.long	0x4e7bb008,0x0e900800,0x4e7bc008,0x6012600e
+	.long	0x4e714e71,0x4e714e71,0x4e714e71,0x4e7160b0
+	.long	0x4e7b6000,0x4e7b6001,0x46c751c4,0x60ffffff
+	.long	0xfd424e7b,0x60004e7b,0x600146c7,0x50c460ff
+	.long	0xfffffd30,0x51fc51fc,0x51fc51fc,0x51fc51fc
+	.long	0x4e7ba008,0x0e911000,0x0e900000,0x6002600e
+	.long	0xb082662c,0xb2836628,0x0e915800,0x6002600e
+	.long	0x48440e58,0x48004e7b,0xb0084844,0x6002600e
+	.long	0x0e504800,0x4e7bc008,0x6000ffa8,0x4e71600e
+	.long	0x48400e58,0x08004e7b,0xb0084840,0x6002600e
+	.long	0x0e500800,0x4e7bc008,0x6000ff76,0x4e71600e
+	.long	0x4e714e71,0x4e714e71,0x4e714e71,0x4e716090
+	.long	0x4e7ba008,0x0e911000,0x0e900000,0x6002600e
+	.long	0xb082663c,0xb2836638,0x0e915800,0x6002600e
+	.long	0xe19c0e18,0x48004844,0x0e584800,0x6002600e
+	.long	0xe19c4e7b,0xb0080e10,0x48006004,0x4e71600e
+	.long	0x4e7bc008,0x6000ff2c,0x4e714e71,0x4e71600e
+	.long	0xe1980e18,0x08004840,0x0e580800,0x6002600e
+	.long	0xe1984e7b,0xb0080e10,0x08006004,0x4e71600e
+	.long	0x4e7bc008,0x6000feea,0x4e714e71,0x4e71600c
+	.long	0x4e714e71,0x4e714e71,0x4e714e71,0x6000ff72
+	.long	0x24482649,0x28482a49,0x528c528d,0x220a40c7
+	.long	0x007c0700,0x4e7a6000,0x4e7b0000,0x4e7b0001
+	.long	0xf58af58c,0xf58bf58d,0xf46af46c,0xf46bf46d
+	.long	0x24415681,0x2841f5ca,0xf5cc247c,0x80000000
+	.long	0x267ca000,0x0000287c,0x00000000,0x20080800
+	.long	0x00006600,0x009a6016,0x51fc51fc,0x51fc51fc
+	.long	0x4e7ba008,0x0e511000,0x0e500000,0x6002600e
+	.long	0xb042661c,0xb2436618,0x0e515800,0x6002600e
+	.long	0x4e7bb008,0x0e504800,0x4e7bc008,0x6034600e
+	.long	0x4e7bb008,0x0e500800,0x4e7bc008,0x6012600e
+	.long	0x4e714e71,0x4e714e71,0x4e714e71,0x4e7160b0
+	.long	0x4e7b6000,0x4e7b6001,0x46c751c4,0x60ffffff
+	.long	0xfb624e7b,0x60004e7b,0x600146c7,0x50c460ff
+	.long	0xfffffb50,0x51fc51fc,0x51fc51fc,0x51fc51fc
+	.long	0x4e7ba008,0x0e511000,0x0e500000,0x6002600e
+	.long	0xb042662c,0xb2436628,0x0e515800,0x6002600e
+	.long	0xe09c0e18,0x48004e7b,0xb008e19c,0x6002600e
+	.long	0x0e104800,0x4e7bc008,0x6000ffa8,0x4e71600e
+	.long	0xe0980e18,0x08004e7b,0xb008e198,0x6002600e
+	.long	0x0e100800,0x4e7bc008,0x6000ff76,0x4e71600e
+	.long	0x4e714e71,0x4e714e71,0x4e714e71,0x4e716090
+	.long	0x4a066604,0x70016002,0x70054a07,0x660000c6
+	.long	0x22482448,0x528a2602,0xe04a40c7,0x007c0700
+	.long	0x4e7a6000,0x4e7b0000,0x4e7b0001,0xf589f58a
+	.long	0xf469f46a,0x227c8000,0x0000247c,0xa0000000
+	.long	0x267c0000,0x00006016,0x51fc51fc,0x51fc51fc
+	.long	0x4e7b9008,0x0e500000,0xb0446624,0x6002600e
+	.long	0x0e182800,0x4e7ba008,0x0e103800,0x6002600e
+	.long	0x4e7bb008,0x604c4e71,0x4e714e71,0x4e71600e
+	.long	0xe0980e18,0x08004e7b,0xa008e198,0x6002600e
+	.long	0x0e100800,0x4e7bb008,0x60164e71,0x4e71600e
+	.long	0x4e714e71,0x4e714e71,0x4e714e71,0x4e7160a0
+	.long	0x4e7b6000,0x4e7b6001,0x46c751c1,0x60ffffff
+	.long	0xfb164e7b,0x60004e7b,0x600146c7,0x50c160ff
+	.long	0xfffffb04,0x22482448,0x568a2208,0x08010000
+	.long	0x660000c2,0x26024842,0x40c7007c,0x07004e7a
+	.long	0x60004e7b,0x00004e7b,0x0001f589,0xf58af469
+	.long	0xf46a227c,0x80000000,0x247ca000,0x0000267c
+	.long	0x00000000,0x601851fc,0x51fc51fc,0x51fc51fc
+	.long	0x4e7b9008,0x0e900000,0xb0846624,0x6002600e
+	.long	0x0e582800,0x4e7ba008,0x0e503800,0x6002600e
+	.long	0x4e7bb008,0x604c4e71,0x4e714e71,0x4e71600e
+	.long	0x48400e58,0x08004840,0x4e7ba008,0x6002600e
+	.long	0x0e500800,0x4e7bb008,0x60164e71,0x4e71600e
+	.long	0x4e714e71,0x4e714e71,0x4e714e71,0x4e7160a0
+	.long	0x4e7b6000,0x4e7b6001,0x46c751c1,0x60ffffff
+	.long	0xfa464e7b,0x60004e7b,0x600146c7,0x50c160ff
+	.long	0xfffffa34,0x2a02e08a,0x26024842,0x40c7007c
+	.long	0x07004e7a,0x60004e7b,0x00004e7b,0x0001f589
+	.long	0xf58af469,0xf46a227c,0x80000000,0x247ca000
+	.long	0x0000267c,0x00000000,0x601451fc,0x51fc51fc
+	.long	0x4e7b9008,0x0e900000,0xb0846624,0x6002600e
+	.long	0x0e182800,0x0e583800,0x4e7ba008,0x6002600e
+	.long	0x0e105800,0x4e7bb008,0x6000ff88,0x4e71600e
+	.long	0xe1980e18,0x08004840,0x0e580800,0x6002600e
+	.long	0xe1984e7b,0xa0080e10,0x08006004,0x4e71600e
+	.long	0x4e7bb008,0x6000ff4a,0x4e714e71,0x4e71600e
+	.long	0x4e714e71,0x4e714e71,0x4e714e71,0x4e716090
diff --git a/arch/m68k/ifpsp060/itest.sa b/arch/m68k/ifpsp060/itest.sa
new file mode 100644
index 0000000..7b15eaf
--- /dev/null
+++ b/arch/m68k/ifpsp060/itest.sa
@@ -0,0 +1,1281 @@
+	dc.l	$60ff0000,$005c5465,$7374696e,$67203638
+	dc.l	$30363020,$49535020,$73746172,$7465643a
+	dc.l	$0a007061,$73736564,$0a002066,$61696c65
+	dc.l	$640a0000,$4a80660e,$487affe8,$61ff0000
+	dc.l	$4f9a588f,$4e752f01,$61ff0000,$4fa4588f
+	dc.l	$487affd8,$61ff0000,$4f82588f,$4e754e56
+	dc.l	$ff6048e7,$3f3c487a,$ff9e61ff,$00004f6c
+	dc.l	$588f42ae,$ff78487b,$01700000,$00ea61ff
+	dc.l	$00004f58,$588f61ff,$000000f0,$61ffffff
+	dc.l	$ffa642ae,$ff78487b,$01700000,$0af661ff
+	dc.l	$00004f38,$588f61ff,$00000af8,$61ffffff
+	dc.l	$ff8642ae,$ff78487b,$01700000,$179c61ff
+	dc.l	$00004f18,$588f61ff,$0000179c,$61ffffff
+	dc.l	$ff6642ae,$ff78487b,$01700000,$038661ff
+	dc.l	$00004ef8,$588f61ff,$00000380,$61ffffff
+	dc.l	$ff4642ae,$ff78487b,$01700000,$202c61ff
+	dc.l	$00004ed8,$588f2d7c,$00000002,$ff7461ff
+	dc.l	$0000202c,$61ffffff,$ff1e42ae,$ff78487b
+	dc.l	$01700000,$0d7c61ff,$00004eb0,$588f61ff
+	dc.l	$00000d74,$61ffffff,$fefe42ae,$ff78487b
+	dc.l	$01700000,$0f8e61ff,$00004e90,$588f61ff
+	dc.l	$00000f88,$61ffffff,$fede4cdf,$3cfc4e5e
+	dc.l	$4e750936,$342d6269,$74206d75,$6c746970
+	dc.l	$6c792e2e,$2e0051fc,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$4e184281,$243c9999,$9999263c
+	dc.l	$88888888,$3d7c0004,$ff7c44fc,$000048ee
+	dc.l	$7fffff80,$4c013402,$42eeff7e,$48ee7fff
+	dc.l	$ffc042ae,$ff8842ae,$ff8c61ff,$00004da6
+	dc.l	$4a0066ff,$00004dcc,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$4dc8223c,$77777777,$243c9999
+	dc.l	$99997600,$3d7c0004,$ff7c44fc,$000048ee
+	dc.l	$7fffff80,$4c013402,$42eeff7e,$48ee7fff
+	dc.l	$ffc042ae,$ff8842ae,$ff8c61ff,$00004d56
+	dc.l	$4a0066ff,$00004d7c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$4d787210,$243c6666,$66663d7c
+	dc.l	$0000ff7c,$44fc0000,$48ee7fff,$ff804c01
+	dc.l	$240242ee,$ff7e48ee,$7fffffc0,$2d7c0000
+	dc.l	$0006ff88,$61ff0000,$4d0c4a00,$66ff0000
+	dc.l	$4d3252ae,$ff784cfb,$3fff0170,$00004d2e
+	dc.l	$223c5555,$55557400,$76033d7c,$0000ff7c
+	dc.l	$44fc0000,$48ee7fff,$ff804c01,$340242ee
+	dc.l	$ff7e48ee,$7fffffc0,$2d7c0000,$0000ff88
+	dc.l	$2d7cffff,$ffffff8c,$61ff0000,$4cb84a00
+	dc.l	$66ff0000,$4cde52ae,$ff784cfb,$3fff0170
+	dc.l	$00004cda,$223c4000,$00007400,$76043d7c
+	dc.l	$0000ff7c,$44fc0000,$48ee7fff,$ff804c01
+	dc.l	$340242ee,$ff7e48ee,$7fffffc0,$2d7c0000
+	dc.l	$0001ff88,$2d7c0000,$0000ff8c,$61ff0000
+	dc.l	$4c644a00,$66ff0000,$4c8a52ae,$ff784cfb
+	dc.l	$3fff0170,$00004c86,$72ff7400,$76ff3d7c
+	dc.l	$0008ff7c,$44fc0000,$48ee7fff,$ff804c01
+	dc.l	$340242ee,$ff7e48ee,$7fffffc0,$2d7cffff
+	dc.l	$fffeff88,$2d7c0000,$0001ff8c,$61ff0000
+	dc.l	$4c144a00,$66ff0000,$4c3a52ae,$ff784cfb
+	dc.l	$3fff0170,$00004c36,$223c8000,$00007400
+	dc.l	$76ff3d7c,$0000ff7c,$44fc0000,$48ee7fff
+	dc.l	$ff804c01,$3c0242ee,$ff7e48ee,$7fffffc0
+	dc.l	$2d7c0000,$0000ff88,$2d7c8000,$0000ff8c
+	dc.l	$61ff0000,$4bc04a00,$66ff0000,$4be652ae
+	dc.l	$ff784cfb,$3fff0170,$00004be2,$223c8000
+	dc.l	$00007400,$76013d7c,$0008ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff804c01,$3c0242ee,$ff7e48ee
+	dc.l	$7fffffc0,$2d7cffff,$ffffff88,$2d7c8000
+	dc.l	$0000ff8c,$61ff0000,$4b6c4a00,$66ff0000
+	dc.l	$4b9252ae,$ff784cfb,$3fff0170,$00004b8e
+	dc.l	$72017400,$263c8000,$00003d7c,$0008ff7c
+	dc.l	$44fc0000,$48ee7fff,$ff804c01,$3c0242ee
+	dc.l	$ff7e48ee,$7fffffc0,$2d7cffff,$ffffff88
+	dc.l	$2d7c8000,$0000ff8c,$61ff0000,$4b184a00
+	dc.l	$66ff0000,$4b3e222e,$ff784280,$4e75096d
+	dc.l	$6f766570,$2e2e2e00,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$4b2841ee,$ff60303c,$aaaa4228
+	dc.l	$00004228,$00023d7c,$001fff7c,$44fc001f
+	dc.l	$48ee7fff,$ff800188,$000042ee,$ff7e48ee
+	dc.l	$7fffffc0,$12280002,$e1491228,$0000b041
+	dc.l	$66ff0000,$4ade61ff,$00004aaa,$4a0066ff
+	dc.l	$00004ad0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$4acc41ee,$ff64303c,$aaaa42a8,$fffc4290
+	dc.l	$42a80004,$3d7c001f,$ff7c44fc,$001f48ee
+	dc.l	$7fffff80,$01880000,$42eeff7e,$48ee7fff
+	dc.l	$ffc04aa8,$fffc66ff,$00004a88,$4aa80004
+	dc.l	$66ff0000,$4a7e0c90,$aa00aa00,$66ff0000
+	dc.l	$4a7261ff,$00004a3e,$4a0066ff,$00004a64
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$4a6041ee
+	dc.l	$ff60303c,$aaaa4228,$00004228,$00023d7c
+	dc.l	$0000ff7c,$44fc0000,$48ee7fff,$ff800188
+	dc.l	$000042ee,$ff7e48ee,$7fffffc0,$12280002
+	dc.l	$e1491228,$0000b041,$66ff0000,$4a1661ff
+	dc.l	$000049e2,$4a0066ff,$00004a08,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$4a0441ee,$ff60117c
+	dc.l	$00aa0000,$117c00aa,$00023d7c,$001fff7c
+	dc.l	$44fc001f,$48ee7fff,$ff800108,$000042ee
+	dc.l	$ff7e48ee,$7fffffc0,$3d7caaaa,$ff82323c
+	dc.l	$aaaab041,$66ff0000,$49ba61ff,$00004986
+	dc.l	$4a0066ff,$000049ac,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$49a841ee,$ff60203c,$aaaaaaaa
+	dc.l	$42280000,$42280002,$42280004,$42280006
+	dc.l	$3d7c001f,$ff7c44fc,$001f48ee,$7fffff80
+	dc.l	$01c80000,$42eeff7e,$48ee7fff,$ffc01228
+	dc.l	$0006e189,$12280004,$e1891228,$0002e189
+	dc.l	$12280000,$b08166ff,$00004948,$61ff0000
+	dc.l	$49144a00,$66ff0000,$493a52ae,$ff784cfb
+	dc.l	$3fff0170,$00004936,$41eeff64,$203caaaa
+	dc.l	$aaaa42a8,$fffc4290,$42a80004,$42a80008
+	dc.l	$3d7c001f,$ff7c44fc,$001f48ee,$7fffff80
+	dc.l	$01c80000,$42eeff7e,$48ee7fff,$ffc04aa8
+	dc.l	$fffc66ff,$000048ec,$4aa80008,$66ff0000
+	dc.l	$48e20c90,$aa00aa00,$66ff0000,$48d60ca8
+	dc.l	$aa00aa00,$000466ff,$000048c8,$61ff0000
+	dc.l	$48944a00,$66ff0000,$48ba52ae,$ff784cfb
+	dc.l	$3fff0170,$000048b6,$41eeff60,$117c00aa
+	dc.l	$0000117c,$00aa0002,$117c00aa,$0004117c
+	dc.l	$00aa0006,$3d7c001f,$ff7c44fc,$001f48ee
+	dc.l	$7fffff80,$01480000,$42eeff7e,$48ee7fff
+	dc.l	$ffc02d7c,$aaaaaaaa,$ff80223c,$aaaaaaaa
+	dc.l	$b08166ff,$0000485c,$61ff0000,$48284a00
+	dc.l	$66ff0000,$484e52ae,$ff784cfb,$3fff0170
+	dc.l	$0000484a,$41eeff60,$3e3caaaa,$42280000
+	dc.l	$42280002,$3d7c001f,$ff7c44fc,$001f48ee
+	dc.l	$7fffff80,$0f880000,$42eeff7e,$48ee7fff
+	dc.l	$ffc01228,$0002e149,$12280000,$be4166ff
+	dc.l	$00004800,$61ff0000,$47cc4a00,$66ff0000
+	dc.l	$47f252ae,$ff784cfb,$3fff0170,$000047ee
+	dc.l	$41eeff60,$117c00aa,$0000117c,$00aa0002
+	dc.l	$3d7c001f,$ff7c44fc,$001f48ee,$7fffff80
+	dc.l	$0f080000,$42eeff7e,$48ee7fff,$ffc03d7c
+	dc.l	$aaaaff9e,$323caaaa,$be4166ff,$000047a4
+	dc.l	$61ff0000,$47704a00,$66ff0000,$479652ae
+	dc.l	$ff784cfb,$3fff0170,$00004792,$41eeff60
+	dc.l	$303caaaa,$42280000,$42280002,$3d7c001f
+	dc.l	$ff7c44fc,$001f48ee,$7fffff80,$01880000
+	dc.l	$42eeff7e,$48ee7fff,$ffc01228,$0002e149
+	dc.l	$12280000,$b04166ff,$00004748,$61ff0000
+	dc.l	$47144a00,$66ff0000,$473a52ae,$ff784cfb
+	dc.l	$3fff0170,$00004736,$41eeff60,$303caaaa
+	dc.l	$42280008,$4228000a,$3d7c001f,$ff7c44fc
+	dc.l	$001f48ee,$7fffff80,$01880008,$42eeff7e
+	dc.l	$48ee7fff,$ffc01228,$000ae149,$12280008
+	dc.l	$b04166ff,$000046ec,$61ff0000,$46b84a00
+	dc.l	$66ff0000,$46de52ae,$ff784cfb,$3fff0170
+	dc.l	$000046da,$41eeff60,$117c00aa,$0008117c
+	dc.l	$00aa000a,$3d7c001f,$ff7c44fc,$001f48ee
+	dc.l	$7fffff80,$01080008,$42eeff7e,$48ee7fff
+	dc.l	$ffc03d7c,$aaaaff82,$323caaaa,$b04166ff
+	dc.l	$00004690,$61ff0000,$465c4a00,$66ff0000
+	dc.l	$468252ae,$ff784cfb,$3fff0170,$0000467e
+	dc.l	$41eeff60,$203caaaa,$aaaa4228,$00084228
+	dc.l	$000a4228,$000c4228,$000e3d7c,$001fff7c
+	dc.l	$44fc001f,$48ee7fff,$ff8001c8,$000842ee
+	dc.l	$ff7e48ee,$7fffffc0,$1228000e,$e1891228
+	dc.l	$000ce189,$1228000a,$e1891228,$0008b081
+	dc.l	$66ff0000,$461e61ff,$000045ea,$4a0066ff
+	dc.l	$00004610,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$460c41ee,$ff60117c,$00aa0008,$117c00aa
+	dc.l	$000a117c,$00aa000c,$117c00aa,$000e3d7c
+	dc.l	$001fff7c,$44fc001f,$48ee7fff,$ff800148
+	dc.l	$000842ee,$ff7e48ee,$7fffffc0,$2d7caaaa
+	dc.l	$aaaaff80,$223caaaa,$aaaab081,$66ff0000
+	dc.l	$45b261ff,$0000457e,$4a0066ff,$000045a4
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$45a041ee
+	dc.l	$ff68303c,$aaaa4228,$fff84228,$fffa3d7c
+	dc.l	$001fff7c,$44fc001f,$48ee7fff,$ff800188
+	dc.l	$fff842ee,$ff7e48ee,$7fffffc0,$1228fffa
+	dc.l	$e1491228,$fff8b041,$66ff0000,$455661ff
+	dc.l	$00004522,$4a0066ff,$00004548,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$454441ee,$ff68117c
+	dc.l	$00aafff8,$117c00aa,$fffa3d7c,$001fff7c
+	dc.l	$44fc001f,$48ee7fff,$ff800108,$fff842ee
+	dc.l	$ff7e48ee,$7fffffc0,$3d7caaaa,$ff82323c
+	dc.l	$aaaab041,$66ff0000,$44fa61ff,$000044c6
+	dc.l	$4a0066ff,$000044ec,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$44e841ee,$ff68203c,$aaaaaaaa
+	dc.l	$4228fff8,$4228fffa,$4228fffc,$42280000
+	dc.l	$3d7c001f,$ff7c44fc,$001f48ee,$7fffff80
+	dc.l	$01c8fff8,$42eeff7e,$48ee7fff,$ffc01228
+	dc.l	$fffee189,$1228fffc,$e1891228,$fffae189
+	dc.l	$1228fff8,$b08166ff,$00004488,$61ff0000
+	dc.l	$44544a00,$66ff0000,$447a52ae,$ff784cfb
+	dc.l	$3fff0170,$00004476,$41eeff68,$117c00aa
+	dc.l	$fff8117c,$00aafffa,$117c00aa,$fffc117c
+	dc.l	$00aa0000,$3d7c001f,$ff7c44fc,$001f48ee
+	dc.l	$7fffff80,$0148fff8,$42eeff7e,$48ee7fff
+	dc.l	$ffc02d7c,$aaaaaaaa,$ff80223c,$aaaaaaaa
+	dc.l	$b08166ff,$0000441c,$61ff0000,$43e84a00
+	dc.l	$66ff0000,$440e222e,$ff784280,$4e750936
+	dc.l	$342d6269,$74206469,$76696465,$2e2e2e00
+	dc.l	$52aeff78,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$43ec7201,$74007600,$3d7c0014,$ff7c44fc
+	dc.l	$001f48ee,$7fffff80,$4c413402,$42eeff7e
+	dc.l	$48ee7fff,$ffc061ff,$0000438a,$4a0066ff
+	dc.l	$000043b0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$43ac223c,$44444444,$7400263c,$55555555
+	dc.l	$3d7c0010,$ff7c44fc,$001f48ee,$7fffff80
+	dc.l	$4c413402,$42eeff7e,$48ee7fff,$ffc02d7c
+	dc.l	$11111111,$ff882d7c,$00000001,$ff8c61ff
+	dc.l	$00004332,$4a0066ff,$00004358,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$4354223c,$55555555
+	dc.l	$7400263c,$44444444,$3d7c0014,$ff7c44fc
+	dc.l	$001f48ee,$7fffff80,$4c413402,$42eeff7e
+	dc.l	$48ee7fff,$ffc02d7c,$44444444,$ff882d7c
+	dc.l	$00000000,$ff8c61ff,$000042da,$4a0066ff
+	dc.l	$00004300,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$42fc223c,$11111111,$243c4444,$4444263c
+	dc.l	$44444444,$3d7c001e,$ff7c44fc,$001d48ee
+	dc.l	$7fffff80,$4c413402,$42eeff7e,$48ee7fff
+	dc.l	$ffc061ff,$0000428e,$4a0066ff,$000042b4
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$42b072fe
+	dc.l	$74017602,$3d7c001e,$ff7c44fc,$001d48ee
+	dc.l	$7fffff80,$4c413c02,$42eeff7e,$48ee7fff
+	dc.l	$ffc061ff,$0000424e,$4a0066ff,$00004274
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$427072fe
+	dc.l	$74017600,$3d7c0018,$ff7c44fc,$001d48ee
+	dc.l	$7fffff80,$4c413c02,$42eeff7e,$48ee7fff
+	dc.l	$ffc02d7c,$00000000,$ff882d7c,$80000000
+	dc.l	$ff8c61ff,$000041fe,$4a0066ff,$00004224
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$42207202
+	dc.l	$74017600,$3d7c001e,$ff7c44fc,$001d48ee
+	dc.l	$7fffff80,$4c413c02,$42eeff7e,$48ee7fff
+	dc.l	$ffc061ff,$000041be,$4a0066ff,$000041e4
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$41e072ff
+	dc.l	$74fe76ff,$3d7c0008,$ff7c44fc,$000048ee
+	dc.l	$7fffff80,$4c413402,$42eeff7e,$48ee7fff
+	dc.l	$ffc061ff,$0000417e,$4a0066ff,$000041a4
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$41a072ff
+	dc.l	$74fe76ff,$3d7c0008,$ff7c44fc,$000048ee
+	dc.l	$7fffff80,$4c7c2402,$ffffffff,$42eeff7e
+	dc.l	$48ee7fff,$ffc02d7c,$ffffffff,$ff8861ff
+	dc.l	$00004132,$4a0066ff,$00004158,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$4154223c,$0000ffff
+	dc.l	$7401263c,$55555555,$3d7c0000,$ff7c44fc
+	dc.l	$000048ee,$7fffff80,$4c413402,$42eeff7e
+	dc.l	$48ee7fff,$ffc02d7c,$0000aaab,$ff882d7c
+	dc.l	$00015556,$ff8c61ff,$000040da,$4a0066ff
+	dc.l	$00004100,$222eff78,$42804e75,$09636173
+	dc.l	$2e2e2e00,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$40ec41ee,$ff6130bc,$aaaa323c,$aaaa343c
+	dc.l	$bbbb3d7c,$0014ff7c,$44fc0010,$48ee7fff
+	dc.l	$ff800cd0,$008142ee,$ff7e3610,$3d7cbbbb
+	dc.l	$ff8e48ee,$7fffffc0,$61ff0000,$40784a00
+	dc.l	$66ff0000,$409e52ae,$ff784cfb,$3fff0170
+	dc.l	$0000409a,$41eeff61,$30bceeee,$323caaaa
+	dc.l	$343cbbbb,$3d7c0000,$ff7c44fc,$000048ee
+	dc.l	$7fffff80,$0cd00081,$42eeff7e,$36103d7c
+	dc.l	$eeeeff86,$3d7ceeee,$ff8e48ee,$7fffffc0
+	dc.l	$61ff0000,$40204a00,$66ff0000,$404652ae
+	dc.l	$ff784cfb,$3fff0170,$00004042,$41eeff62
+	dc.l	$20bcaaaa,$aaaa223c,$aaaaaaaa,$243cbbbb
+	dc.l	$bbbb3d7c,$0004ff7c,$44fc0000,$48ee7fff
+	dc.l	$ff800ed0,$008142ee,$ff7e2610,$2d7cbbbb
+	dc.l	$bbbbff8c,$48ee7fff,$ffc061ff,$00003fc6
+	dc.l	$4a0066ff,$00003fec,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$3fe841ee,$ff6220bc,$eeeeeeee
+	dc.l	$223caaaa,$aaaa243c,$bbbbbbbb,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$7fffff80,$0ed00081
+	dc.l	$42eeff7e,$26102d7c,$eeeeeeee,$ff842d7c
+	dc.l	$eeeeeeee,$ff8c48ee,$7fffffc0,$61ff0000
+	dc.l	$3f644a00,$66ff0000,$3f8a52ae,$ff784cfb
+	dc.l	$3fff0170,$00003f86,$41eeff61,$20bcaaaa
+	dc.l	$aaaa223c,$aaaaaaaa,$243cbbbb,$bbbb3d7c
+	dc.l	$0004ff7c,$44fc0000,$48ee7fff,$ff800ed0
+	dc.l	$008142ee,$ff7e2610,$2d7cbbbb,$bbbbff8c
+	dc.l	$48ee7fff,$ffc061ff,$00003f0a,$4a0066ff
+	dc.l	$00003f30,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$3f2c41ee,$ff6120bc,$7fffffff,$223c8000
+	dc.l	$0000243c,$bbbbbbbb,$3d7c001b,$ff7c44fc
+	dc.l	$001048ee,$7fffff80,$0ed00081,$42eeff7e
+	dc.l	$26102d7c,$7fffffff,$ff842d7c,$7fffffff
+	dc.l	$ff8c48ee,$7fffffc0,$61ff0000,$3ea84a00
+	dc.l	$66ff0000,$3ece222e,$ff784280,$4e750963
+	dc.l	$6173322e,$2e2e0000,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$3eb841ee,$ff6043ee,$ff6420bc
+	dc.l	$aaaaaaaa,$22bcbbbb,$bbbb223c,$aaaaaaaa
+	dc.l	$243cbbbb,$bbbb263c,$cccccccc,$283cdddd
+	dc.l	$dddd3d7c,$0014ff7c,$44fc0010,$48ee7fff
+	dc.l	$ff800efc,$80c19102,$42eeff7e,$2a102c11
+	dc.l	$2d7ccccc,$ccccff94,$2d7cdddd,$ddddff98
+	dc.l	$48ee7fff,$ffc061ff,$00003e1a,$4a0066ff
+	dc.l	$00003e40,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$3e3c41ee,$ff6143ee,$ff6520bc,$aaaaaaaa
+	dc.l	$22bcbbbb,$bbbb223c,$aaaaaaaa,$243cbbbb
+	dc.l	$bbbb263c,$cccccccc,$283cdddd,$dddd3d7c
+	dc.l	$0014ff7c,$44fc0010,$48ee7fff,$ff800efc
+	dc.l	$80c19102,$42eeff7e,$2a102c11,$2d7ccccc
+	dc.l	$ccccff94,$2d7cdddd,$ddddff98,$48ee7fff
+	dc.l	$ffc061ff,$00003d9e,$4a0066ff,$00003dc4
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$3dc041ee
+	dc.l	$ff6243ee,$ff6620bc,$aaaaaaaa,$22bcbbbb
+	dc.l	$bbbb223c,$aaaaaaaa,$243cbbbb,$bbbb263c
+	dc.l	$cccccccc,$283cdddd,$dddd3d7c,$0014ff7c
+	dc.l	$44fc0010,$48ee7fff,$ff800efc,$80c19102
+	dc.l	$42eeff7e,$2a102c11,$2d7ccccc,$ccccff94
+	dc.l	$2d7cdddd,$ddddff98,$48ee7fff,$ffc061ff
+	dc.l	$00003d22,$4a0066ff,$00003d48,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$3d4441ee,$ff6043ee
+	dc.l	$ff6420bc,$eeeeeeee,$22bcbbbb,$bbbb223c
+	dc.l	$aaaaaaaa,$243cbbbb,$bbbb263c,$cccccccc
+	dc.l	$283cdddd,$dddd3d7c,$0000ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff800efc,$80c19102,$42eeff7e
+	dc.l	$2a102c11,$2d7ceeee,$eeeeff84,$2d7cbbbb
+	dc.l	$bbbbff88,$2d7ceeee,$eeeeff94,$2d7cbbbb
+	dc.l	$bbbbff98,$48ee7fff,$ffc061ff,$00003c96
+	dc.l	$4a0066ff,$00003cbc,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$3cb841ee,$ff6143ee,$ff6520bc
+	dc.l	$eeeeeeee,$22bcbbbb,$bbbb223c,$aaaaaaaa
+	dc.l	$243cbbbb,$bbbb263c,$cccccccc,$283cdddd
+	dc.l	$dddd3d7c,$0000ff7c,$44fc0000,$48ee7fff
+	dc.l	$ff800efc,$80c19102,$42eeff7e,$2a102c11
+	dc.l	$2d7ceeee,$eeeeff84,$2d7cbbbb,$bbbbff88
+	dc.l	$2d7ceeee,$eeeeff94,$2d7cbbbb,$bbbbff98
+	dc.l	$48ee7fff,$ffc061ff,$00003c0a,$4a0066ff
+	dc.l	$00003c30,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$3c2c41ee,$ff6243ee,$ff6620bc,$eeeeeeee
+	dc.l	$22bcbbbb,$bbbb223c,$aaaaaaaa,$243cbbbb
+	dc.l	$bbbb263c,$cccccccc,$283cdddd,$dddd3d7c
+	dc.l	$0000ff7c,$44fc0000,$48ee7fff,$ff800efc
+	dc.l	$80c19102,$42eeff7e,$2a102c11,$2d7ceeee
+	dc.l	$eeeeff84,$2d7cbbbb,$bbbbff88,$2d7ceeee
+	dc.l	$eeeeff94,$2d7cbbbb,$bbbbff98,$48ee7fff
+	dc.l	$ffc061ff,$00003b7e,$4a0066ff,$00003ba4
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$3ba041ee
+	dc.l	$ff6043ee,$ff6420bc,$aaaaaaaa,$22bceeee
+	dc.l	$eeee223c,$aaaaaaaa,$243cbbbb,$bbbb263c
+	dc.l	$cccccccc,$283cdddd,$dddd3d7c,$0000ff7c
+	dc.l	$44fc0000,$48ee7fff,$ff800efc,$80c19102
+	dc.l	$42eeff7e,$2a102c11,$2d7caaaa,$aaaaff84
+	dc.l	$2d7ceeee,$eeeeff88,$2d7caaaa,$aaaaff94
+	dc.l	$2d7ceeee,$eeeeff98,$48ee7fff,$ffc061ff
+	dc.l	$00003af2,$4a0066ff,$00003b18,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$3b1441ee,$ff6143ee
+	dc.l	$ff6520bc,$aaaaaaaa,$22bceeee,$eeee223c
+	dc.l	$aaaaaaaa,$243cbbbb,$bbbb263c,$cccccccc
+	dc.l	$283cdddd,$dddd3d7c,$0000ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff800efc,$80c19102,$42eeff7e
+	dc.l	$2a102c11,$2d7caaaa,$aaaaff84,$2d7ceeee
+	dc.l	$eeeeff88,$2d7caaaa,$aaaaff94,$2d7ceeee
+	dc.l	$eeeeff98,$48ee7fff,$ffc061ff,$00003a66
+	dc.l	$4a0066ff,$00003a8c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$3a8841ee,$ff6243ee,$ff6620bc
+	dc.l	$aaaaaaaa,$22bc7fff,$ffff223c,$aaaaaaaa
+	dc.l	$243c8000,$0000263c,$cccccccc,$283cdddd
+	dc.l	$dddd3d7c,$000bff7c,$44fc0000,$48ee7fff
+	dc.l	$ff800efc,$80c19102,$42eeff7e,$2a102c11
+	dc.l	$2d7caaaa,$aaaaff84,$2d7c7fff,$ffffff88
+	dc.l	$2d7caaaa,$aaaaff94,$2d7c7fff,$ffffff98
+	dc.l	$48ee7fff,$ffc061ff,$000039da,$4a0066ff
+	dc.l	$00003a00,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$39fc41ee,$ff6043ee,$ff6430bc,$aaaa32bc
+	dc.l	$bbbb323c,$aaaa343c,$bbbb363c,$cccc383c
+	dc.l	$dddd3d7c,$0014ff7c,$44fc0010,$48ee7fff
+	dc.l	$ff800cfc,$80c19102,$42eeff7e,$3a103c11
+	dc.l	$3d7ccccc,$ff963d7c,$ddddff9a,$48ee7fff
+	dc.l	$ffc061ff,$0000396e,$4a0066ff,$00003994
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$399041ee
+	dc.l	$ff6143ee,$ff6530bc,$aaaa32bc,$bbbb323c
+	dc.l	$aaaa343c,$bbbb363c,$cccc383c,$dddd3d7c
+	dc.l	$0004ff7c,$44fc0000,$48ee7fff,$ff800cfc
+	dc.l	$80c19102,$42eeff7e,$3a103c11,$3d7ccccc
+	dc.l	$ff963d7c,$ddddff9a,$48ee7fff,$ffc061ff
+	dc.l	$00003902,$4a0066ff,$00003928,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$392441ee,$ff6043ee
+	dc.l	$ff6430bc,$eeee32bc,$bbbb323c,$aaaa343c
+	dc.l	$bbbb363c,$cccc383c,$dddd3d7c,$0000ff7c
+	dc.l	$44fc0000,$48ee7fff,$ff800cfc,$80c19102
+	dc.l	$42eeff7e,$3a103c11,$3d7ceeee,$ff863d7c
+	dc.l	$bbbbff8a,$3d7ceeee,$ff963d7c,$bbbbff9a
+	dc.l	$48ee7fff,$ffc061ff,$0000388a,$4a0066ff
+	dc.l	$000038b0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$38ac41ee,$ff6143ee,$ff6530bc,$eeee32bc
+	dc.l	$bbbb323c,$aaaa343c,$bbbb363c,$cccc383c
+	dc.l	$dddd3d7c,$0000ff7c,$44fc0000,$48ee7fff
+	dc.l	$ff800cfc,$80c19102,$42eeff7e,$3a103c11
+	dc.l	$3d7ceeee,$ff863d7c,$bbbbff8a,$3d7ceeee
+	dc.l	$ff963d7c,$bbbbff9a,$48ee7fff,$ffc061ff
+	dc.l	$00003812,$4a0066ff,$00003838,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$383441ee,$ff6043ee
+	dc.l	$ff6430bc,$aaaa32bc,$eeee323c,$aaaa343c
+	dc.l	$bbbb363c,$cccc383c,$dddd3d7c,$0000ff7c
+	dc.l	$44fc0000,$48ee7fff,$ff800cfc,$80c19102
+	dc.l	$42eeff7e,$3a103c11,$3d7caaaa,$ff863d7c
+	dc.l	$eeeeff8a,$3d7caaaa,$ff963d7c,$eeeeff9a
+	dc.l	$48ee7fff,$ffc061ff,$0000379a,$4a0066ff
+	dc.l	$000037c0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$37bc41ee,$ff6143ee,$ff6530bc,$aaaa32bc
+	dc.l	$7fff323c,$aaaa343c,$8000363c,$cccc383c
+	dc.l	$dddd3d7c,$001bff7c,$44fc0010,$48ee7fff
+	dc.l	$ff800cfc,$80c19102,$42eeff7e,$3a103c11
+	dc.l	$3d7caaaa,$ff863d7c,$7fffff8a,$3d7caaaa
+	dc.l	$ff963d7c,$7fffff9a,$48ee7fff,$ffc061ff
+	dc.l	$00003722,$4a0066ff,$00003748,$222eff78
+	dc.l	$42804e75,$09636d70,$322c6368,$6b322e2e
+	dc.l	$2e0051fc,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$372c3d7c,$2040ff60,$223c1111,$11203d7c
+	dc.l	$0004ff7c,$44fc0000,$48ee7fff,$ff8000ee
+	dc.l	$1000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+	dc.l	$000036c2,$4a0066ff,$000036e8,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$36e43d7c,$2040ff60
+	dc.l	$227c0000,$00403d7c,$0004ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff8000ee,$9000ff60,$42eeff7e
+	dc.l	$48ee7fff,$ffc061ff,$0000367a,$4a0066ff
+	dc.l	$000036a0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$369c3d7c,$2040ff60,$223c1111,$11303d7c
+	dc.l	$0000ff7c,$44fc0000,$48ee7fff,$ff8000ee
+	dc.l	$1800ff60,$42eeff7e,$48ee7fff,$ffc061ff
+	dc.l	$00003632,$4a0066ff,$00003658,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$36543d7c,$2040ff60
+	dc.l	$227c0000,$00103d7c,$0001ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff8000ee,$9000ff60,$42eeff7e
+	dc.l	$48ee7fff,$ffc061ff,$000035ea,$4a0066ff
+	dc.l	$00003610,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$360c3d7c,$2040ff60,$223c1111,$11503d7c
+	dc.l	$0001ff7c,$44fc0000,$48ee7fff,$ff8000ee
+	dc.l	$1000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+	dc.l	$000035a2,$4a0066ff,$000035c8,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$35c43d7c,$2040ff60
+	dc.l	$227c0000,$00903d7c,$0001ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff8000ee,$9000ff60,$42eeff7e
+	dc.l	$48ee7fff,$ffc061ff,$0000355a,$4a0066ff
+	dc.l	$00003580,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$357c2d7c,$2000a000,$ff60223c,$11112000
+	dc.l	$3d7c0004,$ff7c44fc,$000048ee,$7fffff80
+	dc.l	$02ee1000,$ff6042ee,$ff7e48ee,$7fffffc0
+	dc.l	$61ff0000,$35104a00,$66ff0000,$353652ae
+	dc.l	$ff784cfb,$3fff0170,$00003532,$2d7c2000
+	dc.l	$a000ff60,$227cffff,$a0003d7c,$0004ff7c
+	dc.l	$44fc0000,$48ee7fff,$ff8002ee,$9000ff60
+	dc.l	$42eeff7e,$48ee7fff,$ffc061ff,$000034c6
+	dc.l	$4a0066ff,$000034ec,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$34e82d7c,$2000a000,$ff60223c
+	dc.l	$11113000,$3d7c0000,$ff7c44fc,$000048ee
+	dc.l	$7fffff80,$02ee1800,$ff6042ee,$ff7e48ee
+	dc.l	$7fffffc0,$61ff0000,$347c4a00,$66ff0000
+	dc.l	$34a252ae,$ff784cfb,$3fff0170,$0000349e
+	dc.l	$2d7c2000,$a000ff60,$227cffff,$90003d7c
+	dc.l	$0000ff7c,$44fc0000,$48ee7fff,$ff8002ee
+	dc.l	$9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+	dc.l	$00003432,$4a0066ff,$00003458,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$34542d7c,$2000a000
+	dc.l	$ff60223c,$11111000,$3d7c0001,$ff7c44fc
+	dc.l	$000048ee,$7fffff80,$02ee1000,$ff6042ee
+	dc.l	$ff7e48ee,$7fffffc0,$61ff0000,$33e84a00
+	dc.l	$66ff0000,$340e52ae,$ff784cfb,$3fff0170
+	dc.l	$0000340a,$2d7c2000,$a000ff60,$227cffff
+	dc.l	$b0003d7c,$0001ff7c,$44fc0000,$48ee7fff
+	dc.l	$ff8002ee,$9000ff60,$42eeff7e,$48ee7fff
+	dc.l	$ffc061ff,$0000339e,$4a0066ff,$000033c4
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$33c02d7c
+	dc.l	$a0000000,$ff602d7c,$c0000000,$ff64223c
+	dc.l	$a0000000,$3d7c000c,$ff7c44fc,$000848ee
+	dc.l	$7fffff80,$04ee1000,$ff6042ee,$ff7e48ee
+	dc.l	$7fffffc0,$61ff0000,$334c4a00,$66ff0000
+	dc.l	$337252ae,$ff784cfb,$3fff0170,$0000336e
+	dc.l	$2d7ca000,$0000ff60,$2d7cc000,$0000ff64
+	dc.l	$227cc000,$00003d7c,$000cff7c,$44fc0008
+	dc.l	$48ee7fff,$ff8004ee,$9000ff60,$42eeff7e
+	dc.l	$48ee7fff,$ffc061ff,$000032fa,$4a0066ff
+	dc.l	$00003320,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$331c2d7c,$a0000000,$ff602d7c,$c0000000
+	dc.l	$ff64223c,$b0000000,$3d7c0008,$ff7c44fc
+	dc.l	$000848ee,$7fffff80,$04ee1800,$ff6042ee
+	dc.l	$ff7e48ee,$7fffffc0,$61ff0000,$32a84a00
+	dc.l	$66ff0000,$32ce52ae,$ff784cfb,$3fff0170
+	dc.l	$000032ca,$2d7ca000,$0000ff60,$2d7cc000
+	dc.l	$0000ff64,$227c1000,$00003d7c,$0009ff7c
+	dc.l	$44fc0008,$48ee7fff,$ff8004ee,$9000ff60
+	dc.l	$42eeff7e,$48ee7fff,$ffc061ff,$00003256
+	dc.l	$4a0066ff,$0000327c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$32782d7c,$a0000000,$ff602d7c
+	dc.l	$c0000000,$ff64223c,$90000000,$3d7c0009
+	dc.l	$ff7c44fc,$000848ee,$7fffff80,$04ee1000
+	dc.l	$ff6042ee,$ff7e48ee,$7fffffc0,$61ff0000
+	dc.l	$32044a00,$66ff0000,$322a52ae,$ff784cfb
+	dc.l	$3fff0170,$00003226,$2d7ca000,$0000ff60
+	dc.l	$2d7cc000,$0000ff64,$227cd000,$00003d7c
+	dc.l	$0009ff7c,$44fc0008,$48ee7fff,$ff8004ee
+	dc.l	$9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+	dc.l	$000031b2,$4a0066ff,$000031d8,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$31d43d7c,$a040ff60
+	dc.l	$223c1111,$11a03d7c,$0004ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff8000ee,$1000ff60,$42eeff7e
+	dc.l	$48ee7fff,$ffc061ff,$0000316a,$4a0066ff
+	dc.l	$00003190,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$318c3d7c,$a040ff60,$227c0000,$00403d7c
+	dc.l	$0004ff7c,$44fc0000,$48ee7fff,$ff8000ee
+	dc.l	$9800ff60,$42eeff7e,$48ee7fff,$ffc061ff
+	dc.l	$00003122,$4a0066ff,$00003148,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$31443d7c,$a040ff60
+	dc.l	$223c1111,$11b03d7c,$0000ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff8000ee,$1000ff60,$42eeff7e
+	dc.l	$48ee7fff,$ffc061ff,$000030da,$4a0066ff
+	dc.l	$00003100,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$30fc3d7c,$a040ff60,$227c0000,$00103d7c
+	dc.l	$0000ff7c,$44fc0000,$48ee7fff,$ff8000ee
+	dc.l	$9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+	dc.l	$00003092,$4a0066ff,$000030b8,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$30b43d7c,$a040ff60
+	dc.l	$223c1111,$11903d7c,$0001ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff8000ee,$1000ff60,$42eeff7e
+	dc.l	$48ee7fff,$ffc061ff,$0000304a,$4a0066ff
+	dc.l	$00003070,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$306c3d7c,$a040ff60,$227c0000,$00503d7c
+	dc.l	$0001ff7c,$44fc0000,$48ee7fff,$ff8000ee
+	dc.l	$9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+	dc.l	$00003002,$4a0066ff,$00003028,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$30243d7c,$a0c0ff60
+	dc.l	$223c1111,$11a03d7c,$0004ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff8000ee,$1000ff60,$42eeff7e
+	dc.l	$48ee7fff,$ffc061ff,$00002fba,$4a0066ff
+	dc.l	$00002fe0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$2fdc3d7c,$a0c0ff60,$227cffff,$ffc03d7c
+	dc.l	$0004ff7c,$44fc0000,$48ee7fff,$ff8000ee
+	dc.l	$9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+	dc.l	$00002f72,$4a0066ff,$00002f98,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$2f943d7c,$a0c0ff60
+	dc.l	$223c1111,$11b03d7c,$0000ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff8000ee,$1800ff60,$42eeff7e
+	dc.l	$48ee7fff,$ffc061ff,$00002f2a,$4a0066ff
+	dc.l	$00002f50,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$2f4c3d7c,$a0c0ff60,$227c1111,$11903d7c
+	dc.l	$0001ff7c,$44fc0000,$48ee7fff,$ff8000ee
+	dc.l	$9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+	dc.l	$00002ee2,$4a0066ff,$00002f08,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$2f043d7c,$a0c0ff60
+	dc.l	$223c1111,$11d03d7c,$0001ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff8000ee,$1000ff60,$42eeff7e
+	dc.l	$48ee7fff,$ffc061ff,$00002e9a,$4a0066ff
+	dc.l	$00002ec0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$2ebc3d7c,$a0c0ff60,$227c0000,$00503d7c
+	dc.l	$001bff7c,$44fc001f,$48ee7fff,$ff8000ee
+	dc.l	$9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+	dc.l	$00002e52,$4a0066ff,$00002e78,$222eff78
+	dc.l	$42804e75,$09456666,$65637469,$76652061
+	dc.l	$64647265,$73736573,$2e2e2e00,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$2e544282,$760241ee
+	dc.l	$ff743d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c10,$340242ee,$ff7e48ee,$ffffffc0
+	dc.l	$2d7c0000,$0004ff8c,$61ff0000,$2de84a00
+	dc.l	$66ff0000,$2e0e52ae,$ff784cfb,$3fff0170
+	dc.l	$00002e0a,$42827602,$41eeff74,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c183402
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c41ee,$ff782d48,$ffa061ff,$00002d96
+	dc.l	$4a0066ff,$00002dbc,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$2db84282,$760241ee,$ff783d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c20
+	dc.l	$340242ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$41eeff74,$2d48ffa0,$61ff0000
+	dc.l	$2d444a00,$66ff0000,$2d6a52ae,$ff784cfb
+	dc.l	$3fff0170,$00002d66,$42827602,$41ee0f74
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c283402,$f00042ee,$ff7e48ee,$ffffffc0
+	dc.l	$2d7c0000,$0004ff8c,$61ff0000,$2cf84a00
+	dc.l	$66ff0000,$2d1e52ae,$ff784cfb,$3fff0170
+	dc.l	$00002d1a,$42827602,$41eeef74,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c283402
+	dc.l	$100042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$61ff0000,$2cac4a00,$66ff0000
+	dc.l	$2cd252ae,$ff7852ae,$ff7852ae,$ff784cfb
+	dc.l	$3fff0170,$00002cc6,$42827602,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c3c3402
+	dc.l	$00000002,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$00002c5a,$4a0066ff
+	dc.l	$00002c80,$52aeff78,$60040000,$00024cfb
+	dc.l	$3fff0170,$00002c76,$42827602,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c3a3402
+	dc.l	$ffda42ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$61ff0000,$2c0c4a00,$66ff0000
+	dc.l	$2c3252ae,$ff784cfb,$3fff0170,$00002c2e
+	dc.l	$42827602,$43eeff78,$3d7c0000,$ff7c44fc
+	dc.l	$000048ee,$ffffff80,$4c213402,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c41ee
+	dc.l	$ff742d48,$ffa461ff,$00002bba,$4a0066ff
+	dc.l	$00002be0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$2bdc4282,$760245ee,$ff783d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c22,$340242ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$41eeff74,$2d48ffa8,$61ff0000,$2b684a00
+	dc.l	$66ff0000,$2b8e52ae,$ff784cfb,$3fff0170
+	dc.l	$00002b8a,$42827602,$47eeff78,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c233402
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c41ee,$ff742d48,$ffac61ff,$00002b16
+	dc.l	$4a0066ff,$00002b3c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$2b384282,$760249ee,$ff783d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c24
+	dc.l	$340242ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$41eeff74,$2d48ffb0,$61ff0000
+	dc.l	$2ac44a00,$66ff0000,$2aea52ae,$ff784cfb
+	dc.l	$3fff0170,$00002ae6,$42827602,$4beeff78
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c253402,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c41ee,$ff742d48,$ffb461ff
+	dc.l	$00002a72,$4a0066ff,$00002a98,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$2a94224e,$42827602
+	dc.l	$4de9ff78,$337c0000,$ff7c44fc,$000048e9
+	dc.l	$ffffff80,$4c263402,$42e9ff7e,$48e9ffff
+	dc.l	$ffc0237c,$00000004,$ff8c41e9,$ff742348
+	dc.l	$ffb82c49,$61ff0000,$2a1c4a00,$66ff0000
+	dc.l	$2a4252ae,$ff784cfb,$3fff0170,$00002a3e
+	dc.l	$42827602,$204f4fee,$ff783d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c27,$340242ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$43eeff74,$2d49ffbc,$2e4861ff,$000029c6
+	dc.l	$4a0066ff,$000029ec,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$29e84282,$760241ee,$ff7478f0
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c303402,$401042ee,$ff7e48ee,$ffffffc0
+	dc.l	$2d7c0000,$0004ff8c,$61ff0000,$29784a00
+	dc.l	$66ff0000,$299e52ae,$ff784cfb,$3fff0170
+	dc.l	$0000299a,$42827602,$41eeff74,$78f83d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c30
+	dc.l	$34024210,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$0000292a,$4a0066ff
+	dc.l	$00002950,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$294c4282,$760241ee,$ff7478fc,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c303402
+	dc.l	$441042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$61ff0000,$28dc4a00,$66ff0000
+	dc.l	$290252ae,$ff784cfb,$3fff0170,$000028fe
+	dc.l	$42827602,$41eeff74,$78fe3d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c30,$34024610
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$0000288e,$4a0066ff,$000028b4
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$28b04282
+	dc.l	$760241ee,$ff7478f0,$3d7c0000,$ff7c44fc
+	dc.l	$000048ee,$ffffff80,$4c303402,$481042ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$61ff0000,$28404a00,$66ff0000,$286652ae
+	dc.l	$ff784cfb,$3fff0170,$00002862,$42827602
+	dc.l	$41eeff74,$78f83d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c30,$34024a10,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$000027f2,$4a0066ff,$00002818,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$28144282,$760241ee
+	dc.l	$ff7478fc,$3d7c0000,$ff7c44fc,$000048ee
+	dc.l	$ffffff80,$4c303402,$4c1042ee,$ff7e48ee
+	dc.l	$ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+	dc.l	$27a44a00,$66ff0000,$27ca52ae,$ff784cfb
+	dc.l	$3fff0170,$000027c6,$42827602,$41eeff74
+	dc.l	$78fe3d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c30,$34024e10,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$00002756
+	dc.l	$4a0066ff,$0000277c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$27784282,$760241ee,$ff74287c
+	dc.l	$fffffffe,$3d7c0000,$ff7c44fc,$000048ee
+	dc.l	$ffffff80,$4c303402,$ce1042ee,$ff7e48ee
+	dc.l	$ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+	dc.l	$27044a00,$66ff0000,$272a52ae,$ff784cfb
+	dc.l	$3fff0170,$00002726,$42827602,$41eeff74
+	dc.l	$287c0000,$00023d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c30,$3402cef0,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$000026b2,$4a0066ff,$000026d8,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$26d44282,$760243ee
+	dc.l	$ff7478f0,$3d7c0000,$ff7c44fc,$000048ee
+	dc.l	$ffffff80,$4c313402,$401042ee,$ff7e48ee
+	dc.l	$ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+	dc.l	$26644a00,$66ff0000,$268a52ae,$ff784cfb
+	dc.l	$3fff0170,$00002686,$42827602,$45eeff74
+	dc.l	$78f03d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c32,$34024010,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$00002616
+	dc.l	$4a0066ff,$0000263c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$26384282,$760247ee,$ff7478f0
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c333402,$401042ee,$ff7e48ee,$ffffffc0
+	dc.l	$2d7c0000,$0004ff8c,$61ff0000,$25c84a00
+	dc.l	$66ff0000,$25ee52ae,$ff784cfb,$3fff0170
+	dc.l	$000025ea,$42827602,$49eeff74,$78f03d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c34
+	dc.l	$34024010,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$0000257a,$4a0066ff
+	dc.l	$000025a0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$259c4282,$76024bee,$ff7478f0,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c353402
+	dc.l	$401042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$61ff0000,$252c4a00,$66ff0000
+	dc.l	$255252ae,$ff784cfb,$3fff0170,$0000254e
+	dc.l	$224e4282,$76024de9,$ff7478f0,$337c0000
+	dc.l	$ff7c44fc,$000048e9,$ffffff80,$4c363402
+	dc.l	$401042e9,$ff7e48e9,$ffffffc0,$237c0000
+	dc.l	$0004ff8c,$2c4961ff,$000024da,$4a0066ff
+	dc.l	$00002500,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$24fc4282,$7602204f,$4feeff74,$78f03d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c37
+	dc.l	$34024010,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c2e48,$61ff0000,$24884a00
+	dc.l	$66ff0000,$24ae52ae,$ff784cfb,$3fff0170
+	dc.l	$000024aa,$42827602,$43eeff74,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c113402
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$0000243e,$4a0066ff,$00002464
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$24604282
+	dc.l	$760245ee,$ff743d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c12,$340242ee,$ff7e48ee
+	dc.l	$ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+	dc.l	$23f44a00,$66ff0000,$241a52ae,$ff784cfb
+	dc.l	$3fff0170,$00002416,$42827602,$47eeff74
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c133402,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$000023aa,$4a0066ff
+	dc.l	$000023d0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$23cc4282,$760249ee,$ff743d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c14,$340242ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$61ff0000,$23604a00,$66ff0000,$238652ae
+	dc.l	$ff784cfb,$3fff0170,$00002382,$42827602
+	dc.l	$4beeff74,$3d7c0000,$ff7c44fc,$000048ee
+	dc.l	$ffffff80,$4c153402,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$00002316
+	dc.l	$4a0066ff,$0000233c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$2338224e,$42827602,$4de9ff74
+	dc.l	$337c0000,$ff7c44fc,$000048e9,$ffffff80
+	dc.l	$4c163402,$42e9ff7e,$48e9ffff,$ffc0237c
+	dc.l	$00000004,$ff8c2c49,$61ff0000,$22c84a00
+	dc.l	$66ff0000,$22ee52ae,$ff784cfb,$3fff0170
+	dc.l	$000022ea,$42827602,$204f4fee,$ff743d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c17
+	dc.l	$340242ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$2e4861ff,$0000227a,$4a0066ff
+	dc.l	$000022a0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$229c4282,$760243ee,$ff743d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c19,$340242ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$41eeff78,$2d48ffa4,$61ff0000,$22284a00
+	dc.l	$66ff0000,$224e52ae,$ff784cfb,$3fff0170
+	dc.l	$0000224a,$42827602,$45eeff74,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c1a3402
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c41ee,$ff782d48,$ffa861ff,$000021d6
+	dc.l	$4a0066ff,$000021fc,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$21f84282,$760247ee,$ff743d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c1b
+	dc.l	$340242ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$41eeff78,$2d48ffac,$61ff0000
+	dc.l	$21844a00,$66ff0000,$21aa52ae,$ff784cfb
+	dc.l	$3fff0170,$000021a6,$42827602,$49eeff74
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c1c3402,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c41ee,$ff782d48,$ffb061ff
+	dc.l	$00002132,$4a0066ff,$00002158,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$21544282,$76024bee
+	dc.l	$ff743d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c1d,$340242ee,$ff7e48ee,$ffffffc0
+	dc.l	$2d7c0000,$0004ff8c,$41eeff78,$2d48ffb4
+	dc.l	$61ff0000,$20e04a00,$66ff0000,$210652ae
+	dc.l	$ff784cfb,$3fff0170,$00002102,$224e4282
+	dc.l	$76024de9,$ff74337c,$0000ff7c,$44fc0000
+	dc.l	$48e9ffff,$ff804c1e,$340242e9,$ff7e48e9
+	dc.l	$ffffffc0,$237c0000,$0004ff8c,$41e9ff78
+	dc.l	$2348ffb8,$2c4961ff,$0000208a,$4a0066ff
+	dc.l	$000020b0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$20ac4282,$7602204f,$4feeff74,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c1f3402
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c43ee,$ff782d49,$ffbc2e48,$61ff0000
+	dc.l	$20344a00,$66ff0000,$205a52ae,$ff784cfb
+	dc.l	$3fff0170,$00002056,$42827602,$43eeef74
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c293402,$100042ee,$ff7e48ee,$ffffffc0
+	dc.l	$2d7c0000,$0004ff8c,$61ff0000,$1fe84a00
+	dc.l	$66ff0000,$200e52ae,$ff784cfb,$3fff0170
+	dc.l	$0000200a,$42827602,$45eeef74,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c2a3402
+	dc.l	$100042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$61ff0000,$1f9c4a00,$66ff0000
+	dc.l	$1fc252ae,$ff784cfb,$3fff0170,$00001fbe
+	dc.l	$42827602,$47eeef74,$3d7c0000,$ff7c44fc
+	dc.l	$000048ee,$ffffff80,$4c2b3402,$100042ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$61ff0000,$1f504a00,$66ff0000,$1f7652ae
+	dc.l	$ff784cfb,$3fff0170,$00001f72,$42827602
+	dc.l	$49eeef74,$3d7c0000,$ff7c44fc,$000048ee
+	dc.l	$ffffff80,$4c2c3402,$100042ee,$ff7e48ee
+	dc.l	$ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+	dc.l	$1f044a00,$66ff0000,$1f2a52ae,$ff784cfb
+	dc.l	$3fff0170,$00001f26,$42827602,$4beeef74
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c2d3402,$100042ee,$ff7e48ee,$ffffffc0
+	dc.l	$2d7c0000,$0004ff8c,$61ff0000,$1eb84a00
+	dc.l	$66ff0000,$1ede52ae,$ff784cfb,$3fff0170
+	dc.l	$00001eda,$224e4282,$76024de9,$ef74337c
+	dc.l	$0000ff7c,$44fc0000,$48e9ffff,$ff804c2e
+	dc.l	$34021000,$42e9ff7e,$48e9ffff,$ffc0237c
+	dc.l	$00000004,$ff8c2c49,$61ff0000,$1e684a00
+	dc.l	$66ff0000,$1e8e52ae,$ff784cfb,$3fff0170
+	dc.l	$00001e8a,$42827602,$204f4fee,$ef743d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c2f
+	dc.l	$34021000,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c2e48,$61ff0000,$1e184a00
+	dc.l	$66ff0000,$1e3e52ae,$ff784cfb,$3fff0170
+	dc.l	$00001e3a,$42827602,$41ee0f74,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c283402
+	dc.l	$f00042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$61ff0000,$1dcc4a00,$66ff0000
+	dc.l	$1df252ae,$ff786004,$00000002,$4cfb3fff
+	dc.l	$01700000,$1de84282,$76023d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c3a,$3402ffda
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$00001d7e,$4a0066ff,$00001da4
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$1da04282
+	dc.l	$760247ee,$ff7478f0,$3d7c0000,$ff7c44fc
+	dc.l	$000048ee,$ffffff80,$4c333402,$401042ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$61ff0000,$1d304a00,$66ff0000,$1d5652ae
+	dc.l	$ff784cfb,$3fff0170,$00001d52,$42827602
+	dc.l	$47eeff74,$78f83d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c33,$34024210,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$00001ce2,$4a0066ff,$00001d08,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$1d044282,$760247ee
+	dc.l	$ff7478fc,$3d7c0000,$ff7c44fc,$000048ee
+	dc.l	$ffffff80,$4c333402,$441042ee,$ff7e48ee
+	dc.l	$ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+	dc.l	$1c944a00,$66ff0000,$1cba52ae,$ff784cfb
+	dc.l	$3fff0170,$00001cb6,$42827602,$47eeff74
+	dc.l	$78fe3d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c33,$34024610,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$00001c46
+	dc.l	$4a0066ff,$00001c6c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$1c684282,$760247ee,$ff7478f0
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c333402,$481042ee,$ff7e48ee,$ffffffc0
+	dc.l	$2d7c0000,$0004ff8c,$61ff0000,$1bf84a00
+	dc.l	$66ff0000,$1c1e52ae,$ff784cfb,$3fff0170
+	dc.l	$00001c1a,$42827602,$47eeff74,$78f83d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c33
+	dc.l	$34024a10,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$00001baa,$4a0066ff
+	dc.l	$00001bd0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$1bcc4282,$760247ee,$ff7478fc,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c333402
+	dc.l	$4c1042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$61ff0000,$1b5c4a00,$66ff0000
+	dc.l	$1b8252ae,$ff784cfb,$3fff0170,$00001b7e
+	dc.l	$42827602,$47eeff74,$78fe3d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c33,$34024e10
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$00001b0e,$4a0066ff,$00001b34
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$1b304282
+	dc.l	$760247ee,$ff74287c,$00000002,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c333402
+	dc.l	$cef042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$61ff0000,$1abc4a00,$66ff0000
+	dc.l	$1ae252ae,$ff784cfb,$3fff0170,$00001ade
+	dc.l	$42827602,$47eeff74,$287c0000,$00023d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c33
+	dc.l	$34020750,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$00001a6a,$4a0066ff
+	dc.l	$00001a90,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$1a8c4282,$760247ee,$ff74284b,$d9fc0000
+	dc.l	$00103d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c30,$3402c9a0,$fff042ee,$ff7e48ee
+	dc.l	$ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+	dc.l	$1a144a00,$66ff0000,$1a3a52ae,$ff784cfb
+	dc.l	$3fff0170,$00001a36,$42827602,$47eeff74
+	dc.l	$287c0000,$00023d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c33,$3402cef0,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$000019c2,$4a0066ff,$000019e8,$52aeff78
+	dc.l	$60040000,$00024cfb,$3fff0170,$000019de
+	dc.l	$42827602,$47eeff74,$78f03d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c3b,$340240e4
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$0000196e,$4a0066ff,$00001994
+	dc.l	$52aeff78,$60040000,$00024cfb,$3fff0170
+	dc.l	$0000198a,$42827602,$41eeff74,$78f83d7c
+	dc.l	$0000ff7c,$44fc0000,$48ee7fff,$ff804c3b
+	dc.l	$340242e4,$42eeff7e,$48ee7fff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$0000191a,$4a0066ff
+	dc.l	$00001940,$52aeff78,$60040000,$00024cfb
+	dc.l	$3fff0170,$00001936,$42827602,$41eeff74
+	dc.l	$78fc3d7c,$0000ff7c,$44fc0000,$48ee7fff
+	dc.l	$ff804c3b,$340244e4,$42eeff7e,$48ee7fff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$000018c6
+	dc.l	$4a0066ff,$000018ec,$52aeff78,$60040000
+	dc.l	$00024cfb,$3fff0170,$000018e2,$42827602
+	dc.l	$41eeff74,$78fe3d7c,$0000ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff804c3b,$340246e4,$42eeff7e
+	dc.l	$48ee7fff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$00001872,$4a0066ff,$00001898,$52aeff78
+	dc.l	$60040000,$00024cfb,$3fff0170,$0000188e
+	dc.l	$42827602,$41eeff74,$78f03d7c,$0000ff7c
+	dc.l	$44fc0000,$48ee7fff,$ff804c3b,$340248e4
+	dc.l	$42eeff7e,$48ee7fff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$0000181e,$4a0066ff,$00001844
+	dc.l	$52aeff78,$60040000,$00024cfb,$3fff0170
+	dc.l	$0000183a,$42827602,$41eeff74,$78f83d7c
+	dc.l	$0000ff7c,$44fc0000,$48ee7fff,$ff804c3b
+	dc.l	$34024ae4,$42eeff7e,$48ee7fff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$000017ca,$4a0066ff
+	dc.l	$000017f0,$52aeff78,$60040000,$00024cfb
+	dc.l	$3fff0170,$000017e6,$42827602,$41eeff74
+	dc.l	$78fc3d7c,$0000ff7c,$44fc0000,$48ee7fff
+	dc.l	$ff804c3b,$34024ce4,$42eeff7e,$48ee7fff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$00001776
+	dc.l	$4a0066ff,$0000179c,$52aeff78,$60040000
+	dc.l	$00024cfb,$3fff0170,$00001792,$42827602
+	dc.l	$41eeff74,$78fe3d7c,$0000ff7c,$44fc0000
+	dc.l	$48ee7fff,$ff804c3b,$34024ee4,$42eeff7e
+	dc.l	$48ee7fff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$00001722,$4a0066ff,$00001748,$52aeff78
+	dc.l	$60040000,$00024cfb,$3fff0170,$0000173e
+	dc.l	$42827602,$47eeff74,$287cffff,$fffe3d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+	dc.l	$3402cee0,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$000016ca,$4a0066ff
+	dc.l	$000016f0,$52aeff78,$60040000,$00024cfb
+	dc.l	$3fff0170,$000016e6,$42827602,$47eeff74
+	dc.l	$287c0000,$00023d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c3b,$34020760,$ffd042ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$61ff0000,$16704a00,$66ff0000,$169652ae
+	dc.l	$ff7852ae,$ff784cfb,$3fff0170,$0000168e
+	dc.l	$42827602,$47f9ffff,$ff74287c,$00000002
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c3b3402,$cf300000,$000a42ee,$ff7e48ee
+	dc.l	$ffffffc0,$2d7c0000,$0004ff8c,$60040000
+	dc.l	$000261ff,$0000160e,$4a0066ff,$00001634
+	dc.l	$52aeff78,$60040000,$00024cfb,$3fff0170
+	dc.l	$0000162a,$42827602,$43eeff74,$78f03d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+	dc.l	$340240e4,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$000015ba,$4a0066ff
+	dc.l	$000015e0,$52aeff78,$60040000,$00024cfb
+	dc.l	$3fff0170,$000015d6,$42827602,$41eeff74
+	dc.l	$78f83d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c3b,$340242e4,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$00001566
+	dc.l	$4a0066ff,$0000158c,$52aeff78,$60040000
+	dc.l	$00024cfb,$3fff0170,$00001582,$42827602
+	dc.l	$41eeff74,$78fc3d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c3b,$340244e4,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$00001512,$4a0066ff,$00001538,$52aeff78
+	dc.l	$60040000,$00024cfb,$3fff0170,$0000152e
+	dc.l	$42827602,$41eeff74,$78fe3d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c3b,$340246e4
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$000014be,$4a0066ff,$000014e4
+	dc.l	$52aeff78,$60040000,$00024cfb,$3fff0170
+	dc.l	$000014da,$42827602,$41eeff74,$78f03d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+	dc.l	$340248e4,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$0000146a,$4a0066ff
+	dc.l	$00001490,$52aeff78,$60040000,$00024cfb
+	dc.l	$3fff0170,$00001486,$42827602,$41eeff74
+	dc.l	$78f83d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c3b,$34024ae4,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$00001416
+	dc.l	$4a0066ff,$0000143c,$52aeff78,$60040000
+	dc.l	$00024cfb,$3fff0170,$00001432,$42827602
+	dc.l	$41eeff74,$78fc3d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c3b,$34024ce4,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$000013c2,$4a0066ff,$000013e8,$52aeff78
+	dc.l	$60040000,$00024cfb,$3fff0170,$000013de
+	dc.l	$42827602,$41eeff74,$78fe3d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c3b,$34024ee4
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$0000136e,$4a0066ff,$00001394
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$13904282
+	dc.l	$760241ee,$ff7478fe,$3d7c0000,$ff7c44fc
+	dc.l	$000048ee,$ffffff80,$4c3b3402,$4e2642ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$60040000,$000261ff,$0000131a,$4a0066ff
+	dc.l	$00001340,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$133c4282,$760247ee,$ef7449ee,$ff70288b
+	dc.l	$78f03d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c34,$34024122,$00101000,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$000012c2,$4a0066ff,$000012e8,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$12e44282,$760247ee
+	dc.l	$ef7449ee,$ff70288b,$78f83d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c34,$34024322
+	dc.l	$00101000,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$0000126a,$4a0066ff
+	dc.l	$00001290,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$128c4282,$760247ee,$ef7449ee,$ff70288b
+	dc.l	$78fc3d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c34,$34024522,$00101000,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$00001212,$4a0066ff,$00001238,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$12344282,$760247ee
+	dc.l	$ef7449ee,$ff70288b,$78fe3d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c34,$34024722
+	dc.l	$00101000,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$000011ba,$4a0066ff
+	dc.l	$000011e0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$11dc4282,$760247ee,$ef7449ee,$ff70288b
+	dc.l	$78f03d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c34,$34024922,$00101000,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$00001162,$4a0066ff,$00001188,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$11844282,$760247ee
+	dc.l	$ef7449ee,$ff70288b,$78f83d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c34,$34024b22
+	dc.l	$00101000,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$0000110a,$4a0066ff
+	dc.l	$00001130,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$112c4282,$760247ee,$ef7449ee,$ff70288b
+	dc.l	$78fc3d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c34,$34024d22,$00101000,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$000010b2,$4a0066ff,$000010d8,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$10d44282,$760247ee
+	dc.l	$ef7449ee,$ff70288b,$78fe3d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c34,$34024f22
+	dc.l	$00101000,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$0000105a,$4a0066ff
+	dc.l	$00001080,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$107c4282,$760247ee,$ef7449ee,$ff70288b
+	dc.l	$78fe3d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c34,$34024f33,$00000010,$00001000
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$00000ffe,$4a0066ff,$00001024
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$10204282
+	dc.l	$760247ee,$ef7449ee,$ff70288b,$78fe3d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c34
+	dc.l	$34020753,$00001000,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$00000fa6
+	dc.l	$4a0066ff,$00000fcc,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$0fc84282,$760247ee,$ef7449ee
+	dc.l	$ff70288b,$78fe3d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c34,$34020753,$00001000
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$00000f4e,$4a0066ff,$00000f74
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$0f704282
+	dc.l	$760247ee,$ef7449ee,$ff70288b,$78f0d88c
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c303402,$49b30000,$00100000,$100042ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$61ff0000,$0ef04a00,$66ff0000,$0f1652ae
+	dc.l	$ff7852ae,$ff784cfb,$3fff0170,$00000f0e
+	dc.l	$224e4282,$760247e9,$0f7449e9,$ff70288b
+	dc.l	$2c7cffff,$fffe337c,$0000ff7c,$44fc0000
+	dc.l	$48e9ffff,$ff804c34,$3402ef22,$0010f000
+	dc.l	$42e9ff7e,$48e9ffff,$ffc0237c,$00000004
+	dc.l	$ff8c2c49,$61ff0000,$0e8c4a00,$66ff0000
+	dc.l	$0eb252ae,$ff784cfb,$3fff0170,$00000eae
+	dc.l	$224e4282,$760247e9,$0f7449e9,$ff70288b
+	dc.l	$2c7c0000,$0002337c,$0000ff7c,$44fc0000
+	dc.l	$48e9ffff,$ff804c34,$3402ef22,$fff0f000
+	dc.l	$42e9ff7e,$48e9ffff,$ffc0237c,$00000004
+	dc.l	$ff8c2c49,$61ff0000,$0e2c4a00,$66ff0000
+	dc.l	$0e5252ae,$ff784cfb,$3fff0170,$00000e4e
+	dc.l	$42827602,$47eeff54,$49eeff70,$288b99fc
+	dc.l	$00000010,$78103d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c34,$34024126,$00100010
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$00000dce,$4a0066ff,$00000df4
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$0df04282
+	dc.l	$760247ee,$ff5449ee,$ff70288b,$99fc0000
+	dc.l	$00107808,$3d7c0000,$ff7c44fc,$000048ee
+	dc.l	$ffffff80,$4c343402,$43260010,$001042ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$61ff0000,$0d704a00,$66ff0000,$0d9652ae
+	dc.l	$ff784cfb,$3fff0170,$00000d92,$42827602
+	dc.l	$47eeff54,$49eeff70,$288b99fc,$00000010
+	dc.l	$78043d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c34,$34024526,$00100010,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$00000d12,$4a0066ff,$00000d38,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$0d344282,$760247ee
+	dc.l	$ff5449ee,$ff70288b,$99fc0000,$00107802
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c343402,$47260010,$001042ee,$ff7e48ee
+	dc.l	$ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+	dc.l	$0cb44a00,$66ff0000,$0cda52ae,$ff784cfb
+	dc.l	$3fff0170,$00000cd6,$42827602,$47eeff54
+	dc.l	$49eeff70,$288b99fc,$00000010,$78103d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c34
+	dc.l	$34024926,$00100010,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$00000c56
+	dc.l	$4a0066ff,$00000c7c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$0c784282,$760247ee,$ff5449ee
+	dc.l	$ff70288b,$99fc0000,$00107808,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c343402
+	dc.l	$43260010,$001042ee,$ff7e48ee,$ffffffc0
+	dc.l	$2d7c0000,$0004ff8c,$61ff0000,$0bf84a00
+	dc.l	$66ff0000,$0c1e52ae,$ff784cfb,$3fff0170
+	dc.l	$00000c1a,$42827602,$47eeff54,$49eeff70
+	dc.l	$288b99fc,$00000010,$78043d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c34,$34024d26
+	dc.l	$00100010,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$00000b9a,$4a0066ff
+	dc.l	$00000bc0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$0bbc4282,$760247ee,$ff5449ee,$ff70288b
+	dc.l	$99fc0000,$00107802,$3d7c0000,$ff7c44fc
+	dc.l	$000048ee,$ffffff80,$4c343402,$4f260010
+	dc.l	$001042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$61ff0000,$0b3c4a00,$66ff0000
+	dc.l	$0b6252ae,$ff784cfb,$3fff0170,$00000b5e
+	dc.l	$42827602,$47eeff54,$49eeff70,$288b99fc
+	dc.l	$00000010,$78023d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c34,$34024f37,$00000010
+	dc.l	$00000010,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$00000ada,$4a0066ff
+	dc.l	$00000b00,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$0afc4282,$760247ee,$ff5449ee,$ff70288b
+	dc.l	$78023d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c34,$34020753,$00000020,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$00000a82,$4a0066ff,$00000aa8,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$0aa4204f,$42827602
+	dc.l	$47eeff54,$4feeff70,$2e8b7820,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c373402
+	dc.l	$491542ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$2e4861ff,$00000a2a,$4a0066ff
+	dc.l	$00000a50,$52aeff78,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$0a48224e,$42827602,$47e9ff74
+	dc.l	$4de9ff70,$2c8bddfc,$00000010,$2a7cffff
+	dc.l	$fffe337c,$0000ff7c,$44fc0000,$48e9ffff
+	dc.l	$ff804c36,$3402df27,$fff00000,$001042e9
+	dc.l	$ff7e48e9,$ffffffc0,$237c0000,$0004ff8c
+	dc.l	$2c4961ff,$000009be,$4a0066ff,$000009e4
+	dc.l	$222eff78,$42804e75,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$09d84282,$760247fa,$ef7449fa
+	dc.l	$ff70288b,$78f03d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c3b,$34024122,$ff801000
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$0000095e,$4a0066ff,$00000984
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$09804282
+	dc.l	$760247fa,$ef7449fa,$ff70288b,$78f83d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+	dc.l	$34024322,$ff801000,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$00000906
+	dc.l	$4a0066ff,$0000092c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$09284282,$760247fa,$ef7449fa
+	dc.l	$ff70288b,$78fc3d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c3b,$34024522,$ff801000
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$000008ae,$4a0066ff,$000008d4
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$08d04282
+	dc.l	$760247fa,$ef7449fa,$ff70288b,$78fe3d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+	dc.l	$34024722,$ff801000,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$00000856
+	dc.l	$4a0066ff,$0000087c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$08784282,$760247fa,$ef7449fa
+	dc.l	$ff70288b,$78f03d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c3b,$34024922,$ff801000
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$000007fe,$4a0066ff,$00000824
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$08204282
+	dc.l	$760247fa,$ef7449fa,$ff70288b,$78f83d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+	dc.l	$34024b22,$ff801000,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$000007a6
+	dc.l	$4a0066ff,$000007cc,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$07c84282,$760247fa,$ef7449fa
+	dc.l	$ff70288b,$78fc3d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c3b,$34024d22,$ff801000
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$0000074e,$4a0066ff,$00000774
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$07704282
+	dc.l	$760247fa,$ef7449fa,$ff70288b,$78fe3d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+	dc.l	$34024f22,$ff801000,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$000006f6
+	dc.l	$4a0066ff,$0000071c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$07184282,$760247fa,$ef7449fa
+	dc.l	$ff70288b,$78fe3d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c3b,$34024f33,$ffffff80
+	dc.l	$00001000,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$0000069a,$4a0066ff
+	dc.l	$000006c0,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$06bc4282,$760247fa,$ef7449fa,$ff70288b
+	dc.l	$78fe3d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c3b,$34020773,$ffffff70,$00001000
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$0000063e,$4a0066ff,$00000664
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$06604282
+	dc.l	$760247fa,$ef7449fa,$ff70288b,$280c3d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c30
+	dc.l	$34024993,$00001000,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$000005e6
+	dc.l	$4a0066ff,$0000060c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$06084282,$760247fa,$ef7449fa
+	dc.l	$ff70288b,$78f0d88c,$3d7c0000,$ff7c44fc
+	dc.l	$000048ee,$ffffff80,$4c303402,$49b30000
+	dc.l	$00100000,$100042ee,$ff7e48ee,$ffffffc0
+	dc.l	$2d7c0000,$0004ff8c,$61ff0000,$05884a00
+	dc.l	$66ff0000,$05ae52ae,$ff784282,$760247fa
+	dc.l	$ff7449fa,$ff70288b,$78f03d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c30,$340201f1
+	dc.l	$ffffff70,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$0000053a,$4a0066ff
+	dc.l	$00000560,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$055c4282,$760247fa,$0f7449fa,$ff70288b
+	dc.l	$2c7c0000,$00023d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c3b,$3402ef22,$ff60f000
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$000004de,$4a0066ff,$00000504
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$0500204f
+	dc.l	$42827602,$47fa0f74,$49faff70,$288b2e7c
+	dc.l	$00000002,$3d7c0000,$ff7c44fc,$000048ee
+	dc.l	$ffffff80,$4c3b3402,$ff22ff60,$f00042ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$2e4861ff,$0000047e,$4a0066ff,$000004a4
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$04a04282
+	dc.l	$760247fa,$ff5449fa,$ff70288b,$99fc0000
+	dc.l	$00107810,$3d7c0000,$ff7c44fc,$000048ee
+	dc.l	$ffffff80,$4c3b3402,$4126ff70,$001042ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$61ff0000,$04204a00,$66ff0000,$044652ae
+	dc.l	$ff784cfb,$3fff0170,$00000442,$42827602
+	dc.l	$47faff54,$49faff70,$288b99fc,$00000010
+	dc.l	$78083d7c,$0000ff7c,$44fc0000,$48eeffff
+	dc.l	$ff804c3b,$34024326,$ff700010,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+	dc.l	$000003c2,$4a0066ff,$000003e8,$52aeff78
+	dc.l	$4cfb3fff,$01700000,$03e44282,$760247fa
+	dc.l	$ff5449fa,$ff70288b,$99fc0000,$00107804
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c3b3402,$4526ff70,$001042ee,$ff7e48ee
+	dc.l	$ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+	dc.l	$03644a00,$66ff0000,$038a52ae,$ff784cfb
+	dc.l	$3fff0170,$00000386,$42827602,$47faff54
+	dc.l	$49faff70,$288b99fc,$00000010,$78023d7c
+	dc.l	$0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+	dc.l	$34024726,$ff700010,$42eeff7e,$48eeffff
+	dc.l	$ffc02d7c,$00000004,$ff8c61ff,$00000306
+	dc.l	$4a0066ff,$0000032c,$52aeff78,$4cfb3fff
+	dc.l	$01700000,$03284282,$760247fa,$ff5449fa
+	dc.l	$ff70288b,$99fc0000,$00107810,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c3b3402
+	dc.l	$4926ff70,$001042ee,$ff7e48ee,$ffffffc0
+	dc.l	$2d7c0000,$0004ff8c,$61ff0000,$02a84a00
+	dc.l	$66ff0000,$02ce52ae,$ff784cfb,$3fff0170
+	dc.l	$000002ca,$42827602,$47faff54,$49faff70
+	dc.l	$288b99fc,$00000010,$78083d7c,$0000ff7c
+	dc.l	$44fc0000,$48eeffff,$ff804c3b,$34024326
+	dc.l	$ff700010,$42eeff7e,$48eeffff,$ffc02d7c
+	dc.l	$00000004,$ff8c61ff,$0000024a,$4a0066ff
+	dc.l	$00000270,$52aeff78,$4cfb3fff,$01700000
+	dc.l	$026c4282,$760247fa,$ff5449fa,$ff70288b
+	dc.l	$99fc0000,$00107804,$3d7c0000,$ff7c44fc
+	dc.l	$000048ee,$ffffff80,$4c3b3402,$4d26ff70
+	dc.l	$001042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$61ff0000,$01ec4a00,$66ff0000
+	dc.l	$021252ae,$ff784cfb,$3fff0170,$0000020e
+	dc.l	$42827602,$47faff54,$49faff70,$288b99fc
+	dc.l	$00000010,$78023d7c,$0000ff7c,$44fc0000
+	dc.l	$48eeffff,$ff804c3b,$34024f26,$ff700010
+	dc.l	$42eeff7e,$48eeffff,$ffc02d7c,$00000004
+	dc.l	$ff8c61ff,$0000018e,$4a0066ff,$000001b4
+	dc.l	$52aeff78,$4cfb3fff,$01700000,$01b04282
+	dc.l	$760247fa,$ff5449fa,$ff70288b,$99fc0000
+	dc.l	$00107802,$3d7c0000,$ff7c44fc,$000048ee
+	dc.l	$ffffff80,$4c3b3402,$4f37ffff,$ff700000
+	dc.l	$001042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+	dc.l	$0004ff8c,$61ff0000,$012c4a00,$66ff0000
+	dc.l	$015252ae,$ff784cfb,$3fff0170,$0000014e
+	dc.l	$42827602,$47faff54,$49faff70,$288b7802
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c3b3402,$0773ffff,$ff700000,$002042ee
+	dc.l	$ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+	dc.l	$61ff0000,$00d04a00,$66ff0000,$00f652ae
+	dc.l	$ff784cfb,$3fff0170,$000000f2,$42827602
+	dc.l	$47faff54,$49faff70,$288b7804,$3d7c0000
+	dc.l	$ff7c44fc,$000048ee,$ffffff80,$4c303402
+	dc.l	$4fb5ffff,$ff7042ee,$ff7e48ee,$ffffffc0
+	dc.l	$2d7c0000,$0004ff8c,$61ff0000,$00784a00
+	dc.l	$66ff0000,$009e52ae,$ff784cfb,$3fff0170
+	dc.l	$0000009a,$204f4282,$760247fa,$ff744dfa
+	dc.l	$ff702c8b,$ddfc0000,$00102e7c,$fffffffe
+	dc.l	$3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+	dc.l	$4c3b3402,$ff27ff70,$00000010,$42eeff7e
+	dc.l	$48eeffff,$ffc02d7c,$00000004,$ff8c2e48
+	dc.l	$61ff0000,$00104a00,$66ff0000,$00364280
+	dc.l	$4e7541ee,$ff8043ee,$ffc0700e,$b18966ff
+	dc.l	$0000001c,$51c8fff6,$302eff7c,$322eff7e
+	dc.l	$b04166ff,$00000008,$42804e75,$70014e75
+	dc.l	$222eff78,$70014e75,$acacacac,$acacacac
+	dc.l	$acacacac,$acacacac,$acacacac,$acacacac
+	dc.l	$acacacac,$acacacac,$acacacac,$acacacac
+	dc.l	$acacacac,$acacacac,$acacacac,$acacacac
+	dc.l	$acacacac,$acacacac,$2f00203a,$afa4487b
+	dc.l	$0930ffff,$afa0202f,$00044e74,$00042f00
+	dc.l	$203aaf92,$487b0930,$ffffaf8a,$202f0004
+	dc.l	$4e740004,$00000000,$00000000,$00000000
diff --git a/arch/m68k/ifpsp060/os.S b/arch/m68k/ifpsp060/os.S
new file mode 100644
index 0000000..aa4df87
--- /dev/null
+++ b/arch/m68k/ifpsp060/os.S
@@ -0,0 +1,396 @@
+|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+|MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+|M68000 Hi-Performance Microprocessor Division
+|M68060 Software Package
+|Production Release P1.00 -- October 10, 1994
+|
+|M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+|
+|THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+|To the maximum extent permitted by applicable law,
+|MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+|INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+|and any warranty against infringement with regard to the SOFTWARE
+|(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+|
+|To the maximum extent permitted by applicable law,
+|IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+|(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+|BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+|ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+|Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+|
+|You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+|so long as this entire notice is retained without alteration in any modified and/or
+|redistributed versions, and that such modified versions are clearly identified as such.
+|No licenses are granted by implication, estoppel or otherwise under any patents
+|or trademarks of Motorola, Inc.
+|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+| os.s
+|
+| This file contains:
+|	- example "Call-Out"s required by both the ISP and FPSP.
+|
+
+#include <linux/linkage.h>
+
+|################################
+| EXAMPLE CALL-OUTS		#
+|				#
+| _060_dmem_write()		#
+| _060_dmem_read()		#
+| _060_imem_read()		#
+| _060_dmem_read_byte()		#
+| _060_dmem_read_word()		#
+| _060_dmem_read_long()		#
+| _060_imem_read_word()		#
+| _060_imem_read_long()		#
+| _060_dmem_write_byte()	#
+| _060_dmem_write_word()	#
+| _060_dmem_write_long()	#
+|				#
+| _060_real_trace()		#
+| _060_real_access()		#
+|################################
+
+|
+| Each IO routine checks to see if the memory write/read is to/from user
+| or supervisor application space. The examples below use simple "move"
+| instructions for supervisor mode applications and call _copyin()/_copyout()
+| for user mode applications.
+| When installing the 060SP, the _copyin()/_copyout() equivalents for a
+| given operating system should be substituted.
+|
+| The addresses within the 060SP are guaranteed to be on the stack.
+| The result is that Unix processes are allowed to sleep as a consequence
+| of a page fault during a _copyout.
+|
+| Linux/68k: The _060_[id]mem_{read,write}_{byte,word,long} functions
+| (i.e. all accesses with a known length <= 4) are implemented by single
+| move statements instead of (more expensive) copy{in,out} calls when
+| working in user space.
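+|
+| For illustration only (a rough sketch, not part of the 060SP sources):
+| the user/supervisor dispatch described above corresponds to something
+| like the following C, assuming the kernel's copy_to_user() helper and
+| the supervisor flag that the routines below test at 0x4(%a6):
+|
+|	/* hypothetical sketch of a write call-out, not used by this file */
+|	static int dmem_write_sketch(const void *src, void *dst,
+|				     unsigned long n, int supervisor)
+|	{
+|		if (supervisor) {
+|			memcpy(dst, src, n);	/* plain move, kernel space */
+|			return 0;		/* d1 = 0 means success */
+|		}
+|		/* user space: may fault and sleep, like _copyout() */
+|		return copy_to_user(dst, src, n) ? -1 : 0;
+|	}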
+
+|
+| _060_dmem_write():
+|
+| Writes to data memory while in supervisor mode.
+|
+| INPUTS:
+|	a0 - supervisor source address
+|	a1 - user destination address
+|	d0 - number of bytes to write
+|	0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+|	d1 - 0 = success, !0 = failure
+|
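+| Example (illustrative): with %a0 pointing at a supervisor (kernel)
+| buffer, %a1 at a user buffer and %d0 = 12, the routine copies 12 bytes
+| and returns %d1 = 0 on success.
+|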
+	.global		_060_dmem_write
+_060_dmem_write:
+	subq.l		#1,%d0
+	btst		#0x5,0x4(%a6)		| check for supervisor state
+	beqs		user_write
+super_write:
+	move.b		(%a0)+,(%a1)+		| copy 1 byte
+	dbra		%d0,super_write		| quit if --ctr < 0
+	clr.l		%d1			| return success
+	rts
+user_write:
+	move.b		(%a0)+,%d1		| copy 1 byte
+copyoutae:
+	movs.b		%d1,(%a1)+
+	dbra		%d0,user_write		| quit if --ctr < 0
+	clr.l		%d1			| return success
+	rts
+
+|
+| _060_imem_read(), _060_dmem_read():
+|
+| Reads from data/instruction memory while in supervisor mode.
+|
+| INPUTS:
+|	a0 - user source address
+|	a1 - supervisor destination address
+|	d0 - number of bytes to read
+|	0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+|	d1 - 0 = success, !0 = failure
+|
+	.global		_060_imem_read
+	.global		_060_dmem_read
+_060_imem_read:
+_060_dmem_read:
+	subq.l		#1,%d0
+	btst		#0x5,0x4(%a6)		| check for supervisor state
+	beqs		user_read
+super_read:
+	move.b		(%a0)+,(%a1)+		| copy 1 byte
+	dbra		%d0,super_read		| quit if --ctr < 0
+	clr.l		%d1			| return success
+	rts
+user_read:
+copyinae:
+	movs.b		(%a0)+,%d1
+	move.b		%d1,(%a1)+		| copy 1 byte
+	dbra		%d0,user_read		| quit if --ctr < 0
+	clr.l		%d1			| return success
+	rts
+
+|
+| _060_dmem_read_byte():
+|
+| Read a data byte from user memory.
+|
+| INPUTS:
+|	a0 - user source address
+|	0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+|	d0 - data byte in d0
+|	d1 - 0 = success, !0 = failure
+|
+	.global		_060_dmem_read_byte
+_060_dmem_read_byte:
+	clr.l		%d0			| clear whole longword
+	clr.l		%d1			| assume success
+	btst		#0x5,0x4(%a6)		| check for supervisor state
+	bnes		dmrbs			| supervisor
+dmrbuae:movs.b		(%a0),%d0		| fetch user byte
+	rts
+dmrbs:	move.b		(%a0),%d0		| fetch super byte
+	rts
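+| Note: dmrbuae above, like the other *ae labels in this file, has an
+| entry in the __ex_table section at the end of the file, so a faulting
+| user access is redirected to the .fixup code and returns %d1 = -1
+| (failure) rather than taking an unhandled kernel fault.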
+
+|
+| _060_dmem_read_word():
+|
+| Read a data word from user memory.
+|
+| INPUTS:
+|	a0 - user source address
+|	0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+|	d0 - data word in d0
+|	d1 - 0 = success, !0 = failure
+|
+| _060_imem_read_word():
+|
+| Read an instruction word from user memory.
+|
+| INPUTS:
+|	a0 - user source address
+|	0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+|	d0 - instruction word in d0
+|	d1 - 0 = success, !0 = failure
+|
+	.global		_060_dmem_read_word
+	.global		_060_imem_read_word
+_060_dmem_read_word:
+_060_imem_read_word:
+	clr.l		%d1			| assume success
+	clr.l		%d0			| clear whole longword
+	btst		#0x5,0x4(%a6)		| check for supervisor state
+	bnes		dmrws			| supervisor
+dmrwuae:movs.w		(%a0),%d0		| fetch user word
+	rts
+dmrws:	move.w		(%a0),%d0		| fetch super word
+	rts
+
+|
+| _060_dmem_read_long():
+|
+| Read a data longword from user memory.
+|
+| INPUTS:
+|	a0 - user source address
+|	0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+|	d0 - data longword in d0
+|	d1 - 0 = success, !0 = failure
+|
+| _060_imem_read_long():
+|
+| Read an instruction longword from user memory.
+|
+| INPUTS:
+|	a0 - user source address
+|	0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+|	d0 - instruction longword in d0
+|	d1 - 0 = success, !0 = failure
+|
+	.global		_060_dmem_read_long
+	.global		_060_imem_read_long
+_060_dmem_read_long:
+_060_imem_read_long:
+	clr.l		%d1			| assume success
+	btst		#0x5,0x4(%a6)		| check for supervisor state
+	bnes		dmrls			| supervisor
+dmrluae:movs.l		(%a0),%d0		| fetch user longword
+	rts
+dmrls:	move.l		(%a0),%d0		| fetch super longword
+	rts
+
+|
+| _060_dmem_write_byte():
+|
+| Write a data byte to user memory.
+|
+| INPUTS:
+|	a0 - user destination address
+|	d0 - data byte in d0
+|	0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+|	d1 - 0 = success, !0 = failure
+|
+	.global		_060_dmem_write_byte
+_060_dmem_write_byte:
+	clr.l		%d1			| assume success
+	btst		#0x5,0x4(%a6)		| check for supervisor state
+	bnes		dmwbs			| supervisor
+dmwbuae:movs.b		%d0,(%a0)		| store user byte
+	rts
+dmwbs:	move.b		%d0,(%a0)		| store super byte
+	rts
+
+|
+| _060_dmem_write_word():
+|
+| Write a data word to user memory.
+|
+| INPUTS:
+|	a0 - user destination address
+|	d0 - data word in d0
+|	0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+|	d1 - 0 = success, !0 = failure
+|
+	.global		_060_dmem_write_word
+_060_dmem_write_word:
+	clr.l		%d1			| assume success
+	btst		#0x5,0x4(%a6)		| check for supervisor state
+	bnes		dmwws			| supervisor
+dmwwu:
+dmwwuae:movs.w		%d0,(%a0)		| store user word
+	bras		dmwwr
+dmwws:	move.w		%d0,(%a0)		| store super word
+dmwwr:	clr.l		%d1			| return success
+	rts
+
+|
+| _060_dmem_write_long():
+|
+| Write a data longword to user memory.
+|
+| INPUTS:
+|	a0 - user destination address
+|	d0 - data longword in d0
+|	0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+|	d1 - 0 = success, !0 = failure
+|
+	.global		_060_dmem_write_long
+_060_dmem_write_long:
+	clr.l		%d1			| assume success
+	btst		#0x5,0x4(%a6)		| check for supervisor state
+	bnes		dmwls			| supervisor
+dmwluae:movs.l		%d0,(%a0)		| store user longword
+	rts
+dmwls:	move.l		%d0,(%a0)		| store super longword
+	rts
+
+
+#if 0
+|###############################################
+
+|
+| Use these routines if your kernel doesn't have _copyout/_copyin equivalents.
+| Assumes that D0/D1/A0/A1 are scratch registers. The _copyin/_copyout
+| below assume that the SFC/DFC have been set previously.
+|
+| Linux/68k: These are basically non-inlined versions of
+| memcpy_{to,from}fs, but without long-transfer optimization
+| Note: Assumed that SFC/DFC are pointing correctly to user data
+| space... Should be right, or are there any exceptions?
+
+|
+| int _copyout(supervisor_addr, user_addr, nbytes)
+|
+	.global		_copyout
+_copyout:
+	move.l		4(%sp),%a0		| source
+	move.l		8(%sp),%a1		| destination
+	move.l		12(%sp),%d0		| count
+	subq.l		#1,%d0
+moreout:
+	move.b		(%a0)+,%d1		| fetch supervisor byte
+copyoutae:
+	movs.b		%d1,(%a1)+		| store user byte
+	dbra		%d0,moreout		| are we through yet?
+	moveq		#0,%d0			| return success
+	rts
+
+|
+| int _copyin(user_addr, supervisor_addr, nbytes)
+|
+	.global		_copyin
+_copyin:
+	move.l		4(%sp),%a0		| source
+	move.l		8(%sp),%a1		| destination
+	move.l		12(%sp),%d0		| count
+	subq.l		#1,%d0
+morein:
+copyinae:
+	movs.b		(%a0)+,%d1		| fetch user byte
+	move.b		%d1,(%a1)+		| write supervisor byte
+	dbra		%d0,morein		| are we through yet?
+	moveq		#0,%d0			| return success
+	rts
+#endif
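+
+|
+| Calling sketch for the disabled _copyout/_copyin above (illustrative only;
+| the routines are compiled out by the surrounding #if 0 and the registers
+| below are examples): both take their three arguments on the stack in C
+| argument order and return 0 in d0 on success.
+|
+|	move.l		#16,-(%sp)		| nbytes (example count)
+|	pea		(%a3)			| user destination address
+|	pea		(%a2)			| supervisor source address
+|	bsrl		_copyout
+|	lea		12(%sp),%sp		| pop the three arguments
+|	tst.l		%d0			| 0 = success
+|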
+
+|###########################################################################
+
+|
+| _060_real_trace():
+|
+| This is the exit point for the 060FPSP when an instruction is being traced
+| and there are no other higher priority exceptions pending for this instruction
+| or they have already been processed.
+|
+| The routine below branches to the kernel's generic trap handler ("trap").
+|
+	.global		_060_real_trace
+_060_real_trace:
+	bral	trap
+
+|
+| _060_real_access():
+|
+| This is the exit point for the 060FPSP when an access error exception
+| is encountered. The routine below should point to the operating system
+| handler for access error exceptions. The exception stack frame is an
+| 8-word access error frame.
+|
+| The routine below branches to the kernel's bus error handler ("buserr").
+|
+	.global		_060_real_access
+_060_real_access:
+	bral	buserr
+
+
+
+| Exception handling for movs access to illegal memory
+	.section .fixup,#alloc,#execinstr
+	.even
+1:	moveq		#-1,%d1
+	rts
+.section __ex_table,#alloc
+	.align 4
+	.long	dmrbuae,1b
+	.long	dmrwuae,1b
+	.long	dmrluae,1b
+	.long	dmwbuae,1b
+	.long	dmwwuae,1b
+	.long	dmwluae,1b
+	.long	copyoutae,1b
+	.long	copyinae,1b
+	.text
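+
+| How the fixup above works (summary comment, not new functionality): each
+| __ex_table entry pairs the address of one of the "movs" user accesses in
+| this file with the fixup at label 1, so a fault on that access makes the
+| kernel resume at the fixup, which returns -1 in d1 to the caller. Any new
+| user-space access added here would need a matching entry, for example:
+|
+|	.long	dmrbuae,1b		| fault at dmrbuae resumes at label 1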
diff --git a/arch/m68k/ifpsp060/pfpsp.sa b/arch/m68k/ifpsp060/pfpsp.sa
new file mode 100644
index 0000000..d276b27
--- /dev/null
+++ b/arch/m68k/ifpsp060/pfpsp.sa
@@ -0,0 +1,1730 @@
+	dc.l	$60ff0000,$17400000,$60ff0000,$15f40000
+	dc.l	$60ff0000,$02b60000,$60ff0000,$04700000
+	dc.l	$60ff0000,$1b100000,$60ff0000,$19aa0000
+	dc.l	$60ff0000,$1b5a0000,$60ff0000,$062e0000
+	dc.l	$60ff0000,$102c0000,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+	dc.l	$2f00203a,$ff2c487b,$0930ffff,$fef8202f
+	dc.l	$00044e74,$00042f00,$203afef2,$487b0930
+	dc.l	$fffffee2,$202f0004,$4e740004,$2f00203a
+	dc.l	$fee0487b,$0930ffff,$fecc202f,$00044e74
+	dc.l	$00042f00,$203afed2,$487b0930,$fffffeb6
+	dc.l	$202f0004,$4e740004,$2f00203a,$fea4487b
+	dc.l	$0930ffff,$fea0202f,$00044e74,$00042f00
+	dc.l	$203afe96,$487b0930,$fffffe8a,$202f0004
+	dc.l	$4e740004,$2f00203a,$fe7c487b,$0930ffff
+	dc.l	$fe74202f,$00044e74,$00042f00,$203afe76
+	dc.l	$487b0930,$fffffe5e,$202f0004,$4e740004
+	dc.l	$2f00203a,$fe68487b,$0930ffff,$fe48202f
+	dc.l	$00044e74,$00042f00,$203afe56,$487b0930
+	dc.l	$fffffe32,$202f0004,$4e740004,$2f00203a
+	dc.l	$fe44487b,$0930ffff,$fe1c202f,$00044e74
+	dc.l	$00042f00,$203afe32,$487b0930,$fffffe06
+	dc.l	$202f0004,$4e740004,$2f00203a,$fe20487b
+	dc.l	$0930ffff,$fdf0202f,$00044e74,$00042f00
+	dc.l	$203afe1e,$487b0930,$fffffdda,$202f0004
+	dc.l	$4e740004,$2f00203a,$fe0c487b,$0930ffff
+	dc.l	$fdc4202f,$00044e74,$00042f00,$203afdfa
+	dc.l	$487b0930,$fffffdae,$202f0004,$4e740004
+	dc.l	$2f00203a,$fde8487b,$0930ffff,$fd98202f
+	dc.l	$00044e74,$00042f00,$203afdd6,$487b0930
+	dc.l	$fffffd82,$202f0004,$4e740004,$2f00203a
+	dc.l	$fdc4487b,$0930ffff,$fd6c202f,$00044e74
+	dc.l	$00042f00,$203afdb2,$487b0930,$fffffd56
+	dc.l	$202f0004,$4e740004,$2f00203a,$fda0487b
+	dc.l	$0930ffff,$fd40202f,$00044e74,$00042f00
+	dc.l	$203afd8e,$487b0930,$fffffd2a,$202f0004
+	dc.l	$4e740004,$2f00203a,$fd7c487b,$0930ffff
+	dc.l	$fd14202f,$00044e74,$00042f00,$203afd6a
+	dc.l	$487b0930,$fffffcfe,$202f0004,$4e740004
+	dc.l	$40c62d38,$d3d64634,$3d6f90ae,$b1e75cc7
+	dc.l	$40000000,$c90fdaa2,$2168c235,$00000000
+	dc.l	$3fff0000,$c90fdaa2,$2168c235,$00000000
+	dc.l	$3fe45f30,$6dc9c883,$4e56ff40,$f32eff6c
+	dc.l	$48ee0303,$ff9cf22e,$bc00ff60,$f22ef0c0
+	dc.l	$ffdc2d6e,$ff68ff44,$206eff44,$58aeff44
+	dc.l	$61ffffff,$ff042d40,$ff40082e,$0005ff42
+	dc.l	$66000116,$41eeff6c,$61ff0000,$051c41ee
+	dc.l	$ff6c61ff,$00002aec,$1d40ff4e,$082e0005
+	dc.l	$ff436726,$e9ee0183,$ff4261ff,$00005cac
+	dc.l	$41eeff78,$61ff0000,$2aca0c00,$00066606
+	dc.l	$61ff0000,$2a2e1d40,$ff4f4280,$102eff63
+	dc.l	$122eff43,$0241007f,$02ae00ff,$01ffff64
+	dc.l	$f23c9000,$00000000,$f23c8800,$00000000
+	dc.l	$41eeff6c,$43eeff78,$223b1530,$00001974
+	dc.l	$4ebb1930,$0000196c,$e9ee0183,$ff4261ff
+	dc.l	$00005cd8,$082e0004,$ff626622,$082e0001
+	dc.l	$ff626644,$f22ed0c0,$ffdcf22e,$9c00ff60
+	dc.l	$4cee0303,$ff9c4e5e,$60ffffff,$fcc6f22e
+	dc.l	$f040ff6c,$3d7ce005,$ff6ef22e,$d0c0ffdc
+	dc.l	$f22e9c00,$ff604cee,$0303ff9c,$f36eff6c
+	dc.l	$4e5e60ff,$fffffcb2,$f22ef040,$ff6c1d7c
+	dc.l	$00c4000b,$3d7ce001,$ff6ef22e,$d0c0ffdc
+	dc.l	$f22e9c00,$ff604cee,$0303ff9c,$f36eff6c
+	dc.l	$4e5e60ff,$fffffcae,$1d7c0000,$ff4e4280
+	dc.l	$102eff63,$02aeffff,$00ffff64,$f23c9000
+	dc.l	$00000000,$f23c8800,$00000000,$41eeff6c
+	dc.l	$61ff0000,$2e0c082e,$0004ff62,$6600ff70
+	dc.l	$082e0001,$ff626600,$ff90f22e,$d0c0ffdc
+	dc.l	$f22e9c00,$ff604cee,$0303ff9c,$4e5e0817
+	dc.l	$000767ff,$fffffc0c,$f22fa400,$00083f7c
+	dc.l	$20240006,$60ffffff,$fcec4e56,$ff40f32e
+	dc.l	$ff6c48ee,$0303ff9c,$f22ebc00,$ff60f22e
+	dc.l	$f0c0ffdc,$2d6eff68,$ff44206e,$ff4458ae
+	dc.l	$ff4461ff,$fffffd42,$2d40ff40,$082e0005
+	dc.l	$ff426600,$013241ee,$ff6c61ff,$0000035a
+	dc.l	$41eeff6c,$61ff0000,$292a1d40,$ff4e082e
+	dc.l	$0005ff43,$672e082e,$0004ff43,$6626e9ee
+	dc.l	$0183ff42,$61ff0000,$5ae241ee,$ff7861ff
+	dc.l	$00002900,$0c000006,$660661ff,$00002864
+	dc.l	$1d40ff4f,$4280102e,$ff63122e,$ff430241
+	dc.l	$007f02ae,$00ff01ff,$ff64f23c,$90000000
+	dc.l	$0000f23c,$88000000,$000041ee,$ff6c43ee
+	dc.l	$ff78223b,$15300000,$17aa4ebb,$19300000
+	dc.l	$17a2e9ee,$0183ff42,$61ff0000,$5b0e082e
+	dc.l	$0003ff62,$6622082e,$0001ff62,$664ef22e
+	dc.l	$d0c0ffdc,$f22e9c00,$ff604cee,$0303ff9c
+	dc.l	$4e5e60ff,$fffffafc,$082e0003,$ff666700
+	dc.l	$ffd6f22e,$f040ff6c,$3d7ce003,$ff6ef22e
+	dc.l	$d0c0ffdc,$f22e9c00,$ff604cee,$0303ff9c
+	dc.l	$f36eff6c,$4e5e60ff,$fffffaf4,$082e0001
+	dc.l	$ff666700,$ffaaf22e,$f040ff6c,$1d7c00c4
+	dc.l	$000b3d7c,$e001ff6e,$f22ed0c0,$ffdcf22e
+	dc.l	$9c00ff60,$4cee0303,$ff9cf36e,$ff6c4e5e
+	dc.l	$60ffffff,$fad01d7c,$0000ff4e,$4280102e
+	dc.l	$ff6302ae,$ffff00ff,$ff64f23c,$90000000
+	dc.l	$0000f23c,$88000000,$000041ee,$ff6c61ff
+	dc.l	$00002c2e,$082e0003,$ff626600,$ff66082e
+	dc.l	$0001ff62,$6600ff90,$f22ed0c0,$ffdcf22e
+	dc.l	$9c00ff60,$4cee0303,$ff9c4e5e,$08170007
+	dc.l	$67ffffff,$fa2ef22f,$a4000008,$3f7c2024
+	dc.l	$000660ff,$fffffb0e,$4e56ff40,$f32eff6c
+	dc.l	$48ee0303,$ff9cf22e,$bc00ff60,$f22ef0c0
+	dc.l	$ffdc082e,$00050004,$66084e68,$2d48ffd8
+	dc.l	$600841ee,$00102d48,$ffd82d6e,$ff68ff44
+	dc.l	$206eff44,$58aeff44,$61ffffff,$fb4c2d40
+	dc.l	$ff40422e,$ff4a082e,$0005ff42,$66000208
+	dc.l	$e9ee0006,$ff420c00,$00136700,$049e02ae
+	dc.l	$00ff00ff,$ff64f23c,$90000000,$0000f23c
+	dc.l	$88000000,$000041ee,$ff6c61ff,$0000013a
+	dc.l	$41eeff6c,$61ff0000,$270a0c00,$00066606
+	dc.l	$61ff0000,$266e1d40,$ff4ee9ee,$0183ff42
+	dc.l	$082e0005,$ff436728,$0c2e003a,$ff436720
+	dc.l	$61ff0000,$58b641ee,$ff7861ff,$000026d4
+	dc.l	$0c000006,$660661ff,$00002638,$1d40ff4f
+	dc.l	$4280102e,$ff63e9ee,$1047ff43,$41eeff6c
+	dc.l	$43eeff78,$223b1d30,$00001598,$4ebb1930
+	dc.l	$00001590,$102eff62,$6634102e,$ff430200
+	dc.l	$00380c00,$0038670c,$e9ee0183,$ff4261ff
+	dc.l	$000058e8,$f22ed0c0,$ffdcf22e,$9c00ff60
+	dc.l	$4cee0303,$ff9c4e5e,$60ffffff,$f8e6c02e
+	dc.l	$ff66edc0,$06086614,$082e0004,$ff6667ba
+	dc.l	$082e0001,$ff6267b2,$60000066,$04800000
+	dc.l	$00180c00,$00066614,$082e0003,$ff666600
+	dc.l	$004a082e,$0004ff66,$66000046,$2f0061ff
+	dc.l	$000007e0,$201f3d7b,$0222ff6e,$f22ed0c0
+	dc.l	$ffdcf22e,$9c00ff60,$4cee0303,$ff9cf36e
+	dc.l	$ff6c4e5e,$60ffffff,$f87ae000,$e006e004
+	dc.l	$e005e003,$e002e001,$e001303c,$000460bc
+	dc.l	$303c0003,$60b6e9ee,$0006ff42,$0c000011
+	dc.l	$67080c00,$00156750,$4e753028,$00000240
+	dc.l	$7fff0c40,$3f806708,$0c40407f,$672c4e75
+	dc.l	$02a87fff,$ffff0004,$671861ff,$000024cc
+	dc.l	$44400640,$3f810268,$80000000,$81680000
+	dc.l	$4e750268,$80000000,$4e750228,$007f0004
+	dc.l	$00687fff,$00004e75,$30280000,$02407fff
+	dc.l	$0c403c00,$67080c40,$43ff67de,$4e7502a8
+	dc.l	$7fffffff,$00046606,$4aa80008,$67c461ff
+	dc.l	$00002478,$44400640,$3c010268,$80000000
+	dc.l	$81680000,$4e75e9ee,$00c3ff42,$0c000003
+	dc.l	$670004a2,$0c000007,$6700049a,$02aeffff
+	dc.l	$00ffff64,$f23c9000,$00000000,$f23c8800
+	dc.l	$00000000,$302eff6c,$02407fff,$671041ee
+	dc.l	$ff6c61ff,$0000246c,$1d40ff4e,$60061d7c
+	dc.l	$0004ff4e,$4280102e,$ff6341ee,$ff6c2d56
+	dc.l	$ffd461ff,$0000292a,$102eff62,$66000086
+	dc.l	$2caeffd4,$082e0005,$00046626,$206effd8
+	dc.l	$4e60f22e,$d0c0ffdc,$f22e9c00,$ff604cee
+	dc.l	$0303ff9c,$4e5e0817,$0007667a,$60ffffff
+	dc.l	$f7220c2e,$0008ff4a,$66d8f22e,$f080ff6c
+	dc.l	$f22ed0c0,$ffdcf22e,$9c00ff60,$4cee0303
+	dc.l	$ff9c2c56,$2f6f00c4,$00b82f6f,$00c800bc
+	dc.l	$2f6f002c,$00c42f6f,$003000c8,$2f6f0034
+	dc.l	$00ccdffc,$000000b8,$08170007,$662860ff
+	dc.l	$fffff6d0,$c02eff66,$edc00608,$662a082e
+	dc.l	$0004ff66,$6700ff6a,$082e0001,$ff626700
+	dc.l	$ff606000,$01663f7c,$20240006,$f22fa400
+	dc.l	$000860ff,$fffff78e,$04800000,$0018303b
+	dc.l	$020a4efb,$00064afc,$00080000,$0000003a
+	dc.l	$00640094,$00000140,$0000f22e,$d0c0ffdc
+	dc.l	$f22e9c00,$ff604cee,$0303ff9c,$3d7c30d8
+	dc.l	$000a3d7c,$e006ff6e,$f36eff6c,$4e5e60ff
+	dc.l	$fffff6d4,$f22ed0c0,$ffdcf22e,$9c00ff60
+	dc.l	$4cee0303,$ff9c3d7c,$30d0000a,$3d7ce004
+	dc.l	$ff6ef36e,$ff6c4e5e,$60ffffff,$f694f22e
+	dc.l	$f040ff6c,$f22ed0c0,$ffdcf22e,$9c00ff60
+	dc.l	$4cee0303,$ff9c3d7c,$30d4000a,$3d7ce005
+	dc.l	$ff6ef36e,$ff6c4e5e,$60ffffff,$f60c2cae
+	dc.l	$ffd4082e,$00050004,$66000038,$206effd8
+	dc.l	$4e60f22e,$f040ff6c,$f22ed0c0,$ffdcf22e
+	dc.l	$9c00ff60,$4cee0303,$ff9c3d7c,$30cc000a
+	dc.l	$3d7ce003,$ff6ef36e,$ff6c4e5e,$60ffffff
+	dc.l	$f5de0c2e,$0008ff4a,$66c8f22e,$f080ff6c
+	dc.l	$f22ef040,$ff78f22e,$d0c0ffdc,$f22e9c00
+	dc.l	$ff604cee,$0303ff9c,$3d7c30cc,$000a3d7c
+	dc.l	$e003ff7a,$f36eff78,$2c562f6f,$00c400b8
+	dc.l	$2f6f00c8,$00bc2f6f,$00cc00c0,$2f6f002c
+	dc.l	$00c42f6f,$003000c8,$2f6f0034,$00ccdffc
+	dc.l	$000000b8,$60ffffff,$f576f22e,$f040ff6c
+	dc.l	$f22ed0c0,$ffdcf22e,$9c00ff60,$4cee0303
+	dc.l	$ff9c3d7c,$30c4000a,$3d7ce001,$ff6ef36e
+	dc.l	$ff6c4e5e,$60ffffff,$f55c02ae,$00ff00ff
+	dc.l	$ff64f23c,$90000000,$0000f23c,$88000000
+	dc.l	$000061ff,$00005548,$41eeff6c,$61ff0000
+	dc.l	$22721d40,$ff4ee9ee,$0183ff42,$082e0005
+	dc.l	$ff436728,$0c2e003a,$ff436720,$61ff0000
+	dc.l	$542a41ee,$ff7861ff,$00002248,$0c000006
+	dc.l	$660661ff,$000021ac,$1d40ff4f,$4280102e
+	dc.l	$ff63e9ee,$1047ff43,$41eeff6c,$43eeff78
+	dc.l	$223b1d30,$0000110c,$4ebb1930,$00001104
+	dc.l	$102eff62,$6600008a,$102eff43,$02000038
+	dc.l	$0c000038,$670ce9ee,$0183ff42,$61ff0000
+	dc.l	$545a082e,$00050004,$6600002a,$206effd8
+	dc.l	$4e60f22e,$d0c0ffdc,$f22e9c00,$ff604cee
+	dc.l	$0303ff9c,$4e5e0817,$00076600,$012660ff
+	dc.l	$fffff440,$082e0002,$ff4a67d6,$f22ed0c0
+	dc.l	$ffdcf22e,$9c00ff60,$4cee0303,$ff9c4e5e
+	dc.l	$2f6f0004,$00102f6f,$0000000c,$dffc0000
+	dc.l	$000c0817,$00076600,$00ea60ff,$fffff404
+	dc.l	$c02eff66,$edc00608,$6618082e,$0004ff66
+	dc.l	$6700ff66,$082e0001,$ff626700,$ff5c6000
+	dc.l	$006e0480,$00000018,$0c000006,$6d14082e
+	dc.l	$0003ff66,$66000060,$082e0004,$ff666600
+	dc.l	$004e082e,$00050004,$66000054,$206effd8
+	dc.l	$4e603d7b,$022aff6e,$f22ed0c0,$ffdcf22e
+	dc.l	$9c00ff60,$4cee0303,$ff9cf36e,$ff6c4e5e
+	dc.l	$08170007,$6600006c,$60ffffff,$f386e000
+	dc.l	$e006e004,$e005e003,$e002e001,$e001303c
+	dc.l	$00036000,$ffae303c,$00046000,$ffa6082e
+	dc.l	$0002ff4a,$67ac3d7b,$02d6ff6e,$f22ed0c0
+	dc.l	$ffdcf22e,$9c00ff60,$4cee0303,$ff9cf36e
+	dc.l	$ff6c4e5e,$2f6f0004,$00102f6f,$0000000c
+	dc.l	$dffc0000,$000c0817,$00076606,$60ffffff
+	dc.l	$f3223f7c,$20240006,$f22fa400,$000860ff
+	dc.l	$fffff402,$02aeffff,$00ffff64,$f23c9000
+	dc.l	$00000000,$f23c8800,$00000000,$e9ee0183
+	dc.l	$ff4261ff,$000051b4,$41eeff6c,$61ff0000
+	dc.l	$20620c00,$00066606,$61ff0000,$1fc61d40
+	dc.l	$ff4e4280,$102eff63,$41eeff6c,$2d56ffd4
+	dc.l	$61ff0000,$248c102e,$ff626600,$00842cae
+	dc.l	$ffd4082e,$00050004,$6628206e,$ffd84e60
+	dc.l	$f22ed0c0,$ffdcf22e,$9c00ff60,$4cee0303
+	dc.l	$ff9c4e5e,$08170007,$6600ff68,$60ffffff
+	dc.l	$f282082e,$0003ff4a,$67d6f22e,$d0c0ffdc
+	dc.l	$f22e9c00,$ff604cee,$0303ff9c,$2c562f6f
+	dc.l	$00c400b8,$2f6f00c8,$00bc2f6f,$003800c4
+	dc.l	$2f6f003c,$00c82f6f,$004000cc,$dffc0000
+	dc.l	$00b80817,$00076600,$ff1a60ff,$fffff234
+	dc.l	$c02eff66,$edc00608,$6700ff74,$2caeffd4
+	dc.l	$0c00001a,$6e0000e8,$67000072,$082e0005
+	dc.l	$0004660a,$206effd8,$4e606000,$fb8e0c2e
+	dc.l	$0008ff4a,$6600fb84,$f22ed0c0,$ffdcf22e
+	dc.l	$9c00ff60,$4cee0303,$ff9c3d7c,$30d8000a
+	dc.l	$3d7ce006,$ff6ef36e,$ff6c2c56,$2f6f00c4
+	dc.l	$00b82f6f,$00c800bc,$2f6f00cc,$00c02f6f
+	dc.l	$003800c4,$2f6f003c,$00c82f6f,$004000cc
+	dc.l	$dffc0000,$00b860ff,$fffff22c,$082e0005
+	dc.l	$00046600,$000c206e,$ffd84e60,$6000fb46
+	dc.l	$0c2e0008,$ff4a6600,$fb3cf22e,$d0c0ffdc
+	dc.l	$f22e9c00,$ff604cee,$0303ff9c,$3d7c30d0
+	dc.l	$000a3d7c,$e004ff6e,$f36eff6c,$2c562f6f
+	dc.l	$00c400b8,$2f6f00c8,$00bc2f6f,$00cc00c0
+	dc.l	$2f6f0038,$00c42f6f,$003c00c8,$2f6f0040
+	dc.l	$00ccdffc,$000000b8,$60ffffff,$f1a4082e
+	dc.l	$00050004,$6600000c,$206effd8,$4e606000
+	dc.l	$fbda0c2e,$0008ff4a,$6600fbd0,$f22ed0c0
+	dc.l	$ffdcf22e,$9c00ff60,$4cee0303,$ff9c3d7c
+	dc.l	$30c4000a,$3d7ce001,$ff6ef36e,$ff6c2c56
+	dc.l	$2f6f00c4,$00b82f6f,$00c800bc,$2f6f00cc
+	dc.l	$00c02f6f,$003800c4,$2f6f003c,$00c82f6f
+	dc.l	$004000cc,$dffc0000,$00b860ff,$fffff106
+	dc.l	$e9ee00c3,$ff420c00,$00016708,$0c000005
+	dc.l	$67344e75,$302eff6c,$02407fff,$67260c40
+	dc.l	$3f806e20,$44400640,$3f81222e,$ff70e0a9
+	dc.l	$08c1001f,$2d41ff70,$026e8000,$ff6c006e
+	dc.l	$3f80ff6c,$4e75302e,$ff6c0240,$7fff673a
+	dc.l	$0c403c00,$6e344a2e,$ff6c5bee,$ff6e3d40
+	dc.l	$ff6c4280,$41eeff6c,$323c3c01,$61ff0000
+	dc.l	$1a66303c,$3c004a2e,$ff6e6704,$08c0000f
+	dc.l	$08ee0007,$ff703d40,$ff6c4e75,$082e0005
+	dc.l	$000467ff,$fffff176,$2d680000,$ff782d68
+	dc.l	$0004ff7c,$2d680008,$ff804281,$4e752f00
+	dc.l	$4e7a0808,$08000001,$66000460,$201f4e56
+	dc.l	$ff4048ee,$0303ff9c,$f22ebc00,$ff60f22e
+	dc.l	$f0c0ffdc,$2d6e0006,$ff44206e,$ff4458ae
+	dc.l	$ff4461ff,$fffff152,$2d40ff40,$4a406b00
+	dc.l	$020e02ae,$00ff00ff,$ff640800,$000a6618
+	dc.l	$206eff44,$43eeff6c,$700c61ff,$fffff0d2
+	dc.l	$4a816600,$04926048,$206eff44,$43eeff6c
+	dc.l	$700c61ff,$fffff0ba,$4a816600,$047ae9ee
+	dc.l	$004fff6c,$0c407fff,$6726102e,$ff6f0200
+	dc.l	$000f660c,$4aaeff70,$66064aae,$ff746710
+	dc.l	$41eeff6c,$61ff0000,$501af22e,$f080ff6c
+	dc.l	$06ae0000,$000cff44,$41eeff6c,$61ff0000
+	dc.l	$1cd21d40,$ff4e0c00,$0006660a,$61ff0000
+	dc.l	$1c321d40,$ff4e422e,$ff53082e,$0005ff43
+	dc.l	$6748082e,$0004ff43,$662ce9ee,$0183ff42
+	dc.l	$61ff0000,$4e7641ee,$ff7861ff,$00001c94
+	dc.l	$1d40ff4f,$0c000006,$662061ff,$00001bf4
+	dc.l	$1d40ff4f,$6014082e,$0003ff43,$670c50ee
+	dc.l	$ff53082e,$0001ff43,$67c04280,$102eff63
+	dc.l	$122eff43,$0241007f,$f23c9000,$00000000
+	dc.l	$f23c8800,$00000000,$41eeff6c,$43eeff78
+	dc.l	$223b1530,$00000b2c,$4ebb1930,$00000b24
+	dc.l	$102eff62,$66404a2e,$ff53660c,$e9ee0183
+	dc.l	$ff4261ff,$00004e84,$2d6e0006,$ff682d6e
+	dc.l	$ff440006,$f22ed0c0,$ffdcf22e,$9c00ff60
+	dc.l	$4cee0303,$ff9c4e5e,$08170007,$66000096
+	dc.l	$60ffffff,$ee6ec02e,$ff66edc0,$06086612
+	dc.l	$082e0004,$ff6667ae,$082e0001,$ff6267ac
+	dc.l	$60340480,$00000018,$0c000006,$6610082e
+	dc.l	$0004ff66,$6620082e,$0003ff66,$66203d7b
+	dc.l	$0206ff6e,$601ee002,$e006e004,$e005e003
+	dc.l	$e002e001,$e0013d7c,$e005ff6e,$60063d7c
+	dc.l	$e003ff6e,$2d6e0006,$ff682d6e,$ff440006
+	dc.l	$f22ed0c0,$ffdcf22e,$9c00ff60,$4cee0303
+	dc.l	$ff9cf36e,$ff6c4e5e,$08170007,$660660ff
+	dc.l	$ffffede0,$2f173f6f,$00080004,$3f7c2024
+	dc.l	$0006f22f,$a4000008,$60ffffff,$eeb80800
+	dc.l	$000e6700,$01c2082e,$00050004,$66164e68
+	dc.l	$2d48ffd8,$61ff0000,$0bce206e,$ffd84e60
+	dc.l	$600001aa,$422eff4a,$41ee000c,$2d48ffd8
+	dc.l	$61ff0000,$0bb20c2e,$0008ff4a,$67000086
+	dc.l	$0c2e0004,$ff4a6600,$0184082e,$00070004
+	dc.l	$66363dae,$00040804,$2daeff44,$08063dbc
+	dc.l	$00f0080a,$41f60804,$2d480004,$f22ed0c0
+	dc.l	$ffdcf22e,$9c00ff60,$4cee0303,$ff9c4e5e
+	dc.l	$2e5f60ff,$ffffed3c,$3dae0004,$08002dae
+	dc.l	$ff440802,$3dbc2024,$08062dae,$00060808
+	dc.l	$41f60800,$2d480004,$f22ed0c0,$ffdcf22e
+	dc.l	$9c00ff60,$4cee0303,$ff9c4e5e,$2e5f60ff
+	dc.l	$ffffedf2,$1d41000a,$1d40000b,$f22ed0c0
+	dc.l	$ffdcf22e,$9c00ff60,$4cee0303,$ff9c2f16
+	dc.l	$2f002f01,$2f2eff44,$4280102e,$000b4480
+	dc.l	$082e0007,$0004671c,$3dae0004,$08002dae
+	dc.l	$00060808,$2d9f0802,$3dbc2024,$08064876
+	dc.l	$08006014,$3dae0004,$08042d9f,$08063dbc
+	dc.l	$00f0080a,$48760804,$4281122e,$000a4a01
+	dc.l	$6a0cf236,$f080080c,$06800000,$000ce309
+	dc.l	$6a0cf236,$f040080c,$06800000,$000ce309
+	dc.l	$6a0cf236,$f020080c,$06800000,$000ce309
+	dc.l	$6a0cf236,$f010080c,$06800000,$000ce309
+	dc.l	$6a0cf236,$f008080c,$06800000,$000ce309
+	dc.l	$6a0cf236,$f004080c,$06800000,$000ce309
+	dc.l	$6a0cf236,$f002080c,$06800000,$000ce309
+	dc.l	$6a06f236,$f001080c,$222f0004,$202f0008
+	dc.l	$2c6f000c,$2e5f0817,$000767ff,$ffffec04
+	dc.l	$60ffffff,$ecf061ff,$00001244,$f22ed0c0
+	dc.l	$ffdcf22e,$9c00ff60,$4cee0303,$ff9c082e
+	dc.l	$00070004,$660e2d6e,$ff440006,$4e5e60ff
+	dc.l	$ffffebd0,$2c563f6f,$00c400c0,$2f6f00c6
+	dc.l	$00c82f6f,$000400c2,$3f7c2024,$00c6dffc
+	dc.l	$000000c0,$60ffffff,$ec9c201f,$4e56ff40
+	dc.l	$48ee0303,$ff9c2d6e,$0006ff44,$206eff44
+	dc.l	$58aeff44,$61ffffff,$ed002d40,$ff404a40
+	dc.l	$6b047010,$60260800,$000e6610,$e9c014c3
+	dc.l	$700c0c01,$00076614,$58806010,$428061ff
+	dc.l	$00000ce6,$202eff44,$90ae0006,$3d40000a
+	dc.l	$4cee0303,$ff9c4e5e,$518f2f00,$3f6f000c
+	dc.l	$00042f6f,$000e0006,$4280302f,$00122f6f
+	dc.l	$00060010,$d1af0006,$3f7c402c,$000a201f
+	dc.l	$60ffffff,$ebe44e7a,$08080800,$0001660c
+	dc.l	$f22e9c00,$ff60f22e,$d0c0ffdc,$4cee0303
+	dc.l	$ff9c4e5e,$514f2eaf,$00083f6f,$000c0004
+	dc.l	$3f7c4008,$00062f6f,$00020008,$2f7c0942
+	dc.l	$8001000c,$08170005,$670608ef,$0002000d
+	dc.l	$60ffffff,$ebd64fee,$ff404e7a,$18080801
+	dc.l	$0001660c,$f22ed0c0,$ffdcf22f,$9c000020
+	dc.l	$2c562f6f,$00c400bc,$3f6f00c8,$00c03f7c
+	dc.l	$400800c2,$2f4800c4,$3f4000c8,$3f7c0001
+	dc.l	$00ca4cef,$0303005c,$defc00bc,$60a64e56
+	dc.l	$ff40f32e,$ff6c48ee,$0303ff9c,$f22ebc00
+	dc.l	$ff60f22e,$f0c0ffdc,$2d6eff68,$ff44206e
+	dc.l	$ff4458ae,$ff4461ff,$ffffebce,$2d40ff40
+	dc.l	$0800000d,$662841ee,$ff6c61ff,$fffff1ea
+	dc.l	$f22ed0c0,$ffdcf22e,$9c00ff60,$4cee0303
+	dc.l	$ff9cf36e,$ff6c4e5e,$60ffffff,$ea94322e
+	dc.l	$ff6c0241,$7fff0c41,$7fff661a,$4aaeff74
+	dc.l	$660c222e,$ff700281,$7fffffff,$67082d6e
+	dc.l	$ff70ff54,$6012223c,$7fffffff,$4a2eff6c
+	dc.l	$6a025281,$2d41ff54,$e9c004c3,$122eff41
+	dc.l	$307b0206,$4efb8802,$006c0000,$0000ff98
+	dc.l	$003e0000,$00100000,$102eff54,$0c010007
+	dc.l	$6f16206e,$000c61ff,$ffffeb86,$4a8166ff
+	dc.l	$00005436,$6000ff6a,$02410007,$61ff0000
+	dc.l	$478e6000,$ff5c302e,$ff540c01,$00076f16
+	dc.l	$206e000c,$61ffffff,$eb6e4a81,$66ff0000
+	dc.l	$54166000,$ff3c0241,$000761ff,$00004724
+	dc.l	$6000ff2e,$202eff54,$0c010007,$6f16206e
+	dc.l	$000c61ff,$ffffeb56,$4a8166ff,$000053f6
+	dc.l	$6000ff0e,$02410007,$61ff0000,$46ba6000
+	dc.l	$ff004e56,$ff40f32e,$ff6c48ee,$0303ff9c
+	dc.l	$f22ebc00,$ff60f22e,$f0c0ffdc,$2d6eff68
+	dc.l	$ff44206e,$ff4458ae,$ff4461ff,$ffffea8a
+	dc.l	$2d40ff40,$0800000d,$6600002a,$41eeff6c
+	dc.l	$61ffffff,$f0a4f22e,$d0c0ffdc,$f22e9c00
+	dc.l	$ff604cee,$0303ff9c,$f36eff6c,$4e5e60ff
+	dc.l	$ffffe964,$e9c004c3,$122eff41,$307b0206
+	dc.l	$4efb8802,$007400a6,$015a0000,$00420104
+	dc.l	$00100000,$102eff70,$08c00006,$0c010007
+	dc.l	$6f16206e,$000c61ff,$ffffea76,$4a8166ff
+	dc.l	$00005326,$6000ffa0,$02410007,$61ff0000
+	dc.l	$467e6000,$ff92302e,$ff7008c0,$000e0c01
+	dc.l	$00076f16,$206e000c,$61ffffff,$ea5a4a81
+	dc.l	$66ff0000,$53026000,$ff6e0241,$000761ff
+	dc.l	$00004610,$6000ff60,$202eff70,$08c0001e
+	dc.l	$0c010007,$6f16206e,$000c61ff,$ffffea3e
+	dc.l	$4a8166ff,$000052de,$6000ff3c,$02410007
+	dc.l	$61ff0000,$45a26000,$ff2e0c01,$00076f2e
+	dc.l	$202eff6c,$02808000,$00000080,$7fc00000
+	dc.l	$222eff70,$e0898081,$206e000c,$61ffffff
+	dc.l	$e9fc4a81,$66ff0000,$529c6000,$fefa202e
+	dc.l	$ff6c0280,$80000000,$00807fc0,$00002f01
+	dc.l	$222eff70,$e0898081,$221f0241,$000761ff
+	dc.l	$00004544,$6000fed0,$202eff6c,$02808000
+	dc.l	$00000080,$7ff80000,$222eff70,$2d40ff84
+	dc.l	$700be0a9,$83aeff84,$222eff70,$02810000
+	dc.l	$07ffe0b9,$2d41ff88,$222eff74,$e0a983ae
+	dc.l	$ff8841ee,$ff84226e,$000c7008,$61ffffff
+	dc.l	$e8cc4a81,$66ff0000,$522a6000,$fe7a422e
+	dc.l	$ff4a3d6e,$ff6cff84,$426eff86,$202eff70
+	dc.l	$08c0001e,$2d40ff88,$2d6eff74,$ff8c082e
+	dc.l	$00050004,$66384e68,$2d48ffd8,$2d56ffd4
+	dc.l	$61ff0000,$02c22248,$2d48000c,$206effd8
+	dc.l	$4e602cae,$ffd441ee,$ff84700c,$61ffffff
+	dc.l	$e86c4a81,$66ff0000,$51d86000,$fe1a2d56
+	dc.l	$ffd461ff,$00000290,$22482d48,$000c2cae
+	dc.l	$ffd40c2e,$0008ff4a,$66ccf22e,$d0c0ffdc
+	dc.l	$f22e9c00,$ff604cee,$0303ff9c,$f36eff6c
+	dc.l	$2c6effd4,$2f6f00c4,$00b82f6f,$00c800bc
+	dc.l	$2f6f00cc,$00c02f6f,$004400c4,$2f6f0048
+	dc.l	$00c82f6f,$004c00cc,$dffc0000,$00b860ff
+	dc.l	$ffffe734,$4e56ff40,$f32eff6c,$48ee0303
+	dc.l	$ff9cf22e,$bc00ff60,$f22ef0c0,$ffdc2d6e
+	dc.l	$ff68ff44,$206eff44,$58aeff44,$61ffffff
+	dc.l	$e7f82d40,$ff400800,$000d6600,$0106e9c0
+	dc.l	$04c36622,$0c6e401e,$ff6c661a,$f23c9000
+	dc.l	$00000000,$f22e4000,$ff70f22e,$6800ff6c
+	dc.l	$3d7ce001,$ff6e41ee,$ff6c61ff,$ffffedea
+	dc.l	$02ae00ff,$01ffff64,$f23c9000,$00000000
+	dc.l	$f23c8800,$00000000,$e9ee1006,$ff420c01
+	dc.l	$00176700,$009641ee,$ff6c61ff,$00001394
+	dc.l	$1d40ff4e,$082e0005,$ff43672e,$082e0004
+	dc.l	$ff436626,$e9ee0183,$ff4261ff,$0000454c
+	dc.l	$41eeff78,$61ff0000,$136a0c00,$00066606
+	dc.l	$61ff0000,$12ce1d40,$ff4f4280,$102eff63
+	dc.l	$122eff43,$0241007f,$41eeff6c,$43eeff78
+	dc.l	$223b1530,$0000022c,$4ebb1930,$00000224
+	dc.l	$e9ee0183,$ff4261ff,$00004590,$f22ed0c0
+	dc.l	$ffdcf22e,$9c00ff60,$4cee0303,$ff9cf36e
+	dc.l	$ff6c4e5e,$60ffffff,$e5cc4280,$102eff63
+	dc.l	$122eff43,$02810000,$007f61ff,$00000396
+	dc.l	$60be1d7c,$0000ff4e,$4280102e,$ff6302ae
+	dc.l	$ffff00ff,$ff6441ee,$ff6c61ff,$00001722
+	dc.l	$60aa4e56,$ff40f32e,$ff6c48ee,$0303ff9c
+	dc.l	$f22ebc00,$ff60f22e,$f0c0ffdc,$2d6eff68
+	dc.l	$ff44206e,$ff4458ae,$ff4461ff,$ffffe69a
+	dc.l	$2d40ff40,$41eeff6c,$61ffffff,$ecbcf22e
+	dc.l	$d0c0ffdc,$f22e9c00,$ff604cee,$0303ff9c
+	dc.l	$f36eff6c,$4e5e60ff,$ffffe592,$0c6f402c
+	dc.l	$000667ff,$ffffe5b2,$60ffffff,$e5962040
+	dc.l	$102eff41,$22000240,$00380281,$00000007
+	dc.l	$0c000018,$67240c00,$0020672c,$80410c00
+	dc.l	$003c6706,$206e000c,$4e751d7c,$0080ff4a
+	dc.l	$41f60162,$ff680004,$4e752008,$61ff0000
+	dc.l	$42ca206e,$000c4e75,$200861ff,$0000430c
+	dc.l	$206e000c,$0c00000c,$67024e75,$51882d48
+	dc.l	$000c4e75,$102eff41,$22000240,$00380281
+	dc.l	$00000007,$0c000018,$670e0c00,$00206700
+	dc.l	$0076206e,$000c4e75,$323b120e,$206e000c
+	dc.l	$4efb1006,$4afc0008,$0010001a,$0024002c
+	dc.l	$0034003c,$0044004e,$06ae0000,$000cffa4
+	dc.l	$4e7506ae,$0000000c,$ffa84e75,$d5fc0000
+	dc.l	$000c4e75,$d7fc0000,$000c4e75,$d9fc0000
+	dc.l	$000c4e75,$dbfc0000,$000c4e75,$06ae0000
+	dc.l	$000cffd4,$4e751d7c,$0004ff4a,$06ae0000
+	dc.l	$000cffd8,$4e75323b,$1214206e,$000c5188
+	dc.l	$51ae000c,$4efb1006,$4afc0008,$00100016
+	dc.l	$001c0020,$00240028,$002c0032,$2d48ffa4
+	dc.l	$4e752d48,$ffa84e75,$24484e75,$26484e75
+	dc.l	$28484e75,$2a484e75,$2d48ffd4,$4e752d48
+	dc.l	$ffd81d7c,$0008ff4a,$4e754afc,$006d0000
+	dc.l	$20700000,$2a660000,$00000000,$2b0a0000
+	dc.l	$3db20000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$2bb00000,$00000000,$27460000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$233c0000,$00000000,$36220000,$1c7c0000
+	dc.l	$32f20000,$00000000,$00000000,$2fb00000
+	dc.l	$39ea0000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$2e4e0000,$00000000,$29f40000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$205e0000,$3da00000,$00000000,$00000000
+	dc.l	$20680000,$3daa0000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$2b9e0000,$00000000,$27340000,$00000000
+	dc.l	$2ba80000,$00000000,$273e0000,$00000000
+	dc.l	$232a0000,$00000000,$36100000,$1c6a0000
+	dc.l	$23340000,$00000000,$361a0000,$1c740000
+	dc.l	$39d80000,$00000000,$00000000,$00000000
+	dc.l	$39e260fe,$122eff43,$02410070,$e80961ff
+	dc.l	$00003ed2,$02800000,$00ff2f00,$103b0920
+	dc.l	$01482f00,$61ff0000,$0340201f,$221f6700
+	dc.l	$0134082e,$0005ff42,$670000b8,$082e0004
+	dc.l	$ff426600,$001a123b,$1120021e,$082e0005
+	dc.l	$0004670a,$0c2e0008,$ff4a6602,$4e752248
+	dc.l	$9fc041d7,$4a016a0c,$20eeffdc,$20eeffe0
+	dc.l	$20eeffe4,$e3096a0c,$20eeffe8,$20eeffec
+	dc.l	$20eefff0,$e3096a0a,$f210f020,$d1fc0000
+	dc.l	$000ce309,$6a0af210,$f010d1fc,$0000000c
+	dc.l	$e3096a0a,$f210f008,$d1fc0000,$000ce309
+	dc.l	$6a0af210,$f004d1fc,$0000000c,$e3096a0a
+	dc.l	$f210f002,$d1fc0000,$000ce309,$6a0af210
+	dc.l	$f001d1fc,$0000000c,$2d49ff54,$41d72f00
+	dc.l	$61ffffff,$e248201f,$dfc04a81,$6600071e
+	dc.l	$4e752d48,$ff549fc0,$43d72f01,$2f0061ff
+	dc.l	$ffffe214,$201f4a81,$6600070e,$221f41d7
+	dc.l	$4a016a0c,$2d58ffdc,$2d58ffe0,$2d58ffe4
+	dc.l	$e3096a0c,$2d58ffe8,$2d58ffec,$2d58fff0
+	dc.l	$e3096a04,$f218d020,$e3096a04,$f218d010
+	dc.l	$e3096a04,$f218d008,$e3096a04,$f218d004
+	dc.l	$e3096a04,$f218d002,$e3096a04,$f218d001
+	dc.l	$dfc04e75,$4e75000c,$0c180c18,$18240c18
+	dc.l	$18241824,$24300c18,$18241824,$24301824
+	dc.l	$24302430,$303c0c18,$18241824,$24301824
+	dc.l	$24302430,$303c1824,$24302430,$303c2430
+	dc.l	$303c303c,$3c480c18,$18241824,$24301824
+	dc.l	$24302430,$303c1824,$24302430,$303c2430
+	dc.l	$303c303c,$3c481824,$24302430,$303c2430
+	dc.l	$303c303c,$3c482430,$303c303c,$3c48303c
+	dc.l	$3c483c48,$48540c18,$18241824,$24301824
+	dc.l	$24302430,$303c1824,$24302430,$303c2430
+	dc.l	$303c303c,$3c481824,$24302430,$303c2430
+	dc.l	$303c303c,$3c482430,$303c303c,$3c48303c
+	dc.l	$3c483c48,$48541824,$24302430,$303c2430
+	dc.l	$303c303c,$3c482430,$303c303c,$3c48303c
+	dc.l	$3c483c48,$48542430,$303c303c,$3c48303c
+	dc.l	$3c483c48,$4854303c,$3c483c48,$48543c48
+	dc.l	$48544854,$54600080,$40c020a0,$60e01090
+	dc.l	$50d030b0,$70f00888,$48c828a8,$68e81898
+	dc.l	$58d838b8,$78f80484,$44c424a4,$64e41494
+	dc.l	$54d434b4,$74f40c8c,$4ccc2cac,$6cec1c9c
+	dc.l	$5cdc3cbc,$7cfc0282,$42c222a2,$62e21292
+	dc.l	$52d232b2,$72f20a8a,$4aca2aaa,$6aea1a9a
+	dc.l	$5ada3aba,$7afa0686,$46c626a6,$66e61696
+	dc.l	$56d636b6,$76f60e8e,$4ece2eae,$6eee1e9e
+	dc.l	$5ede3ebe,$7efe0181,$41c121a1,$61e11191
+	dc.l	$51d131b1,$71f10989,$49c929a9,$69e91999
+	dc.l	$59d939b9,$79f90585,$45c525a5,$65e51595
+	dc.l	$55d535b5,$75f50d8d,$4dcd2dad,$6ded1d9d
+	dc.l	$5ddd3dbd,$7dfd0383,$43c323a3,$63e31393
+	dc.l	$53d333b3,$73f30b8b,$4bcb2bab,$6beb1b9b
+	dc.l	$5bdb3bbb,$7bfb0787,$47c727a7,$67e71797
+	dc.l	$57d737b7,$77f70f8f,$4fcf2faf,$6fef1f9f
+	dc.l	$5fdf3fbf,$7fff2040,$302eff40,$32000240
+	dc.l	$003f0281,$00000007,$303b020a,$4efb0006
+	dc.l	$4afc0040,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00800086,$008c0090,$00940098
+	dc.l	$009c00a0,$00a600b6,$00c600d2,$00de00ea
+	dc.l	$00f60102,$01180126,$0134013e,$01480152
+	dc.l	$015c0166,$017a0198,$01b601d2,$01ee020a
+	dc.l	$02260242,$02600260,$02600260,$02600260
+	dc.l	$02600260,$02c002da,$02f40314,$00000000
+	dc.l	$00000000,$206effa4,$4e75206e,$ffa84e75
+	dc.l	$204a4e75,$204b4e75,$204c4e75,$204d4e75
+	dc.l	$20564e75,$206effd8,$4e75202e,$ffa42200
+	dc.l	$d2882d41,$ffa42040,$4e75202e,$ffa82200
+	dc.l	$d2882d41,$ffa82040,$4e75200a,$2200d288
+	dc.l	$24412040,$4e75200b,$2200d288,$26412040
+	dc.l	$4e75200c,$2200d288,$28412040,$4e75200d
+	dc.l	$2200d288,$2a412040,$4e752016,$2200d288
+	dc.l	$2c812040,$4e751d7c,$0004ff4a,$202effd8
+	dc.l	$2200d288,$2d41ffd8,$20404e75,$202effa4
+	dc.l	$90882d40,$ffa42040,$4e75202e,$ffa89088
+	dc.l	$2d40ffa8,$20404e75,$200a9088,$24402040
+	dc.l	$4e75200b,$90882640,$20404e75,$200c9088
+	dc.l	$28402040,$4e75200d,$90882a40,$20404e75
+	dc.l	$20169088,$2c802040,$4e751d7c,$0008ff4a
+	dc.l	$202effd8,$90882d40,$ffd82040,$4e75206e
+	dc.l	$ff4454ae,$ff4461ff,$ffffde38,$4a8166ff
+	dc.l	$fffff1b6,$3040d1ee,$ffa44e75,$206eff44
+	dc.l	$54aeff44,$61ffffff,$de1a4a81,$66ffffff
+	dc.l	$f1983040,$d1eeffa8,$4e75206e,$ff4454ae
+	dc.l	$ff4461ff,$ffffddfc,$4a8166ff,$fffff17a
+	dc.l	$3040d1ca,$4e75206e,$ff4454ae,$ff4461ff
+	dc.l	$ffffdde0,$4a8166ff,$fffff15e,$3040d1cb
+	dc.l	$4e75206e,$ff4454ae,$ff4461ff,$ffffddc4
+	dc.l	$4a8166ff,$fffff142,$3040d1cc,$4e75206e
+	dc.l	$ff4454ae,$ff4461ff,$ffffdda8,$4a8166ff
+	dc.l	$fffff126,$3040d1cd,$4e75206e,$ff4454ae
+	dc.l	$ff4461ff,$ffffdd8c,$4a8166ff,$fffff10a
+	dc.l	$3040d1d6,$4e75206e,$ff4454ae,$ff4461ff
+	dc.l	$ffffdd70,$4a8166ff,$fffff0ee,$3040d1ee
+	dc.l	$ffd84e75,$508161ff,$000038fa,$2f00206e
+	dc.l	$ff4454ae,$ff4461ff,$ffffdd48,$4a8166ff
+	dc.l	$fffff0c6,$205f0800,$00086600,$00e62d40
+	dc.l	$ff542200,$e9590241,$000f61ff,$000038c6
+	dc.l	$2f02242e,$ff540802,$000b6602,$48c02202
+	dc.l	$ef590281,$00000003,$e3a849c2,$d082d1c0
+	dc.l	$241f4e75,$206eff44,$54aeff44,$61ffffff
+	dc.l	$dcf24a81,$66ffffff,$f0703040,$4e75206e
+	dc.l	$ff4458ae,$ff4461ff,$ffffdcee,$4a8166ff
+	dc.l	$fffff056,$20404e75,$206eff44,$54aeff44
+	dc.l	$61ffffff,$dcbe4a81,$66ffffff,$f03c3040
+	dc.l	$d1eeff44,$55884e75,$206eff44,$54aeff44
+	dc.l	$61ffffff,$dc9e4a81,$66ffffff,$f01c206e
+	dc.l	$ff445588,$08000008,$66000038,$2d40ff54
+	dc.l	$2200e959,$0241000f,$61ff0000,$38182f02
+	dc.l	$242eff54,$0802000b,$660248c0,$2202ef59
+	dc.l	$02810000,$0003e3a8,$49c2d082,$d1c0241f
+	dc.l	$4e750800,$0006670c,$48e73c00,$2a002608
+	dc.l	$42826028,$2d40ff54,$e9c01404,$61ff0000
+	dc.l	$37d448e7,$3c002400,$2a2eff54,$26080805
+	dc.l	$000b6602,$48c2e9c5,$0542e1aa,$08050007
+	dc.l	$67024283,$e9c50682,$0c000002,$6d346718
+	dc.l	$206eff44,$58aeff44,$61ffffff,$dc0c4a81
+	dc.l	$66ff0000,$00b06018,$206eff44,$54aeff44
+	dc.l	$61ffffff,$dbde4a81,$66ff0000,$009848c0
+	dc.l	$d680e9c5,$07826700,$006e0c00,$00026d34
+	dc.l	$6718206e,$ff4458ae,$ff4461ff,$ffffdbca
+	dc.l	$4a8166ff,$0000006e,$601c206e,$ff4454ae
+	dc.l	$ff4461ff,$ffffdb9c,$4a8166ff,$00000056
+	dc.l	$48c06002,$42802800,$08050002,$67142043
+	dc.l	$61ffffff,$dbd64a81,$66000028,$d082d084
+	dc.l	$6018d682,$204361ff,$ffffdbc0,$4a816600
+	dc.l	$0012d084,$6004d682,$20032040,$4cdf003c
+	dc.l	$4e752043,$4cdf003c,$303c0101,$60ffffff
+	dc.l	$ef184cdf,$003c60ff,$ffffeebe,$61ff0000
+	dc.l	$44ea303c,$00e1600a,$61ff0000,$44de303c
+	dc.l	$0161206e,$ff5460ff,$ffffeeee,$102eff42
+	dc.l	$0c00009c,$670000b2,$0c000098,$67000074
+	dc.l	$0c000094,$6736206e,$ff4458ae,$ff4461ff
+	dc.l	$ffffdb06,$4a8166ff,$ffffee6e,$2d40ff64
+	dc.l	$206eff44,$58aeff44,$61ffffff,$daec4a81
+	dc.l	$66ffffff,$ee542d40,$ff684e75,$206eff44
+	dc.l	$58aeff44,$61ffffff,$dad04a81,$66ffffff
+	dc.l	$ee382d40,$ff60206e,$ff4458ae,$ff4461ff
+	dc.l	$ffffdab6,$4a8166ff,$ffffee1e,$2d40ff68
+	dc.l	$4e75206e,$ff4458ae,$ff4461ff,$ffffda9a
+	dc.l	$4a8166ff,$ffffee02,$2d40ff60,$206eff44
+	dc.l	$58aeff44,$61ffffff,$da804a81,$66ffffff
+	dc.l	$ede82d40,$ff644e75,$206eff44,$58aeff44
+	dc.l	$61ffffff,$da644a81,$66ffffff,$edcc2d40
+	dc.l	$ff60206e,$ff4458ae,$ff4461ff,$ffffda4a
+	dc.l	$4a8166ff,$ffffedb2,$2d40ff64,$206eff44
+	dc.l	$58aeff44,$61ffffff,$da304a81,$66ffffff
+	dc.l	$ed982d40,$ff684e75,$2d680004,$ff882d69
+	dc.l	$0004ff94,$2d680008,$ff8c2d69,$0008ff98
+	dc.l	$30280000,$32290000,$3d40ff84,$3d41ff90
+	dc.l	$02407fff,$02417fff,$3d40ff54,$3d41ff56
+	dc.l	$b0416cff,$0000005c,$61ff0000,$015a2f00
+	dc.l	$0c2e0004,$ff4e6610,$41eeff84,$61ff0000
+	dc.l	$04fa4440,$3d40ff54,$302eff56,$04400042
+	dc.l	$b06eff54,$6c1a302e,$ff54d06f,$0002322e
+	dc.l	$ff840241,$80008041,$3d40ff84,$201f4e75
+	dc.l	$026e8000,$ff8408ee,$0000ff85,$201f4e75
+	dc.l	$61ff0000,$00562f00,$0c2e0004,$ff4f6610
+	dc.l	$41eeff90,$61ff0000,$04a24440,$3d40ff56
+	dc.l	$302eff54,$04400042,$b06eff56,$6c1a302e
+	dc.l	$ff56d06f,$0002322e,$ff900241,$80008041
+	dc.l	$3d40ff90,$201f4e75,$026e8000,$ff9008ee
+	dc.l	$0000ff91,$201f4e75,$322eff84,$30010281
+	dc.l	$00007fff,$02408000,$00403fff,$3d40ff84
+	dc.l	$0c2e0004,$ff4e670a,$203c0000,$3fff9081
+	dc.l	$4e7541ee,$ff8461ff,$00000430,$44802200
+	dc.l	$60e60c2e,$0004ff4e,$673a322e,$ff840281
+	dc.l	$00007fff,$026e8000,$ff840801,$00006712
+	dc.l	$006e3fff,$ff84203c,$00003fff,$9081e280
+	dc.l	$4e75006e,$3ffeff84,$203c0000,$3ffe9081
+	dc.l	$e2804e75,$41eeff84,$61ff0000,$03de0800
+	dc.l	$00006710,$006e3fff,$ff840680,$00003fff
+	dc.l	$e2804e75,$006e3ffe,$ff840680,$00003ffe
+	dc.l	$e2804e75,$322eff90,$30010281,$00007fff
+	dc.l	$02408000,$00403fff,$3d40ff90,$0c2e0004
+	dc.l	$ff4f670a,$203c0000,$3fff9081,$4e7541ee
+	dc.l	$ff9061ff,$00000384,$44802200,$60e60c2e
+	dc.l	$0005ff4f,$67320c2e,$0003ff4f,$673e0c2e
+	dc.l	$0003ff4e,$671408ee,$0006ff70,$00ae0100
+	dc.l	$4080ff64,$41eeff6c,$604200ae,$01000000
+	dc.l	$ff6441ee,$ff6c6034,$00ae0100,$4080ff64
+	dc.l	$08ee0006,$ff7c41ee,$ff786020,$41eeff78
+	dc.l	$0c2e0005,$ff4e66ff,$0000000c,$00ae0000
+	dc.l	$4080ff64,$00ae0100,$0000ff64,$08280007
+	dc.l	$00006708,$00ae0800,$0000ff64,$f210d080
+	dc.l	$4e7500ae,$01002080,$ff64f23b,$d0800170
+	dc.l	$00000008,$4e757fff,$0000ffff,$ffffffff
+	dc.l	$ffff0000,$3f813c01,$e408323b,$02f63001
+	dc.l	$90680000,$0c400042,$6a164280,$082e0001
+	dc.l	$ff666704,$08c0001d,$61ff0000,$001a4e75
+	dc.l	$203c2000,$00003141,$000042a8,$000442a8
+	dc.l	$00084e75,$2d680008,$ff542d40,$ff582001
+	dc.l	$92680000,$6f100c41,$00206d10,$0c410040
+	dc.l	$6d506000,$009a202e,$ff584e75,$2f023140
+	dc.l	$00007020,$90410c41,$001d6d08,$142eff58
+	dc.l	$852eff57,$e9e82020,$0004e9e8,$18000004
+	dc.l	$e9ee0800,$ff542142,$00042141,$0008e8c0
+	dc.l	$009e6704,$08c0001d,$0280e000,$0000241f
+	dc.l	$4e752f02,$31400000,$04410020,$70209041
+	dc.l	$142eff58,$852eff57,$e9e82020,$0004e9e8
+	dc.l	$18000004,$e8c1009e,$660ce8ee,$081fff54
+	dc.l	$66042001,$60062001,$08c0001d,$42a80004
+	dc.l	$21420008,$0280e000,$0000241f,$4e753140
+	dc.l	$00000c41,$00416d12,$672442a8,$000442a8
+	dc.l	$0008203c,$20000000,$4e752028,$00042200
+	dc.l	$0280c000,$00000281,$3fffffff,$60122028
+	dc.l	$00040280,$80000000,$e2880281,$7fffffff
+	dc.l	$66164aa8,$00086610,$4a2eff58,$660a42a8
+	dc.l	$000442a8,$00084e75,$08c0001d,$42a80004
+	dc.l	$42a80008,$4e7561ff,$00000110,$4a806700
+	dc.l	$00fa006e,$0208ff66,$327b1206,$4efb9802
+	dc.l	$004000ea,$00240008,$4a280002,$6b0000dc
+	dc.l	$70ff4841,$0c010004,$6700003e,$6e000094
+	dc.l	$60000064,$4a280002,$6a0000c0,$70ff4841
+	dc.l	$0c010004,$67000022,$6e000078,$60000048
+	dc.l	$e3806400,$00a64841,$0c010004,$6700000a
+	dc.l	$6e000060,$60000030,$06a80000,$01000004
+	dc.l	$640ce4e8,$0004e4e8,$00065268,$00004a80
+	dc.l	$66060268,$fe000006,$02a8ffff,$ff000004
+	dc.l	$42a80008,$4e7552a8,$0008641a,$52a80004
+	dc.l	$6414e4e8,$0004e4e8,$0006e4e8,$0008e4e8
+	dc.l	$000a5268,$00004a80,$66060228,$00fe000b
+	dc.l	$4e7506a8,$00000800,$0008641a,$52a80004
+	dc.l	$6414e4e8,$0004e4e8,$0006e4e8,$0008e4e8
+	dc.l	$000a5268,$00004a80,$66060268,$f000000a
+	dc.l	$02a8ffff,$f8000008,$4e754841,$0c010004
+	dc.l	$6700ff86,$6eea4e75,$48414a01,$66044841
+	dc.l	$4e7548e7,$30000c01,$00046622,$e9e83602
+	dc.l	$0004741e,$e5ab2428,$00040282,$0000003f
+	dc.l	$66284aa8,$00086622,$4a80661e,$6020e9e8
+	dc.l	$35420008,$741ee5ab,$24280008,$02820000
+	dc.l	$01ff6606,$4a806602,$600408c3,$001d2003
+	dc.l	$4cdf000c,$48414e75,$2f022f03,$20280004
+	dc.l	$22280008,$edc02000,$671ae5a8,$e9c13022
+	dc.l	$8083e5a9,$21400004,$21410008,$2002261f
+	dc.l	$241f4e75,$edc12000,$e5a90682,$00000020
+	dc.l	$21410004,$42a80008,$2002261f,$241f4e75
+	dc.l	$ede80000,$0004660e,$ede80000,$00086700
+	dc.l	$00740640,$00204281,$32280000,$02417fff
+	dc.l	$b0416e1c,$92403028,$00000240,$80008240
+	dc.l	$31410000,$61ffffff,$ff82103c,$00004e75
+	dc.l	$0c010020,$6e20e9e8,$08400004,$21400004
+	dc.l	$20280008,$e3a82140,$00080268,$80000000
+	dc.l	$103c0004,$4e750441,$00202028,$0008e3a8
+	dc.l	$21400004,$42a80008,$02688000,$0000103c
+	dc.l	$00044e75,$02688000,$0000103c,$00014e75
+	dc.l	$30280000,$02407fff,$0c407fff,$67480828
+	dc.l	$00070004,$6706103c,$00004e75,$4a406618
+	dc.l	$4aa80004,$660c4aa8,$00086606,$103c0001
+	dc.l	$4e75103c,$00044e75,$4aa80004,$66124aa8
+	dc.l	$0008660c,$02688000,$0000103c,$00014e75
+	dc.l	$103c0006,$4e754aa8,$00086612,$20280004
+	dc.l	$02807fff,$ffff6606,$103c0002,$4e750828
+	dc.l	$00060004,$6706103c,$00034e75,$103c0005
+	dc.l	$4e752028,$00002200,$02807ff0,$0000670e
+	dc.l	$0c807ff0,$00006728,$103c0000,$4e750281
+	dc.l	$000fffff,$66ff0000,$00144aa8,$000466ff
+	dc.l	$0000000a,$103c0001,$4e75103c,$00044e75
+	dc.l	$0281000f,$ffff66ff,$00000014,$4aa80004
+	dc.l	$66ff0000,$000a103c,$00024e75,$08010013
+	dc.l	$66ff0000,$000a103c,$00054e75,$103c0003
+	dc.l	$4e752028,$00002200,$02807f80,$0000670e
+	dc.l	$0c807f80,$0000671e,$103c0000,$4e750281
+	dc.l	$007fffff,$66ff0000,$000a103c,$00014e75
+	dc.l	$103c0004,$4e750281,$007fffff,$66ff0000
+	dc.l	$000a103c,$00024e75,$08010016,$66ff0000
+	dc.l	$000a103c,$00054e75,$103c0003,$4e752f01
+	dc.l	$08280007,$000056e8,$00023228,$00000241
+	dc.l	$7fff9240,$31410000,$2f08202f,$00040240
+	dc.l	$00c0e848,$61ffffff,$fae22057,$322f0006
+	dc.l	$024100c0,$e8494841,$322f0006,$02410030
+	dc.l	$e84961ff,$fffffc22,$205f08a8,$00070000
+	dc.l	$4a280002,$670a08e8,$00070000,$42280002
+	dc.l	$42804aa8,$0004660a,$4aa80008,$660408c0
+	dc.l	$0002082e,$0001ff66,$670608ee,$0005ff67
+	dc.l	$588f4e75,$2f010828,$00070000,$56e80002
+	dc.l	$32280000,$02417fff,$92403141,$00002f08
+	dc.l	$428061ff,$fffffa64,$2057323c,$00044841
+	dc.l	$322f0006,$02410030,$e84961ff,$fffffbaa
+	dc.l	$205f08a8,$00070000,$4a280002,$670a08e8
+	dc.l	$00070000,$42280002,$42804aa8,$0004660a
+	dc.l	$4aa80008,$660408c0,$0002082e,$0001ff66
+	dc.l	$670608ee,$0005ff67,$588f4e75,$02410010
+	dc.l	$e8088200,$3001e309,$600e0241,$00108200
+	dc.l	$48408200,$3001e309,$103b0008,$41fb1620
+	dc.l	$4e750200,$00020200,$00020200,$00020000
+	dc.l	$00000a08,$0a080a08,$0a080a08,$0a087fff
+	dc.l	$00000000,$00000000,$00000000,$00007ffe
+	dc.l	$0000ffff,$ffffffff,$ffff0000,$00007ffe
+	dc.l	$0000ffff,$ffffffff,$ffff0000,$00007fff
+	dc.l	$00000000,$00000000,$00000000,$00007fff
+	dc.l	$00000000,$00000000,$00000000,$0000407e
+	dc.l	$0000ffff,$ff000000,$00000000,$0000407e
+	dc.l	$0000ffff,$ff000000,$00000000,$00007fff
+	dc.l	$00000000,$00000000,$00000000,$00007fff
+	dc.l	$00000000,$00000000,$00000000,$000043fe
+	dc.l	$0000ffff,$ffffffff,$f8000000,$000043fe
+	dc.l	$0000ffff,$ffffffff,$f8000000,$00007fff
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$00000000
+	dc.l	$00000000,$00000000,$00000000,$0000ffff
+	dc.l	$00000000,$00000000,$00000000,$0000fffe
+	dc.l	$0000ffff,$ffffffff,$ffff0000,$0000ffff
+	dc.l	$00000000,$00000000,$00000000,$0000fffe
+	dc.l	$0000ffff,$ffffffff,$ffff0000,$0000ffff
+	dc.l	$00000000,$00000000,$00000000,$0000c07e
+	dc.l	$0000ffff,$ff000000,$00000000,$0000ffff
+	dc.l	$00000000,$00000000,$00000000,$0000c07e
+	dc.l	$0000ffff,$ff000000,$00000000,$0000ffff
+	dc.l	$00000000,$00000000,$00000000,$0000c3fe
+	dc.l	$0000ffff,$ffffffff,$f8000000,$0000ffff
+	dc.l	$00000000,$00000000,$00000000,$0000c3fe
+	dc.l	$0000ffff,$ffffffff,$f8000000,$0000e9ee
+	dc.l	$10c3ff42,$327b120a,$4efb9806,$4afc0008
+	dc.l	$00e001e0,$01480620,$0078041a,$00100620
+	dc.l	$4a2eff4e,$664cf228,$d0800000,$f2009000
+	dc.l	$f2007800,$f23c9000,$00000000,$f201a800
+	dc.l	$836eff66,$122eff41,$02010038,$6714206e
+	dc.l	$000c61ff,$ffffcfaa,$4a8166ff,$0000385a
+	dc.l	$4e75122e,$ff410241,$000761ff,$00002bb0
+	dc.l	$4e752228,$00000281,$80000000,$00810080
+	dc.l	$0000f201,$440060a4,$4a2eff4e,$664cf228
+	dc.l	$d0800000,$f2009000,$f2007000,$f23c9000
+	dc.l	$00000000,$f201a800,$836eff66,$122eff41
+	dc.l	$02010038,$6714206e,$000c61ff,$ffffcf58
+	dc.l	$4a8166ff,$00003800,$4e75122e,$ff410241
+	dc.l	$000761ff,$00002b0c,$4e752228,$00000281
+	dc.l	$80000000,$00810080,$0000f201,$440060a4
+	dc.l	$4a2eff4e,$664cf228,$d0800000,$f2009000
+	dc.l	$f2006000,$f23c9000,$00000000,$f201a800
+	dc.l	$836eff66,$122eff41,$02010038,$6714206e
+	dc.l	$000c61ff,$ffffcf06,$4a8166ff,$000037a6
+	dc.l	$4e75122e,$ff410241,$000761ff,$00002a68
+	dc.l	$4e752228,$00000281,$80000000,$00810080
+	dc.l	$0000f201,$440060a4,$3d680000,$ff84426e
+	dc.l	$ff862d68,$0004ff88,$2d680008,$ff8cf228
+	dc.l	$d0800000,$61ffffff,$e83e2248,$41eeff84
+	dc.l	$700c0c2e,$0008ff4a,$672661ff,$ffffcdee
+	dc.l	$4a816600,$00524a2e,$ff4e6602,$4e7508ee
+	dc.l	$0003ff66,$102eff62,$0200000a,$66164e75
+	dc.l	$61ffffff,$dc4a4a81,$6600002c,$4a2eff4e
+	dc.l	$66dc4e75,$41eeff84,$61ffffff,$f90e4440
+	dc.l	$02407fff,$026e8000,$ff84816e,$ff84f22e
+	dc.l	$d040ff84,$4e752cae,$ffd460ff,$00003702
+	dc.l	$02000030,$00000040,$2d40ff5c,$30280000
+	dc.l	$02407fff,$0c40407e,$6e0000e6,$67000152
+	dc.l	$0c403f81,$6d000058,$f228d080,$0000f22e
+	dc.l	$9000ff5c,$f23c8800,$00000000,$f2006400
+	dc.l	$f23c9000,$00000000,$f201a800,$836eff66
+	dc.l	$122eff41,$02010038,$6714206e,$000c61ff
+	dc.l	$ffffcdda,$4a8166ff,$0000367a,$4e75122e
+	dc.l	$ff410241,$000761ff,$0000293c,$4e7508ee
+	dc.l	$0003ff66,$3d680000,$ff842d68,$0004ff88
+	dc.l	$2d680008,$ff8c2f08,$42800c2e,$0004ff4e
+	dc.l	$660a41ee,$ff8461ff,$fffff840,$41eeff84
+	dc.l	$222eff5c,$61ffffff,$fa5841ee,$ff8461ff
+	dc.l	$0000034c,$122eff41,$02010038,$6714206e
+	dc.l	$000c61ff,$ffffcd66,$4a8166ff,$00003606
+	dc.l	$600e122e,$ff410241,$000761ff,$000028c8
+	dc.l	$122eff62,$0201000a,$660000b8,$588f4e75
+	dc.l	$4a280007,$660e4aa8,$00086608,$006e1048
+	dc.l	$ff666006,$006e1248,$ff662f08,$4a280000
+	dc.l	$5bc1202e,$ff5c61ff,$fffffae4,$f210d080
+	dc.l	$f2006400,$122eff41,$02010038,$6714206e
+	dc.l	$000c61ff,$ffffccf6,$4a8166ff,$00003596
+	dc.l	$600e122e,$ff410241,$000761ff,$00002858
+	dc.l	$122eff62,$0201000a,$6600007c,$588f4e75
+	dc.l	$32280000,$02418000,$00413fff,$3d41ff84
+	dc.l	$2d680004,$ff882d68,$0008ff8c,$f22e9000
+	dc.l	$ff5cf22e,$4800ff84,$f23c9000,$00000000
+	dc.l	$f2000018,$f23c5838,$0002f294,$fe7c6000
+	dc.l	$ff50205f,$3d680000,$ff842d68,$0004ff88
+	dc.l	$2d680008,$ff8c0c2e,$0004ff4e,$662c41ee
+	dc.l	$ff8461ff,$fffff714,$44800240,$7fffefee
+	dc.l	$004fff84,$6014205f,$3d680000,$ff842d68
+	dc.l	$0004ff88,$2d680008,$ff8c08ae,$0007ff84
+	dc.l	$56eeff86,$41eeff84,$122eff5f,$e8090241
+	dc.l	$000c4841,$122eff5f,$e8090241,$00034280
+	dc.l	$61ffffff,$f5544a2e,$ff866706,$08ee0007
+	dc.l	$ff84f22e,$d040ff84,$4e750200,$00300000
+	dc.l	$00802d40,$ff5c3028,$00000240,$7fff0c40
+	dc.l	$43fe6e00,$00c86700,$01200c40,$3c016d00
+	dc.l	$0046f228,$d0800000,$f22e9000,$ff5cf23c
+	dc.l	$88000000,$0000f22e,$7400ff54,$f23c9000
+	dc.l	$00000000,$f200a800,$816eff66,$226e000c
+	dc.l	$41eeff54,$700861ff,$ffffcaf2,$4a8166ff
+	dc.l	$00003450,$4e7508ee,$0003ff66,$3d680000
+	dc.l	$ff842d68,$0004ff88,$2d680008,$ff8c2f08
+	dc.l	$42800c2e,$0004ff4e,$660a41ee,$ff8461ff
+	dc.l	$fffff618,$41eeff84,$222eff5c,$61ffffff
+	dc.l	$f83041ee,$ff8461ff,$000000d2,$2d40ff54
+	dc.l	$2d41ff58,$226e000c,$41eeff54,$700861ff
+	dc.l	$ffffca8a,$4a8166ff,$000033e8,$122eff62
+	dc.l	$0201000a,$6600fe9c,$588f4e75,$3028000a
+	dc.l	$024007ff,$6608006e,$1048ff66,$6006006e
+	dc.l	$1248ff66,$2f084a28,$00005bc1,$202eff5c
+	dc.l	$61ffffff,$f8caf210,$d080f22e,$7400ff54
+	dc.l	$226e000c,$41eeff54,$700861ff,$ffffca2e
+	dc.l	$4a8166ff,$0000338c,$122eff62,$0201000a
+	dc.l	$6600fe74,$588f4e75,$32280000,$02418000
+	dc.l	$00413fff,$3d41ff84,$2d680004,$ff882d68
+	dc.l	$0008ff8c,$f22e9000,$ff5cf22e,$4800ff84
+	dc.l	$f23c9000,$00000000,$f2000018,$f23c5838
+	dc.l	$0002f294,$feae6000,$ff644280,$30280000
+	dc.l	$04403fff,$064003ff,$4a280004,$6b025340
+	dc.l	$4840e988,$4a280000,$6a0408c0,$001f2228
+	dc.l	$0004e9c1,$10548081,$2d40ff54,$22280004
+	dc.l	$7015e1a9,$2d41ff58,$22280008,$e9c10015
+	dc.l	$222eff58,$8280202e,$ff544e75,$42803028
+	dc.l	$00000440,$3fff0640,$007f4a28,$00046b02
+	dc.l	$53404840,$ef884a28,$00006a04,$08c0001f
+	dc.l	$22280004,$02817fff,$ff00e089,$80814e75
+	dc.l	$61ffffff,$e3822f08,$102eff4e,$66000082
+	dc.l	$082e0004,$ff426712,$122eff43,$e8090241
+	dc.l	$000761ff,$000024de,$6004102e,$ff43ebc0
+	dc.l	$06472f00,$41eeff6c,$61ff0000,$2b2002ae
+	dc.l	$cffff00f,$ff84201f,$4a2eff87,$66164aae
+	dc.l	$ff886610,$4aaeff8c,$660a4a80,$6606026e
+	dc.l	$f000ff84,$41eeff84,$225f700c,$0c2e0008
+	dc.l	$ff4a670e,$61ffffff,$c8d44a81,$6600fb38
+	dc.l	$4e7561ff,$ffffd748,$4a816600,$fb2a4e75
+	dc.l	$0c000004,$6700ff7a,$41eeff6c,$426eff6e
+	dc.l	$0c000005,$670260c0,$006e4080,$ff6608ee
+	dc.l	$0006ff70,$60b251fc,$51fc51fc,$51fc51fc
+	dc.l	$ffffc001,$ffffff81,$fffffc01,$00004000
+	dc.l	$0000007f,$000003ff,$02000030,$00000040
+	dc.l	$60080200,$00300000,$00802d40,$ff5c4241
+	dc.l	$122eff4f,$e709822e,$ff4e6600,$02e43d69
+	dc.l	$0000ff90,$2d690004,$ff942d69,$0008ff98
+	dc.l	$3d680000,$ff842d68,$0004ff88,$2d680008
+	dc.l	$ff8c61ff,$ffffef24,$2f0061ff,$ffffefc8
+	dc.l	$d197322e,$ff5eec09,$201fb0bb,$14846700
+	dc.l	$011e6d00,$0062b0bb,$14846700,$021a6e00
+	dc.l	$014af22e,$d080ff90,$f22e9000,$ff5cf23c
+	dc.l	$88000000,$0000f22e,$4823ff84,$f201a800
+	dc.l	$f23c9000,$00000000,$83aeff64,$f22ef080
+	dc.l	$ff842f02,$322eff84,$24010281,$00007fff
+	dc.l	$02428000,$92808242,$3d41ff84,$241ff22e
+	dc.l	$d080ff84,$4e75f22e,$d080ff90,$f22e9000
+	dc.l	$ff5cf23c,$88000000,$0000f22e,$4823ff84
+	dc.l	$f201a800,$f23c9000,$00000000,$83aeff64
+	dc.l	$00ae0000,$1048ff64,$122eff62,$02010013
+	dc.l	$661c082e,$0003ff64,$56c1202e,$ff5c61ff
+	dc.l	$fffff5dc,$812eff64,$f210d080,$4e75222e
+	dc.l	$ff5c0201,$00c06634,$f22ef080,$ff842f02
+	dc.l	$322eff84,$34010281,$00007fff,$92800481
+	dc.l	$00006000,$02417fff,$02428000,$82423d41
+	dc.l	$ff84241f,$f22ed040,$ff8460a6,$f22ed080
+	dc.l	$ff90222e,$ff5c0201,$0030f201,$9000f22e
+	dc.l	$4823ff84,$f23c9000,$00000000,$60aaf22e
+	dc.l	$d080ff90,$f22e9000,$ff5cf23c,$88000000
+	dc.l	$0000f22e,$4823ff84,$f201a800,$f23c9000
+	dc.l	$00000000,$83aeff64,$f2000098,$f23c58b8
+	dc.l	$0002f293,$ff3c6000,$fee408ee,$0003ff66
+	dc.l	$f22ed080,$ff90f23c,$90000000,$0010f23c
+	dc.l	$88000000,$0000f22e,$4823ff84,$f201a800
+	dc.l	$f23c9000,$00000000,$83aeff64,$122eff62
+	dc.l	$0201000b,$6620f22e,$f080ff84,$41eeff84
+	dc.l	$222eff5c,$61ffffff,$f3e8812e,$ff64f22e
+	dc.l	$d080ff84,$4e75f22e,$d040ff90,$222eff5c
+	dc.l	$020100c0,$6652f22e,$9000ff5c,$f23c8800
+	dc.l	$00000000,$f22e48a3,$ff84f23c,$90000000
+	dc.l	$0000f22e,$f040ff84,$2f02322e,$ff842401
+	dc.l	$02810000,$7fff0242,$80009280,$06810000
+	dc.l	$60000241,$7fff8242,$3d41ff84,$241ff22e
+	dc.l	$d040ff84,$6000ff80,$222eff5c,$02010030
+	dc.l	$f2019000,$60a6f22e,$d080ff90,$f22e9000
+	dc.l	$ff5cf23c,$88000000,$0000f22e,$4823ff84
+	dc.l	$f201a800,$f23c9000,$00000000,$83aeff64
+	dc.l	$f2000098,$f23c58b8,$0002f292,$fde0f294
+	dc.l	$fefaf22e,$d040ff90,$222eff5c,$020100c0
+	dc.l	$00010010,$f2019000,$f23c8800,$00000000
+	dc.l	$f22e48a3,$ff84f23c,$90000000,$0000f200
+	dc.l	$0498f23c,$58b80002,$f293fda2,$6000febc
+	dc.l	$323b120a,$4efb1006,$4afc0030,$fd120072
+	dc.l	$00cc006c,$fd120066,$00000000,$00720072
+	dc.l	$0060006c,$00720066,$00000000,$009e0060
+	dc.l	$009e006c,$009e0066,$00000000,$006c006c
+	dc.l	$006c006c,$006c0066,$00000000,$fd120072
+	dc.l	$00cc006c,$fd120066,$00000000,$00660066
+	dc.l	$00660066,$00660066,$00000000,$60ffffff
+	dc.l	$ed6460ff,$ffffecda,$60ffffff,$ecd41028
+	dc.l	$00001229,$0000b101,$6a10f23c,$44008000
+	dc.l	$00001d7c,$000cff64,$4e75f23c,$44000000
+	dc.l	$00001d7c,$0004ff64,$4e75f229,$d0800000
+	dc.l	$10280000,$12290000,$b1016a10,$f2000018
+	dc.l	$f200001a,$1d7c000a,$ff644e75,$f2000018
+	dc.l	$1d7c0002,$ff644e75,$f228d080,$00001028
+	dc.l	$00001229,$0000b101,$6ae260d0,$02000030
+	dc.l	$00000040,$60080200,$00300000,$00802d40
+	dc.l	$ff5c122e,$ff4e6600,$02620200,$00c06600
+	dc.l	$007c4a28,$00006a06,$08ee0003,$ff64f228
+	dc.l	$d0800000,$4e750200,$00c06600,$006008ee
+	dc.l	$0003ff66,$4a280000,$6a0608ee,$0003ff64
+	dc.l	$f228d080,$0000082e,$0003ff62,$66024e75
+	dc.l	$3d680000,$ff842d68,$0004ff88,$2d680008
+	dc.l	$ff8c41ee,$ff8461ff,$ffffef60,$44400640
+	dc.l	$6000322e,$ff840241,$80000240,$7fff8041
+	dc.l	$3d40ff84,$f22ed040,$ff844e75,$0c000040
+	dc.l	$667e3d68,$0000ff84,$2d680004,$ff882d68
+	dc.l	$0008ff8c,$61ffffff,$eac20c80,$0000007f
+	dc.l	$6c000092,$0c80ffff,$ff816700,$01786d00
+	dc.l	$00f4f23c,$88000000,$0000f22e,$9000ff5c
+	dc.l	$f22e4800,$ff84f201,$a800f23c,$90000000
+	dc.l	$000083ae,$ff642f02,$f22ef080,$ff84322e
+	dc.l	$ff843401,$02810000,$7fff9280,$02428000
+	dc.l	$84413d42,$ff84241f,$f22ed080,$ff844e75
+	dc.l	$3d680000,$ff842d68,$0004ff88,$2d680008
+	dc.l	$ff8c61ff,$ffffea44,$0c800000,$03ff6c00
+	dc.l	$00140c80,$fffffc01,$670000fa,$6d000076
+	dc.l	$6000ff80,$08ee0003,$ff664a2e,$ff846a06
+	dc.l	$08ee0003,$ff64122e,$ff620201,$000b661a
+	dc.l	$41eeff84,$222eff5c,$61ffffff,$f084812e
+	dc.l	$ff64f22e,$d080ff84,$4e752d6e,$ff88ff94
+	dc.l	$2d6eff8c,$ff98322e,$ff842f02,$34010281
+	dc.l	$00007fff,$92800242,$80000681,$00006000
+	dc.l	$02417fff,$84413d42,$ff90f22e,$d040ff90
+	dc.l	$241f60ac,$f23c8800,$00000000,$f22e9000
+	dc.l	$ff5cf22e,$4800ff84,$f23c9000,$00000000
+	dc.l	$f201a800,$83aeff64,$00ae0000,$1048ff64
+	dc.l	$122eff62,$02010013,$661c082e,$0003ff64
+	dc.l	$56c1202e,$ff5c61ff,$fffff0f4,$812eff64
+	dc.l	$f210d080,$4e752f02,$322eff84,$24010281
+	dc.l	$00007fff,$02428000,$92800481,$00006000
+	dc.l	$02417fff,$82423d41,$ff84241f,$f22ed040
+	dc.l	$ff8460b6,$f23c8800,$00000000,$f22e9000
+	dc.l	$ff5cf22e,$4800ff84,$f201a800,$f23c9000
+	dc.l	$00000000,$83aeff64,$f2000098,$f23c58b8
+	dc.l	$0002f293,$ff746000,$fe7e0c01,$00046700
+	dc.l	$fdb60c01,$000567ff,$ffffe9ee,$0c010003
+	dc.l	$67ffffff,$e9f8f228,$48000000,$f200a800
+	dc.l	$e1981d40,$ff644e75,$51fc51fc,$51fc51fc
+	dc.l	$00003fff,$0000007e,$000003fe,$ffffc001
+	dc.l	$ffffff81,$fffffc01,$02000030,$00000040
+	dc.l	$60080200,$00300000,$00802d40,$ff5c4241
+	dc.l	$122eff4f,$e709822e,$ff4e6600,$02d63d69
+	dc.l	$0000ff90,$2d690004,$ff942d69,$0008ff98
+	dc.l	$3d680000,$ff842d68,$0004ff88,$2d680008
+	dc.l	$ff8c61ff,$ffffe864,$2f0061ff,$ffffe908
+	dc.l	$4497d197,$322eff5e,$ec09201f,$b0bb148e
+	dc.l	$6f000074,$b0bb1520,$ff7a6700,$020c6e00
+	dc.l	$013cf22e,$d080ff90,$f22e9000,$ff5cf23c
+	dc.l	$88000000,$0000f22e,$4820ff84,$f201a800
+	dc.l	$f23c9000,$00000000,$83aeff64,$f22ef080
+	dc.l	$ff842f02,$322eff84,$24010281,$00007fff
+	dc.l	$02428000,$92808242,$3d41ff84,$241ff22e
+	dc.l	$d080ff84,$4e750000,$7fff0000,$407f0000
+	dc.l	$43ff201f,$60c62f00,$f22ed080,$ff90f22e
+	dc.l	$9000ff5c,$f23c8800,$00000000,$f22e4820
+	dc.l	$ff84f200,$a800f23c,$90000000,$000081ae
+	dc.l	$ff64f227,$e0013017,$dffc0000,$000c0280
+	dc.l	$00007fff,$9097b0bb,$14ae6db6,$201f00ae
+	dc.l	$00001048,$ff64122e,$ff620201,$0013661c
+	dc.l	$082e0003,$ff6456c1,$202eff5c,$61ffffff
+	dc.l	$eeee812e,$ff64f210,$d0804e75,$222eff5c
+	dc.l	$020100c0,$6634f22e,$f080ff84,$2f02322e
+	dc.l	$ff843401,$02810000,$7fff9280,$04810000
+	dc.l	$60000241,$7fff0242,$80008242,$3d41ff84
+	dc.l	$241ff22e,$d040ff84,$60a6f22e,$d080ff90
+	dc.l	$222eff5c,$02010030,$f2019000,$f22e4820
+	dc.l	$ff84f23c,$90000000,$000060aa,$08ee0003
+	dc.l	$ff66f22e,$d080ff90,$f23c9000,$00000010
+	dc.l	$f23c8800,$00000000,$f22e4820,$ff84f201
+	dc.l	$a800f23c,$90000000,$000083ae,$ff64122e
+	dc.l	$ff620201,$000b6620,$f22ef080,$ff8441ee
+	dc.l	$ff84222e,$ff5c61ff,$ffffed36,$812eff64
+	dc.l	$f22ed080,$ff844e75,$f22ed040,$ff90222e
+	dc.l	$ff5c0201,$00c06652,$f22e9000,$ff5cf23c
+	dc.l	$88000000,$0000f22e,$48a0ff84,$f23c9000
+	dc.l	$00000000,$f22ef040,$ff842f02,$322eff84
+	dc.l	$24010281,$00007fff,$02428000,$92800681
+	dc.l	$00006000,$02417fff,$82423d41,$ff84241f
+	dc.l	$f22ed040,$ff846000,$ff80222e,$ff5c0201
+	dc.l	$0030f201,$900060a6,$f22ed080,$ff90f22e
+	dc.l	$9000ff5c,$f23c8800,$00000000,$f22e4820
+	dc.l	$ff84f201,$a800f23c,$90000000,$000083ae
+	dc.l	$ff64f200,$0098f23c,$58b80001,$f292fdee
+	dc.l	$f294fefa,$f22ed040,$ff90222e,$ff5c0201
+	dc.l	$00c00001,$0010f201,$9000f23c,$88000000
+	dc.l	$0000f22e,$48a0ff84,$f23c9000,$00000000
+	dc.l	$f2000498,$f23c58b8,$0001f293,$fdb06000
+	dc.l	$febc323b,$120a4efb,$10064afc,$0030fd20
+	dc.l	$009e0072,$0060fd20,$00660000,$00000072
+	dc.l	$006c0072,$00600072,$00660000,$000000d0
+	dc.l	$00d0006c,$006000d0,$00660000,$00000060
+	dc.l	$00600060,$00600060,$00660000,$0000fd20
+	dc.l	$009e0072,$0060fd20,$00660000,$00000066
+	dc.l	$00660066,$00660066,$00660000,$000060ff
+	dc.l	$ffffe62e,$60ffffff,$e62860ff,$ffffe6a6
+	dc.l	$10280000,$12290000,$b1016a10,$f23c4400
+	dc.l	$80000000,$1d7c000c,$ff644e75,$f23c4400
+	dc.l	$00000000,$1d7c0004,$ff644e75,$006e0410
+	dc.l	$ff661028,$00001229,$0000b101,$6a10f23c
+	dc.l	$4400ff80,$00001d7c,$000aff64,$4e75f23c
+	dc.l	$44007f80,$00001d7c,$0002ff64,$4e751029
+	dc.l	$00001228,$0000b101,$6a16f229,$d0800000
+	dc.l	$f2000018,$f200001a,$1d7c000a,$ff644e75
+	dc.l	$f229d080,$0000f200,$00181d7c,$0002ff64
+	dc.l	$4e750200,$00300000,$00406008,$02000030
+	dc.l	$00000080,$2d40ff5c,$122eff4e,$66000276
+	dc.l	$020000c0,$66000090,$2d680004,$ff882d68
+	dc.l	$0008ff8c,$30280000,$0a408000,$6a061d7c
+	dc.l	$0008ff64,$3d40ff84,$f22ed080,$ff844e75
+	dc.l	$020000c0,$666008ee,$0003ff66,$2d680004
+	dc.l	$ff882d68,$0008ff8c,$30280000,$0a408000
+	dc.l	$6a061d7c,$0008ff64,$3d40ff84,$f22ed080
+	dc.l	$ff84082e,$0003ff62,$66024e75,$41eeff84
+	dc.l	$61ffffff,$e8764440,$06406000,$322eff84
+	dc.l	$02418000,$02407fff,$80413d40,$ff84f22e
+	dc.l	$d040ff84,$4e750c00,$0040667e,$3d680000
+	dc.l	$ff842d68,$0004ff88,$2d680008,$ff8c61ff
+	dc.l	$ffffe3d8,$0c800000,$007f6c00,$00900c80
+	dc.l	$ffffff81,$67000178,$6d0000f4,$f23c8800
+	dc.l	$00000000,$f22e9000,$ff5cf22e,$481aff84
+	dc.l	$f201a800,$f23c9000,$00000000,$83aeff64
+	dc.l	$2f02f22e,$f080ff84,$322eff84,$34010281
+	dc.l	$00007fff,$92800242,$80008441,$3d42ff84
+	dc.l	$241ff22e,$d080ff84,$4e753d68,$0000ff84
+	dc.l	$2d680004,$ff882d68,$0008ff8c,$61ffffff
+	dc.l	$e35a0c80,$000003ff,$6c120c80,$fffffc01
+	dc.l	$670000fc,$6d000078,$6000ff82,$08ee0003
+	dc.l	$ff660a2e,$0080ff84,$6a0608ee,$0003ff64
+	dc.l	$122eff62,$0201000b,$661a41ee,$ff84222e
+	dc.l	$ff5c61ff,$ffffe99a,$812eff64,$f22ed080
+	dc.l	$ff844e75,$2d6eff88,$ff942d6e,$ff8cff98
+	dc.l	$322eff84,$2f022401,$02810000,$7fff0242
+	dc.l	$80009280,$06810000,$60000241,$7fff8242
+	dc.l	$3d41ff90,$f22ed040,$ff90241f,$60acf23c
+	dc.l	$88000000,$0000f22e,$9000ff5c,$f22e481a
+	dc.l	$ff84f23c,$90000000,$0000f201,$a80083ae
+	dc.l	$ff6400ae,$00001048,$ff64122e,$ff620201
+	dc.l	$0013661c,$082e0003,$ff6456c1,$202eff5c
+	dc.l	$61ffffff,$ea0a812e,$ff64f210,$d0804e75
+	dc.l	$2f02322e,$ff842401,$02810000,$7fff0242
+	dc.l	$80009280,$04810000,$60000241,$7fff8242
+	dc.l	$3d41ff84,$f22ed040,$ff84241f,$60b6f23c
+	dc.l	$88000000,$0000f22e,$9000ff5c,$f22e481a
+	dc.l	$ff84f201,$a800f23c,$90000000,$000083ae
+	dc.l	$ff64f200,$0098f23c,$58b80002,$f293ff74
+	dc.l	$6000fe7e,$0c010004,$6700fdb6,$0c010005
+	dc.l	$67ffffff,$e3040c01,$000367ff,$ffffe30e
+	dc.l	$f228481a,$0000f200,$a800e198,$1d40ff64
+	dc.l	$4e75122e,$ff4e6610,$4a280000,$6b024e75
+	dc.l	$1d7c0008,$ff644e75,$0c010001,$67400c01
+	dc.l	$00026724,$0c010005,$67ffffff,$e2bc0c01
+	dc.l	$000367ff,$ffffe2c6,$4a280000,$6b024e75
+	dc.l	$1d7c0008,$ff644e75,$4a280000,$6b081d7c
+	dc.l	$0002ff64,$4e751d7c,$000aff64,$4e754a28
+	dc.l	$00006b08,$1d7c0004,$ff644e75,$1d7c000c
+	dc.l	$ff644e75,$122eff4e,$66280200,$0030f200
+	dc.l	$9000f23c,$88000000,$0000f228,$48010000
+	dc.l	$f23c9000,$00000000,$f200a800,$81aeff64
+	dc.l	$4e750c01,$0001672e,$0c010002,$674e0c01
+	dc.l	$00046710,$0c010005,$67ffffff,$e22c60ff
+	dc.l	$ffffe23a,$3d680000,$ff841d7c,$0080ff88
+	dc.l	$41eeff84,$60a44a28,$00006b10,$f23c4400
+	dc.l	$00000000,$1d7c0004,$ff644e75,$f23c4400
+	dc.l	$80000000,$1d7c000c,$ff644e75,$f228d080
+	dc.l	$00004a28,$00006b08,$1d7c0002,$ff644e75
+	dc.l	$1d7c000a,$ff644e75,$122eff4e,$6618f23c
+	dc.l	$88000000,$0000f228,$48030000,$f200a800
+	dc.l	$81aeff64,$4e750c01,$0001672e,$0c010002
+	dc.l	$674e0c01,$00046710,$0c010005,$67ffffff
+	dc.l	$e19860ff,$ffffe1a6,$3d680000,$ff841d7c
+	dc.l	$0080ff88,$41eeff84,$60b44a28,$00006b10
+	dc.l	$f23c4400,$00000000,$1d7c0004,$ff644e75
+	dc.l	$f23c4400,$80000000,$1d7c000c,$ff644e75
+	dc.l	$f228d080,$00004a28,$00006b08,$1d7c0002
+	dc.l	$ff644e75,$1d7c000a,$ff644e75,$02000030
+	dc.l	$00000040,$60080200,$00300000,$00802d40
+	dc.l	$ff5c122e,$ff4e6600,$025c0200,$00c0667e
+	dc.l	$2d680004,$ff882d68,$0008ff8c,$32280000
+	dc.l	$0881000f,$3d41ff84,$f22ed080,$ff844e75
+	dc.l	$020000c0,$665808ee,$0003ff66,$2d680004
+	dc.l	$ff882d68,$0008ff8c,$30280000,$0880000f
+	dc.l	$3d40ff84,$f22ed080,$ff84082e,$0003ff62
+	dc.l	$66024e75,$41eeff84,$61ffffff,$e41e4440
+	dc.l	$06406000,$322eff84,$02418000,$02407fff
+	dc.l	$80413d40,$ff84f22e,$d040ff84,$4e750c00
+	dc.l	$0040667e,$3d680000,$ff842d68,$0004ff88
+	dc.l	$2d680008,$ff8c61ff,$ffffdf80,$0c800000
+	dc.l	$007f6c00,$00900c80,$ffffff81,$67000170
+	dc.l	$6d0000ec,$f23c8800,$00000000,$f22e9000
+	dc.l	$ff5cf22e,$4818ff84,$f201a800,$f23c9000
+	dc.l	$00000000,$83aeff64,$2f02f22e,$f080ff84
+	dc.l	$322eff84,$24010281,$00007fff,$92800242
+	dc.l	$80008441,$3d42ff84,$241ff22e,$d080ff84
+	dc.l	$4e753d68,$0000ff84,$2d680004,$ff882d68
+	dc.l	$0008ff8c,$61ffffff,$df020c80,$000003ff
+	dc.l	$6c120c80,$fffffc01,$670000f4,$6d000070
+	dc.l	$6000ff82,$08ee0003,$ff6608ae,$0007ff84
+	dc.l	$122eff62,$0201000b,$661a41ee,$ff84222e
+	dc.l	$ff5c61ff,$ffffe54a,$812eff64,$f22ed080
+	dc.l	$ff844e75,$2d6eff88,$ff942d6e,$ff8cff98
+	dc.l	$322eff84,$2f022401,$02810000,$7fff0242
+	dc.l	$80009280,$06810000,$60000241,$7fff8242
+	dc.l	$3d41ff90,$f22ed040,$ff90241f,$60acf23c
+	dc.l	$88000000,$0000f22e,$9000ff5c,$f22e4818
+	dc.l	$ff84f23c,$90000000,$0000f201,$a80083ae
+	dc.l	$ff6400ae,$00001048,$ff64122e,$ff620201
+	dc.l	$0013661c,$082e0003,$ff6456c1,$202eff5c
+	dc.l	$61ffffff,$e5ba812e,$ff64f210,$d0804e75
+	dc.l	$2f02322e,$ff842401,$02810000,$7fff0242
+	dc.l	$80009280,$04810000,$60000241,$7fff8242
+	dc.l	$3d41ff84,$f22ed040,$ff84241f,$60b6f23c
+	dc.l	$88000000,$0000f22e,$9000ff5c,$f22e4818
+	dc.l	$ff84f201,$a800f23c,$90000000,$000083ae
+	dc.l	$ff64f200,$0098f23c,$58b80002,$f293ff74
+	dc.l	$6000fe86,$0c010004,$6700fdc6,$0c010005
+	dc.l	$67ffffff,$deb40c01,$000367ff,$ffffdebe
+	dc.l	$f2284818,$00000c01,$00026708,$1d7c0004
+	dc.l	$ff644e75,$1d7c0002,$ff644e75,$4241122e
+	dc.l	$ff4fe709,$822eff4e,$6618f229,$d0800000
+	dc.l	$f2284838,$0000f200,$a800e198,$1d40ff64
+	dc.l	$4e75323b,$120a4efb,$10064afc,$0030ffdc
+	dc.l	$ffdcffdc,$006000f8,$006e0000,$0000ffdc
+	dc.l	$ffdcffdc,$0060007c,$006e0000,$0000ffdc
+	dc.l	$ffdcffdc,$0060007c,$006e0000,$00000060
+	dc.l	$00600060,$00600060,$006e0000,$00000114
+	dc.l	$009c009c,$006000bc,$006e0000,$0000006e
+	dc.l	$006e006e,$006e006e,$006e0000,$000061ff
+	dc.l	$ffffddde,$022e00f7,$ff644e75,$61ffffff
+	dc.l	$ddd0022e,$00f7ff64,$4e753d68,$0000ff84
+	dc.l	$20280004,$08c0001f,$2d40ff88,$2d680008
+	dc.l	$ff8c41ee,$ff846000,$ff422d69,$0000ff84
+	dc.l	$20290004,$08c0001f,$2d40ff88,$2d690008
+	dc.l	$ff8c43ee,$ff846000,$ff223d69,$0000ff90
+	dc.l	$3d680000,$ff842029,$000408c0,$001f2d40
+	dc.l	$ff942028,$000408c0,$001f2d40,$ff882d69
+	dc.l	$0008ff98,$2d680008,$ff8c43ee,$ff9041ee
+	dc.l	$ff846000,$fee61028,$00001229,$0000b101
+	dc.l	$6b00ff78,$4a006b02,$4e751d7c,$0008ff64
+	dc.l	$4e751028,$00001229,$0000b101,$6b00ff7c
+	dc.l	$4a006a02,$4e751d7c,$0008ff64,$4e752d40
+	dc.l	$ff5c4241,$122eff4f,$e709822e,$ff4e6600
+	dc.l	$02a03d69,$0000ff90,$2d690004,$ff942d69
+	dc.l	$0008ff98,$3d680000,$ff842d68,$0004ff88
+	dc.l	$2d680008,$ff8c61ff,$ffffdbf0,$2f0061ff
+	dc.l	$ffffdc94,$d09f0c80,$ffffc001,$670000f8
+	dc.l	$6d000064,$0c800000,$40006700,$01da6e00
+	dc.l	$0122f22e,$d080ff90,$f22e9000,$ff5cf23c
+	dc.l	$88000000,$0000f22e,$4827ff84,$f201a800
+	dc.l	$f23c9000,$00000000,$83aeff64,$f22ef080
+	dc.l	$ff842f02,$322eff84,$24010281,$00007fff
+	dc.l	$02428000,$92808242,$3d41ff84,$241ff22e
+	dc.l	$d080ff84,$4e75f22e,$d080ff90,$f22e9000
+	dc.l	$ff5cf23c,$88000000,$0000f22e,$4827ff84
+	dc.l	$f201a800,$f23c9000,$00000000,$83aeff64
+	dc.l	$00ae0000,$1048ff64,$122eff62,$02010013
+	dc.l	$6620082e,$0003ff64,$56c1202e,$ff5c0200
+	dc.l	$003061ff,$ffffe2a8,$812eff64,$f210d080
+	dc.l	$4e75f22e,$f080ff84,$2f02322e,$ff842401
+	dc.l	$02810000,$7fff9280,$04810000,$60000241
+	dc.l	$7fff0242,$80008242,$3d41ff84,$241ff22e
+	dc.l	$d040ff84,$60acf22e,$d080ff90,$f22e9000
+	dc.l	$ff5cf23c,$88000000,$0000f22e,$4827ff84
+	dc.l	$f201a800,$f23c9000,$00000000,$83aeff64
+	dc.l	$f2000098,$f23c58b8,$0002f293,$ff646000
+	dc.l	$ff0c08ee,$0003ff66,$f22ed080,$ff90f23c
+	dc.l	$90000000,$0010f23c,$88000000,$0000f22e
+	dc.l	$4827ff84,$f201a800,$f23c9000,$00000000
+	dc.l	$83aeff64,$122eff62,$0201000b,$6620f22e
+	dc.l	$f080ff84,$41eeff84,$222eff5c,$61ffffff
+	dc.l	$e166812e,$ff64f22e,$d080ff84,$4e75f22e
+	dc.l	$d040ff90,$f22e9000,$ff5cf23c,$88000000
+	dc.l	$0000f22e,$48a7ff84,$f23c9000,$00000000
+	dc.l	$f22ef040,$ff842f02,$322eff84,$24010281
+	dc.l	$00007fff,$02428000,$92800681,$00006000
+	dc.l	$02417fff,$82423d41,$ff84241f,$f22ed040
+	dc.l	$ff846000,$ff8af22e,$d080ff90,$f22e9000
+	dc.l	$ff5cf23c,$88000000,$0000f22e,$4827ff84
+	dc.l	$f201a800,$f23c9000,$00000000,$83aeff64
+	dc.l	$f2000098,$f23c58b8,$0002f292,$fe20f294
+	dc.l	$ff12f22e,$d040ff90,$222eff5c,$020100c0
+	dc.l	$00010010,$f2019000,$f23c8800,$00000000
+	dc.l	$f22e48a7,$ff84f23c,$90000000,$0000f200
+	dc.l	$0498f23c,$58b80002,$f293fde2,$6000fed4
+	dc.l	$323b120a,$4efb1006,$4afc0030,$fd560072
+	dc.l	$0078006c,$fd560066,$00000000,$00720072
+	dc.l	$0060006c,$00720066,$00000000,$007e0060
+	dc.l	$007e006c,$007e0066,$00000000,$006c006c
+	dc.l	$006c006c,$006c0066,$00000000,$fd560072
+	dc.l	$0078006c,$fd560066,$00000000,$00660066
+	dc.l	$00660066,$00660066,$00000000,$60ffffff
+	dc.l	$da7460ff,$ffffd9ea,$60ffffff,$d9e460ff
+	dc.l	$ffffed0e,$60ffffff,$ed6260ff,$ffffed2e
+	dc.l	$2d40ff5c,$4241122e,$ff4fe709,$822eff4e
+	dc.l	$6600027c,$3d690000,$ff902d69,$0004ff94
+	dc.l	$2d690008,$ff983d68,$0000ff84,$2d680004
+	dc.l	$ff882d68,$0008ff8c,$61ffffff,$d8ae2f00
+	dc.l	$61ffffff,$d9524497,$d197322e,$ff5eec09
+	dc.l	$201f0c80,$ffffc001,$6f000064,$0c800000
+	dc.l	$3fff6700,$01b66e00,$0100f22e,$d080ff90
+	dc.l	$f22e9000,$ff5cf23c,$88000000,$0000f22e
+	dc.l	$4824ff84,$f201a800,$f23c9000,$00000000
+	dc.l	$83aeff64,$f22ef080,$ff842f02,$322eff84
+	dc.l	$24010281,$00007fff,$02428000,$92808242
+	dc.l	$3d41ff84,$241ff22e,$d080ff84,$4e75f22e
+	dc.l	$d080ff90,$f22e9000,$ff5cf23c,$88000000
+	dc.l	$0000f22e,$4824ff84,$f201a800,$f23c9000
+	dc.l	$00000000,$83aeff64,$f227e001,$3217dffc
+	dc.l	$0000000c,$02810000,$7fff9280,$0c810000
+	dc.l	$7fff6d90,$006e1048,$ff66122e,$ff620201
+	dc.l	$00136620,$082e0003,$ff6456c1,$202eff5c
+	dc.l	$02000030,$61ffffff,$df46812e,$ff64f210
+	dc.l	$d0804e75,$f22ef080,$ff842f02,$322eff84
+	dc.l	$24010281,$00007fff,$02428000,$92800481
+	dc.l	$00006000,$02417fff,$82423d41,$ff84241f
+	dc.l	$f22ed040,$ff8460ac,$08ee0003,$ff66f22e
+	dc.l	$d080ff90,$f23c9000,$00000010,$f23c8800
+	dc.l	$00000000,$f22e4824,$ff84f201,$a800f23c
+	dc.l	$90000000,$000083ae,$ff64122e,$ff620201
+	dc.l	$000b6620,$f22ef080,$ff8441ee,$ff84222e
+	dc.l	$ff5c61ff,$ffffde40,$812eff64,$f22ed080
+	dc.l	$ff844e75,$f22ed040,$ff90f22e,$9000ff5c
+	dc.l	$f23c8800,$00000000,$f22e48a4,$ff84f23c
+	dc.l	$90000000,$0000f22e,$f040ff84,$2f02322e
+	dc.l	$ff842401,$02810000,$7fff0242,$80009280
+	dc.l	$06810000,$60000241,$7fff8242,$3d41ff84
+	dc.l	$241ff22e,$d040ff84,$608af22e,$d080ff90
+	dc.l	$f22e9000,$ff5cf23c,$88000000,$0000f22e
+	dc.l	$4824ff84,$f201a800,$f23c9000,$00000000
+	dc.l	$83aeff64,$f2000098,$f23c58b8,$0001f292
+	dc.l	$fe44f294,$ff14f22e,$d040ff90,$42810001
+	dc.l	$0010f201,$9000f23c,$88000000,$0000f22e
+	dc.l	$48a4ff84,$f23c9000,$00000000,$f2000498
+	dc.l	$f23c58b8,$0001f293,$fe0c6000,$fedc323b
+	dc.l	$120a4efb,$10064afc,$0030fd7a,$00720078
+	dc.l	$0060fd7a,$00660000,$00000078,$006c0078
+	dc.l	$00600078,$00660000,$0000007e,$007e006c
+	dc.l	$0060007e,$00660000,$00000060,$00600060
+	dc.l	$00600060,$00660000,$0000fd7a,$00720078
+	dc.l	$0060fd7a,$00660000,$00000066,$00660066
+	dc.l	$00660066,$00660000,$000060ff,$ffffd6d2
+	dc.l	$60ffffff,$d6cc60ff,$ffffd74a,$60ffffff
+	dc.l	$f0ce60ff,$fffff09c,$60ffffff,$f0f40200
+	dc.l	$00300000,$00406008,$02000030,$00000080
+	dc.l	$2d40ff5c,$4241122e,$ff4fe709,$822eff4e
+	dc.l	$6600024c,$61ffffff,$d4b2f22e,$d080ff90
+	dc.l	$f23c8800,$00000000,$f22e9000,$ff5cf22e
+	dc.l	$4822ff84,$f23c9000,$00000000,$f201a800
+	dc.l	$83aeff64,$f281003c,$2f02f227,$e001322e
+	dc.l	$ff5eec09,$34170282,$00007fff,$9480b4bb
+	dc.l	$14246c38,$b4bb142a,$6d0000b8,$67000184
+	dc.l	$32170241,$80008242,$3e81f21f,$d080241f
+	dc.l	$4e754e75,$00007fff,$0000407f,$000043ff
+	dc.l	$00000000,$00003f81,$00003c01,$00ae0000
+	dc.l	$1048ff64,$122eff62,$02010013,$6624dffc
+	dc.l	$0000000c,$082e0003,$ff6456c1,$202eff5c
+	dc.l	$61ffffff,$dc7a812e,$ff64f210,$d080241f
+	dc.l	$4e75122e,$ff5c0201,$00c0661a,$32170241
+	dc.l	$80000482,$00006000,$02427fff,$82423e81
+	dc.l	$f21fd040,$60bef22e,$d080ff90,$222eff5c
+	dc.l	$02010030,$f2019000,$f22e4822,$ff84f23c
+	dc.l	$90000000,$0000dffc,$0000000c,$f227e001
+	dc.l	$60ba08ee,$0003ff66,$dffc0000,$000cf22e
+	dc.l	$d080ff90,$f23c9000,$00000010,$f23c8800
+	dc.l	$00000000,$f22e4822,$ff84f23c,$90000000
+	dc.l	$0000f201,$a80083ae,$ff64122e,$ff620201
+	dc.l	$000b6622,$f22ef080,$ff8441ee,$ff84222e
+	dc.l	$ff5c61ff,$ffffdaca,$812eff64,$f22ed080
+	dc.l	$ff84241f,$4e75f22e,$d040ff90,$222eff5c
+	dc.l	$020100c0,$664ef22e,$9000ff5c,$f23c8800
+	dc.l	$00000000,$f22e48a2,$ff84f23c,$90000000
+	dc.l	$0000f22e,$f040ff84,$322eff84,$24010281
+	dc.l	$00007fff,$02428000,$92800681,$00006000
+	dc.l	$02417fff,$82423d41,$ff84f22e,$d040ff84
+	dc.l	$6000ff82,$222eff5c,$02010030,$f2019000
+	dc.l	$60aa222e,$ff5c0201,$00c06700,$fe74222f
+	dc.l	$00040c81,$80000000,$6600fe66,$4aaf0008
+	dc.l	$6600fe5e,$082e0001,$ff666700,$fe54f22e
+	dc.l	$d040ff90,$222eff5c,$020100c0,$00010010
+	dc.l	$f2019000,$f23c8800,$00000000,$f22e48a2
+	dc.l	$ff84f23c,$90000000,$0000f200,$0018f200
+	dc.l	$0498f200,$0438f292,$feca6000,$fe14323b
+	dc.l	$120a4efb,$10064afc,$0030fdaa,$00e4011c
+	dc.l	$0060fdaa,$00660000,$000000bc,$006c011c
+	dc.l	$006000bc,$00660000,$00000130,$0130010c
+	dc.l	$00600130,$00660000,$00000060,$00600060
+	dc.l	$00600060,$00660000,$0000fdaa,$00e4011c
+	dc.l	$0060fdaa,$00660000,$00000066,$00660066
+	dc.l	$00660066,$00660000,$000060ff,$ffffd3d2
+	dc.l	$60ffffff,$d3cc1028,$00001229,$0000b101
+	dc.l	$6b000016,$4a006b2e,$f23c4400,$00000000
+	dc.l	$1d7c0004,$ff644e75,$122eff5f,$02010030
+	dc.l	$0c010020,$6710f23c,$44000000,$00001d7c
+	dc.l	$0004ff64,$4e75f23c,$44008000,$00001d7c
+	dc.l	$000cff64,$4e753d68,$0000ff84,$2d680004
+	dc.l	$ff882d68,$0008ff8c,$61ffffff,$d27e426e
+	dc.l	$ff9042ae,$ff9442ae,$ff986000,$fcce3d69
+	dc.l	$0000ff90,$2d690004,$ff942d69,$0008ff98
+	dc.l	$61ffffff,$d302426e,$ff8442ae,$ff8842ae
+	dc.l	$ff8c6000,$fca61028,$00001229,$0000b300
+	dc.l	$6bffffff,$d3a0f228,$d0800000,$4a280000
+	dc.l	$6a1c1d7c,$000aff64,$4e75f229,$d0800000
+	dc.l	$4a290000,$6a081d7c,$000aff64,$4e751d7c
+	dc.l	$0002ff64,$4e750200,$00300000,$00406008
+	dc.l	$02000030,$00000080,$2d40ff5c,$4241122e
+	dc.l	$ff4fe709,$822eff4e,$6600024c,$61ffffff
+	dc.l	$d0eaf22e,$d080ff90,$f23c8800,$00000000
+	dc.l	$f22e9000,$ff5cf22e,$4828ff84,$f23c9000
+	dc.l	$00000000,$f201a800,$83aeff64,$f281003c
+	dc.l	$2f02f227,$e001322e,$ff5eec09,$34170282
+	dc.l	$00007fff,$9480b4bb,$14246c38,$b4bb142a
+	dc.l	$6d0000b8,$67000184,$32170241,$80008242
+	dc.l	$3e81f21f,$d080241f,$4e754e75,$00007fff
+	dc.l	$0000407f,$000043ff,$00000000,$00003f81
+	dc.l	$00003c01,$00ae0000,$1048ff64,$122eff62
+	dc.l	$02010013,$6624dffc,$0000000c,$082e0003
+	dc.l	$ff6456c1,$202eff5c,$61ffffff,$d8b2812e
+	dc.l	$ff64f210,$d080241f,$4e75122e,$ff5c0201
+	dc.l	$00c0661a,$32170241,$80000482,$00006000
+	dc.l	$02427fff,$82423e81,$f21fd040,$60bef22e
+	dc.l	$d080ff90,$222eff5c,$02010030,$f2019000
+	dc.l	$f22e4828,$ff84f23c,$90000000,$0000dffc
+	dc.l	$0000000c,$f227e001,$60ba08ee,$0003ff66
+	dc.l	$dffc0000,$000cf22e,$d080ff90,$f23c9000
+	dc.l	$00000010,$f23c8800,$00000000,$f22e4828
+	dc.l	$ff84f23c,$90000000,$0000f201,$a80083ae
+	dc.l	$ff64122e,$ff620201,$000b6622,$f22ef080
+	dc.l	$ff8441ee,$ff84222e,$ff5c61ff,$ffffd702
+	dc.l	$812eff64,$f22ed080,$ff84241f,$4e75f22e
+	dc.l	$d040ff90,$222eff5c,$020100c0,$664ef22e
+	dc.l	$9000ff5c,$f23c8800,$00000000,$f22e48a8
+	dc.l	$ff84f23c,$90000000,$0000f22e,$f040ff84
+	dc.l	$322eff84,$24010281,$00007fff,$02428000
+	dc.l	$92800681,$00006000,$02417fff,$82423d41
+	dc.l	$ff84f22e,$d040ff84,$6000ff82,$222eff5c
+	dc.l	$02010030,$f2019000,$60aa222e,$ff5c0201
+	dc.l	$00c06700,$fe74222f,$00040c81,$80000000
+	dc.l	$6600fe66,$4aaf0008,$6600fe5e,$082e0001
+	dc.l	$ff666700,$fe54f22e,$d040ff90,$222eff5c
+	dc.l	$020100c0,$00010010,$f2019000,$f23c8800
+	dc.l	$00000000,$f22e48a8,$ff84f23c,$90000000
+	dc.l	$0000f200,$0018f200,$0498f200,$0438f292
+	dc.l	$feca6000,$fe14323b,$120a4efb,$10064afc
+	dc.l	$0030fdaa,$00e2011a,$0060fdaa,$00660000
+	dc.l	$000000ba,$006c011a,$006000ba,$00660000
+	dc.l	$00000130,$0130010a,$00600130,$00660000
+	dc.l	$00000060,$00600060,$00600060,$00660000
+	dc.l	$0000fdaa,$00e2011a,$0060fdaa,$00660000
+	dc.l	$00000066,$00660066,$00660066,$00660000
+	dc.l	$000060ff,$ffffd00a,$60ffffff,$d0041028
+	dc.l	$00001229,$0000b300,$6a144a00,$6b2ef23c
+	dc.l	$44000000,$00001d7c,$0004ff64,$4e75122e
+	dc.l	$ff5f0201,$00300c01,$00206710,$f23c4400
+	dc.l	$00000000,$1d7c0004,$ff644e75,$f23c4400
+	dc.l	$80000000,$1d7c000c,$ff644e75,$3d680000
+	dc.l	$ff842d68,$0004ff88,$2d680008,$ff8c61ff
+	dc.l	$ffffceb8,$426eff90,$42aeff94,$42aeff98
+	dc.l	$6000fcd0,$3d690000,$ff902d69,$0004ff94
+	dc.l	$2d690008,$ff9861ff,$ffffcf3c,$426eff84
+	dc.l	$42aeff88,$42aeff8c,$6000fca8,$10280000
+	dc.l	$12290000,$b3006aff,$ffffcfda,$f228d080
+	dc.l	$0000f200,$001af293,$001e1d7c,$000aff64
+	dc.l	$4e75f229,$d0800000,$4a290000,$6a081d7c
+	dc.l	$000aff64,$4e751d7c,$0002ff64,$4e750200
+	dc.l	$00300000,$00406008,$02000030,$00000080
+	dc.l	$2d40ff5c,$4241122e,$ff4e6600,$02744a28
+	dc.l	$00006bff,$ffffcf7e,$020000c0,$6648f22e
+	dc.l	$9000ff5c,$f23c8800,$00000000,$f2104804
+	dc.l	$f201a800,$83aeff64,$4e754a28,$00006bff
+	dc.l	$ffffcf52,$020000c0,$661c3d68,$0000ff84
+	dc.l	$2d680004,$ff882d68,$0008ff8c,$61ffffff
+	dc.l	$ce046000,$003e0c00,$00406600,$00843d68
+	dc.l	$0000ff84,$2d680004,$ff882d68,$0008ff8c
+	dc.l	$61ffffff,$cde00c80,$0000007e,$67000098
+	dc.l	$6e00009e,$0c80ffff,$ff806700,$01a46d00
+	dc.l	$0120f23c,$88000000,$0000f22e,$9000ff5c
+	dc.l	$f22e4804,$ff84f201,$a800f23c,$90000000
+	dc.l	$000083ae,$ff642f02,$f22ef080,$ff84322e
+	dc.l	$ff842401,$02810000,$7fff9280,$02428000
+	dc.l	$84413d42,$ff84241f,$f22ed080,$ff844e75
+	dc.l	$3d680000,$ff842d68,$0004ff88,$2d680008
+	dc.l	$ff8c61ff,$ffffcd5e,$0c800000,$03fe6700
+	dc.l	$00166e1c,$0c80ffff,$fc006700,$01246d00
+	dc.l	$00a06000,$ff7e082e,$0000ff85,$6600ff74
+	dc.l	$08ee0003,$ff66f23c,$90000000,$0010f23c
+	dc.l	$88000000,$0000f22e,$4804ff84,$f201a800
+	dc.l	$f23c9000,$00000000,$83aeff64,$122eff62
+	dc.l	$0201000b,$6620f22e,$f080ff84,$41eeff84
+	dc.l	$222eff5c,$61ffffff,$d338812e,$ff64f22e
+	dc.l	$d080ff84,$4e752d6e,$ff88ff94,$2d6eff8c
+	dc.l	$ff98322e,$ff842f02,$24010281,$00007fff
+	dc.l	$02428000,$92800681,$00006000,$02417fff
+	dc.l	$82423d41,$ff90f22e,$d040ff90,$241f60a6
+	dc.l	$f23c8800,$00000000,$f22e9000,$ff5cf22e
+	dc.l	$4804ff84,$f23c9000,$00000000,$f201a800
+	dc.l	$83aeff64,$00ae0000,$1048ff64,$122eff62
+	dc.l	$02010013,$661c082e,$0003ff64,$56c1202e
+	dc.l	$ff5c61ff,$ffffd3a8,$812eff64,$f210d080
+	dc.l	$4e752f02,$322eff84,$24010281,$00007fff
+	dc.l	$02428000,$92800481,$00006000,$02417fff
+	dc.l	$82423d41,$ff84f22e,$d040ff84,$241f60b6
+	dc.l	$082e0000,$ff856600,$ff78f23c,$88000000
+	dc.l	$0000f22e,$9000ff5c,$f22e4804,$ff84f201
+	dc.l	$a800f23c,$90000000,$000083ae,$ff64f200
+	dc.l	$0080f23c,$58b80001,$f293ff6a,$6000fe48
+	dc.l	$0c010004,$6700fdb4,$0c010001,$67160c01
+	dc.l	$00026736,$0c010005,$67ffffff,$cc8c60ff
+	dc.l	$ffffcc9a,$4a280000,$6b10f23c,$44000000
+	dc.l	$00001d7c,$0004ff64,$4e75f23c,$44008000
+	dc.l	$00001d7c,$000cff64,$4e754a28,$00006bff
+	dc.l	$ffffccc2,$f228d080,$00001d7c,$0002ff64
+	dc.l	$4e75303b,$12064efb,$00020020,$0026002c
+	dc.l	$00300034,$0038003c,$00400044,$004a0050
+	dc.l	$00540058,$005c0060,$0064202e,$ff9c4e75
+	dc.l	$202effa0,$4e752002,$4e752003,$4e752004
+	dc.l	$4e752005,$4e752006,$4e752007,$4e75202e
+	dc.l	$ffa44e75,$202effa8,$4e75200a,$4e75200b
+	dc.l	$4e75200c,$4e75200d,$4e752016,$4e75202e
+	dc.l	$ffd84e75,$323b1206,$4efb1002,$00100016
+	dc.l	$001c0020,$00240028,$002c0030,$2d40ff9c
+	dc.l	$4e752d40,$ffa04e75,$24004e75,$26004e75
+	dc.l	$28004e75,$2a004e75,$2c004e75,$2e004e75
+	dc.l	$323b1206,$4efb1002,$00100016,$001c0020
+	dc.l	$00240028,$002c0030,$3d40ff9e,$4e753d40
+	dc.l	$ffa24e75,$34004e75,$36004e75,$38004e75
+	dc.l	$3a004e75,$3c004e75,$3e004e75,$323b1206
+	dc.l	$4efb1002,$00100016,$001c0020,$00240028
+	dc.l	$002c0030,$1d40ff9f,$4e751d40,$ffa34e75
+	dc.l	$14004e75,$16004e75,$18004e75,$1a004e75
+	dc.l	$1c004e75,$1e004e75,$323b1206,$4efb1002
+	dc.l	$00100016,$001c0020,$00240028,$002c0030
+	dc.l	$d1aeffa4,$4e75d1ae,$ffa84e75,$d5c04e75
+	dc.l	$d7c04e75,$d9c04e75,$dbc04e75,$d1964e75
+	dc.l	$1d7c0004,$ff4a0c00,$00016706,$d1aeffd8
+	dc.l	$4e7554ae,$ffd84e75,$323b1206,$4efb1002
+	dc.l	$00100016,$001c0020,$00240028,$002c0030
+	dc.l	$91aeffa4,$4e7591ae,$ffa84e75,$95c04e75
+	dc.l	$97c04e75,$99c04e75,$9bc04e75,$91964e75
+	dc.l	$1d7c0008,$ff4a0c00,$00016706,$91aeffd8
+	dc.l	$4e7555ae,$ffd84e75,$303b0206,$4efb0002
+	dc.l	$00100028,$0040004c,$00580064,$0070007c
+	dc.l	$2d6effdc,$ff6c2d6e,$ffe0ff70,$2d6effe4
+	dc.l	$ff7441ee,$ff6c4e75,$2d6effe8,$ff6c2d6e
+	dc.l	$ffecff70,$2d6efff0,$ff7441ee,$ff6c4e75
+	dc.l	$f22ef020,$ff6c41ee,$ff6c4e75,$f22ef010
+	dc.l	$ff6c41ee,$ff6c4e75,$f22ef008,$ff6c41ee
+	dc.l	$ff6c4e75,$f22ef004,$ff6c41ee,$ff6c4e75
+	dc.l	$f22ef002,$ff6c41ee,$ff6c4e75,$f22ef001
+	dc.l	$ff6c41ee,$ff6c4e75,$303b0206,$4efb0002
+	dc.l	$00100028,$0040004c,$00580064,$0070007c
+	dc.l	$2d6effdc,$ff782d6e,$ffe0ff7c,$2d6effe4
+	dc.l	$ff8041ee,$ff784e75,$2d6effe8,$ff782d6e
+	dc.l	$ffecff7c,$2d6efff0,$ff8041ee,$ff784e75
+	dc.l	$f22ef020,$ff7841ee,$ff784e75,$f22ef010
+	dc.l	$ff7841ee,$ff784e75,$f22ef008,$ff7841ee
+	dc.l	$ff784e75,$f22ef004,$ff7841ee,$ff784e75
+	dc.l	$f22ef002,$ff7841ee,$ff784e75,$f22ef001
+	dc.l	$ff7841ee,$ff784e75,$303b0206,$4efb0002
+	dc.l	$00100018,$0020002a,$0034003e,$00480052
+	dc.l	$f22ef080,$ffdc4e75,$f22ef080,$ffe84e75
+	dc.l	$f227e001,$f21fd020,$4e75f227,$e001f21f
+	dc.l	$d0104e75,$f227e001,$f21fd008,$4e75f227
+	dc.l	$e001f21f,$d0044e75,$f227e001,$f21fd002
+	dc.l	$4e75f227,$e001f21f,$d0014e75,$700c61ff
+	dc.l	$ffffbace,$43eeff6c,$700c61ff,$ffffa0d8
+	dc.l	$4a8166ff,$00000a14,$e9ee004f,$ff6c0c40
+	dc.l	$7fff6602,$4e75102e,$ff6f0200,$000f660e
+	dc.l	$4aaeff70,$66084aae,$ff746602,$4e7541ee
+	dc.l	$ff6c61ff,$0000001c,$f22ef080,$ff6c4e75
+	dc.l	$00000000,$02030203,$02030302,$03020203
+	dc.l	$2d680000,$ff842d68,$0004ff88,$2d680008
+	dc.l	$ff8c41ee,$ff8448e7,$3c00f227,$e0017402
+	dc.l	$76042810,$42814c3c,$10010000,$000ae9c4
+	dc.l	$08c4d280,$580351ca,$ffee0804,$001e6702
+	dc.l	$44810481,$00000010,$6c0e4481,$00844000
+	dc.l	$00000090,$40000000,$2f017201,$f23c4400
+	dc.l	$00000000,$e9d00704,$f2005822,$28301c00
+	dc.l	$76007407,$f23c4423,$41200000,$e9c408c4
+	dc.l	$f2005822,$580351ca,$ffec5281,$0c810000
+	dc.l	$00026fd8,$0810001f,$6704f200,$001a2217
+	dc.l	$0c810000,$001b6f00,$00e40810,$001e6674
+	dc.l	$42812810,$e9c40704,$66245281,$7a012830
+	dc.l	$5c006608,$50815285,$28305c00,$42837407
+	dc.l	$e9c408c4,$66085883,$528151ca,$fff42001
+	dc.l	$22179280,$6c104481,$28100084,$40000000
+	dc.l	$00904000,$000043fb,$01700000,$06664283
+	dc.l	$f23c4480,$3f800000,$7403e280,$6406f231
+	dc.l	$48a33800,$06830000,$000c4a80,$66ecf200
+	dc.l	$04236068,$42817a02,$28305c00,$66085385
+	dc.l	$50812830,$5c00761c,$7407e9c4,$08c46608
+	dc.l	$59835281,$51cafff4,$20012217,$92806e10
+	dc.l	$44812810,$0284bfff,$ffff0290,$bfffffff
+	dc.l	$43fb0170,$000005fc,$4283f23c,$44803f80
+	dc.l	$00007403,$e2806406,$f23148a3,$38000683
+	dc.l	$0000000c,$4a8066ec,$f2000420,$262eff60
+	dc.l	$e9c32682,$2810e582,$e9c40002,$d48043fa
+	dc.l	$fe501031,$28004283,$efc30682,$f2039000
+	dc.l	$e280640a,$43fb0170,$00000644,$6016e280
+	dc.l	$640a43fb,$01700000,$06d26008,$43fb0170
+	dc.l	$00000590,$20016a08,$44800090,$40000000
+	dc.l	$4283f23c,$44803f80,$0000e280,$6406f231
+	dc.l	$48a33800,$06830000,$000c4a80,$66ec0810
+	dc.l	$001e6706,$f2000420,$6004f200,$0423f200
+	dc.l	$a8000880,$00096706,$006e0108,$ff66588f
+	dc.l	$f21fd040,$4cdf003c,$f23c9000,$00000000
+	dc.l	$f23c8800,$00000000,$4e753ffd,$00009a20
+	dc.l	$9a84fbcf,$f7980000,$00003ffd,$00009a20
+	dc.l	$9a84fbcf,$f7990000,$00003f80,$00000000
+	dc.l	$00000000,$00000000,$00004000,$00000000
+	dc.l	$00000000,$00000000,$00004120,$00000000
+	dc.l	$00000000,$00000000,$0000459a,$28000000
+	dc.l	$00000000,$00000000,$00000000,$00000303
+	dc.l	$02020302,$02030203,$030248e7,$3f20f227
+	dc.l	$e007f23c,$90000000,$00202d50,$ff582e00
+	dc.l	$422eff50,$0c2e0004,$ff4e6600,$00303010
+	dc.l	$02407fff,$22280004,$24280008,$5340e38a
+	dc.l	$e3914a81,$6cf64a40,$6e0450ee,$ff500240
+	dc.l	$7fff3080,$21410004,$21420008,$2d50ff90
+	dc.l	$2d680004,$ff942d68,$0008ff98,$02ae7fff
+	dc.l	$ffffff90,$4a2eff50,$67082c3c,$ffffecbb
+	dc.l	$6038302e,$ff903d7c,$3fffff90,$f22e4800
+	dc.l	$ff900440,$3ffff200,$5022f23a,$4428ff1c
+	dc.l	$f293000e,$f23a4823,$ff02f206,$6000600a
+	dc.l	$f23a4823,$fee6f206,$6000f23c,$88000000
+	dc.l	$00004245,$4a876f04,$28076006,$28069887
+	dc.l	$52844a84,$6f180c84,$00000011,$6f127811
+	dc.l	$4a876f0c,$00ae0000,$2080ff64,$60027801
+	dc.l	$4a876e06,$be866d02,$2c072006,$52809084
+	dc.l	$48454245,$42424a80,$6c145245,$0c80ffff
+	dc.l	$ecd46e08,$06800000,$00187418,$4480f23a
+	dc.l	$4480fe98,$e9ee1682,$ff60e349,$d245e349
+	dc.l	$4aaeff58,$6c025281,$45fafec0,$16321800
+	dc.l	$e98bf203,$9000e88b,$4a03660a,$43fb0170
+	dc.l	$00000370,$6016e20b,$640a43fb,$01700000
+	dc.l	$03fe6008,$43fb0170,$00000490,$4283e288
+	dc.l	$6406f231,$48a33800,$06830000,$000c4a80
+	dc.l	$66ecf23c,$88000000,$0000f23c,$90000000
+	dc.l	$0010f210,$4800f200,$00184a45,$6608f200
+	dc.l	$04206000,$008e4a2e,$ff506700,$0072f227
+	dc.l	$e0023617,$02437fff,$00508000,$d6500443
+	dc.l	$3fffd669,$00240443,$3fffd669,$00300443
+	dc.l	$3fff6b00,$00480257,$80008757,$02507fff
+	dc.l	$2f280008,$2f280004,$2f3c3fff,$0000f21f
+	dc.l	$d080f21f,$48232f29,$002c2f29,$00282f3c
+	dc.l	$3fff0000,$2f290038,$2f290034,$2f3c3fff
+	dc.l	$0000f21f,$4823f21f,$48236016,$60fe4a42
+	dc.l	$670cf229,$48230024,$f2294823,$0030f200
+	dc.l	$0423f200,$a800f22e,$6800ff90,$45eeff90
+	dc.l	$08000009,$670e00aa,$00000001,$0008f22e
+	dc.l	$4800ff90,$2d6eff60,$ff5402ae,$00000030
+	dc.l	$ff6048e7,$c0c02f2e,$ff542f2e,$ff5841ee
+	dc.l	$ff90f210,$68004aae,$ff586c06,$00908000
+	dc.l	$00002f2e,$ff64f22e,$9000ff60,$f23c8800
+	dc.l	$00000000,$f22e4801,$ff90f200,$a800816e
+	dc.l	$ff661d57,$ff64588f,$2d5fff58,$2d5fff54
+	dc.l	$4cdf0303,$2d6eff58,$ff902d6e,$ff54ff60
+	dc.l	$48454a45,$66ff0000,$0086f23a,$4500fcec
+	dc.l	$20045380,$4283e288,$6406f231,$49233800
+	dc.l	$06830000,$000c4a80,$66ec4a2e,$ff50670a
+	dc.l	$f2000018,$60ff0000,$0028f200,$0018f200
+	dc.l	$0838f293,$001a5386,$3a3c0001,$f23c9000
+	dc.l	$00000020,$f23a4523,$fcc26000,$fda8f23a
+	dc.l	$4523fcb8,$f2000838,$f294005c,$f292000c
+	dc.l	$f23a4420,$fca65286,$604c5286,$3a3c0001
+	dc.l	$f23c9000,$00000020,$6000fd7a,$f23a4500
+	dc.l	$fc6a2004,$4283e288,$6406f231,$49233800
+	dc.l	$06830000,$000c4a80,$66ecf200,$0018f200
+	dc.l	$0838f28e,$0012f23a,$4420fc60,$52865284
+	dc.l	$f23a4523,$fc56f23c,$90000000,$0010f200
+	dc.l	$082041ee,$ff84f210,$68002428,$00042628
+	dc.l	$000842a8,$000442a8,$00082010,$48406714
+	dc.l	$04800000,$3ffd4a80,$6e0a4480,$e28ae293
+	dc.l	$51c8fffa,$4a826604,$4a836710,$42810683
+	dc.l	$00000080,$d5810283,$ffffff80,$20045688
+	dc.l	$61ff0000,$02b04a2e,$ff506728,$f200003a
+	dc.l	$f281000c,$f2064000,$f2000018,$602e4a87
+	dc.l	$6d08f23a,$4400fbe4,$6022f206,$4000f200
+	dc.l	$00186018,$f200003a,$f28e000a,$f23a4400
+	dc.l	$fb9a6008,$f2064000,$f2000018,$f2294820
+	dc.l	$0018f22e,$6800ff90,$242a0004,$262a0008
+	dc.l	$3012670e,$04403ffd,$4440e28a,$e29351c8
+	dc.l	$fffa4281,$06830000,$0080d581,$0283ffff
+	dc.l	$ff807004,$41eeff54,$61ff0000,$0228202e
+	dc.l	$ff54720c,$e2a8efee,$010cff84,$e2a8efee
+	dc.l	$0404ff84,$4a006708,$00ae0000,$2080ff64
+	dc.l	$4280022e,$000fff84,$4aaeff58,$6c027002
+	dc.l	$4a866c02,$5280efee,$0002ff84,$f23c8800
+	dc.l	$00000000,$f21fd0e0,$4cdf04fc,$4e754002
+	dc.l	$0000a000,$00000000,$00004005,$0000c800
+	dc.l	$00000000,$0000400c,$00009c40,$00000000
+	dc.l	$00004019,$0000bebc,$20000000,$00004034
+	dc.l	$00008e1b,$c9bf0400,$00004069,$00009dc5
+	dc.l	$ada82b70,$b59e40d3,$0000c278,$1f49ffcf
+	dc.l	$a6d541a8,$000093ba,$47c980e9,$8ce04351
+	dc.l	$0000aa7e,$ebfb9df9,$de8e46a3,$0000e319
+	dc.l	$a0aea60e,$91c74d48,$0000c976,$75868175
+	dc.l	$0c175a92,$00009e8b,$3b5dc53d,$5de57525
+	dc.l	$0000c460,$52028a20,$979b4002,$0000a000
+	dc.l	$00000000,$00004005,$0000c800,$00000000
+	dc.l	$0000400c,$00009c40,$00000000,$00004019
+	dc.l	$0000bebc,$20000000,$00004034,$00008e1b
+	dc.l	$c9bf0400,$00004069,$00009dc5,$ada82b70
+	dc.l	$b59e40d3,$0000c278,$1f49ffcf,$a6d641a8
+	dc.l	$000093ba,$47c980e9,$8ce04351,$0000aa7e
+	dc.l	$ebfb9df9,$de8e46a3,$0000e319,$a0aea60e
+	dc.l	$91c74d48,$0000c976,$75868175,$0c185a92
+	dc.l	$00009e8b,$3b5dc53d,$5de57525,$0000c460
+	dc.l	$52028a20,$979b4002,$0000a000,$00000000
+	dc.l	$00004005,$0000c800,$00000000,$0000400c
+	dc.l	$00009c40,$00000000,$00004019,$0000bebc
+	dc.l	$20000000,$00004034,$00008e1b,$c9bf0400
+	dc.l	$00004069,$00009dc5,$ada82b70,$b59d40d3
+	dc.l	$0000c278,$1f49ffcf,$a6d541a8,$000093ba
+	dc.l	$47c980e9,$8cdf4351,$0000aa7e,$ebfb9df9
+	dc.l	$de8d46a3,$0000e319,$a0aea60e,$91c64d48
+	dc.l	$0000c976,$75868175,$0c175a92,$00009e8b
+	dc.l	$3b5dc53d,$5de47525,$0000c460,$52028a20
+	dc.l	$979a48e7,$ff007e01,$53802802,$2a03e9c2
+	dc.l	$1003e782,$e9c36003,$e7838486,$e385e394
+	dc.l	$4846d346,$d6854e71,$d5844e71,$d3464846
+	dc.l	$4a476712,$4847e947,$de4110c7,$48474247
+	dc.l	$51c8ffc8,$60124847,$3e014847,$524751c8
+	dc.l	$ffba4847,$e94f10c7,$4cdf00ff,$4e757001
+	dc.l	$610000d6,$3d7c0121,$000a6000,$007e7002
+	dc.l	$610000c6,$3d7c0141,$000a606e,$70046100
+	dc.l	$00b83d7c,$0101000a,$60607008,$610000aa
+	dc.l	$3d7c0161,$000a6052,$700c6100,$009c3d7c
+	dc.l	$0161000a,$60447001,$6100008e,$3d7c00a1
+	dc.l	$000a6036,$70026100,$00803d7c,$00c1000a
+	dc.l	$60287004,$61000072,$3d7c0081,$000a601a
+	dc.l	$70086100,$00643d7c,$00e1000a,$600c700c
+	dc.l	$61000056,$3d7c00e1,$000a2d6e,$ff680006
+	dc.l	$f22ed0c0,$ffdcf22e,$9c00ff60,$4cee0303
+	dc.l	$ff9c4e5e,$2f172f6f,$00080004,$2f6f000c
+	dc.l	$00082f7c,$00000001,$000c3f6f,$0006000c
+	dc.l	$3f7c4008,$00060817,$00056706,$08ef0002
+	dc.l	$000d60ff,$ffff95f4,$122eff41,$02010038
+	dc.l	$0c010018,$6700000c,$0c010020,$67000060
+	dc.l	$4e75122e,$ff410241,$0007323b,$12064efb
+	dc.l	$10020010,$0016001c,$00200024,$0028002c
+	dc.l	$003091ae,$ffa44e75,$91aeffa8,$4e7595c0
+	dc.l	$4e7597c0,$4e7599c0,$4e759bc0,$4e759196
+	dc.l	$4e750c2e,$0030000a,$6612082e,$00050004
+	dc.l	$660a4e7a,$880091c0,$4e7b8800,$4e754480
+	dc.l	$60a051fc,$00000000,$00000000,$00000000
diff --git a/arch/m68k/ifpsp060/src/README-SRC b/arch/m68k/ifpsp060/src/README-SRC
new file mode 100644
index 0000000..6be5cff
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/README-SRC
@@ -0,0 +1,12 @@
+This is the original source code from Motorola for the 68060 processor
+support code, providing emulation for rarely used m68k instructions
+not implemented in the 68060 silicon.
+
+The code provided here will not assemble out of the box using the GNU
+assembler; however, it is included here in order to comply with the
+GNU General Public License.
+
+You don't need to actually assemble these files in order to compile a
+working m68k kernel; the precompiled .sa files in arch/m68k/ifpsp060
+are sufficient and were generated from these source files by
+Motorola.
diff --git a/arch/m68k/ifpsp060/src/fplsp.S b/arch/m68k/ifpsp060/src/fplsp.S
new file mode 100644
index 0000000..fdb79b9
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/fplsp.S
@@ -0,0 +1,10980 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# lfptop.s:
+#	This file is appended to the top of the 060FPLSP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located here.
+#
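+#
+# Editor's note -- hedged usage sketch, not part of the Motorola sources.
+# Each entry pair below is 8 bytes long (a 6-byte bra.l plus a pad word),
+# so a caller that knows the table's base address reaches a routine at a
+# fixed offset; e.g., assuming a hypothetical label fplsp_base placed at
+# the first bra.l, the single-precision arc cosine could be called as:
+#
+#	fmov.s	%fp0,-(%sp)		# push the single-precision argument
+#	bsr.l	fplsp_base+0x00		# _facoss_; result comes back in fp0
+#	addq.l	&4,%sp			# caller pops its argument
+#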
+
+	bra.l	_facoss_
+	short	0x0000
+	bra.l	_facosd_
+	short	0x0000
+	bra.l	_facosx_
+	short	0x0000
+
+	bra.l	_fasins_
+	short	0x0000
+	bra.l	_fasind_
+	short	0x0000
+	bra.l	_fasinx_
+	short	0x0000
+
+	bra.l	_fatans_
+	short	0x0000
+	bra.l	_fatand_
+	short	0x0000
+	bra.l	_fatanx_
+	short	0x0000
+
+	bra.l	_fatanhs_
+	short	0x0000
+	bra.l	_fatanhd_
+	short	0x0000
+	bra.l	_fatanhx_
+	short	0x0000
+
+	bra.l	_fcoss_
+	short	0x0000
+	bra.l	_fcosd_
+	short	0x0000
+	bra.l	_fcosx_
+	short	0x0000
+
+	bra.l	_fcoshs_
+	short	0x0000
+	bra.l	_fcoshd_
+	short	0x0000
+	bra.l	_fcoshx_
+	short	0x0000
+
+	bra.l	_fetoxs_
+	short	0x0000
+	bra.l	_fetoxd_
+	short	0x0000
+	bra.l	_fetoxx_
+	short	0x0000
+
+	bra.l	_fetoxm1s_
+	short	0x0000
+	bra.l	_fetoxm1d_
+	short	0x0000
+	bra.l	_fetoxm1x_
+	short	0x0000
+
+	bra.l	_fgetexps_
+	short	0x0000
+	bra.l	_fgetexpd_
+	short	0x0000
+	bra.l	_fgetexpx_
+	short	0x0000
+
+	bra.l	_fgetmans_
+	short	0x0000
+	bra.l	_fgetmand_
+	short	0x0000
+	bra.l	_fgetmanx_
+	short	0x0000
+
+	bra.l	_flog10s_
+	short	0x0000
+	bra.l	_flog10d_
+	short	0x0000
+	bra.l	_flog10x_
+	short	0x0000
+
+	bra.l	_flog2s_
+	short	0x0000
+	bra.l	_flog2d_
+	short	0x0000
+	bra.l	_flog2x_
+	short	0x0000
+
+	bra.l	_flogns_
+	short	0x0000
+	bra.l	_flognd_
+	short	0x0000
+	bra.l	_flognx_
+	short	0x0000
+
+	bra.l	_flognp1s_
+	short	0x0000
+	bra.l	_flognp1d_
+	short	0x0000
+	bra.l	_flognp1x_
+	short	0x0000
+
+	bra.l	_fmods_
+	short	0x0000
+	bra.l	_fmodd_
+	short	0x0000
+	bra.l	_fmodx_
+	short	0x0000
+
+	bra.l	_frems_
+	short	0x0000
+	bra.l	_fremd_
+	short	0x0000
+	bra.l	_fremx_
+	short	0x0000
+
+	bra.l	_fscales_
+	short	0x0000
+	bra.l	_fscaled_
+	short	0x0000
+	bra.l	_fscalex_
+	short	0x0000
+
+	bra.l	_fsins_
+	short	0x0000
+	bra.l	_fsind_
+	short	0x0000
+	bra.l	_fsinx_
+	short	0x0000
+
+	bra.l	_fsincoss_
+	short	0x0000
+	bra.l	_fsincosd_
+	short	0x0000
+	bra.l	_fsincosx_
+	short	0x0000
+
+	bra.l	_fsinhs_
+	short	0x0000
+	bra.l	_fsinhd_
+	short	0x0000
+	bra.l	_fsinhx_
+	short	0x0000
+
+	bra.l	_ftans_
+	short	0x0000
+	bra.l	_ftand_
+	short	0x0000
+	bra.l	_ftanx_
+	short	0x0000
+
+	bra.l	_ftanhs_
+	short	0x0000
+	bra.l	_ftanhd_
+	short	0x0000
+	bra.l	_ftanhx_
+	short	0x0000
+
+	bra.l	_ftentoxs_
+	short	0x0000
+	bra.l	_ftentoxd_
+	short	0x0000
+	bra.l	_ftentoxx_
+	short	0x0000
+
+	bra.l	_ftwotoxs_
+	short	0x0000
+	bra.l	_ftwotoxd_
+	short	0x0000
+	bra.l	_ftwotoxx_
+	short	0x0000
+
+	bra.l	_fabss_
+	short	0x0000
+	bra.l	_fabsd_
+	short	0x0000
+	bra.l	_fabsx_
+	short	0x0000
+
+	bra.l	_fadds_
+	short	0x0000
+	bra.l	_faddd_
+	short	0x0000
+	bra.l	_faddx_
+	short	0x0000
+
+	bra.l	_fdivs_
+	short	0x0000
+	bra.l	_fdivd_
+	short	0x0000
+	bra.l	_fdivx_
+	short	0x0000
+
+	bra.l	_fints_
+	short	0x0000
+	bra.l	_fintd_
+	short	0x0000
+	bra.l	_fintx_
+	short	0x0000
+
+	bra.l	_fintrzs_
+	short	0x0000
+	bra.l	_fintrzd_
+	short	0x0000
+	bra.l	_fintrzx_
+	short	0x0000
+
+	bra.l	_fmuls_
+	short	0x0000
+	bra.l	_fmuld_
+	short	0x0000
+	bra.l	_fmulx_
+	short	0x0000
+
+	bra.l	_fnegs_
+	short	0x0000
+	bra.l	_fnegd_
+	short	0x0000
+	bra.l	_fnegx_
+	short	0x0000
+
+	bra.l	_fsqrts_
+	short	0x0000
+	bra.l	_fsqrtd_
+	short	0x0000
+	bra.l	_fsqrtx_
+	short	0x0000
+
+	bra.l	_fsubs_
+	short	0x0000
+	bra.l	_fsubd_
+	short	0x0000
+	bra.l	_fsubx_
+	short	0x0000
+
+# leave room for future possible additions
+	align	0x400
+
+#
+# This file contains a set of define statements for constants
+# in order to promote readability within the corecode itself.
+#
+
+set LOCAL_SIZE,		192			# stack frame size(bytes)
+set LV,			-LOCAL_SIZE		# stack offset
+
+set EXC_SR,		0x4			# stack status register
+set EXC_PC,		0x6			# stack pc
+set EXC_VOFF,		0xa			# stacked vector offset
+set EXC_EA,		0xc			# stacked <ea>
+
+set EXC_FP,		0x0			# frame pointer
+
+set EXC_AREGS,		-68			# offset of all address regs
+set EXC_DREGS,		-100			# offset of all data regs
+set EXC_FPREGS,		-36			# offset of all fp regs
+
+set EXC_A7,		EXC_AREGS+(7*4)		# offset of saved a7
+set OLD_A7,		EXC_AREGS+(6*4)		# extra copy of saved a7
+set EXC_A6,		EXC_AREGS+(6*4)		# offset of saved a6
+set EXC_A5,		EXC_AREGS+(5*4)
+set EXC_A4,		EXC_AREGS+(4*4)
+set EXC_A3,		EXC_AREGS+(3*4)
+set EXC_A2,		EXC_AREGS+(2*4)
+set EXC_A1,		EXC_AREGS+(1*4)
+set EXC_A0,		EXC_AREGS+(0*4)
+set EXC_D7,		EXC_DREGS+(7*4)
+set EXC_D6,		EXC_DREGS+(6*4)
+set EXC_D5,		EXC_DREGS+(5*4)
+set EXC_D4,		EXC_DREGS+(4*4)
+set EXC_D3,		EXC_DREGS+(3*4)
+set EXC_D2,		EXC_DREGS+(2*4)
+set EXC_D1,		EXC_DREGS+(1*4)
+set EXC_D0,		EXC_DREGS+(0*4)
+
+set EXC_FP0,		EXC_FPREGS+(0*12)	# offset of saved fp0
+set EXC_FP1,		EXC_FPREGS+(1*12)	# offset of saved fp1
+set EXC_FP2,		EXC_FPREGS+(2*12)	# offset of saved fp2 (not used)
+
+set FP_SCR1,		LV+80			# fp scratch 1
+set FP_SCR1_EX,		FP_SCR1+0
+set FP_SCR1_SGN,	FP_SCR1+2
+set FP_SCR1_HI,		FP_SCR1+4
+set FP_SCR1_LO,		FP_SCR1+8
+
+set FP_SCR0,		LV+68			# fp scratch 0
+set FP_SCR0_EX,		FP_SCR0+0
+set FP_SCR0_SGN,	FP_SCR0+2
+set FP_SCR0_HI,		FP_SCR0+4
+set FP_SCR0_LO,		FP_SCR0+8
+
+set FP_DST,		LV+56			# fp destination operand
+set FP_DST_EX,		FP_DST+0
+set FP_DST_SGN,		FP_DST+2
+set FP_DST_HI,		FP_DST+4
+set FP_DST_LO,		FP_DST+8
+
+set FP_SRC,		LV+44			# fp source operand
+set FP_SRC_EX,		FP_SRC+0
+set FP_SRC_SGN,		FP_SRC+2
+set FP_SRC_HI,		FP_SRC+4
+set FP_SRC_LO,		FP_SRC+8
+
+set USER_FPIAR,		LV+40			# FP instr address register
+
+set USER_FPSR,		LV+36			# FP status register
+set FPSR_CC,		USER_FPSR+0		# FPSR condition codes
+set FPSR_QBYTE,		USER_FPSR+1		# FPSR quotient byte
+set FPSR_EXCEPT,	USER_FPSR+2		# FPSR exception status byte
+set FPSR_AEXCEPT,	USER_FPSR+3		# FPSR accrued exception byte
+
+set USER_FPCR,		LV+32			# FP control register
+set FPCR_ENABLE,	USER_FPCR+2		# FPCR exception enable
+set FPCR_MODE,		USER_FPCR+3		# FPCR rounding mode control
+
+set L_SCR3,		LV+28			# integer scratch 3
+set L_SCR2,		LV+24			# integer scratch 2
+set L_SCR1,		LV+20			# integer scratch 1
+
+set STORE_FLG,		LV+19			# flag: operand store (i.e. not fcmp/ftst)
+
+set EXC_TEMP2,		LV+24			# temporary space
+set EXC_TEMP,		LV+16			# temporary space
+
+set DTAG,		LV+15			# destination operand type
+set STAG,		LV+14			# source operand type
+
+set SPCOND_FLG,		LV+10			# flag: special case (see below)
+
+set EXC_CC,		LV+8			# saved condition codes
+set EXC_EXTWPTR,	LV+4			# saved current PC (active)
+set EXC_EXTWORD,	LV+2			# saved extension word
+set EXC_CMDREG,		LV+2			# saved extension word
+set EXC_OPWORD,		LV+0			# saved operation word
+
+################################
+
+# Helpful macros
+
+set FTEMP,		0			# offsets within an
+set FTEMP_EX,		0			# extended precision
+set FTEMP_SGN,		2			# value saved in memory.
+set FTEMP_HI,		4
+set FTEMP_LO,		8
+set FTEMP_GRS,		12
+
+set LOCAL,		0			# offsets within an
+set LOCAL_EX,		0			# extended precision
+set LOCAL_SGN,		2			# value saved in memory.
+set LOCAL_HI,		4
+set LOCAL_LO,		8
+set LOCAL_GRS,		12
+
+set DST,		0			# offsets within an
+set DST_EX,		0			# extended precision
+set DST_HI,		4			# value saved in memory.
+set DST_LO,		8
+
+set SRC,		0			# offsets within an
+set SRC_EX,		0			# extended precision
+set SRC_HI,		4			# value saved in memory.
+set SRC_LO,		8
+
+set SGL_LO,		0x3f81			# min sgl prec exponent
+set SGL_HI,		0x407e			# max sgl prec exponent
+set DBL_LO,		0x3c01			# min dbl prec exponent
+set DBL_HI,		0x43fe			# max dbl prec exponent
+set EXT_LO,		0x0			# min ext prec exponent
+set EXT_HI,		0x7ffe			# max ext prec exponent
+
+set EXT_BIAS,		0x3fff			# extended precision bias
+set SGL_BIAS,		0x007f			# single precision bias
+set DBL_BIAS,		0x03ff			# double precision bias
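+#
+# Editor's note -- hedged worked example.  Rebiasing an exponent field
+# between formats only needs the difference of the biases above, e.g. a
+# single-precision exponent e maps to extended precision as
+#	e_ext = e - SGL_BIAS + EXT_BIAS = e + 0x3f80
+# and a double-precision exponent as e + (0x3fff - 0x03ff) = e + 0x3c00.
+#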
+
+set NORM,		0x00			# operand type for STAG/DTAG
+set ZERO,		0x01			# operand type for STAG/DTAG
+set INF,		0x02			# operand type for STAG/DTAG
+set QNAN,		0x03			# operand type for STAG/DTAG
+set DENORM,		0x04			# operand type for STAG/DTAG
+set SNAN,		0x05			# operand type for STAG/DTAG
+set UNNORM,		0x06			# operand type for STAG/DTAG
+
+##################
+# FPSR/FPCR bits #
+##################
+set neg_bit,		0x3			# negative result
+set z_bit,		0x2			# zero result
+set inf_bit,		0x1			# infinite result
+set nan_bit,		0x0			# NAN result
+
+set q_sn_bit,		0x7			# sign bit of quotient byte
+
+set bsun_bit,		7			# branch on unordered
+set snan_bit,		6			# signalling NAN
+set operr_bit,		5			# operand error
+set ovfl_bit,		4			# overflow
+set unfl_bit,		3			# underflow
+set dz_bit,		2			# divide by zero
+set inex2_bit,		1			# inexact result 2
+set inex1_bit,		0			# inexact result 1
+
+set aiop_bit,		7			# accrued inexact operation bit
+set aovfl_bit,		6			# accrued overflow bit
+set aunfl_bit,		5			# accrued underflow bit
+set adz_bit,		4			# accrued dz bit
+set ainex_bit,		3			# accrued inexact bit
+
+#############################
+# FPSR individual bit masks #
+#############################
+set neg_mask,		0x08000000		# negative bit mask (lw)
+set inf_mask,		0x02000000		# infinity bit mask (lw)
+set z_mask,		0x04000000		# zero bit mask (lw)
+set nan_mask,		0x01000000		# nan bit mask (lw)
+
+set neg_bmask,		0x08			# negative bit mask (byte)
+set inf_bmask,		0x02			# infinity bit mask (byte)
+set z_bmask,		0x04			# zero bit mask (byte)
+set nan_bmask,		0x01			# nan bit mask (byte)
+
+set bsun_mask,		0x00008000		# bsun exception mask
+set snan_mask,		0x00004000		# snan exception mask
+set operr_mask,		0x00002000		# operr exception mask
+set ovfl_mask,		0x00001000		# overflow exception mask
+set unfl_mask,		0x00000800		# underflow exception mask
+set dz_mask,		0x00000400		# dz exception mask
+set inex2_mask,		0x00000200		# inex2 exception mask
+set inex1_mask,		0x00000100		# inex1 exception mask
+
+set aiop_mask,		0x00000080		# accrued illegal operation
+set aovfl_mask,		0x00000040		# accrued overflow
+set aunfl_mask,		0x00000020		# accrued underflow
+set adz_mask,		0x00000010		# accrued divide by zero
+set ainex_mask,		0x00000008		# accrued inexact
+
+######################################
+# FPSR combinations used in the FPSP #
+######################################
+set dzinf_mask,		inf_mask+dz_mask+adz_mask
+set opnan_mask,		nan_mask+operr_mask+aiop_mask
+set nzi_mask,		0x01ffffff		#clears N, Z, and I
+set unfinx_mask,	unfl_mask+inex2_mask+aunfl_mask+ainex_mask
+set unf2inx_mask,	unfl_mask+inex2_mask+ainex_mask
+set ovfinx_mask,	ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
+set inx1a_mask,		inex1_mask+ainex_mask
+set inx2a_mask,		inex2_mask+ainex_mask
+set snaniop_mask,	nan_mask+snan_mask+aiop_mask
+set snaniop2_mask,	snan_mask+aiop_mask
+set naniop_mask,	nan_mask+aiop_mask
+set neginf_mask,	neg_mask+inf_mask
+set infaiop_mask,	inf_mask+aiop_mask
+set negz_mask,		neg_mask+z_mask
+set opaop_mask,		operr_mask+aiop_mask
+set unfl_inx_mask,	unfl_mask+aunfl_mask+ainex_mask
+set ovfl_inx_mask,	ovfl_mask+aovfl_mask+ainex_mask
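+#
+# Editor's note -- hedged example.  These combined masks are meant to be
+# OR'ed into the user FPSR image in a single operation; for instance, an
+# underflowed and inexact result might be recorded with something like:
+#
+#	or.l	&unfinx_mask,USER_FPSR(%a6)	# UNFL+INEX2+AUNFL+AINEX
+#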
+
+#########
+# misc. #
+#########
+set rnd_stky_bit,	29			# stky bit pos in longword
+
+set sign_bit,		0x7			# sign bit
+set signan_bit,		0x6			# signalling nan bit
+
+set sgl_thresh,		0x3f81			# minimum sgl exponent
+set dbl_thresh,		0x3c01			# minimum dbl exponent
+
+set x_mode,		0x0			# extended precision
+set s_mode,		0x4			# single precision
+set d_mode,		0x8			# double precision
+
+set rn_mode,		0x0			# round-to-nearest
+set rz_mode,		0x1			# round-to-zero
+set rm_mode,		0x2			# round-to-minus-infinity
+set rp_mode,		0x3			# round-to-plus-infinity
+
+set mantissalen,	64			# length of mantissa in bits
+
+set BYTE,		1			# len(byte) == 1 byte
+set WORD,		2			# len(word) == 2 bytes
+set LONG,		4			# len(longword) == 4 bytes
+
+set BSUN_VEC,		0xc0			# bsun    vector offset
+set INEX_VEC,		0xc4			# inexact vector offset
+set DZ_VEC,		0xc8			# dz      vector offset
+set UNFL_VEC,		0xcc			# unfl    vector offset
+set OPERR_VEC,		0xd0			# operr   vector offset
+set OVFL_VEC,		0xd4			# ovfl    vector offset
+set SNAN_VEC,		0xd8			# snan    vector offset
+
+###########################
+# SPecial CONDition FLaGs #
+###########################
+set ftrapcc_flg,	0x01			# flag bit: ftrapcc exception
+set fbsun_flg,		0x02			# flag bit: bsun exception
+set mia7_flg,		0x04			# flag bit: (a7)+ <ea>
+set mda7_flg,		0x08			# flag bit: -(a7) <ea>
+set fmovm_flg,		0x40			# flag bit: fmovm instruction
+set immed_flg,		0x80			# flag bit: &<data> <ea>
+
+set ftrapcc_bit,	0x0
+set fbsun_bit,		0x1
+set mia7_bit,		0x2
+set mda7_bit,		0x3
+set immed_bit,		0x7
+
+##################################
+# TRANSCENDENTAL "LAST-OP" FLAGS #
+##################################
+set FMUL_OP,		0x0			# fmul instr performed last
+set FDIV_OP,		0x1			# fdiv performed last
+set FADD_OP,		0x2			# fadd performed last
+set FMOV_OP,		0x3			# fmov performed last
+
+#############
+# CONSTANTS #
+#############
+T1:	long		0x40C62D38,0xD3D64634	# 16381 LOG2 LEAD
+T2:	long		0x3D6F90AE,0xB1E75CC7	# 16381 LOG2 TRAIL
+
+PI:	long		0x40000000,0xC90FDAA2,0x2168C235,0x00000000
+PIBY2:	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+TWOBYPI:
+	long		0x3FE45F30,0x6DC9C883
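+#
+# Editor's note -- hedged explanation.  Constants such as T1/T2 above are
+# one value split into a LEAD part plus a small TRAIL correction; applying
+# the argument reduction in two steps lets most of the rounding error of
+# the large multiply cancel.  A sketch, assuming hypothetical register
+# assignments fp0=x, fp1=n, fp2=LEAD, fp3=TRAIL:
+#
+#	fmul.x	%fp1,%fp2		# fp2 = n * LEAD
+#	fsub.x	%fp2,%fp0		# fp0 = x - n*LEAD
+#	fmul.x	%fp3,%fp1		# fp1 = n * TRAIL
+#	fsub.x	%fp1,%fp0		# fp0 = (x - n*LEAD) - n*TRAIL
+#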
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
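+#
+# Editor's note: every monadic entry below follows the same skeleton --
+# save d0-d1/a0-a1 and fp0/fp1, convert the argument to extended
+# precision in FP_SRC, tag it, then dispatch on the tag:
+#
+#	NORM   -> core routine (ssin, scos, ...)
+#	ZERO   -> src_zero / ld_pone (depending on the function)
+#	INF    -> t_operr / src_inf / sopr_inf
+#	QNAN   -> src_qnan
+#	DENORM -> denormalized variant (ssind, scosd, ...)
+#
+# before restoring the saved registers and returning the result in fp0.
+#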
+	global		_fsins_
+_fsins_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L0_2s
+	bsr.l		ssin			# operand is a NORM
+	bra.b		_L0_6s
+_L0_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L0_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L0_6s
+_L0_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L0_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L0_6s
+_L0_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L0_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L0_6s
+_L0_5s:
+	bsr.l		ssind			# operand is a DENORM
+_L0_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fsind_
+_fsind_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L0_2d
+	bsr.l		ssin			# operand is a NORM
+	bra.b		_L0_6d
+_L0_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L0_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L0_6d
+_L0_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L0_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L0_6d
+_L0_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L0_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L0_6d
+_L0_5d:
+	bsr.l		ssind			# operand is a DENORM
+_L0_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fsinx_
+_fsinx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L0_2x
+	bsr.l		ssin			# operand is a NORM
+	bra.b		_L0_6x
+_L0_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L0_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L0_6x
+_L0_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L0_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L0_6x
+_L0_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L0_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L0_6x
+_L0_5x:
+	bsr.l		ssind			# operand is a DENORM
+_L0_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fcoss_
+_fcoss_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L1_2s
+	bsr.l		scos			# operand is a NORM
+	bra.b		_L1_6s
+_L1_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L1_3s			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L1_6s
+_L1_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L1_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L1_6s
+_L1_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L1_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L1_6s
+_L1_5s:
+	bsr.l		scosd			# operand is a DENORM
+_L1_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fcosd_
+_fcosd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L1_2d
+	bsr.l		scos			# operand is a NORM
+	bra.b		_L1_6d
+_L1_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L1_3d			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L1_6d
+_L1_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L1_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L1_6d
+_L1_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L1_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L1_6d
+_L1_5d:
+	bsr.l		scosd			# operand is a DENORM
+_L1_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fcosx_
+_fcosx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L1_2x
+	bsr.l		scos			# operand is a NORM
+	bra.b		_L1_6x
+_L1_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L1_3x			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L1_6x
+_L1_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L1_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L1_6x
+_L1_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L1_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L1_6x
+_L1_5x:
+	bsr.l		scosd			# operand is a DENORM
+_L1_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fsinhs_
+_fsinhs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L2_2s
+	bsr.l		ssinh			# operand is a NORM
+	bra.b		_L2_6s
+_L2_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L2_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L2_6s
+_L2_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L2_4s			# no
+	bsr.l		src_inf			# yes
+	bra.b		_L2_6s
+_L2_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L2_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L2_6s
+_L2_5s:
+	bsr.l		ssinhd			# operand is a DENORM
+_L2_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fsinhd_
+_fsinhd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L2_2d
+	bsr.l		ssinh			# operand is a NORM
+	bra.b		_L2_6d
+_L2_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L2_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L2_6d
+_L2_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L2_4d			# no
+	bsr.l		src_inf			# yes
+	bra.b		_L2_6d
+_L2_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L2_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L2_6d
+_L2_5d:
+	bsr.l		ssinhd			# operand is a DENORM
+_L2_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fsinhx_
+_fsinhx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L2_2x
+	bsr.l		ssinh			# operand is a NORM
+	bra.b		_L2_6x
+_L2_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L2_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L2_6x
+_L2_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L2_4x			# no
+	bsr.l		src_inf			# yes
+	bra.b		_L2_6x
+_L2_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L2_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L2_6x
+_L2_5x:
+	bsr.l		ssinhd			# operand is a DENORM
+_L2_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_flognp1s_
+_flognp1s_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L3_2s
+	bsr.l		slognp1			# operand is a NORM
+	bra.b		_L3_6s
+_L3_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L3_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L3_6s
+_L3_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L3_4s			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L3_6s
+_L3_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L3_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L3_6s
+_L3_5s:
+	bsr.l		slognp1d			# operand is a DENORM
+_L3_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flognp1d_
+_flognp1d_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L3_2d
+	bsr.l		slognp1			# operand is a NORM
+	bra.b		_L3_6d
+_L3_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L3_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L3_6d
+_L3_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L3_4d			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L3_6d
+_L3_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L3_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L3_6d
+_L3_5d:
+	bsr.l		slognp1d			# operand is a DENORM
+_L3_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flognp1x_
+_flognp1x_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L3_2x
+	bsr.l		slognp1			# operand is a NORM
+	bra.b		_L3_6x
+_L3_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L3_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L3_6x
+_L3_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L3_4x			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L3_6x
+_L3_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L3_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L3_6x
+_L3_5x:
+	bsr.l		slognp1d			# operand is a DENORM
+_L3_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fetoxm1s_
+_fetoxm1s_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L4_2s
+	bsr.l		setoxm1			# operand is a NORM
+	bra.b		_L4_6s
+_L4_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L4_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L4_6s
+_L4_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L4_4s			# no
+	bsr.l		setoxm1i			# yes
+	bra.b		_L4_6s
+_L4_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L4_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L4_6s
+_L4_5s:
+	bsr.l		setoxm1d			# operand is a DENORM
+_L4_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fetoxm1d_
+_fetoxm1d_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L4_2d
+	bsr.l		setoxm1			# operand is a NORM
+	bra.b		_L4_6d
+_L4_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L4_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L4_6d
+_L4_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L4_4d			# no
+	bsr.l		setoxm1i			# yes
+	bra.b		_L4_6d
+_L4_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L4_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L4_6d
+_L4_5d:
+	bsr.l		setoxm1d			# operand is a DENORM
+_L4_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fetoxm1x_
+_fetoxm1x_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L4_2x
+	bsr.l		setoxm1			# operand is a NORM
+	bra.b		_L4_6x
+_L4_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L4_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L4_6x
+_L4_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L4_4x			# no
+	bsr.l		setoxm1i			# yes
+	bra.b		_L4_6x
+_L4_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L4_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L4_6x
+_L4_5x:
+	bsr.l		setoxm1d			# operand is a DENORM
+_L4_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_ftanhs_
+_ftanhs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L5_2s
+	bsr.l		stanh			# operand is a NORM
+	bra.b		_L5_6s
+_L5_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L5_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L5_6s
+_L5_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L5_4s			# no
+	bsr.l		src_one			# yes
+	bra.b		_L5_6s
+_L5_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L5_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L5_6s
+_L5_5s:
+	bsr.l		stanhd			# operand is a DENORM
+_L5_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftanhd_
+_ftanhd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L5_2d
+	bsr.l		stanh			# operand is a NORM
+	bra.b		_L5_6d
+_L5_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L5_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L5_6d
+_L5_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L5_4d			# no
+	bsr.l		src_one			# yes
+	bra.b		_L5_6d
+_L5_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L5_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L5_6d
+_L5_5d:
+	bsr.l		stanhd			# operand is a DENORM
+_L5_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftanhx_
+_ftanhx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L5_2x
+	bsr.l		stanh			# operand is a NORM
+	bra.b		_L5_6x
+_L5_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L5_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L5_6x
+_L5_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L5_4x			# no
+	bsr.l		src_one			# yes
+	bra.b		_L5_6x
+_L5_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L5_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L5_6x
+_L5_5x:
+	bsr.l		stanhd			# operand is a DENORM
+_L5_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
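+#
+#	The arctangent entries (_fatan*_) send INF operands to spi_2
+#	(atan(+/-INF) = +/-pi/2) and ZERO operands to src_zero.
+#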
+	global		_fatans_
+_fatans_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L6_2s
+	bsr.l		satan			# operand is a NORM
+	bra.b		_L6_6s
+_L6_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L6_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L6_6s
+_L6_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L6_4s			# no
+	bsr.l		spi_2			# yes
+	bra.b		_L6_6s
+_L6_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L6_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L6_6s
+_L6_5s:
+	bsr.l		satand			# operand is a DENORM
+_L6_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fatand_
+_fatand_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L6_2d
+	bsr.l		satan			# operand is a NORM
+	bra.b		_L6_6d
+_L6_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L6_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L6_6d
+_L6_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L6_4d			# no
+	bsr.l		spi_2			# yes
+	bra.b		_L6_6d
+_L6_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L6_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L6_6d
+_L6_5d:
+	bsr.l		satand			# operand is a DENORM
+_L6_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fatanx_
+_fatanx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L6_2x
+	bsr.l		satan			# operand is a NORM
+	bra.b		_L6_6x
+_L6_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L6_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L6_6x
+_L6_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L6_4x			# no
+	bsr.l		spi_2			# yes
+	bra.b		_L6_6x
+_L6_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L6_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L6_6x
+_L6_5x:
+	bsr.l		satand			# operand is a DENORM
+_L6_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fasins_
+_fasins_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L7_2s
+	bsr.l		sasin			# operand is a NORM
+	bra.b		_L7_6s
+_L7_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L7_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L7_6s
+_L7_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L7_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L7_6s
+_L7_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L7_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L7_6s
+_L7_5s:
+	bsr.l		sasind			# operand is a DENORM
+_L7_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fasind_
+_fasind_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L7_2d
+	bsr.l		sasin			# operand is a NORM
+	bra.b		_L7_6d
+_L7_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L7_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L7_6d
+_L7_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L7_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L7_6d
+_L7_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L7_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L7_6d
+_L7_5d:
+	bsr.l		sasind			# operand is a DENORM
+_L7_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fasinx_
+_fasinx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L7_2x
+	bsr.l		sasin			# operand is a NORM
+	bra.b		_L7_6x
+_L7_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L7_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L7_6x
+_L7_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L7_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L7_6x
+_L7_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L7_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L7_6x
+_L7_5x:
+	bsr.l		sasind			# operand is a DENORM
+_L7_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fatanhs_
+_fatanhs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L8_2s
+	bsr.l		satanh			# operand is a NORM
+	bra.b		_L8_6s
+_L8_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L8_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L8_6s
+_L8_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L8_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L8_6s
+_L8_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L8_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L8_6s
+_L8_5s:
+	bsr.l		satanhd			# operand is a DENORM
+_L8_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fatanhd_
+_fatanhd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L8_2d
+	bsr.l		satanh			# operand is a NORM
+	bra.b		_L8_6d
+_L8_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L8_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L8_6d
+_L8_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L8_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L8_6d
+_L8_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L8_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L8_6d
+_L8_5d:
+	bsr.l		satanhd			# operand is a DENORM
+_L8_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fatanhx_
+_fatanhx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L8_2x
+	bsr.l		satanh			# operand is a NORM
+	bra.b		_L8_6x
+_L8_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L8_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L8_6x
+_L8_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L8_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L8_6x
+_L8_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L8_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L8_6x
+_L8_5x:
+	bsr.l		satanhd			# operand is a DENORM
+_L8_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_ftans_
+_ftans_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L9_2s
+	bsr.l		stan			# operand is a NORM
+	bra.b		_L9_6s
+_L9_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L9_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L9_6s
+_L9_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L9_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L9_6s
+_L9_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L9_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L9_6s
+_L9_5s:
+	bsr.l		stand			# operand is a DENORM
+_L9_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftand_
+_ftand_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L9_2d
+	bsr.l		stan			# operand is a NORM
+	bra.b		_L9_6d
+_L9_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L9_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L9_6d
+_L9_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L9_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L9_6d
+_L9_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L9_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L9_6d
+_L9_5d:
+	bsr.l		stand			# operand is a DENORM
+_L9_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftanx_
+_ftanx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L9_2x
+	bsr.l		stan			# operand is a NORM
+	bra.b		_L9_6x
+_L9_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L9_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L9_6x
+_L9_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L9_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L9_6x
+_L9_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L9_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L9_6x
+_L9_5x:
+	bsr.l		stand			# operand is a DENORM
+_L9_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
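+#
+#	The exponential entries (_fetox*_ here, _ftwotox*_ and _ftentox*_
+#	below) send ZERO operands to ld_pone (the result for a zero
+#	argument is +1.0) and INF operands to szr_inf instead of the
+#	generic src_zero/src_inf handlers.
+#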
+	global		_fetoxs_
+_fetoxs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L10_2s
+	bsr.l		setox			# operand is a NORM
+	bra.b		_L10_6s
+_L10_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L10_3s			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L10_6s
+_L10_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L10_4s			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L10_6s
+_L10_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L10_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L10_6s
+_L10_5s:
+	bsr.l		setoxd			# operand is a DENORM
+_L10_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fetoxd_
+_fetoxd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L10_2d
+	bsr.l		setox			# operand is a NORM
+	bra.b		_L10_6d
+_L10_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L10_3d			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L10_6d
+_L10_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L10_4d			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L10_6d
+_L10_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L10_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L10_6d
+_L10_5d:
+	bsr.l		setoxd			# operand is a DENORM
+_L10_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fetoxx_
+_fetoxx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L10_2x
+	bsr.l		setox			# operand is a NORM
+	bra.b		_L10_6x
+_L10_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L10_3x			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L10_6x
+_L10_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L10_4x			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L10_6x
+_L10_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L10_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L10_6x
+_L10_5x:
+	bsr.l		setoxd			# operand is a DENORM
+_L10_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_ftwotoxs_
+_ftwotoxs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L11_2s
+	bsr.l		stwotox			# operand is a NORM
+	bra.b		_L11_6s
+_L11_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L11_3s			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L11_6s
+_L11_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L11_4s			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L11_6s
+_L11_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L11_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L11_6s
+_L11_5s:
+	bsr.l		stwotoxd			# operand is a DENORM
+_L11_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftwotoxd_
+_ftwotoxd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L11_2d
+	bsr.l		stwotox			# operand is a NORM
+	bra.b		_L11_6d
+_L11_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L11_3d			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L11_6d
+_L11_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L11_4d			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L11_6d
+_L11_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L11_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L11_6d
+_L11_5d:
+	bsr.l		stwotoxd			# operand is a DENORM
+_L11_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftwotoxx_
+_ftwotoxx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L11_2x
+	bsr.l		stwotox			# operand is a NORM
+	bra.b		_L11_6x
+_L11_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L11_3x			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L11_6x
+_L11_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L11_4x			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L11_6x
+_L11_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L11_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L11_6x
+_L11_5x:
+	bsr.l		stwotoxd			# operand is a DENORM
+_L11_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_ftentoxs_
+_ftentoxs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L12_2s
+	bsr.l		stentox			# operand is a NORM
+	bra.b		_L12_6s
+_L12_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L12_3s			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L12_6s
+_L12_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L12_4s			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L12_6s
+_L12_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L12_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L12_6s
+_L12_5s:
+	bsr.l		stentoxd			# operand is a DENORM
+_L12_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftentoxd_
+_ftentoxd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L12_2d
+	bsr.l		stentox			# operand is a NORM
+	bra.b		_L12_6d
+_L12_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L12_3d			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L12_6d
+_L12_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L12_4d			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L12_6d
+_L12_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L12_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L12_6d
+_L12_5d:
+	bsr.l		stentoxd			# operand is a DENORM
+_L12_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftentoxx_
+_ftentoxx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L12_2x
+	bsr.l		stentox			# operand is a NORM
+	bra.b		_L12_6x
+_L12_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L12_3x			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L12_6x
+_L12_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L12_4x			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L12_6x
+_L12_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L12_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L12_6x
+_L12_5x:
+	bsr.l		stentoxd			# operand is a DENORM
+_L12_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
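+#
+#	The logarithm entries (_flogn*_, _flog10*_, _flog2*_) send ZERO
+#	operands to t_dz2 (a log of zero raises divide-by-zero) and INF
+#	operands to sopr_inf.
+#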
+	global		_flogns_
+_flogns_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L13_2s
+	bsr.l		slogn			# operand is a NORM
+	bra.b		_L13_6s
+_L13_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L13_3s			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L13_6s
+_L13_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L13_4s			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L13_6s
+_L13_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L13_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L13_6s
+_L13_5s:
+	bsr.l		slognd			# operand is a DENORM
+_L13_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flognd_
+_flognd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L13_2d
+	bsr.l		slogn			# operand is a NORM
+	bra.b		_L13_6d
+_L13_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L13_3d			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L13_6d
+_L13_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L13_4d			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L13_6d
+_L13_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L13_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L13_6d
+_L13_5d:
+	bsr.l		slognd			# operand is a DENORM
+_L13_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flognx_
+_flognx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L13_2x
+	bsr.l		slogn			# operand is a NORM
+	bra.b		_L13_6x
+_L13_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L13_3x			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L13_6x
+_L13_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L13_4x			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L13_6x
+_L13_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L13_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L13_6x
+_L13_5x:
+	bsr.l		slognd			# operand is a DENORM
+_L13_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_flog10s_
+_flog10s_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L14_2s
+	bsr.l		slog10			# operand is a NORM
+	bra.b		_L14_6s
+_L14_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L14_3s			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L14_6s
+_L14_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L14_4s			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L14_6s
+_L14_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L14_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L14_6s
+_L14_5s:
+	bsr.l		slog10d			# operand is a DENORM
+_L14_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flog10d_
+_flog10d_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L14_2d
+	bsr.l		slog10			# operand is a NORM
+	bra.b		_L14_6d
+_L14_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L14_3d			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L14_6d
+_L14_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L14_4d			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L14_6d
+_L14_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L14_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L14_6d
+_L14_5d:
+	bsr.l		slog10d			# operand is a DENORM
+_L14_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flog10x_
+_flog10x_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L14_2x
+	bsr.l		slog10			# operand is a NORM
+	bra.b		_L14_6x
+_L14_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L14_3x			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L14_6x
+_L14_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L14_4x			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L14_6x
+_L14_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L14_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L14_6x
+_L14_5x:
+	bsr.l		slog10d			# operand is a DENORM
+_L14_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_flog2s_
+_flog2s_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L15_2s
+	bsr.l		slog2			# operand is a NORM
+	bra.b		_L15_6s
+_L15_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L15_3s			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L15_6s
+_L15_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L15_4s			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L15_6s
+_L15_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L15_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L15_6s
+_L15_5s:
+	bsr.l		slog2d			# operand is a DENORM
+_L15_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flog2d_
+_flog2d_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L15_2d
+	bsr.l		slog2			# operand is a NORM
+	bra.b		_L15_6d
+_L15_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L15_3d			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L15_6d
+_L15_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L15_4d			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L15_6d
+_L15_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L15_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L15_6d
+_L15_5d:
+	bsr.l		slog2d			# operand is a DENORM
+_L15_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flog2x_
+_flog2x_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L15_2x
+	bsr.l		slog2			# operand is a NORM
+	bra.b		_L15_6x
+_L15_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L15_3x			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L15_6x
+_L15_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L15_4x			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L15_6x
+_L15_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L15_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L15_6x
+_L15_5x:
+	bsr.l		slog2d			# operand is a DENORM
+_L15_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fcoshs_
+_fcoshs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L16_2s
+	bsr.l		scosh			# operand is a NORM
+	bra.b		_L16_6s
+_L16_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L16_3s			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L16_6s
+_L16_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L16_4s			# no
+	bsr.l		ld_pinf			# yes
+	bra.b		_L16_6s
+_L16_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L16_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L16_6s
+_L16_5s:
+	bsr.l		scoshd			# operand is a DENORM
+_L16_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fcoshd_
+_fcoshd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L16_2d
+	bsr.l		scosh			# operand is a NORM
+	bra.b		_L16_6d
+_L16_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L16_3d			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L16_6d
+_L16_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L16_4d			# no
+	bsr.l		ld_pinf			# yes
+	bra.b		_L16_6d
+_L16_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L16_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L16_6d
+_L16_5d:
+	bsr.l		scoshd			# operand is a DENORM
+_L16_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fcoshx_
+_fcoshx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L16_2x
+	bsr.l		scosh			# operand is a NORM
+	bra.b		_L16_6x
+_L16_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L16_3x			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L16_6x
+_L16_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L16_4x			# no
+	bsr.l		ld_pinf			# yes
+	bra.b		_L16_6x
+_L16_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L16_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L16_6x
+_L16_5x:
+	bsr.l		scoshd			# operand is a DENORM
+_L16_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
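+#
+#	The arccosine entries (_facos*_) send ZERO operands to ld_ppi2
+#	(acos(0) = +pi/2) and INF operands to t_operr, since acos of an
+#	infinity is an operand error.
+#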
+	global		_facoss_
+_facoss_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L17_2s
+	bsr.l		sacos			# operand is a NORM
+	bra.b		_L17_6s
+_L17_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L17_3s			# no
+	bsr.l		ld_ppi2			# yes
+	bra.b		_L17_6s
+_L17_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L17_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L17_6s
+_L17_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L17_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L17_6s
+_L17_5s:
+	bsr.l		sacosd			# operand is a DENORM
+_L17_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_facosd_
+_facosd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L17_2d
+	bsr.l		sacos			# operand is a NORM
+	bra.b		_L17_6d
+_L17_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L17_3d			# no
+	bsr.l		ld_ppi2			# yes
+	bra.b		_L17_6d
+_L17_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L17_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L17_6d
+_L17_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L17_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L17_6d
+_L17_5d:
+	bsr.l		sacosd			# operand is a DENORM
+_L17_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_facosx_
+_facosx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L17_2x
+	bsr.l		sacos			# operand is a NORM
+	bra.b		_L17_6x
+_L17_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L17_3x			# no
+	bsr.l		ld_ppi2			# yes
+	bra.b		_L17_6x
+_L17_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L17_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L17_6x
+_L17_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L17_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L17_6x
+_L17_5x:
+	bsr.l		sacosd			# operand is a DENORM
+_L17_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fgetexps_
+_fgetexps_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L18_2s
+	bsr.l		sgetexp			# operand is a NORM
+	bra.b		_L18_6s
+_L18_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L18_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L18_6s
+_L18_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L18_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L18_6s
+_L18_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L18_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L18_6s
+_L18_5s:
+	bsr.l		sgetexpd			# operand is a DENORM
+_L18_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fgetexpd_
+_fgetexpd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L18_2d
+	bsr.l		sgetexp			# operand is a NORM
+	bra.b		_L18_6d
+_L18_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L18_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L18_6d
+_L18_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L18_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L18_6d
+_L18_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L18_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L18_6d
+_L18_5d:
+	bsr.l		sgetexpd			# operand is a DENORM
+_L18_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fgetexpx_
+_fgetexpx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L18_2x
+	bsr.l		sgetexp			# operand is a NORM
+	bra.b		_L18_6x
+_L18_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L18_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L18_6x
+_L18_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L18_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L18_6x
+_L18_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L18_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L18_6x
+_L18_5x:
+	bsr.l		sgetexpd			# operand is a DENORM
+_L18_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fgetmans_
+_fgetmans_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L19_2s
+	bsr.l		sgetman			# operand is a NORM
+	bra.b		_L19_6s
+_L19_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L19_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L19_6s
+_L19_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L19_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L19_6s
+_L19_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L19_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L19_6s
+_L19_5s:
+	bsr.l		sgetmand			# operand is a DENORM
+_L19_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fgetmand_
+_fgetmand_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L19_2d
+	bsr.l		sgetman			# operand is a NORM
+	bra.b		_L19_6d
+_L19_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L19_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L19_6d
+_L19_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L19_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L19_6d
+_L19_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L19_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L19_6d
+_L19_5d:
+	bsr.l		sgetmand			# operand is a DENORM
+_L19_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fgetmanx_
+_fgetmanx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L19_2x
+	bsr.l		sgetman			# operand is a NORM
+	bra.b		_L19_6x
+_L19_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L19_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L19_6x
+_L19_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L19_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L19_6x
+_L19_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L19_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L19_6x
+_L19_5x:
+	bsr.l		sgetmand			# operand is a DENORM
+_L19_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fsincoss_
+_fsincoss_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L20_2s
+	bsr.l		ssincos			# operand is a NORM
+	bra.b		_L20_6s
+_L20_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L20_3s			# no
+	bsr.l		ssincosz			# yes
+	bra.b		_L20_6s
+_L20_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L20_4s			# no
+	bsr.l		ssincosi			# yes
+	bra.b		_L20_6s
+_L20_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L20_5s			# no
+	bsr.l		ssincosqnan			# yes
+	bra.b		_L20_6s
+_L20_5s:
+	bsr.l		ssincosd			# operand is a DENORM
+_L20_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		&0x03,-(%sp)		# store off fp0/fp1
+	fmovm.x		(%sp)+,&0x40		# fp0 now in fp1
+	fmovm.x		(%sp)+,&0x80		# fp1 now in fp0
+	unlk		%a6
+	rts
+
+	global		_fsincosd_
+_fsincosd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L20_2d
+	bsr.l		ssincos			# operand is a NORM
+	bra.b		_L20_6d
+_L20_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L20_3d			# no
+	bsr.l		ssincosz			# yes
+	bra.b		_L20_6d
+_L20_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L20_4d			# no
+	bsr.l		ssincosi			# yes
+	bra.b		_L20_6d
+_L20_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L20_5d			# no
+	bsr.l		ssincosqnan			# yes
+	bra.b		_L20_6d
+_L20_5d:
+	bsr.l		ssincosd			# operand is a DENORM
+_L20_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		&0x03,-(%sp)		# store off fp0/fp1
+	fmovm.x		(%sp)+,&0x40		# fp0 now in fp1
+	fmovm.x		(%sp)+,&0x80		# fp1 now in fp0
+	unlk		%a6
+	rts
+
+	global		_fsincosx_
+_fsincosx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L20_2x
+	bsr.l		ssincos			# operand is a NORM
+	bra.b		_L20_6x
+_L20_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L20_3x			# no
+	bsr.l		ssincosz			# yes
+	bra.b		_L20_6x
+_L20_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L20_4x			# no
+	bsr.l		ssincosi			# yes
+	bra.b		_L20_6x
+_L20_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L20_5x			# no
+	bsr.l		ssincosqnan			# yes
+	bra.b		_L20_6x
+_L20_5x:
+	bsr.l		ssincosd			# operand is a DENORM
+_L20_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		&0x03,-(%sp)		# store off fp0/fp1
+	fmovm.x		(%sp)+,&0x40		# fp0 now in fp1
+	fmovm.x		(%sp)+,&0x80		# fp1 now in fp0
+	unlk		%a6
+	rts
+
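+#
+#	Note that the fsincos epilogues above return two values, so instead
+#	of simply restoring fp1 they exchange fp0 and fp1 through the stack:
+#	both registers are spilled, then the saved fp0 image is reloaded
+#	into fp1 and the saved fp1 image into fp0 (per the comments above).
+#	A minimal C sketch of that spill-and-reload exchange, with
+#	illustrative names only:
+#
+#	static void swap_via_spill(long double *fp0, long double *fp1)
+#	{
+#		long double spill[2] = { *fp0, *fp1 };	/* "store off fp0/fp1" */
+#		*fp1 = spill[0];			/* "fp0 now in fp1"    */
+#		*fp0 = spill[1];			/* "fp1 now in fp0"    */
+#	}
+#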
+
+#########################################################################
+# DYADIC TEMPLATE							#
+#########################################################################
+	global		_frems_
+_frems_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl dst
+	fmov.x		%fp0,FP_DST(%a6)
+	lea		FP_DST(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	fmov.s		0xc(%a6),%fp0		# load sgl src
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L21_2s
+	bsr.l		srem_snorm			# operand is a NORM
+	bra.b		_L21_6s
+_L21_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L21_3s			# no
+	bsr.l		srem_szero			# yes
+	bra.b		_L21_6s
+_L21_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L21_4s			# no
+	bsr.l		srem_sinf			# yes
+	bra.b		_L21_6s
+_L21_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L21_5s			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L21_6s
+_L21_5s:
+	bsr.l		srem_sdnrm			# operand is a DENORM
+_L21_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fremd_
+_fremd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl dst
+	fmov.x		%fp0,FP_DST(%a6)
+	lea		FP_DST(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	fmov.d		0x10(%a6),%fp0		# load dbl src
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L21_2d
+	bsr.l		srem_snorm			# operand is a NORM
+	bra.b		_L21_6d
+_L21_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L21_3d			# no
+	bsr.l		srem_szero			# yes
+	bra.b		_L21_6d
+_L21_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L21_4d			# no
+	bsr.l		srem_sinf			# yes
+	bra.b		_L21_6d
+_L21_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L21_5d			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L21_6d
+_L21_5d:
+	bsr.l		srem_sdnrm			# operand is a DENORM
+_L21_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fremx_
+_fremx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_DST(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext dst
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x14+0x0(%a6),0x0(%a0)	# load ext src
+	mov.l		0x14+0x4(%a6),0x4(%a0)
+	mov.l		0x14+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L21_2x
+	bsr.l		srem_snorm			# operand is a NORM
+	bra.b		_L21_6x
+_L21_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L21_3x			# no
+	bsr.l		srem_szero			# yes
+	bra.b		_L21_6x
+_L21_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L21_4x			# no
+	bsr.l		srem_sinf			# yes
+	bra.b		_L21_6x
+_L21_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L21_5x			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L21_6x
+_L21_5x:
+	bsr.l		srem_sdnrm			# operand is a DENORM
+_L21_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
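+#
+#	The dyadic templates (frem/fmod/fscale) extend the monadic pattern:
+#	both operands are converted into FP_DST and FP_SRC and tagged into
+#	DTAG/STAG, and the five-way branch in each template is keyed on the
+#	source tag (the handlers inspect DTAG themselves).  A rough C sketch
+#	of the frem dispatch; the names are placeholders, remainderl()
+#	stands in for srem_snorm/srem_sdnrm, and the per-case returns only
+#	summarize the usual IEEE outcomes, not every sub-case those
+#	handlers cover:
+#
+#	#include <math.h>
+#
+#	enum optype { OP_NORM, OP_ZERO, OP_INF, OP_QNAN, OP_DENORM };
+#
+#	static long double rem_dispatch(long double dst, long double src,
+#					enum optype src_tag)
+#	{
+#		switch (src_tag) {
+#		case OP_ZERO: return nanl("");	/* srem_szero: x rem 0 -> operr */
+#		case OP_INF:  return dst;	/* srem_sinf: x rem +-inf = x   */
+#		case OP_QNAN: return src;	/* sop_sqnan: propagate the NaN */
+#		default:      return remainderl(dst, src);
+#		}
+#	}
+#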
+
+#########################################################################
+# DYADIC TEMPLATE							#
+#########################################################################
+	global		_fmods_
+_fmods_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl dst
+	fmov.x		%fp0,FP_DST(%a6)
+	lea		FP_DST(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	fmov.s		0xc(%a6),%fp0		# load sgl src
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L22_2s
+	bsr.l		smod_snorm			# operand is a NORM
+	bra.b		_L22_6s
+_L22_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L22_3s			# no
+	bsr.l		smod_szero			# yes
+	bra.b		_L22_6s
+_L22_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L22_4s			# no
+	bsr.l		smod_sinf			# yes
+	bra.b		_L22_6s
+_L22_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L22_5s			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L22_6s
+_L22_5s:
+	bsr.l		smod_sdnrm			# operand is a DENORM
+_L22_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fmodd_
+_fmodd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl dst
+	fmov.x		%fp0,FP_DST(%a6)
+	lea		FP_DST(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	fmov.d		0x10(%a6),%fp0		# load dbl src
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L22_2d
+	bsr.l		smod_snorm			# operand is a NORM
+	bra.b		_L22_6d
+_L22_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L22_3d			# no
+	bsr.l		smod_szero			# yes
+	bra.b		_L22_6d
+_L22_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L22_4d			# no
+	bsr.l		smod_sinf			# yes
+	bra.b		_L22_6d
+_L22_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L22_5d			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L22_6d
+_L22_5d:
+	bsr.l		smod_sdnrm			# operand is a DENORM
+_L22_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fmodx_
+_fmodx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_DST(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext dst
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x14+0x0(%a6),0x0(%a0)	# load ext src
+	mov.l		0x14+0x4(%a6),0x4(%a0)
+	mov.l		0x14+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L22_2x
+	bsr.l		smod_snorm			# operand is a NORM
+	bra.b		_L22_6x
+_L22_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L22_3x			# no
+	bsr.l		smod_szero			# yes
+	bra.b		_L22_6x
+_L22_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L22_4x			# no
+	bsr.l		smod_sinf			# yes
+	bra.b		_L22_6x
+_L22_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L22_5x			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L22_6x
+_L22_5x:
+	bsr.l		smod_sdnrm			# operand is a DENORM
+_L22_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# DYADIC TEMPLATE							#
+#########################################################################
+	global		_fscales_
+_fscales_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl dst
+	fmov.x		%fp0,FP_DST(%a6)
+	lea		FP_DST(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	fmov.s		0xc(%a6),%fp0		# load sgl src
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L23_2s
+	bsr.l		sscale_snorm			# operand is a NORM
+	bra.b		_L23_6s
+_L23_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L23_3s			# no
+	bsr.l		sscale_szero			# yes
+	bra.b		_L23_6s
+_L23_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L23_4s			# no
+	bsr.l		sscale_sinf			# yes
+	bra.b		_L23_6s
+_L23_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L23_5s			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L23_6s
+_L23_5s:
+	bsr.l		sscale_sdnrm			# operand is a DENORM
+_L23_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fscaled_
+_fscaled_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl dst
+	fmov.x		%fp0,FP_DST(%a6)
+	lea		FP_DST(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	fmov.d		0x10(%a6),%fp0		# load dbl src
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L23_2d
+	bsr.l		sscale_snorm			# operand is a NORM
+	bra.b		_L23_6d
+_L23_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L23_3d			# no
+	bsr.l		sscale_szero			# yes
+	bra.b		_L23_6d
+_L23_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L23_4d			# no
+	bsr.l		sscale_sinf			# yes
+	bra.b		_L23_6d
+_L23_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L23_5d			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L23_6d
+_L23_5d:
+	bsr.l		sscale_sdnrm			# operand is a DENORM
+_L23_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fscalex_
+_fscalex_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_DST(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext dst
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x14+0x0(%a6),0x0(%a0)	# load ext src
+	mov.l		0x14+0x4(%a6),0x4(%a0)
+	mov.l		0x14+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L23_2x
+	bsr.l		sscale_snorm			# operand is a NORM
+	bra.b		_L23_6x
+_L23_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L23_3x			# no
+	bsr.l		sscale_szero			# yes
+	bra.b		_L23_6x
+_L23_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L23_4x			# no
+	bsr.l		sscale_sinf			# yes
+	bra.b		_L23_6x
+_L23_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L23_5x			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L23_6x
+_L23_5x:
+	bsr.l		sscale_sdnrm			# operand is a DENORM
+_L23_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# ssin():     computes the sine of a normalized input			#
+# ssind():    computes the sine of a denormalized input			#
+# scos():     computes the cosine of a normalized input			#
+# scosd():    computes the cosine of a denormalized input		#
+# ssincos():  computes the sine and cosine of a normalized input	#
+# ssincosd(): computes the sine and cosine of a denormalized input	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = sin(X) or cos(X)						#
+#									#
+#    For ssincos(X):							#
+#	fp0 = sin(X)							#
+#	fp1 = cos(X)							#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 1 ulp in 64 significant bits, i.e.	#
+#	within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	SIN and COS:							#
+#	1. If SIN is invoked, set AdjN := 0; otherwise, set AdjN := 1.	#
+#									#
+#	2. If |X| >= 15Pi or |X| < 2**(-40), go to 7.			#
+#									#
+#	3. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let	#
+#		k = N mod 4, so in particular, k = 0,1,2,or 3.		#
+#		Overwrite k by k := k + AdjN.				#
+#									#
+#	4. If k is even, go to 6.					#
+#									#
+#	5. (k is odd) Set j := (k-1)/2, sgn := (-1)**j.			#
+#		Return sgn*cos(r) where cos(r) is approximated by an	#
+#		even polynomial in r, 1 + r*r*(B1+s*(B2+ ... + s*B8)),	#
+#		s = r*r.						#
+#		Exit.							#
+#									#
+#	6. (k is even) Set j := k/2, sgn := (-1)**j. Return sgn*sin(r)	#
+#		where sin(r) is approximated by an odd polynomial in r	#
+#		r + r*s*(A1+s*(A2+ ... + s*A7)),	s = r*r.	#
+#		Exit.							#
+#									#
+#	7. If |X| > 1, go to 9.						#
+#									#
+#	8. (|X|<2**(-40)) If SIN is invoked, return X;			#
+#		otherwise return 1.					#
+#									#
+#	9. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi,		#
+#		go back to 3.						#
+#									#
+#	SINCOS:								#
+#	1. If |X| >= 15Pi or |X| < 2**(-40), go to 6.			#
+#									#
+#	2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let	#
+#		k = N mod 4, so in particular, k = 0,1,2,or 3.		#
+#									#
+#	3. If k is even, go to 5.					#
+#									#
+#	4. (k is odd) Set j1 := (k-1)/2, j2 := j1 (EOR) (k mod 2), i.e.	#
+#		j1 exclusive or with the l.s.b. of k.			#
+#		sgn1 := (-1)**j1, sgn2 := (-1)**j2.			#
+#		SIN(X) = sgn1 * cos(r) and COS(X) = sgn2*sin(r) where	#
+#		sin(r) and cos(r) are computed as odd and even		#
+#		polynomials in r, respectively. Exit			#
+#									#
+#	5. (k is even) Set j1 := k/2, sgn1 := (-1)**j1.			#
+#		SIN(X) = sgn1 * sin(r) and COS(X) = sgn1*cos(r) where	#
+#		sin(r) and cos(r) are computed as odd and even		#
+#		polynomials in r, respectively. Exit			#
+#									#
+#	6. If |X| > 1, go to 8.						#
+#									#
+#	7. (|X|<2**(-40)) SIN(X) = X and COS(X) = 1. Exit.		#
+#									#
+#	8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi,		#
+#		go back to 2.						#
+#									#
+#########################################################################
+
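+#
+#	Purely as an illustration of the selection logic in steps 3-6 above,
+#	the following C sketch uses the library nearbyint()/sin()/cos() in
+#	place of the two-piece N*PI/2 table and the SINA/COSB polynomials,
+#	and assumes a two's-complement int cast for the mod-4 step; nothing
+#	below is taken from this file:
+#
+#	#include <math.h>
+#
+#	static double sin_or_cos(double x, int adjn)	/* 0 = sin, 1 = cos */
+#	{
+#		static const double TWO_BY_PI = 0.636619772367581343;
+#		static const double PI_BY_2   = 1.57079632679489661923;
+#
+#		double n = nearbyint(x * TWO_BY_PI);	/* N                    */
+#		double r = x - n * PI_BY_2;		/* r = X - N*(pi/2)     */
+#		int    k = ((int)n + adjn) & 3;		/* k = (N + AdjN) mod 4 */
+#		double sgn = (k & 2) ? -1.0 : 1.0;	/* sgn = (-1)**j        */
+#
+#		return (k & 1) ? sgn * cos(r)		/* k odd:  sgn*cos(r)   */
+#			       : sgn * sin(r);		/* k even: sgn*sin(r)   */
+#	}
+#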
+SINA7:	long		0xBD6AAA77,0xCCC994F5
+SINA6:	long		0x3DE61209,0x7AAE8DA1
+SINA5:	long		0xBE5AE645,0x2A118AE4
+SINA4:	long		0x3EC71DE3,0xA5341531
+SINA3:	long		0xBF2A01A0,0x1A018B59,0x00000000,0x00000000
+SINA2:	long		0x3FF80000,0x88888888,0x888859AF,0x00000000
+SINA1:	long		0xBFFC0000,0xAAAAAAAA,0xAAAAAA99,0x00000000
+
+COSB8:	long		0x3D2AC4D0,0xD6011EE3
+COSB7:	long		0xBDA9396F,0x9F45AC19
+COSB6:	long		0x3E21EED9,0x0612C972
+COSB5:	long		0xBE927E4F,0xB79D9FCF
+COSB4:	long		0x3EFA01A0,0x1A01D423,0x00000000,0x00000000
+COSB3:	long		0xBFF50000,0xB60B60B6,0x0B61D438,0x00000000
+COSB2:	long		0x3FFA0000,0xAAAAAAAA,0xAAAAAB5E
+COSB1:	long		0xBF000000
+
+	set		INARG,FP_SCR0
+
+	set		X,FP_SCR0
+#	set		XDCARE,X+2
+	set		XFRAC,X+4
+
+	set		RPRIME,FP_SCR0
+	set		SPRIME,FP_SCR1
+
+	set		POSNEG1,L_SCR1
+	set		TWOTO63,L_SCR1
+
+	set		ENDFLAG,L_SCR2
+	set		INT,L_SCR2
+
+	set		ADJN,L_SCR3
+
+############################################
+	global		ssin
+ssin:
+	mov.l		&0,ADJN(%a6)		# yes; SET ADJN TO 0
+	bra.b		SINBGN
+
+############################################
+	global		scos
+scos:
+	mov.l		&1,ADJN(%a6)		# yes; SET ADJN TO 1
+
+############################################
+SINBGN:
+#--SAVE FPCR, FP1. CHECK IF |X| IS TOO SMALL OR LARGE
+
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	fmov.x		%fp0,X(%a6)		# save input at X
+
+# "COMPACTIFY" X
+	mov.l		(%a0),%d1		# put exp in hi word
+	mov.w		4(%a0),%d1		# fetch hi(man)
+	and.l		&0x7FFFFFFF,%d1		# strip sign
+
+	cmpi.l		%d1,&0x3FD78000		# is |X| >= 2**(-40)?
+	bge.b		SOK1			# yes; |X| is not tiny
+	bra.w		SINSM			# no; input is very small
+
+SOK1:
+	cmp.l		%d1,&0x4004BC7E		# is |X| < 15 PI?
+	blt.b		SINMAIN			# yes; use table reduction
+	bra.w		SREDUCEX		# no; input is very large
+
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+SINMAIN:
+	fmov.x		%fp0,%fp1
+	fmul.d		TWOBYPI(%pc),%fp1	# X*2/PI
+
+	lea		PITBL+0x200(%pc),%a1	# TABLE OF N*PI/2, N = -32,...,32
+
+	fmov.l		%fp1,INT(%a6)		# CONVERT TO INTEGER
+
+	mov.l		INT(%a6),%d1		# make a copy of N
+	asl.l		&4,%d1			# N *= 16
+	add.l		%d1,%a1			# tbl_addr = a1 + (N*16)
+
+# A1 IS THE ADDRESS OF N*PIBY2
+# ...WHICH IS IN TWO PIECES Y1 & Y2
+	fsub.x		(%a1)+,%fp0		# X-Y1
+	fsub.s		(%a1),%fp0		# fp0 = R = (X-Y1)-Y2
+
+SINCONT:
+#--continuation from SREDUCEX
+
+#--GET N+ADJN AND SEE IF SIN(R) OR COS(R) IS NEEDED
+	mov.l		INT(%a6),%d1
+	add.l		ADJN(%a6),%d1		# SEE IF D0 IS ODD OR EVEN
+	ror.l		&1,%d1			# D0 WAS ODD IFF D0 IS NEGATIVE
+	cmp.l		%d1,&0
+	blt.w		COSPOLY
+
+#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
+#--THEN WE RETURN	SGN*SIN(R). SGN*SIN(R) IS COMPUTED BY
+#--R' + R'*S*(A1 + S(A2 + S(A3 + S(A4 + ... + SA7)))), WHERE
+#--R' = SGN*R, S=R*R. THIS CAN BE REWRITTEN AS
+#--R' + R'*S*( [A1+T(A3+T(A5+TA7))] + [S(A2+T(A4+TA6))])
+#--WHERE T=S*S.
+#--NOTE THAT A3 THROUGH A7 ARE STORED IN DOUBLE PRECISION
+#--WHILE A1 AND A2 ARE IN DOUBLE-EXTENDED FORMAT.
+SINPOLY:
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmov.x		%fp0,X(%a6)		# X IS R
+	fmul.x		%fp0,%fp0		# FP0 IS S
+
+	fmov.d		SINA7(%pc),%fp3
+	fmov.d		SINA6(%pc),%fp2
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS T
+
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+# ...LEAST SIG. BIT OF D0 IN SIGN POSITION
+	eor.l		%d1,X(%a6)		# X IS NOW R'= SGN*R
+
+	fmul.x		%fp1,%fp3		# TA7
+	fmul.x		%fp1,%fp2		# TA6
+
+	fadd.d		SINA5(%pc),%fp3		# A5+TA7
+	fadd.d		SINA4(%pc),%fp2		# A4+TA6
+
+	fmul.x		%fp1,%fp3		# T(A5+TA7)
+	fmul.x		%fp1,%fp2		# T(A4+TA6)
+
+	fadd.d		SINA3(%pc),%fp3		# A3+T(A5+TA7)
+	fadd.x		SINA2(%pc),%fp2		# A2+T(A4+TA6)
+
+	fmul.x		%fp3,%fp1		# T(A3+T(A5+TA7))
+
+	fmul.x		%fp0,%fp2		# S(A2+T(A4+TA6))
+	fadd.x		SINA1(%pc),%fp1		# A1+T(A3+T(A5+TA7))
+	fmul.x		X(%a6),%fp0		# R'*S
+
+	fadd.x		%fp2,%fp1		# [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]
+
+	fmul.x		%fp1,%fp0		# SIN(R')-R'
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fadd.x		X(%a6),%fp0		# last inst - possible exception set
+	bra		t_inx2
+
+#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
+#--THEN WE RETURN	SGN*COS(R). SGN*COS(R) IS COMPUTED BY
+#--SGN + S'*(B1 + S(B2 + S(B3 + S(B4 + ... + SB8)))), WHERE
+#--S=R*R AND S'=SGN*S. THIS CAN BE REWRITTEN AS
+#--SGN + S'*([B1+T(B3+T(B5+TB7))] + [S(B2+T(B4+T(B6+TB8)))])
+#--WHERE T=S*S.
+#--NOTE THAT B4 THROUGH B8 ARE STORED IN DOUBLE PRECISION
+#--WHILE B2 AND B3 ARE IN DOUBLE-EXTENDED FORMAT, B1 IS -1/2
+#--AND IS THEREFORE STORED AS SINGLE PRECISION.
+COSPOLY:
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmul.x		%fp0,%fp0		# FP0 IS S
+
+	fmov.d		COSB8(%pc),%fp2
+	fmov.d		COSB7(%pc),%fp3
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS T
+
+	fmov.x		%fp0,X(%a6)		# X IS S
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+# ...LEAST SIG. BIT OF D0 IN SIGN POSITION
+
+	fmul.x		%fp1,%fp2		# TB8
+
+	eor.l		%d1,X(%a6)		# X IS NOW S'= SGN*S
+	and.l		&0x80000000,%d1
+
+	fmul.x		%fp1,%fp3		# TB7
+
+	or.l		&0x3F800000,%d1		# D0 IS SGN IN SINGLE
+	mov.l		%d1,POSNEG1(%a6)
+
+	fadd.d		COSB6(%pc),%fp2		# B6+TB8
+	fadd.d		COSB5(%pc),%fp3		# B5+TB7
+
+	fmul.x		%fp1,%fp2		# T(B6+TB8)
+	fmul.x		%fp1,%fp3		# T(B5+TB7)
+
+	fadd.d		COSB4(%pc),%fp2		# B4+T(B6+TB8)
+	fadd.x		COSB3(%pc),%fp3		# B3+T(B5+TB7)
+
+	fmul.x		%fp1,%fp2		# T(B4+T(B6+TB8))
+	fmul.x		%fp3,%fp1		# T(B3+T(B5+TB7))
+
+	fadd.x		COSB2(%pc),%fp2		# B2+T(B4+T(B6+TB8))
+	fadd.s		COSB1(%pc),%fp1		# B1+T(B3+T(B5+TB7))
+
+	fmul.x		%fp2,%fp0		# S(B2+T(B4+T(B6+TB8)))
+
+	fadd.x		%fp1,%fp0
+
+	fmul.x		X(%a6),%fp0
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fadd.s		POSNEG1(%a6),%fp0	# last inst - possible exception set
+	bra		t_inx2
+
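+#
+#	COSPOLY above builds its final sign as a literal single-precision
+#	+-1.0: the argument's sign bit is OR'ed into 0x3F800000 (the bit
+#	pattern of 1.0 in single precision), stored in POSNEG1, and applied
+#	by the closing fadd.s.  A minimal C sketch of that bit trick
+#	(illustrative name only):
+#
+#	#include <stdint.h>
+#	#include <string.h>
+#
+#	static float signed_one(uint32_t sign_bit)	/* 0 or 0x80000000 */
+#	{
+#		uint32_t bits = sign_bit | 0x3F800000;	/* +-1.0f pattern   */
+#		float one;
+#		memcpy(&one, &bits, sizeof one);	/* reinterpret bits */
+#		return one;
+#	}
+#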
+##############################################
+
+# SINe: Big OR Small?
+#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+#--IF |X| < 2**(-40), RETURN X OR 1.
+SINBORS:
+	cmp.l		%d1,&0x3FFF8000
+	bgt.l		SREDUCEX
+
+SINSM:
+	mov.l		ADJN(%a6),%d1
+	cmp.l		%d1,&0
+	bgt.b		COSTINY
+
+# here, the operation may underflow iff the precision is sgl or dbl.
+# extended denorms are handled through another entry point.
+SINTINY:
+#	mov.w		&0x0000,XDCARE(%a6)	# JUST IN CASE
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0		# last inst - possible exception set
+	bra		t_catch
+
+COSTINY:
+	fmov.s		&0x3F800000,%fp0	# fp0 = 1.0
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fadd.s		&0x80800000,%fp0	# last inst - possible exception set
+	bra		t_pinx2
+
+################################################
+	global		ssind
+#--SIN(X) = X FOR DENORMALIZED X
+ssind:
+	bra		t_extdnrm
+
+############################################
+	global		scosd
+#--COS(X) = 1 FOR DENORMALIZED X
+scosd:
+	fmov.s		&0x3F800000,%fp0	# fp0 = 1.0
+	bra		t_pinx2
+
+##################################################
+
+	global		ssincos
+ssincos:
+#--SET ADJN TO 4
+	mov.l		&4,ADJN(%a6)
+
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	fmov.x		%fp0,X(%a6)
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1		# COMPACTIFY X
+
+	cmp.l		%d1,&0x3FD78000		# |X| >= 2**(-40)?
+	bge.b		SCOK1
+	bra.w		SCSM
+
+SCOK1:
+	cmp.l		%d1,&0x4004BC7E		# |X| < 15 PI?
+	blt.b		SCMAIN
+	bra.w		SREDUCEX
+
+
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+SCMAIN:
+	fmov.x		%fp0,%fp1
+
+	fmul.d		TWOBYPI(%pc),%fp1	# X*2/PI
+
+	lea		PITBL+0x200(%pc),%a1	# TABLE OF N*PI/2, N = -32,...,32
+
+	fmov.l		%fp1,INT(%a6)		# CONVERT TO INTEGER
+
+	mov.l		INT(%a6),%d1
+	asl.l		&4,%d1
+	add.l		%d1,%a1			# ADDRESS OF N*PIBY2, IN Y1, Y2
+
+	fsub.x		(%a1)+,%fp0		# X-Y1
+	fsub.s		(%a1),%fp0		# FP0 IS R = (X-Y1)-Y2
+
+SCCONT:
+#--continuation point from SREDUCEX
+
+	mov.l		INT(%a6),%d1
+	ror.l		&1,%d1
+	cmp.l		%d1,&0			# D0 < 0 IFF N IS ODD
+	bge.w		NEVEN
+
+SNODD:
+#--REGISTERS SAVED SO FAR: D0, A0, FP2.
+	fmovm.x		&0x04,-(%sp)		# save fp2
+
+	fmov.x		%fp0,RPRIME(%a6)
+	fmul.x		%fp0,%fp0		# FP0 IS S = R*R
+	fmov.d		SINA7(%pc),%fp1		# A7
+	fmov.d		COSB8(%pc),%fp2		# B8
+	fmul.x		%fp0,%fp1		# SA7
+	fmul.x		%fp0,%fp2		# SB8
+
+	mov.l		%d2,-(%sp)
+	mov.l		%d1,%d2
+	ror.l		&1,%d2
+	and.l		&0x80000000,%d2
+	eor.l		%d1,%d2
+	and.l		&0x80000000,%d2
+
+	fadd.d		SINA6(%pc),%fp1		# A6+SA7
+	fadd.d		COSB7(%pc),%fp2		# B7+SB8
+
+	fmul.x		%fp0,%fp1		# S(A6+SA7)
+	eor.l		%d2,RPRIME(%a6)
+	mov.l		(%sp)+,%d2
+	fmul.x		%fp0,%fp2		# S(B7+SB8)
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+	mov.l		&0x3F800000,POSNEG1(%a6)
+	eor.l		%d1,POSNEG1(%a6)
+
+	fadd.d		SINA5(%pc),%fp1		# A5+S(A6+SA7)
+	fadd.d		COSB6(%pc),%fp2		# B6+S(B7+SB8)
+
+	fmul.x		%fp0,%fp1		# S(A5+S(A6+SA7))
+	fmul.x		%fp0,%fp2		# S(B6+S(B7+SB8))
+	fmov.x		%fp0,SPRIME(%a6)
+
+	fadd.d		SINA4(%pc),%fp1		# A4+S(A5+S(A6+SA7))
+	eor.l		%d1,SPRIME(%a6)
+	fadd.d		COSB5(%pc),%fp2		# B5+S(B6+S(B7+SB8))
+
+	fmul.x		%fp0,%fp1		# S(A4+...)
+	fmul.x		%fp0,%fp2		# S(B5+...)
+
+	fadd.d		SINA3(%pc),%fp1		# A3+S(A4+...)
+	fadd.d		COSB4(%pc),%fp2		# B4+S(B5+...)
+
+	fmul.x		%fp0,%fp1		# S(A3+...)
+	fmul.x		%fp0,%fp2		# S(B4+...)
+
+	fadd.x		SINA2(%pc),%fp1		# A2+S(A3+...)
+	fadd.x		COSB3(%pc),%fp2		# B3+S(B4+...)
+
+	fmul.x		%fp0,%fp1		# S(A2+...)
+	fmul.x		%fp0,%fp2		# S(B3+...)
+
+	fadd.x		SINA1(%pc),%fp1		# A1+S(A2+...)
+	fadd.x		COSB2(%pc),%fp2		# B2+S(B3+...)
+
+	fmul.x		%fp0,%fp1		# S(A1+...)
+	fmul.x		%fp2,%fp0		# S(B2+...)
+
+	fmul.x		RPRIME(%a6),%fp1	# R'S(A1+...)
+	fadd.s		COSB1(%pc),%fp0		# B1+S(B2...)
+	fmul.x		SPRIME(%a6),%fp0	# S'(B1+S(B2+...))
+
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+
+	fmov.l		%d0,%fpcr
+	fadd.x		RPRIME(%a6),%fp1	# COS(X)
+	bsr		sto_cos			# store cosine result
+	fadd.s		POSNEG1(%a6),%fp0	# SIN(X)
+	bra		t_inx2
+
+NEVEN:
+#--REGISTERS SAVED SO FAR: FP2.
+	fmovm.x		&0x04,-(%sp)		# save fp2
+
+	fmov.x		%fp0,RPRIME(%a6)
+	fmul.x		%fp0,%fp0		# FP0 IS S = R*R
+
+	fmov.d		COSB8(%pc),%fp1		# B8
+	fmov.d		SINA7(%pc),%fp2		# A7
+
+	fmul.x		%fp0,%fp1		# SB8
+	fmov.x		%fp0,SPRIME(%a6)
+	fmul.x		%fp0,%fp2		# SA7
+
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+
+	fadd.d		COSB7(%pc),%fp1		# B7+SB8
+	fadd.d		SINA6(%pc),%fp2		# A6+SA7
+
+	eor.l		%d1,RPRIME(%a6)
+	eor.l		%d1,SPRIME(%a6)
+
+	fmul.x		%fp0,%fp1		# S(B7+SB8)
+
+	or.l		&0x3F800000,%d1
+	mov.l		%d1,POSNEG1(%a6)
+
+	fmul.x		%fp0,%fp2		# S(A6+SA7)
+
+	fadd.d		COSB6(%pc),%fp1		# B6+S(B7+SB8)
+	fadd.d		SINA5(%pc),%fp2		# A5+S(A6+SA7)
+
+	fmul.x		%fp0,%fp1		# S(B6+S(B7+SB8))
+	fmul.x		%fp0,%fp2		# S(A5+S(A6+SA7))
+
+	fadd.d		COSB5(%pc),%fp1		# B5+S(B6+S(B7+SB8))
+	fadd.d		SINA4(%pc),%fp2		# A4+S(A5+S(A6+SA7))
+
+	fmul.x		%fp0,%fp1		# S(B5+...)
+	fmul.x		%fp0,%fp2		# S(A4+...)
+
+	fadd.d		COSB4(%pc),%fp1		# B4+S(B5+...)
+	fadd.d		SINA3(%pc),%fp2		# A3+S(A4+...)
+
+	fmul.x		%fp0,%fp1		# S(B4+...)
+	fmul.x		%fp0,%fp2		# S(A3+...)
+
+	fadd.x		COSB3(%pc),%fp1		# B3+S(B4+...)
+	fadd.x		SINA2(%pc),%fp2		# A2+S(A3+...)
+
+	fmul.x		%fp0,%fp1		# S(B3+...)
+	fmul.x		%fp0,%fp2		# S(A2+...)
+
+	fadd.x		COSB2(%pc),%fp1		# B2+S(B3+...)
+	fadd.x		SINA1(%pc),%fp2		# A1+S(A2+...)
+
+	fmul.x		%fp0,%fp1		# S(B2+...)
+	fmul.x		%fp2,%fp0		# S(A1+...)
+
+
+	fadd.s		COSB1(%pc),%fp1		# B1+S(B2...)
+	fmul.x		RPRIME(%a6),%fp0	# R'S(A1+...)
+	fmul.x		SPRIME(%a6),%fp1	# S'(B1+S(B2+...))
+
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+
+	fmov.l		%d0,%fpcr
+	fadd.s		POSNEG1(%a6),%fp1	# COS(X)
+	bsr		sto_cos			# store cosine result
+	fadd.x		RPRIME(%a6),%fp0	# SIN(X)
+	bra		t_inx2
+
+################################################
+
+SCBORS:
+	cmp.l		%d1,&0x3FFF8000
+	bgt.w		SREDUCEX
+
+################################################
+
+SCSM:
+#	mov.w		&0x0000,XDCARE(%a6)
+	fmov.s		&0x3F800000,%fp1
+
+	fmov.l		%d0,%fpcr
+	fsub.s		&0x00800000,%fp1
+	bsr		sto_cos			# store cosine result
+	fmov.l		%fpcr,%d0		# d0 must have fpcr,too
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0
+	bra		t_catch
+
+##############################################
+
+	global		ssincosd
+#--SIN AND COS OF X FOR DENORMALIZED X
+ssincosd:
+	mov.l		%d0,-(%sp)		# save d0
+	fmov.s		&0x3F800000,%fp1
+	bsr		sto_cos			# store cosine result
+	mov.l		(%sp)+,%d0		# restore d0
+	bra		t_extdnrm
+
+############################################
+
+#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
+SREDUCEX:
+	fmovm.x		&0x3c,-(%sp)		# save {fp2-fp5}
+	mov.l		%d2,-(%sp)		# save d2
+	fmov.s		&0x00000000,%fp1	# fp1 = 0
+
+#--If the compact form of abs(arg) in d0 is $7ffeffff, the argument is so large that
+#--there is a danger of unwanted overflow in first LOOP iteration.  In this
+#--case, reduce argument by one remainder step to make subsequent reduction
+#--safe.
+	cmp.l		%d1,&0x7ffeffff		# is arg dangerously large?
+	bne.b		SLOOP			# no
+
+# yes; create 2**16383*PI/2
+	mov.w		&0x7ffe,FP_SCR0_EX(%a6)
+	mov.l		&0xc90fdaa2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+
+# create low half of 2**16383*PI/2 at FP_SCR1
+	mov.w		&0x7fdc,FP_SCR1_EX(%a6)
+	mov.l		&0x85a308d3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+
+	ftest.x		%fp0			# test sign of argument
+	fblt.w		sred_neg
+
+	or.b		&0x80,FP_SCR0_EX(%a6)	# positive arg
+	or.b		&0x80,FP_SCR1_EX(%a6)
+sred_neg:
+	fadd.x		FP_SCR0(%a6),%fp0	# high part of reduction is exact
+	fmov.x		%fp0,%fp1		# save high result in fp1
+	fadd.x		FP_SCR1(%a6),%fp0	# low part of reduction
+	fsub.x		%fp0,%fp1		# determine low component of result
+	fadd.x		FP_SCR1(%a6),%fp1	# fp0/fp1 are reduced argument.
+
+#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+#--integer quotient will be stored in N
+#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
+SLOOP:
+	fmov.x		%fp0,INARG(%a6)		# +-2**K * F, 1 <= F < 2
+	mov.w		INARG(%a6),%d1
+	mov.l		%d1,%a1			# save a copy of D0
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x00003FFF,%d1		# d0 = K
+	cmp.l		%d1,&28
+	ble.b		SLASTLOOP
+SCONTLOOP:
+	sub.l		&27,%d1			# d0 = L := K-27
+	mov.b		&0,ENDFLAG(%a6)
+	bra.b		SWORK
+SLASTLOOP:
+	clr.l		%d1			# d0 = L := 0
+	mov.b		&1,ENDFLAG(%a6)
+
+SWORK:
+#--FIND THE REMAINDER OF (R,r) W.R.T.	2**L * (PI/2). L IS SO CHOSEN
+#--THAT	INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+#--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+	mov.l		&0x00003FFE,%d2		# BIASED EXP OF 2/PI
+	sub.l		%d1,%d2			# BIASED EXP OF 2**(-L)*(2/PI)
+
+	mov.l		&0xA2F9836E,FP_SCR0_HI(%a6)
+	mov.l		&0x4E44152A,FP_SCR0_LO(%a6)
+	mov.w		%d2,FP_SCR0_EX(%a6)	# FP_SCR0 = 2**(-L)*(2/PI)
+
+	fmov.x		%fp0,%fp2
+	fmul.x		FP_SCR0(%a6),%fp2	# fp2 = X * 2**(-L)*(2/PI)
+
+#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+#--FLOATING POINT FORMAT, THE TWO FMOVE'S	FMOVE.L FP <--> N
+#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+#--(SIGN(INARG)*2**63	+	FP2) - SIGN(INARG)*2**63 WILL GIVE
+#--US THE DESIRED VALUE IN FLOATING POINT.
+	mov.l		%a1,%d2
+	swap		%d2
+	and.l		&0x80000000,%d2
+	or.l		&0x5F000000,%d2		# d2 = SIGN(INARG)*2**63 IN SGL
+	mov.l		%d2,TWOTO63(%a6)
+	fadd.s		TWOTO63(%a6),%fp2	# THE FRACTIONAL PART OF FP1 IS ROUNDED
+	fsub.s		TWOTO63(%a6),%fp2	# fp2 = N
+#	fint.x		%fp2
+
+#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
+	mov.l		%d1,%d2			# d2 = L
+
+	add.l		&0x00003FFF,%d2		# BIASED EXP OF 2**L * (PI/2)
+	mov.w		%d2,FP_SCR0_EX(%a6)
+	mov.l		&0xC90FDAA2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)		# FP_SCR0 = 2**(L) * Piby2_1
+
+	add.l		&0x00003FDD,%d1
+	mov.w		%d1,FP_SCR1_EX(%a6)
+	mov.l		&0x85A308D3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)		# FP_SCR1 = 2**(L) * Piby2_2
+
+	mov.b		ENDFLAG(%a6),%d1
+
+#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+#--P2 = 2**(L) * Piby2_2
+	fmov.x		%fp2,%fp4		# fp4 = N
+	fmul.x		FP_SCR0(%a6),%fp4	# fp4 = W = N*P1
+	fmov.x		%fp2,%fp5		# fp5 = N
+	fmul.x		FP_SCR1(%a6),%fp5	# fp5 = w = N*P2
+	fmov.x		%fp4,%fp3		# fp3 = W = N*P1
+
+#--we want P+p = W+w  but  |p| <= half ulp of P
+#--Then, we need to compute  A := R-P   and  a := r-p
+	fadd.x		%fp5,%fp3		# fp3 = P
+	fsub.x		%fp3,%fp4		# fp4 = W-P
+
+	fsub.x		%fp3,%fp0		# fp0 = A := R - P
+	fadd.x		%fp5,%fp4		# fp4 = p = (W-P)+w
+
+	fmov.x		%fp0,%fp3		# fp3 = A
+	fsub.x		%fp4,%fp1		# fp1 = a := r - p
+
+#--Now we need to normalize (A,a) to  "new (R,r)" where R+r = A+a but
+#--|r| <= half ulp of R.
+	fadd.x		%fp1,%fp0		# fp0 = R := A+a
+#--No need to calculate r if this is the last loop
+	cmp.b		%d1,&0
+	bgt.w		SRESTORE
+
+#--Need to calculate r
+	fsub.x		%fp0,%fp3		# fp3 = A-R
+	fadd.x		%fp3,%fp1		# fp1 = r := (A-R)+a
+	bra.w		SLOOP
+
+SRESTORE:
+	fmov.l		%fp2,INT(%a6)
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		(%sp)+,&0x3c		# restore {fp2-fp5}
+
+	mov.l		ADJN(%a6),%d1
+	cmp.l		%d1,&4
+
+	blt.w		SINCONT
+	bra.w		SCCONT
+
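+#
+#	SREDUCEX above iterates a two-piece remainder step: an integer N is
+#	obtained from a scaled X*(2/PI), and N*(PI/2) is then subtracted as
+#	N*Piby2_1 + N*Piby2_2, where Piby2_1 keeps only the top 32 fraction
+#	bits of PI/2 so that N*Piby2_1 is exact in extended precision and
+#	the tail is carried in the compensation term r.  A simplified,
+#	single-step C sketch of the same idea, using the well-known fdlibm
+#	split of PI/2 (the constants are assumed from that source, not from
+#	this file, and no iteration or compensation term is shown):
+#
+#	#include <math.h>
+#
+#	static double reduce_pio2_once(double x, double *n_out)
+#	{
+#		static const double PIO2_1  = 1.57079632673412561417e+00;
+#		static const double PIO2_1T = 6.07710050650619224932e-11;
+#		static const double TWO_BY_PI = 0.636619772367581343;
+#
+#		double n = nearbyint(x * TWO_BY_PI);	/* quotient estimate */
+#		double r = (x - n * PIO2_1) - n * PIO2_1T;
+#
+#		*n_out = n;
+#		return r;	/* ~ x rem pi/2, good for moderately sized x */
+#	}
+#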
+#########################################################################
+# stan():  computes the tangent of a normalized input			#
+# stand(): computes the tangent of a denormalized input			#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = tan(X)							#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 3 ulps in 64 significant bits, i.e. #
+#	within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#	1. If |X| >= 15Pi or |X| < 2**(-40), go to 6.			#
+#									#
+#	2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let	#
+#		k = N mod 2, so in particular, k = 0 or 1.		#
+#									#
+#	3. If k is odd, go to 5.					#
+#									#
+#	4. (k is even) Tan(X) = tan(r) and tan(r) is approximated by a	#
+#		rational function U/V where				#
+#		U = r + r*s*(P1 + s*(P2 + s*P3)), and			#
+#		V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))),  s = r*r.	#
+#		Exit.							#
+#									#
+#	5. (k is odd) Tan(X) = -cot(r). Since tan(r) is approximated by #
+#		a rational function U/V where				#
+#		U = r + r*s*(P1 + s*(P2 + s*P3)), and			#
+#		V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r,	#
+#		-Cot(r) = -V/U. Exit.					#
+#									#
+#	6. If |X| > 1, go to 8.						#
+#									#
+#	7. (|X|<2**(-40)) Tan(X) = X. Exit.				#
+#									#
+#	8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back	#
+#		to 2.							#
+#									#
+#########################################################################
+
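+#
+#	The selection step of the algorithm above, with the U/V rational
+#	approximation replaced by the library tan() purely for illustration
+#	(the names and constants below are not from this file; the int cast
+#	assumes two's complement for negative N):
+#
+#	#include <math.h>
+#
+#	static double tan_reduced(double x)
+#	{
+#		static const double TWO_BY_PI = 0.636619772367581343;
+#		static const double PI_BY_2   = 1.57079632679489661923;
+#
+#		double n = nearbyint(x * TWO_BY_PI);	/* N                */
+#		double r = x - n * PI_BY_2;		/* r = X - N*(pi/2) */
+#
+#		if (((int)n & 1) == 0)
+#			return tan(r);		/* N even: tan(X) =  tan(r) */
+#		return -1.0 / tan(r);		/* N odd:  tan(X) = -cot(r) */
+#	}
+#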
+TANQ4:
+	long		0x3EA0B759,0xF50F8688
+TANP3:
+	long		0xBEF2BAA5,0xA8924F04
+
+TANQ3:
+	long		0xBF346F59,0xB39BA65F,0x00000000,0x00000000
+
+TANP2:
+	long		0x3FF60000,0xE073D3FC,0x199C4A00,0x00000000
+
+TANQ2:
+	long		0x3FF90000,0xD23CD684,0x15D95FA1,0x00000000
+
+TANP1:
+	long		0xBFFC0000,0x8895A6C5,0xFB423BCA,0x00000000
+
+TANQ1:
+	long		0xBFFD0000,0xEEF57E0D,0xA84BC8CE,0x00000000
+
+INVTWOPI:
+	long		0x3FFC0000,0xA2F9836E,0x4E44152A,0x00000000
+
+TWOPI1:
+	long		0x40010000,0xC90FDAA2,0x00000000,0x00000000
+TWOPI2:
+	long		0x3FDF0000,0x85A308D4,0x00000000,0x00000000
+
+#--N*PI/2, -32 <= N <= 32, IN A LEADING TERM IN EXT. AND TRAILING
+#--TERM IN SGL. NOTE THAT PI IS 64-BIT LONG, THUS N*PI/2 IS AT
+#--MOST 69 BITS LONG.
+#	global		PITBL
+PITBL:
+	long		0xC0040000,0xC90FDAA2,0x2168C235,0x21800000
+	long		0xC0040000,0xC2C75BCD,0x105D7C23,0xA0D00000
+	long		0xC0040000,0xBC7EDCF7,0xFF523611,0xA1E80000
+	long		0xC0040000,0xB6365E22,0xEE46F000,0x21480000
+	long		0xC0040000,0xAFEDDF4D,0xDD3BA9EE,0xA1200000
+	long		0xC0040000,0xA9A56078,0xCC3063DD,0x21FC0000
+	long		0xC0040000,0xA35CE1A3,0xBB251DCB,0x21100000
+	long		0xC0040000,0x9D1462CE,0xAA19D7B9,0xA1580000
+	long		0xC0040000,0x96CBE3F9,0x990E91A8,0x21E00000
+	long		0xC0040000,0x90836524,0x88034B96,0x20B00000
+	long		0xC0040000,0x8A3AE64F,0x76F80584,0xA1880000
+	long		0xC0040000,0x83F2677A,0x65ECBF73,0x21C40000
+	long		0xC0030000,0xFB53D14A,0xA9C2F2C2,0x20000000
+	long		0xC0030000,0xEEC2D3A0,0x87AC669F,0x21380000
+	long		0xC0030000,0xE231D5F6,0x6595DA7B,0xA1300000
+	long		0xC0030000,0xD5A0D84C,0x437F4E58,0x9FC00000
+	long		0xC0030000,0xC90FDAA2,0x2168C235,0x21000000
+	long		0xC0030000,0xBC7EDCF7,0xFF523611,0xA1680000
+	long		0xC0030000,0xAFEDDF4D,0xDD3BA9EE,0xA0A00000
+	long		0xC0030000,0xA35CE1A3,0xBB251DCB,0x20900000
+	long		0xC0030000,0x96CBE3F9,0x990E91A8,0x21600000
+	long		0xC0030000,0x8A3AE64F,0x76F80584,0xA1080000
+	long		0xC0020000,0xFB53D14A,0xA9C2F2C2,0x1F800000
+	long		0xC0020000,0xE231D5F6,0x6595DA7B,0xA0B00000
+	long		0xC0020000,0xC90FDAA2,0x2168C235,0x20800000
+	long		0xC0020000,0xAFEDDF4D,0xDD3BA9EE,0xA0200000
+	long		0xC0020000,0x96CBE3F9,0x990E91A8,0x20E00000
+	long		0xC0010000,0xFB53D14A,0xA9C2F2C2,0x1F000000
+	long		0xC0010000,0xC90FDAA2,0x2168C235,0x20000000
+	long		0xC0010000,0x96CBE3F9,0x990E91A8,0x20600000
+	long		0xC0000000,0xC90FDAA2,0x2168C235,0x1F800000
+	long		0xBFFF0000,0xC90FDAA2,0x2168C235,0x1F000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x9F000000
+	long		0x40000000,0xC90FDAA2,0x2168C235,0x9F800000
+	long		0x40010000,0x96CBE3F9,0x990E91A8,0xA0600000
+	long		0x40010000,0xC90FDAA2,0x2168C235,0xA0000000
+	long		0x40010000,0xFB53D14A,0xA9C2F2C2,0x9F000000
+	long		0x40020000,0x96CBE3F9,0x990E91A8,0xA0E00000
+	long		0x40020000,0xAFEDDF4D,0xDD3BA9EE,0x20200000
+	long		0x40020000,0xC90FDAA2,0x2168C235,0xA0800000
+	long		0x40020000,0xE231D5F6,0x6595DA7B,0x20B00000
+	long		0x40020000,0xFB53D14A,0xA9C2F2C2,0x9F800000
+	long		0x40030000,0x8A3AE64F,0x76F80584,0x21080000
+	long		0x40030000,0x96CBE3F9,0x990E91A8,0xA1600000
+	long		0x40030000,0xA35CE1A3,0xBB251DCB,0xA0900000
+	long		0x40030000,0xAFEDDF4D,0xDD3BA9EE,0x20A00000
+	long		0x40030000,0xBC7EDCF7,0xFF523611,0x21680000
+	long		0x40030000,0xC90FDAA2,0x2168C235,0xA1000000
+	long		0x40030000,0xD5A0D84C,0x437F4E58,0x1FC00000
+	long		0x40030000,0xE231D5F6,0x6595DA7B,0x21300000
+	long		0x40030000,0xEEC2D3A0,0x87AC669F,0xA1380000
+	long		0x40030000,0xFB53D14A,0xA9C2F2C2,0xA0000000
+	long		0x40040000,0x83F2677A,0x65ECBF73,0xA1C40000
+	long		0x40040000,0x8A3AE64F,0x76F80584,0x21880000
+	long		0x40040000,0x90836524,0x88034B96,0xA0B00000
+	long		0x40040000,0x96CBE3F9,0x990E91A8,0xA1E00000
+	long		0x40040000,0x9D1462CE,0xAA19D7B9,0x21580000
+	long		0x40040000,0xA35CE1A3,0xBB251DCB,0xA1100000
+	long		0x40040000,0xA9A56078,0xCC3063DD,0xA1FC0000
+	long		0x40040000,0xAFEDDF4D,0xDD3BA9EE,0x21200000
+	long		0x40040000,0xB6365E22,0xEE46F000,0xA1480000
+	long		0x40040000,0xBC7EDCF7,0xFF523611,0x21E80000
+	long		0x40040000,0xC2C75BCD,0x105D7C23,0x20D00000
+	long		0x40040000,0xC90FDAA2,0x2168C235,0xA1800000
+
+	set		INARG,FP_SCR0
+
+	set		TWOTO63,L_SCR1
+	set		INT,L_SCR1
+	set		ENDFLAG,L_SCR2
+
+	global		stan
+stan:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FD78000		# |X| >= 2**(-40)?
+	bge.b		TANOK1
+	bra.w		TANSM
+TANOK1:
+	cmp.l		%d1,&0x4004BC7E		# |X| < 15 PI?
+	blt.b		TANMAIN
+	bra.w		REDUCEX
+
+TANMAIN:
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+	fmov.x		%fp0,%fp1
+	fmul.d		TWOBYPI(%pc),%fp1	# X*2/PI
+
+	lea.l		PITBL+0x200(%pc),%a1	# TABLE OF N*PI/2, N = -32,...,32
+
+	fmov.l		%fp1,%d1		# CONVERT TO INTEGER
+
+	asl.l		&4,%d1
+	add.l		%d1,%a1			# ADDRESS N*PIBY2 IN Y1, Y2
+
+	fsub.x		(%a1)+,%fp0		# X-Y1
+
+	fsub.s		(%a1),%fp0		# FP0 IS R = (X-Y1)-Y2
+
+	ror.l		&5,%d1
+	and.l		&0x80000000,%d1		# D0 WAS ODD IFF D0 < 0
+
+TANCONT:
+	fmovm.x		&0x0c,-(%sp)		# save fp2,fp3
+
+	cmp.l		%d1,&0
+	blt.w		NODD
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# S = R*R
+
+	fmov.d		TANQ4(%pc),%fp3
+	fmov.d		TANP3(%pc),%fp2
+
+	fmul.x		%fp1,%fp3		# SQ4
+	fmul.x		%fp1,%fp2		# SP3
+
+	fadd.d		TANQ3(%pc),%fp3		# Q3+SQ4
+	fadd.x		TANP2(%pc),%fp2		# P2+SP3
+
+	fmul.x		%fp1,%fp3		# S(Q3+SQ4)
+	fmul.x		%fp1,%fp2		# S(P2+SP3)
+
+	fadd.x		TANQ2(%pc),%fp3		# Q2+S(Q3+SQ4)
+	fadd.x		TANP1(%pc),%fp2		# P1+S(P2+SP3)
+
+	fmul.x		%fp1,%fp3		# S(Q2+S(Q3+SQ4))
+	fmul.x		%fp1,%fp2		# S(P1+S(P2+SP3))
+
+	fadd.x		TANQ1(%pc),%fp3		# Q1+S(Q2+S(Q3+SQ4))
+	fmul.x		%fp0,%fp2		# RS(P1+S(P2+SP3))
+
+	fmul.x		%fp3,%fp1		# S(Q1+S(Q2+S(Q3+SQ4)))
+
+	fadd.x		%fp2,%fp0		# R+RS(P1+S(P2+SP3))
+
+	fadd.s		&0x3F800000,%fp1	# 1+S(Q1+...)
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2,fp3
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fdiv.x		%fp1,%fp0		# last inst - possible exception set
+	bra		t_inx2
+
+NODD:
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp0,%fp0		# S = R*R
+
+	fmov.d		TANQ4(%pc),%fp3
+	fmov.d		TANP3(%pc),%fp2
+
+	fmul.x		%fp0,%fp3		# SQ4
+	fmul.x		%fp0,%fp2		# SP3
+
+	fadd.d		TANQ3(%pc),%fp3		# Q3+SQ4
+	fadd.x		TANP2(%pc),%fp2		# P2+SP3
+
+	fmul.x		%fp0,%fp3		# S(Q3+SQ4)
+	fmul.x		%fp0,%fp2		# S(P2+SP3)
+
+	fadd.x		TANQ2(%pc),%fp3		# Q2+S(Q3+SQ4)
+	fadd.x		TANP1(%pc),%fp2		# P1+S(P2+SP3)
+
+	fmul.x		%fp0,%fp3		# S(Q2+S(Q3+SQ4))
+	fmul.x		%fp0,%fp2		# S(P1+S(P2+SP3))
+
+	fadd.x		TANQ1(%pc),%fp3		# Q1+S(Q2+S(Q3+SQ4))
+	fmul.x		%fp1,%fp2		# RS(P1+S(P2+SP3))
+
+	fmul.x		%fp3,%fp0		# S(Q1+S(Q2+S(Q3+SQ4)))
+
+	fadd.x		%fp2,%fp1		# R+RS(P1+S(P2+SP3))
+	fadd.s		&0x3F800000,%fp0	# 1+S(Q1+...)
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2,fp3
+
+	fmov.x		%fp1,-(%sp)
+	eor.l		&0x80000000,(%sp)
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fdiv.x		(%sp)+,%fp0		# last inst - possible exception set
+	bra		t_inx2
+
+TANBORS:
+#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+#--IF |X| < 2**(-40), RETURN X OR 1.
+	cmp.l		%d1,&0x3FFF8000
+	bgt.b		REDUCEX
+
+TANSM:
+	fmov.x		%fp0,-(%sp)
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		(%sp)+,%fp0		# last inst - possible exception set
+	bra		t_catch
+
+	global		stand
+#--TAN(X) = X FOR DENORMALIZED X
+stand:
+	bra		t_extdnrm
+
+#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
+REDUCEX:
+	fmovm.x		&0x3c,-(%sp)		# save {fp2-fp5}
+	mov.l		%d2,-(%sp)		# save d2
+	fmov.s		&0x00000000,%fp1	# fp1 = 0
+
+#--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
+#--there is a danger of unwanted overflow in first LOOP iteration.  In this
+#--case, reduce argument by one remainder step to make subsequent reduction
+#--safe.
+	cmp.l		%d1,&0x7ffeffff		# is arg dangerously large?
+	bne.b		LOOP			# no
+
+# yes; create 2**16383*PI/2
+	mov.w		&0x7ffe,FP_SCR0_EX(%a6)
+	mov.l		&0xc90fdaa2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+
+# create low half of 2**16383*PI/2 at FP_SCR1
+	mov.w		&0x7fdc,FP_SCR1_EX(%a6)
+	mov.l		&0x85a308d3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+
+	ftest.x		%fp0			# test sign of argument
+	fblt.w		red_neg
+
+	or.b		&0x80,FP_SCR0_EX(%a6)	# positive arg
+	or.b		&0x80,FP_SCR1_EX(%a6)
+red_neg:
+	fadd.x		FP_SCR0(%a6),%fp0	# high part of reduction is exact
+	fmov.x		%fp0,%fp1		# save high result in fp1
+	fadd.x		FP_SCR1(%a6),%fp0	# low part of reduction
+	fsub.x		%fp0,%fp1		# determine low component of result
+	fadd.x		FP_SCR1(%a6),%fp1	# fp0/fp1 are reduced argument.
+
+#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+#--integer quotient will be stored in N
+#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
+LOOP:
+	fmov.x		%fp0,INARG(%a6)		# +-2**K * F, 1 <= F < 2
+	mov.w		INARG(%a6),%d1
+	mov.l		%d1,%a1			# save a copy of D0
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x00003FFF,%d1		# d0 = K
+	cmp.l		%d1,&28
+	ble.b		LASTLOOP
+CONTLOOP:
+	sub.l		&27,%d1			# d0 = L := K-27
+	mov.b		&0,ENDFLAG(%a6)
+	bra.b		WORK
+LASTLOOP:
+	clr.l		%d1			# d0 = L := 0
+	mov.b		&1,ENDFLAG(%a6)
+
+WORK:
+#--FIND THE REMAINDER OF (R,r) W.R.T.	2**L * (PI/2). L IS SO CHOSEN
+#--THAT	INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+#--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+	mov.l		&0x00003FFE,%d2		# BIASED EXP OF 2/PI
+	sub.l		%d1,%d2			# BIASED EXP OF 2**(-L)*(2/PI)
+
+	mov.l		&0xA2F9836E,FP_SCR0_HI(%a6)
+	mov.l		&0x4E44152A,FP_SCR0_LO(%a6)
+	mov.w		%d2,FP_SCR0_EX(%a6)	# FP_SCR0 = 2**(-L)*(2/PI)
+
+	fmov.x		%fp0,%fp2
+	fmul.x		FP_SCR0(%a6),%fp2	# fp2 = X * 2**(-L)*(2/PI)
+
+#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+#--FLOATING POINT FORMAT, THE TWO FMOVE'S	FMOVE.L FP <--> N
+#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+#--(SIGN(INARG)*2**63	+	FP2) - SIGN(INARG)*2**63 WILL GIVE
+#--US THE DESIRED VALUE IN FLOATING POINT.
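+#--(The same trick in C, double precision: a sketch assuming the default
+#--round-to-nearest mode and strict double evaluation.  For |v| < 2**51,
+#--adding and then subtracting a sign-matched 2**52 flushes the fraction
+#--bits out of the significand, leaving round-to-int(v) with no
+#--float <--> int conversion.  The helper name is made up.
+#
+#	static double round_by_shifting(double v)
+#	{
+#		const double BIG = 9007199254740992.0;	/* 2**52 */
+#		double big = (v < 0.0) ? -BIG : BIG;	/* match the sign */
+#		volatile double t = v + big;		/* low bits rounded away */
+#		return t - big;
+#	}
+#
+#--The 060 code does the same thing at extended precision with 2**63.)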
+	mov.l		%a1,%d2
+	swap		%d2
+	and.l		&0x80000000,%d2
+	or.l		&0x5F000000,%d2		# d2 = SIGN(INARG)*2**63 IN SGL
+	mov.l		%d2,TWOTO63(%a6)
+	fadd.s		TWOTO63(%a6),%fp2	# THE FRACTIONAL PART OF FP2 IS ROUNDED
+	fsub.s		TWOTO63(%a6),%fp2	# fp2 = N
+#	fintrz.x	%fp2,%fp2
+
+#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
+	mov.l		%d1,%d2			# d2 = L
+
+	add.l		&0x00003FFF,%d2		# BIASED EXP OF 2**L * (PI/2)
+	mov.w		%d2,FP_SCR0_EX(%a6)
+	mov.l		&0xC90FDAA2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)		# FP_SCR0 = 2**(L) * Piby2_1
+
+	add.l		&0x00003FDD,%d1
+	mov.w		%d1,FP_SCR1_EX(%a6)
+	mov.l		&0x85A308D3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)		# FP_SCR1 = 2**(L) * Piby2_2
+
+	mov.b		ENDFLAG(%a6),%d1
+
+#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+#--P2 = 2**(L) * Piby2_2
+	fmov.x		%fp2,%fp4		# fp4 = N
+	fmul.x		FP_SCR0(%a6),%fp4	# fp4 = W = N*P1
+	fmov.x		%fp2,%fp5		# fp5 = N
+	fmul.x		FP_SCR1(%a6),%fp5	# fp5 = w = N*P2
+	fmov.x		%fp4,%fp3		# fp3 = W = N*P1
+
+#--we want P+p = W+w  but  |p| <= half ulp of P
+#--Then, we need to compute  A := R-P   and  a := r-p
+	fadd.x		%fp5,%fp3		# fp3 = P
+	fsub.x		%fp3,%fp4		# fp4 = W-P
+
+	fsub.x		%fp3,%fp0		# fp0 = A := R - P
+	fadd.x		%fp5,%fp4		# fp4 = p = (W-P)+w
+
+	fmov.x		%fp0,%fp3		# fp3 = A
+	fsub.x		%fp4,%fp1		# fp1 = a := r - p
+
+#--Now we need to normalize (A,a) to  "new (R,r)" where R+r = A+a but
+#--|r| <= half ulp of R.
+	fadd.x		%fp1,%fp0		# fp0 = R := A+a
+#--No need to calculate r if this is the last loop
+	cmp.b		%d1,&0
+	bgt.w		RESTORE
+
+#--Need to calculate r
+	fsub.x		%fp0,%fp3		# fp3 = A-R
+	fadd.x		%fp3,%fp1		# fp1 = r := (A-R)+a
+	bra.w		LOOP
+
+RESTORE:
+	fmov.l		%fp2,INT(%a6)
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		(%sp)+,&0x3c		# restore {fp2-fp5}
+
+	mov.l		INT(%a6),%d1
+	ror.l		&1,%d1
+
+	bra.w		TANCONT
+
+#########################################################################
+# satan():  computes the arctangent of a normalized number		#
+# satand(): computes the arctangent of a denormalized number		#
+#									#
+# INPUT	*************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = arctan(X)							#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 2 ulps in	64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#	Step 1. If |X| >= 16 or |X| < 1/16, go to Step 5.		#
+#									#
+#	Step 2. Let X = sgn * 2**k * 1.xxxxxxxx...x.			#
+#		Note that k = -4, -3,..., or 3.				#
+#		Define F = sgn * 2**k * 1.xxxx1, i.e. the first 5	#
+#		significant bits of X with a bit-1 attached at the 6-th	#
+#		bit position. Define u to be u = (X-F) / (1 + X*F).	#
+#									#
+#	Step 3. Approximate arctan(u) by a polynomial poly.		#
+#									#
+#	Step 4. Return arctan(F) + poly, arctan(F) is fetched from a	#
+#		table of values calculated beforehand. Exit.		#
+#									#
+#	Step 5. If |X| >= 16, go to Step 7.				#
+#									#
+#	Step 6. Approximate arctan(X) by an odd polynomial in X. Exit.	#
+#									#
+#	Step 7. Define X' = -1/X. Approximate arctan(X') by an odd	#
+#		polynomial in X'.					#
+#		Arctan(X) = sign(X)*Pi/2 + arctan(X'). Exit.		#
+#									#
+#########################################################################
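+# A minimal C sketch of Steps 2-4 above (illustrative only, not the 060
+# code path): F is built a little differently here, libm's atan() stands
+# in both for the stored ATANTBL values and for the short polynomial that
+# approximates atan(u), and the helper name is made up.
+#
+#	#include <math.h>
+#
+#	static double atan_by_table_idea(double x)	/* 1/16 <= |x| < 16 */
+#	{
+#		int e;
+#		double m = frexp(fabs(x), &e);	/* |x| = m * 2^e, 0.5 <= m < 1 */
+#		double f = ldexp(floor(m * 64.0) / 64.0 + 1.0 / 128.0, e);
+#		if (x < 0.0)
+#			f = -f;			/* keep sign(f) == sign(x) */
+#		double u = (x - f) / (1.0 + x * f);
+#		return atan(f) + atan(u);	/* atan(x) = atan(f) + atan(u) */
+#	}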
+
+ATANA3:	long		0xBFF6687E,0x314987D8
+ATANA2:	long		0x4002AC69,0x34A26DB3
+ATANA1:	long		0xBFC2476F,0x4E1DA28E
+
+ATANB6:	long		0x3FB34444,0x7F876989
+ATANB5:	long		0xBFB744EE,0x7FAF45DB
+ATANB4:	long		0x3FBC71C6,0x46940220
+ATANB3:	long		0xBFC24924,0x921872F9
+ATANB2:	long		0x3FC99999,0x99998FA9
+ATANB1:	long		0xBFD55555,0x55555555
+
+ATANC5:	long		0xBFB70BF3,0x98539E6A
+ATANC4:	long		0x3FBC7187,0x962D1D7D
+ATANC3:	long		0xBFC24924,0x827107B8
+ATANC2:	long		0x3FC99999,0x9996263E
+ATANC1:	long		0xBFD55555,0x55555536
+
+PPIBY2:	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+NPIBY2:	long		0xBFFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+PTINY:	long		0x00010000,0x80000000,0x00000000,0x00000000
+NTINY:	long		0x80010000,0x80000000,0x00000000,0x00000000
+
+ATANTBL:
+	long		0x3FFB0000,0x83D152C5,0x060B7A51,0x00000000
+	long		0x3FFB0000,0x8BC85445,0x65498B8B,0x00000000
+	long		0x3FFB0000,0x93BE4060,0x17626B0D,0x00000000
+	long		0x3FFB0000,0x9BB3078D,0x35AEC202,0x00000000
+	long		0x3FFB0000,0xA3A69A52,0x5DDCE7DE,0x00000000
+	long		0x3FFB0000,0xAB98E943,0x62765619,0x00000000
+	long		0x3FFB0000,0xB389E502,0xF9C59862,0x00000000
+	long		0x3FFB0000,0xBB797E43,0x6B09E6FB,0x00000000
+	long		0x3FFB0000,0xC367A5C7,0x39E5F446,0x00000000
+	long		0x3FFB0000,0xCB544C61,0xCFF7D5C6,0x00000000
+	long		0x3FFB0000,0xD33F62F8,0x2488533E,0x00000000
+	long		0x3FFB0000,0xDB28DA81,0x62404C77,0x00000000
+	long		0x3FFB0000,0xE310A407,0x8AD34F18,0x00000000
+	long		0x3FFB0000,0xEAF6B0A8,0x188EE1EB,0x00000000
+	long		0x3FFB0000,0xF2DAF194,0x9DBE79D5,0x00000000
+	long		0x3FFB0000,0xFABD5813,0x61D47E3E,0x00000000
+	long		0x3FFC0000,0x8346AC21,0x0959ECC4,0x00000000
+	long		0x3FFC0000,0x8B232A08,0x304282D8,0x00000000
+	long		0x3FFC0000,0x92FB70B8,0xD29AE2F9,0x00000000
+	long		0x3FFC0000,0x9ACF476F,0x5CCD1CB4,0x00000000
+	long		0x3FFC0000,0xA29E7630,0x4954F23F,0x00000000
+	long		0x3FFC0000,0xAA68C5D0,0x8AB85230,0x00000000
+	long		0x3FFC0000,0xB22DFFFD,0x9D539F83,0x00000000
+	long		0x3FFC0000,0xB9EDEF45,0x3E900EA5,0x00000000
+	long		0x3FFC0000,0xC1A85F1C,0xC75E3EA5,0x00000000
+	long		0x3FFC0000,0xC95D1BE8,0x28138DE6,0x00000000
+	long		0x3FFC0000,0xD10BF300,0x840D2DE4,0x00000000
+	long		0x3FFC0000,0xD8B4B2BA,0x6BC05E7A,0x00000000
+	long		0x3FFC0000,0xE0572A6B,0xB42335F6,0x00000000
+	long		0x3FFC0000,0xE7F32A70,0xEA9CAA8F,0x00000000
+	long		0x3FFC0000,0xEF888432,0x64ECEFAA,0x00000000
+	long		0x3FFC0000,0xF7170A28,0xECC06666,0x00000000
+	long		0x3FFD0000,0x812FD288,0x332DAD32,0x00000000
+	long		0x3FFD0000,0x88A8D1B1,0x218E4D64,0x00000000
+	long		0x3FFD0000,0x9012AB3F,0x23E4AEE8,0x00000000
+	long		0x3FFD0000,0x976CC3D4,0x11E7F1B9,0x00000000
+	long		0x3FFD0000,0x9EB68949,0x3889A227,0x00000000
+	long		0x3FFD0000,0xA5EF72C3,0x4487361B,0x00000000
+	long		0x3FFD0000,0xAD1700BA,0xF07A7227,0x00000000
+	long		0x3FFD0000,0xB42CBCFA,0xFD37EFB7,0x00000000
+	long		0x3FFD0000,0xBB303A94,0x0BA80F89,0x00000000
+	long		0x3FFD0000,0xC22115C6,0xFCAEBBAF,0x00000000
+	long		0x3FFD0000,0xC8FEF3E6,0x86331221,0x00000000
+	long		0x3FFD0000,0xCFC98330,0xB4000C70,0x00000000
+	long		0x3FFD0000,0xD6807AA1,0x102C5BF9,0x00000000
+	long		0x3FFD0000,0xDD2399BC,0x31252AA3,0x00000000
+	long		0x3FFD0000,0xE3B2A855,0x6B8FC517,0x00000000
+	long		0x3FFD0000,0xEA2D764F,0x64315989,0x00000000
+	long		0x3FFD0000,0xF3BF5BF8,0xBAD1A21D,0x00000000
+	long		0x3FFE0000,0x801CE39E,0x0D205C9A,0x00000000
+	long		0x3FFE0000,0x8630A2DA,0xDA1ED066,0x00000000
+	long		0x3FFE0000,0x8C1AD445,0xF3E09B8C,0x00000000
+	long		0x3FFE0000,0x91DB8F16,0x64F350E2,0x00000000
+	long		0x3FFE0000,0x97731420,0x365E538C,0x00000000
+	long		0x3FFE0000,0x9CE1C8E6,0xA0B8CDBA,0x00000000
+	long		0x3FFE0000,0xA22832DB,0xCADAAE09,0x00000000
+	long		0x3FFE0000,0xA746F2DD,0xB7602294,0x00000000
+	long		0x3FFE0000,0xAC3EC0FB,0x997DD6A2,0x00000000
+	long		0x3FFE0000,0xB110688A,0xEBDC6F6A,0x00000000
+	long		0x3FFE0000,0xB5BCC490,0x59ECC4B0,0x00000000
+	long		0x3FFE0000,0xBA44BC7D,0xD470782F,0x00000000
+	long		0x3FFE0000,0xBEA94144,0xFD049AAC,0x00000000
+	long		0x3FFE0000,0xC2EB4ABB,0x661628B6,0x00000000
+	long		0x3FFE0000,0xC70BD54C,0xE602EE14,0x00000000
+	long		0x3FFE0000,0xCD000549,0xADEC7159,0x00000000
+	long		0x3FFE0000,0xD48457D2,0xD8EA4EA3,0x00000000
+	long		0x3FFE0000,0xDB948DA7,0x12DECE3B,0x00000000
+	long		0x3FFE0000,0xE23855F9,0x69E8096A,0x00000000
+	long		0x3FFE0000,0xE8771129,0xC4353259,0x00000000
+	long		0x3FFE0000,0xEE57C16E,0x0D379C0D,0x00000000
+	long		0x3FFE0000,0xF3E10211,0xA87C3779,0x00000000
+	long		0x3FFE0000,0xF919039D,0x758B8D41,0x00000000
+	long		0x3FFE0000,0xFE058B8F,0x64935FB3,0x00000000
+	long		0x3FFF0000,0x8155FB49,0x7B685D04,0x00000000
+	long		0x3FFF0000,0x83889E35,0x49D108E1,0x00000000
+	long		0x3FFF0000,0x859CFA76,0x511D724B,0x00000000
+	long		0x3FFF0000,0x87952ECF,0xFF8131E7,0x00000000
+	long		0x3FFF0000,0x89732FD1,0x9557641B,0x00000000
+	long		0x3FFF0000,0x8B38CAD1,0x01932A35,0x00000000
+	long		0x3FFF0000,0x8CE7A8D8,0x301EE6B5,0x00000000
+	long		0x3FFF0000,0x8F46A39E,0x2EAE5281,0x00000000
+	long		0x3FFF0000,0x922DA7D7,0x91888487,0x00000000
+	long		0x3FFF0000,0x94D19FCB,0xDEDF5241,0x00000000
+	long		0x3FFF0000,0x973AB944,0x19D2A08B,0x00000000
+	long		0x3FFF0000,0x996FF00E,0x08E10B96,0x00000000
+	long		0x3FFF0000,0x9B773F95,0x12321DA7,0x00000000
+	long		0x3FFF0000,0x9D55CC32,0x0F935624,0x00000000
+	long		0x3FFF0000,0x9F100575,0x006CC571,0x00000000
+	long		0x3FFF0000,0xA0A9C290,0xD97CC06C,0x00000000
+	long		0x3FFF0000,0xA22659EB,0xEBC0630A,0x00000000
+	long		0x3FFF0000,0xA388B4AF,0xF6EF0EC9,0x00000000
+	long		0x3FFF0000,0xA4D35F10,0x61D292C4,0x00000000
+	long		0x3FFF0000,0xA60895DC,0xFBE3187E,0x00000000
+	long		0x3FFF0000,0xA72A51DC,0x7367BEAC,0x00000000
+	long		0x3FFF0000,0xA83A5153,0x0956168F,0x00000000
+	long		0x3FFF0000,0xA93A2007,0x7539546E,0x00000000
+	long		0x3FFF0000,0xAA9E7245,0x023B2605,0x00000000
+	long		0x3FFF0000,0xAC4C84BA,0x6FE4D58F,0x00000000
+	long		0x3FFF0000,0xADCE4A4A,0x606B9712,0x00000000
+	long		0x3FFF0000,0xAF2A2DCD,0x8D263C9C,0x00000000
+	long		0x3FFF0000,0xB0656F81,0xF22265C7,0x00000000
+	long		0x3FFF0000,0xB1846515,0x0F71496A,0x00000000
+	long		0x3FFF0000,0xB28AAA15,0x6F9ADA35,0x00000000
+	long		0x3FFF0000,0xB37B44FF,0x3766B895,0x00000000
+	long		0x3FFF0000,0xB458C3DC,0xE9630433,0x00000000
+	long		0x3FFF0000,0xB525529D,0x562246BD,0x00000000
+	long		0x3FFF0000,0xB5E2CCA9,0x5F9D88CC,0x00000000
+	long		0x3FFF0000,0xB692CADA,0x7ACA1ADA,0x00000000
+	long		0x3FFF0000,0xB736AEA7,0xA6925838,0x00000000
+	long		0x3FFF0000,0xB7CFAB28,0x7E9F7B36,0x00000000
+	long		0x3FFF0000,0xB85ECC66,0xCB219835,0x00000000
+	long		0x3FFF0000,0xB8E4FD5A,0x20A593DA,0x00000000
+	long		0x3FFF0000,0xB99F41F6,0x4AFF9BB5,0x00000000
+	long		0x3FFF0000,0xBA7F1E17,0x842BBE7B,0x00000000
+	long		0x3FFF0000,0xBB471285,0x7637E17D,0x00000000
+	long		0x3FFF0000,0xBBFABE8A,0x4788DF6F,0x00000000
+	long		0x3FFF0000,0xBC9D0FAD,0x2B689D79,0x00000000
+	long		0x3FFF0000,0xBD306A39,0x471ECD86,0x00000000
+	long		0x3FFF0000,0xBDB6C731,0x856AF18A,0x00000000
+	long		0x3FFF0000,0xBE31CAC5,0x02E80D70,0x00000000
+	long		0x3FFF0000,0xBEA2D55C,0xE33194E2,0x00000000
+	long		0x3FFF0000,0xBF0B10B7,0xC03128F0,0x00000000
+	long		0x3FFF0000,0xBF6B7A18,0xDACB778D,0x00000000
+	long		0x3FFF0000,0xBFC4EA46,0x63FA18F6,0x00000000
+	long		0x3FFF0000,0xC0181BDE,0x8B89A454,0x00000000
+	long		0x3FFF0000,0xC065B066,0xCFBF6439,0x00000000
+	long		0x3FFF0000,0xC0AE345F,0x56340AE6,0x00000000
+	long		0x3FFF0000,0xC0F22291,0x9CB9E6A7,0x00000000
+
+	set		X,FP_SCR0
+	set		XDCARE,X+2
+	set		XFRAC,X+4
+	set		XFRACLO,X+8
+
+	set		ATANF,FP_SCR1
+	set		ATANFHI,ATANF+4
+	set		ATANFLO,ATANF+8
+
+	global		satan
+#--ENTRY POINT FOR ATAN(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+satan:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	fmov.x		%fp0,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FFB8000		# |X| >= 1/16?
+	bge.b		ATANOK1
+	bra.w		ATANSM
+
+ATANOK1:
+	cmp.l		%d1,&0x4002FFFF		# |X| < 16 ?
+	ble.b		ATANMAIN
+	bra.w		ATANBIG
+
+#--THE MOST LIKELY CASE, |X| IN [1/16, 16). WE USE TABLE TECHNIQUE
+#--THE IDEA IS ATAN(X) = ATAN(F) + ATAN( [X-F] / [1+XF] ).
+#--SO IF F IS CHOSEN TO BE CLOSE TO X AND ATAN(F) IS STORED IN
+#--A TABLE, ALL WE NEED IS TO APPROXIMATE ATAN(U) WHERE
+#--U = (X-F)/(1+XF) IS SMALL (REMEMBER F IS CLOSE TO X). IT IS
+#--TRUE THAT A DIVIDE IS NOW NEEDED, BUT THE APPROXIMATION FOR
+#--ATAN(U) IS A VERY SHORT POLYNOMIAL AND THE INDEXING TO
+#--FETCH F AND SAVING OF REGISTERS CAN ALL BE HIDDEN UNDER THE
+#--DIVIDE. IN THE END THIS METHOD IS MUCH FASTER THAN A TRADITIONAL
+#--ONE. NOTE ALSO THAT THE TRADITIONAL SCHEME THAT APPROXIMATES
+#--ATAN(X) DIRECTLY WILL NEED TO USE A RATIONAL APPROXIMATION
+#--(DIVISION NEEDED) ANYWAY BECAUSE A POLYNOMIAL APPROXIMATION
+#--WILL INVOLVE A VERY LONG POLYNOMIAL.
+
+#--NOW WE SEE X AS +-2^K * 1.BBBBBBB....B <- 1. + 63 BITS
+#--WE CHOOSE F TO BE +-2^K * 1.BBBB1
+#--THAT IS, IT MATCHES THE EXPONENT AND FIRST 5 BITS OF X; THE
+#--SIXTH BIT IS SET TO 1. SINCE K = -4, -3, ..., 3, THERE
+#--ARE ONLY 8 TIMES 16 = 2^7 = 128 |F|'S. SINCE ATAN(-|F|) IS
+#-- -ATAN(|F|), WE NEED TO STORE ONLY ATAN(|F|).
+
+ATANMAIN:
+
+	and.l		&0xF8000000,XFRAC(%a6)	# FIRST 5 BITS
+	or.l		&0x04000000,XFRAC(%a6)	# SET 6-TH BIT TO 1
+	mov.l		&0x00000000,XFRACLO(%a6) # LOCATION OF X IS NOW F
+
+	fmov.x		%fp0,%fp1		# FP1 IS X
+	fmul.x		X(%a6),%fp1		# FP1 IS X*F, NOTE THAT X*F > 0
+	fsub.x		X(%a6),%fp0		# FP0 IS X-F
+	fadd.s		&0x3F800000,%fp1	# FP1 IS 1 + X*F
+	fdiv.x		%fp1,%fp0		# FP0 IS U = (X-F)/(1+X*F)
+
+#--WHILE THE DIVISION IS TAKING ITS TIME, WE FETCH ATAN(|F|)
+#--CREATE ATAN(F) AND STORE IT IN ATANF, AND
+#--SAVE REGISTERS FP2.
+
+	mov.l		%d2,-(%sp)		# SAVE d2 TEMPORARILY
+	mov.l		%d1,%d2			# THE EXP AND 16 BITS OF X
+	and.l		&0x00007800,%d1		# 4 VARYING BITS OF F'S FRACTION
+	and.l		&0x7FFF0000,%d2		# EXPONENT OF F
+	sub.l		&0x3FFB0000,%d2		# K+4
+	asr.l		&1,%d2
+	add.l		%d2,%d1			# THE 7 BITS IDENTIFYING F
+	asr.l		&7,%d1			# INDEX INTO TBL OF ATAN(|F|)
+	lea		ATANTBL(%pc),%a1
+	add.l		%d1,%a1			# ADDRESS OF ATAN(|F|)
+	mov.l		(%a1)+,ATANF(%a6)
+	mov.l		(%a1)+,ATANFHI(%a6)
+	mov.l		(%a1)+,ATANFLO(%a6)	# ATANF IS NOW ATAN(|F|)
+	mov.l		X(%a6),%d1		# LOAD SIGN AND EXPO. AGAIN
+	and.l		&0x80000000,%d1		# SIGN(F)
+	or.l		%d1,ATANF(%a6)		# ATANF IS NOW SIGN(F)*ATAN(|F|)
+	mov.l		(%sp)+,%d2		# RESTORE d2
+
+#--THAT'S ALL I HAVE TO DO FOR NOW,
+#--BUT ALAS, THE DIVIDE IS STILL CRANKING!
+
+#--U IN FP0, WE ARE NOW READY TO COMPUTE ATAN(U) AS
+#--U + A1*U*V*(A2 + V*(A3 + V)), V = U*U
+#--THE POLYNOMIAL MAY LOOK STRANGE, BUT IS NEVERTHELESS CORRECT.
+#--THE NATURAL FORM IS U + U*V*(A1 + V*(A2 + V*A3))
+#--WHAT WE HAVE HERE IS MERELY	A1 = A3, A2 = A1/A3, A3 = A2/A3.
+#--THE REASON FOR THIS REARRANGEMENT IS TO MAKE THE INDEPENDENT
+#--PARTS A1*U*V AND (A2 + ... STUFF) MORE LOAD-BALANCED
+
+	fmovm.x		&0x04,-(%sp)		# save fp2
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1
+	fmov.d		ATANA3(%pc),%fp2
+	fadd.x		%fp1,%fp2		# A3+V
+	fmul.x		%fp1,%fp2		# V*(A3+V)
+	fmul.x		%fp0,%fp1		# U*V
+	fadd.d		ATANA2(%pc),%fp2	# A2+V*(A3+V)
+	fmul.d		ATANA1(%pc),%fp1	# A1*U*V
+	fmul.x		%fp2,%fp1		# A1*U*V*(A2+V*(A3+V))
+	fadd.x		%fp1,%fp0		# ATAN(U), FP1 RELEASED
+
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	fadd.x		ATANF(%a6),%fp0		# ATAN(X)
+	bra		t_inx2
+
+ATANBORS:
+#--|X| IS IN d0 IN COMPACT FORM. FP1, d0 SAVED.
+#--FP0 IS X AND |X| <= 1/16 OR |X| >= 16.
+	cmp.l		%d1,&0x3FFF8000
+	bgt.w		ATANBIG			# I.E. |X| >= 16
+
+ATANSM:
+#--|X| <= 1/16
+#--IF |X| < 2^(-40), RETURN X AS ANSWER. OTHERWISE, APPROXIMATE
+#--ATAN(X) BY X + X*Y*(B1+Y*(B2+Y*(B3+Y*(B4+Y*(B5+Y*B6)))))
+#--WHICH IS X + X*Y*( [B1+Z*(B3+Z*B5)] + [Y*(B2+Z*(B4+Z*B6))] )
+#--WHERE Y = X*X, AND Z = Y*Y.
+
+	cmp.l		%d1,&0x3FD78000
+	blt.w		ATANTINY
+
+#--COMPUTE POLYNOMIAL
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmul.x		%fp0,%fp0		# FP0 IS Y = X*X
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS Z = Y*Y
+
+	fmov.d		ATANB6(%pc),%fp2
+	fmov.d		ATANB5(%pc),%fp3
+
+	fmul.x		%fp1,%fp2		# Z*B6
+	fmul.x		%fp1,%fp3		# Z*B5
+
+	fadd.d		ATANB4(%pc),%fp2	# B4+Z*B6
+	fadd.d		ATANB3(%pc),%fp3	# B3+Z*B5
+
+	fmul.x		%fp1,%fp2		# Z*(B4+Z*B6)
+	fmul.x		%fp3,%fp1		# Z*(B3+Z*B5)
+
+	fadd.d		ATANB2(%pc),%fp2	# B2+Z*(B4+Z*B6)
+	fadd.d		ATANB1(%pc),%fp1	# B1+Z*(B3+Z*B5)
+
+	fmul.x		%fp0,%fp2		# Y*(B2+Z*(B4+Z*B6))
+	fmul.x		X(%a6),%fp0		# X*Y
+
+	fadd.x		%fp2,%fp1		# [B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]
+
+	fmul.x		%fp1,%fp0		# X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))])
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	fadd.x		X(%a6),%fp0
+	bra		t_inx2
+
+ATANTINY:
+#--|X| < 2^(-40), ATAN(X) = X
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0		# last inst - possible exception set
+
+	bra		t_catch
+
+ATANBIG:
+#--IF |X| > 2^(100), RETURN	SIGN(X)*(PI/2 - TINY). OTHERWISE,
+#--RETURN SIGN(X)*PI/2 + ATAN(-1/X).
+	cmp.l		%d1,&0x40638000
+	bgt.w		ATANHUGE
+
+#--APPROXIMATE ATAN(-1/X) BY
+#--X'+X'*Y*(C1+Y*(C2+Y*(C3+Y*(C4+Y*C5)))), X' = -1/X, Y = X'*X'
+#--THIS CAN BE RE-WRITTEN AS
+#--X'+X'*Y*( [C1+Z*(C3+Z*C5)] + [Y*(C2+Z*C4)] ), Z = Y*Y.
+
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmov.s		&0xBF800000,%fp1	# LOAD -1
+	fdiv.x		%fp0,%fp1		# FP1 IS -1/X
+
+#--DIVIDE IS STILL CRANKING
+
+	fmov.x		%fp1,%fp0		# FP0 IS X'
+	fmul.x		%fp0,%fp0		# FP0 IS Y = X'*X'
+	fmov.x		%fp1,X(%a6)		# X IS REALLY X'
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS Z = Y*Y
+
+	fmov.d		ATANC5(%pc),%fp3
+	fmov.d		ATANC4(%pc),%fp2
+
+	fmul.x		%fp1,%fp3		# Z*C5
+	fmul.x		%fp1,%fp2		# Z*C4
+
+	fadd.d		ATANC3(%pc),%fp3	# C3+Z*C5
+	fadd.d		ATANC2(%pc),%fp2	# C2+Z*C4
+
+	fmul.x		%fp3,%fp1		# Z*(C3+Z*C5), FP3 RELEASED
+	fmul.x		%fp0,%fp2		# Y*(C2+Z*C4)
+
+	fadd.d		ATANC1(%pc),%fp1	# C1+Z*(C3+Z*C5)
+	fmul.x		X(%a6),%fp0		# X'*Y
+
+	fadd.x		%fp2,%fp1		# [Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)]
+
+	fmul.x		%fp1,%fp0		# X'*Y*([C1+Z*(C3+Z*C5)]
+#					...	+[Y*(C2+Z*C4)])
+	fadd.x		X(%a6),%fp0
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	tst.b		(%a0)
+	bpl.b		pos_big
+
+neg_big:
+	fadd.x		NPIBY2(%pc),%fp0
+	bra		t_minx2
+
+pos_big:
+	fadd.x		PPIBY2(%pc),%fp0
+	bra		t_pinx2
+
+ATANHUGE:
+#--RETURN SIGN(X)*(PIBY2 - TINY) = SIGN(X)*PIBY2 - SIGN(X)*TINY
+	tst.b		(%a0)
+	bpl.b		pos_huge
+
+neg_huge:
+	fmov.x		NPIBY2(%pc),%fp0
+	fmov.l		%d0,%fpcr
+	fadd.x		PTINY(%pc),%fp0
+	bra		t_minx2
+
+pos_huge:
+	fmov.x		PPIBY2(%pc),%fp0
+	fmov.l		%d0,%fpcr
+	fadd.x		NTINY(%pc),%fp0
+	bra		t_pinx2
+
+	global		satand
+#--ENTRY POINT FOR ATAN(X) FOR DENORMALIZED ARGUMENT
+satand:
+	bra		t_extdnrm
+
+#########################################################################
+# sasin():  computes the inverse sine of a normalized input		#
+# sasind(): computes the inverse sine of a denormalized input		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = arcsin(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in	64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	ASIN								#
+#	1. If |X| >= 1, go to 3.					#
+#									#
+#	2. (|X| < 1) Calculate asin(X) by				#
+#		z := sqrt( [1-X][1+X] )					#
+#		asin(X) = atan( x / z ).				#
+#		Exit.							#
+#									#
+#	3. If |X| > 1, go to 5.						#
+#									#
+#	4. (|X| = 1) sgn := sign(X), return asin(X) := sgn * Pi/2. Exit.#
+#									#
+#	5. (|X| > 1) Generate an invalid operation by 0 * infinity.	#
+#		Exit.							#
+#									#
+#########################################################################
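+# Step 2 above, as a short C sketch (illustrative only; libm's sqrt() and
+# atan() stand in for the emulated fsqrt and the call to satan(), and the
+# helper name is made up):
+#
+#	#include <math.h>
+#
+#	static double asin_via_atan(double x)	/* |x| < 1 */
+#	{
+#		double z = sqrt((1.0 - x) * (1.0 + x));	/* sqrt(1 - x*x) */
+#		return atan(x / z);	/* asin(x) = atan(x/z) */
+#	}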
+
+	global		sasin
+sasin:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x3FFF8000
+	bge.b		ASINBIG
+
+# This catch is added here for the '060 QSP. Originally, the call to
+# satan() would handle this case by causing the exception which would
+# not be caught until gen_except(). Now, with the exceptions being
+# detected inside of satan(), the exception would have been handled there
+# instead of inside sasin() as expected.
+	cmp.l		%d1,&0x3FD78000
+	blt.w		ASINTINY
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--ASIN(X) = ATAN( X / SQRT( (1-X)(1+X) ) )
+
+ASINMAIN:
+	fmov.s		&0x3F800000,%fp1
+	fsub.x		%fp0,%fp1		# 1-X
+	fmovm.x		&0x4,-(%sp)		#  {fp2}
+	fmov.s		&0x3F800000,%fp2
+	fadd.x		%fp0,%fp2		# 1+X
+	fmul.x		%fp2,%fp1		# (1+X)(1-X)
+	fmovm.x		(%sp)+,&0x20		#  {fp2}
+	fsqrt.x		%fp1			# SQRT([1-X][1+X])
+	fdiv.x		%fp1,%fp0		# X/SQRT([1-X][1+X])
+	fmovm.x		&0x01,-(%sp)		# save X/SQRT(...)
+	lea		(%sp),%a0		# pass ptr to X/SQRT(...)
+	bsr		satan
+	add.l		&0xc,%sp		# clear X/SQRT(...) from stack
+	bra		t_inx2
+
+ASINBIG:
+	fabs.x		%fp0			# |X|
+	fcmp.s		%fp0,&0x3F800000
+	fbgt		t_operr			# cause an operr exception
+
+#--|X| = 1, ASIN(X) = +- PI/2.
+ASINONE:
+	fmov.x		PIBY2(%pc),%fp0
+	mov.l		(%a0),%d1
+	and.l		&0x80000000,%d1		# SIGN BIT OF X
+	or.l		&0x3F800000,%d1		# +-1 IN SGL FORMAT
+	mov.l		%d1,-(%sp)		# push SIGN(X) IN SGL-FMT
+	fmov.l		%d0,%fpcr
+	fmul.s		(%sp)+,%fp0
+	bra		t_inx2
+
+#--|X| < 2^(-40), ASIN(X) = X
+ASINTINY:
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		(%a0),%fp0		# last inst - possible exception
+	bra		t_catch
+
+	global		sasind
+#--ASIN(X) = X FOR DENORMALIZED X
+sasind:
+	bra		t_extdnrm
+
+#########################################################################
+# sacos():  computes the inverse cosine of a normalized input		#
+# sacosd(): computes the inverse cosine of a denormalized input		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = arccos(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in	64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#	ACOS								#
+#	1. If |X| >= 1, go to 3.					#
+#									#
+#	2. (|X| < 1) Calculate acos(X) by				#
+#		z := (1-X) / (1+X)					#
+#		acos(X) = 2 * atan( sqrt(z) ).				#
+#		Exit.							#
+#									#
+#	3. If |X| > 1, go to 5.						#
+#									#
+#	4. (|X| = 1) If X > 0, return 0. Otherwise, return Pi. Exit.	#
+#									#
+#	5. (|X| > 1) Generate an invalid operation by 0 * infinity.	#
+#		Exit.							#
+#									#
+#########################################################################
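+# Step 2 above, as a short C sketch (illustrative only; libm stands in for
+# the emulated fsqrt and the call to satan(), and the helper name is made
+# up):
+#
+#	#include <math.h>
+#
+#	static double acos_via_atan(double x)	/* |x| < 1 */
+#	{
+#		double z = (1.0 - x) / (1.0 + x);
+#		return 2.0 * atan(sqrt(z));	/* acos(x) = 2*atan(sqrt(z)) */
+#	}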
+
+	global		sacos
+sacos:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1		# pack exp w/ upper 16 fraction
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x3FFF8000
+	bge.b		ACOSBIG
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--ACOS(X) = 2 * ATAN(	SQRT( (1-X)/(1+X) ) )
+
+ACOSMAIN:
+	fmov.s		&0x3F800000,%fp1
+	fadd.x		%fp0,%fp1		# 1+X
+	fneg.x		%fp0			# -X
+	fadd.s		&0x3F800000,%fp0	# 1-X
+	fdiv.x		%fp1,%fp0		# (1-X)/(1+X)
+	fsqrt.x		%fp0			# SQRT((1-X)/(1+X))
+	mov.l		%d0,-(%sp)		# save original users fpcr
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save SQRT(...) to stack
+	lea		(%sp),%a0		# pass ptr to sqrt
+	bsr		satan			# ATAN(SQRT([1-X]/[1+X]))
+	add.l		&0xc,%sp		# clear SQRT(...) from stack
+
+	fmov.l		(%sp)+,%fpcr		# restore users round prec,mode
+	fadd.x		%fp0,%fp0		# 2 * ATAN( STUFF )
+	bra		t_pinx2
+
+ACOSBIG:
+	fabs.x		%fp0
+	fcmp.s		%fp0,&0x3F800000
+	fbgt		t_operr			# cause an operr exception
+
+#--|X| = 1, ACOS(X) = 0 OR PI
+	tst.b		(%a0)			# is X positive or negative?
+	bpl.b		ACOSP1
+
+#--X = -1
+#Returns PI and inexact exception
+ACOSM1:
+	fmov.x		PI(%pc),%fp0		# load PI
+	fmov.l		%d0,%fpcr		# load round mode,prec
+	fadd.s		&0x00800000,%fp0	# add a small value
+	bra		t_pinx2
+
+ACOSP1:
+	bra		ld_pzero		# answer is positive zero
+
+	global		sacosd
+#--ACOS(X) = PI/2 FOR DENORMALIZED X
+sacosd:
+	fmov.l		%d0,%fpcr		# load user's rnd mode/prec
+	fmov.x		PIBY2(%pc),%fp0
+	bra		t_pinx2
+
+#########################################################################
+# setox():    computes the exponential for a normalized input		#
+# setoxd():   computes the exponential for a denormalized input		#
+# setoxm1():  computes the exponential minus 1 for a normalized input	#
+# setoxm1d(): computes the exponential minus 1 for a denormalized input	#
+#									#
+# INPUT	*************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = exp(X) or exp(X)-1					#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 0.85 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM and IMPLEMENTATION **************************************** #
+#									#
+#	setoxd								#
+#	------								#
+#	Step 1.	Set ans := 1.0						#
+#									#
+#	Step 2.	Return	ans := ans + sign(X)*2^(-126). Exit.		#
+#	Notes:	This will always generate one exception -- inexact.	#
+#									#
+#									#
+#	setox								#
+#	-----								#
+#									#
+#	Step 1.	Filter out extreme cases of input argument.		#
+#		1.1	If |X| >= 2^(-65), go to Step 1.3.		#
+#		1.2	Go to Step 7.					#
+#		1.3	If |X| < 16380 log(2), go to Step 2.		#
+#		1.4	Go to Step 8.					#
+#	Notes:	The usual case should take the branches 1.1 -> 1.3 -> 2.#
+#		To avoid the use of floating-point comparisons, a	#
+#		compact representation of |X| is used. This format is a	#
+#		32-bit integer, the upper (more significant) 16 bits	#
+#		are the sign and biased exponent field of |X|; the	#
+#		lower 16 bits are the 16 most significant fraction	#
+#		(including the explicit bit) bits of |X|. Consequently,	#
+#		the comparisons in Steps 1.1 and 1.3 can be performed	#
+#		by integer comparison. Note also that the constant	#
+#		16380 log(2) used in Step 1.3 is also in the compact	#
+#		form. Thus taking the branch to Step 2 guarantees	#
+#		|X| < 16380 log(2). There is no harm in having a small	#
+#		number of cases where |X| is less than,	but close to,	#
+#		16380 log(2) and the branch to Step 9 is taken.		#
+#									#
+#	Step 2.	Calculate N = round-to-nearest-int( X * 64/log2 ).	#
+#		2.1	Set AdjFlag := 0 (indicates the branch 1.3 -> 2 #
+#			was taken)					#
+#		2.2	N := round-to-nearest-integer( X * 64/log2 ).	#
+#		2.3	Calculate	J = N mod 64; so J = 0,1,2,..., #
+#			or 63.						#
+#		2.4	Calculate	M = (N - J)/64; so N = 64M + J.	#
+#		2.5	Calculate the address of the stored value of	#
+#			2^(J/64).					#
+#		2.6	Create the value Scale = 2^M.			#
+#	Notes:	The calculation in 2.2 is really performed by		#
+#			Z := X * constant				#
+#			N := round-to-nearest-integer(Z)		#
+#		where							#
+#			constant := single-precision( 64/log 2 ).	#
+#									#
+#		Using a single-precision constant avoids memory		#
+#		access. Another effect of using a single-precision	#
+#		"constant" is that the calculated value Z is		#
+#									#
+#			Z = X*(64/log2)*(1+eps), |eps| <= 2^(-24).	#
+#									#
+#		This error has to be considered later in Steps 3 and 4.	#
+#									#
+#	Step 3.	Calculate X - N*log2/64.				#
+#		3.1	R := X + N*L1,					#
+#				where L1 := single-precision(-log2/64).	#
+#		3.2	R := R + N*L2,					#
+#				L2 := extended-precision(-log2/64 - L1).#
+#	Notes:	a) The way L1 and L2 are chosen ensures L1+L2		#
+#		approximate the value -log2/64 to 88 bits of accuracy.	#
+#		b) N*L1 is exact because N is no longer than 22 bits	#
+#		and L1 is no longer than 24 bits.			#
+#		c) The calculation X+N*L1 is also exact due to		#
+#		cancellation. Thus, R is practically X+N(L1+L2) to full	#
+#		64 bits.						#
+#		d) It is important to estimate how large can |R| be	#
+#		after Step 3.2.						#
+#									#
+#		N = rnd-to-int( X*64/log2 (1+eps) ), |eps|<=2^(-24)	#
+#		X*64/log2 (1+eps)	=	N + f,	|f| <= 0.5	#
+#		X*64/log2 - N	=	f - eps*X 64/log2		#
+#		X - N*log2/64	=	f*log2/64 - eps*X		#
+#									#
+#									#
+#		Now |X| <= 16446 log2, thus				#
+#									#
+#			|X - N*log2/64| <= (0.5 + 16446/2^(18))*log2/64	#
+#					<= 0.57 log2/64.		#
+#		 This bound will be used in Step 4.			#
+#									#
+#	Step 4.	Approximate exp(R)-1 by a polynomial			#
+#		p = R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))	#
+#	Notes:	a) In order to reduce memory access, the coefficients	#
+#		are made as "short" as possible: A1 (which is 1/2), A4	#
+#		and A5 are single precision; A2 and A3 are double	#
+#		precision.						#
+#		b) Even with the restrictions above,			#
+#		   |p - (exp(R)-1)| < 2^(-68.8) for all |R| <= 0.0062.	#
+#		Note that 0.0062 is slightly bigger than 0.57 log2/64.	#
+#		c) To fully utilize the pipeline, p is separated into	#
+#		two independent pieces of roughly equal complexities	#
+#			p = [ R + R*S*(A2 + S*A4) ]	+		#
+#				[ S*(A1 + S*(A3 + S*A5)) ]		#
+#		where S = R*R.						#
+#									#
+#	Step 5.	Compute 2^(J/64)*exp(R) = 2^(J/64)*(1+p) by		#
+#				ans := T + ( T*p + t)			#
+#		where T and t are the stored values for 2^(J/64).	#
+#	Notes:	2^(J/64) is stored as T and t where T+t approximates	#
+#		2^(J/64) to roughly 85 bits; T is in extended precision	#
+#		and t is in single precision. Note also that T is	#
+#		rounded to 62 bits so that the last two bits of T are	#
+#		zero. The reason for such a special form is that T-1,	#
+#		T-2, and T-8 will all be exact --- a property that will	#
+#		give much more accurate computation of the function	#
+#		EXPM1.							#
+#									#
+#	Step 6.	Reconstruction of exp(X)				#
+#			exp(X) = 2^M * 2^(J/64) * exp(R).		#
+#		6.1	If AdjFlag = 0, go to 6.3			#
+#		6.2	ans := ans * AdjScale				#
+#		6.3	Restore the user FPCR				#
+#		6.4	Return ans := ans * Scale. Exit.		#
+#	Notes:	If AdjFlag = 0, we have X = Mlog2 + Jlog2/64 + R,	#
+#		|M| <= 16380, and Scale = 2^M. Moreover, exp(X) will	#
+#		neither overflow nor underflow. If AdjFlag = 1, that	#
+#		means that						#
+#			X = (M1+M)log2 + Jlog2/64 + R, |M1+M| >= 16380.	#
+#		Hence, exp(X) may overflow or underflow or neither.	#
+#		When that is the case, AdjScale = 2^(M1) where M1 is	#
+#		approximately M. Thus 6.2 will never cause		#
+#		over/underflow. Possible exception in 6.4 is overflow	#
+#		or underflow. The inexact exception is not generated in	#
+#		6.4. Although one can argue that the inexact flag	#
+#		should always be raised, simulating that exception	#
+#		costs more than the flag is worth in practical use.	#
+#									#
+#	Step 7.	Return 1 + X.						#
+#		7.1	ans := X					#
+#		7.2	Restore user FPCR.				#
+#		7.3	Return ans := 1 + ans. Exit			#
+#	Notes:	For non-zero X, the inexact exception will always be	#
+#		raised by 7.3. That is the only exception raised by 7.3.#
+#		Note also that we use the FMOVEM instruction to move X	#
+#		in Step 7.1 to avoid unnecessary trapping. (Although	#
+#		the FMOVEM may not seem relevant since X is normalized,	#
+#		the precaution will be useful in the library version of	#
+#		this code where the separate entry for denormalized	#
+#		inputs will be done away with.)				#
+#									#
+#	Step 8.	Handle exp(X) where |X| >= 16380log2.			#
+#		8.1	If |X| > 16480 log2, go to Step 9.		#
+#		(mimic 2.2 - 2.6)					#
+#		8.2	N := round-to-integer( X * 64/log2 )		#
+#		8.3	Calculate J = N mod 64, J = 0,1,...,63		#
+#		8.4	K := (N-J)/64, M1 := truncate(K/2), M = K-M1,	#
+#			AdjFlag := 1.					#
+#		8.5	Calculate the address of the stored value	#
+#			2^(J/64).					#
+#		8.6	Create the values Scale = 2^M, AdjScale = 2^M1.	#
+#		8.7	Go to Step 3.					#
+#	Notes:	Refer to notes for 2.2 - 2.6.				#
+#									#
+#	Step 9.	Handle exp(X), |X| > 16480 log2.			#
+#		9.1	If X < 0, go to 9.3				#
+#		9.2	ans := Huge, go to 9.4				#
+#		9.3	ans := Tiny.					#
+#		9.4	Restore user FPCR.				#
+#		9.5	Return ans := ans * ans. Exit.			#
+#	Notes:	Exp(X) will surely overflow or underflow, depending on	#
+#		X's sign. "Huge" and "Tiny" are respectively large/tiny	#
+#		extended-precision numbers whose square over/underflow	#
+#		with an inexact result. Thus, 9.5 always raises the	#
+#		inexact together with either overflow or underflow.	#
+#									#
+#	setoxm1d							#
+#	--------							#
+#									#
+#	Step 1.	Set ans := 0						#
+#									#
+#	Step 2.	Return	ans := X + ans. Exit.				#
+#	Notes:	This will return X with the appropriate rounding	#
+#		 precision prescribed by the user FPCR.			#
+#									#
+#	setoxm1								#
+#	-------								#
+#									#
+#	Step 1.	Check |X|						#
+#		1.1	If |X| >= 1/4, go to Step 1.3.			#
+#		1.2	Go to Step 7.					#
+#		1.3	If |X| < 70 log(2), go to Step 2.		#
+#		1.4	Go to Step 10.					#
+#	Notes:	The usual case should take the branches 1.1 -> 1.3 -> 2.#
+#		However, it is conceivable |X| can be small very often	#
+#		because EXPM1 is intended to evaluate exp(X)-1		#
+#		accurately when |X| is small. For further details on	#
+#		the comparisons, see the notes on Step 1 of setox.	#
+#									#
+#	Step 2.	Calculate N = round-to-nearest-int( X * 64/log2 ).	#
+#		2.1	N := round-to-nearest-integer( X * 64/log2 ).	#
+#		2.2	Calculate	J = N mod 64; so J = 0,1,2,..., #
+#			or 63.						#
+#		2.3	Calculate	M = (N - J)/64; so N = 64M + J.	#
+#		2.4	Calculate the address of the stored value of	#
+#			2^(J/64).					#
+#		2.5	Create the values Sc = 2^M and			#
+#			OnebySc := -2^(-M).				#
+#	Notes:	See the notes on Step 2 of setox.			#
+#									#
+#	Step 3.	Calculate X - N*log2/64.				#
+#		3.1	R := X + N*L1,					#
+#				where L1 := single-precision(-log2/64).	#
+#		3.2	R := R + N*L2,					#
+#				L2 := extended-precision(-log2/64 - L1).#
+#	Notes:	Applying the analysis of Step 3 of setox in this case	#
+#		shows that |R| <= 0.0055 (note that |X| <= 70 log2 in	#
+#		this case).						#
+#									#
+#	Step 4.	Approximate exp(R)-1 by a polynomial			#
+#			p = R+R*R*(A1+R*(A2+R*(A3+R*(A4+R*(A5+R*A6)))))	#
+#	Notes:	a) In order to reduce memory access, the coefficients	#
+#		are made as "short" as possible: A1 (which is 1/2), A5	#
+#		and A6 are single precision; A2, A3 and A4 are double	#
+#		precision.						#
+#		b) Even with the restriction above,			#
+#			|p - (exp(R)-1)| <	|R| * 2^(-72.7)		#
+#		for all |R| <= 0.0055.					#
+#		c) To fully utilize the pipeline, p is separated into	#
+#		two independent pieces of roughly equal complexity	#
+#			p = [ R*S*(A2 + S*(A4 + S*A6)) ]	+	#
+#				[ R + S*(A1 + S*(A3 + S*A5)) ]		#
+#		where S = R*R.						#
+#									#
+#	Step 5.	Compute 2^(J/64)*p by					#
+#				p := T*p				#
+#		where T and t are the stored values for 2^(J/64).	#
+#	Notes:	2^(J/64) is stored as T and t where T+t approximates	#
+#		2^(J/64) to roughly 85 bits; T is in extended precision	#
+#		and t is in single precision. Note also that T is	#
+#		rounded to 62 bits so that the last two bits of T are	#
+#		zero. The reason for such a special form is that T-1,	#
+#		T-2, and T-8 will all be exact --- a property that will	#
+#		be exploited in Step 6 below. The total relative error	#
+#		in p is no bigger than 2^(-67.7) compared to the final	#
+#		result.							#
+#									#
+#	Step 6.	Reconstruction of exp(X)-1				#
+#			exp(X)-1 = 2^M * ( 2^(J/64) + p - 2^(-M) ).	#
+#		6.1	If M <= 63, go to Step 6.3.			#
+#		6.2	ans := T + (p + (t + OnebySc)). Go to 6.6	#
+#		6.3	If M >= -3, go to 6.5.				#
+#		6.4	ans := (T + (p + t)) + OnebySc. Go to 6.6	#
+#		6.5	ans := (T + OnebySc) + (p + t).			#
+#		6.6	Restore user FPCR.				#
+#		6.7	Return ans := Sc * ans. Exit.			#
+#	Notes:	The various arrangements of the expressions give	#
+#		accurate evaluations.					#
+#									#
+#	Step 7.	exp(X)-1 for |X| < 1/4.					#
+#		7.1	If |X| >= 2^(-65), go to Step 9.		#
+#		7.2	Go to Step 8.					#
+#									#
+#	Step 8.	Calculate exp(X)-1, |X| < 2^(-65).			#
+#		8.1	If |X| < 2^(-16312), goto 8.3			#
+#		8.2	Restore FPCR; return ans := X - 2^(-16382).	#
+#			Exit.						#
+#		8.3	X := X * 2^(140).				#
+#		8.4	Restore FPCR; ans := ans - 2^(-16382).		#
+#		 Return ans := ans*2^(140). Exit			#
+#	Notes:	The idea is to return "X - tiny" under the user		#
+#		precision and rounding modes. To avoid unnecessary	#
+#		inefficiency, we stay away from denormalized numbers	#
+#		the best we can. For |X| >= 2^(-16312), the		#
+#		straightforward 8.2 generates the inexact exception as	#
+#		the case warrants.					#
+#									#
+#	Step 9.	Calculate exp(X)-1, |X| < 1/4, by a polynomial		#
+#			p = X + X*X*(B1 + X*(B2 + ... + X*B12))		#
+#	Notes:	a) In order to reduce memory access, the coefficients	#
+#		are made as "short" as possible: B1 (which is 1/2), B9	#
+#		to B12 are single precision; B3 to B8 are double	#
+#		precision; and B2 is double extended.			#
+#		b) Even with the restriction above,			#
+#			|p - (exp(X)-1)| < |X| 2^(-70.6)		#
+#		for all |X| <= 0.251.					#
+#		Note that 0.251 is slightly bigger than 1/4.		#
+#		c) To fully preserve accuracy, the polynomial is	#
+#		computed as						#
+#			X + ( S*B1 +	Q ) where S = X*X and		#
+#			Q	=	X*S*(B2 + X*(B3 + ... + X*B12))	#
+#		d) To fully utilize the pipeline, Q is separated into	#
+#		two independent pieces of roughly equal complexity	#
+#			Q = [ X*S*(B2 + S*(B4 + ... + S*B12)) ] +	#
+#				[ S*S*(B3 + S*(B5 + ... + S*B11)) ]	#
+#									#
+#	Step 10. Calculate exp(X)-1 for |X| >= 70 log 2.		#
+#		10.1 If X >= 70log2 , exp(X) - 1 = exp(X) for all	#
+#		practical purposes. Therefore, go to Step 1 of setox.	#
+#		10.2 If X <= -70log2, exp(X) - 1 = -1 for all practical	#
+#		purposes.						#
+#		ans := -1						#
+#		Restore user FPCR					#
+#		Return ans := ans + 2^(-126). Exit.			#
+#	Notes:	10.2 will always create an inexact and return -1 + tiny	#
+#		in the user rounding precision and mode.		#
+#									#
+#########################################################################
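+# The setox flow (Steps 2-6) in a compact C sketch, for orientation only:
+# expm1() and exp2() stand in for the degree-5 polynomial and the stored
+# 2^(J/64) table, a single-piece log(2)/64 replaces the exact L1/L2 pair,
+# and the helper name is made up.
+#
+#	#include <math.h>
+#
+#	static double exp_by_table_idea(double x)
+#	{
+#		const double LN2 = 0.6931471805599453;
+#		double n = nearbyint(x * (64.0 / LN2));	/* N = round(X*64/log2) */
+#		long N = (long)n;
+#		long J = ((N % 64) + 64) % 64;		/* J = N mod 64 */
+#		long M = (N - J) / 64;			/* N = 64*M + J */
+#		double r = x - n * (LN2 / 64.0);	/* reduced argument R */
+#		double p = expm1(r);			/* exp(R) - 1 */
+#		double T = exp2((double)J / 64.0);	/* 2^(J/64) */
+#		return ldexp(T + T * p, (int)M);	/* 2^M * 2^(J/64) * exp(R) */
+#	}
+#
+# (The over/underflow paths of Steps 7-9 are ignored here.)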
+
+L2:	long		0x3FDC0000,0x82E30865,0x4361C4C6,0x00000000
+
+EEXPA3:	long		0x3FA55555,0x55554CC1
+EEXPA2:	long		0x3FC55555,0x55554A54
+
+EM1A4:	long		0x3F811111,0x11174385
+EM1A3:	long		0x3FA55555,0x55554F5A
+
+EM1A2:	long		0x3FC55555,0x55555555,0x00000000,0x00000000
+
+EM1B8:	long		0x3EC71DE3,0xA5774682
+EM1B7:	long		0x3EFA01A0,0x19D7CB68
+
+EM1B6:	long		0x3F2A01A0,0x1A019DF3
+EM1B5:	long		0x3F56C16C,0x16C170E2
+
+EM1B4:	long		0x3F811111,0x11111111
+EM1B3:	long		0x3FA55555,0x55555555
+
+EM1B2:	long		0x3FFC0000,0xAAAAAAAA,0xAAAAAAAB
+	long		0x00000000
+
+TWO140:	long		0x48B00000,0x00000000
+TWON140:
+	long		0x37300000,0x00000000
+
+EEXPTBL:
+	long		0x3FFF0000,0x80000000,0x00000000,0x00000000
+	long		0x3FFF0000,0x8164D1F3,0xBC030774,0x9F841A9B
+	long		0x3FFF0000,0x82CD8698,0xAC2BA1D8,0x9FC1D5B9
+	long		0x3FFF0000,0x843A28C3,0xACDE4048,0xA0728369
+	long		0x3FFF0000,0x85AAC367,0xCC487B14,0x1FC5C95C
+	long		0x3FFF0000,0x871F6196,0x9E8D1010,0x1EE85C9F
+	long		0x3FFF0000,0x88980E80,0x92DA8528,0x9FA20729
+	long		0x3FFF0000,0x8A14D575,0x496EFD9C,0xA07BF9AF
+	long		0x3FFF0000,0x8B95C1E3,0xEA8BD6E8,0xA0020DCF
+	long		0x3FFF0000,0x8D1ADF5B,0x7E5BA9E4,0x205A63DA
+	long		0x3FFF0000,0x8EA4398B,0x45CD53C0,0x1EB70051
+	long		0x3FFF0000,0x9031DC43,0x1466B1DC,0x1F6EB029
+	long		0x3FFF0000,0x91C3D373,0xAB11C338,0xA0781494
+	long		0x3FFF0000,0x935A2B2F,0x13E6E92C,0x9EB319B0
+	long		0x3FFF0000,0x94F4EFA8,0xFEF70960,0x2017457D
+	long		0x3FFF0000,0x96942D37,0x20185A00,0x1F11D537
+	long		0x3FFF0000,0x9837F051,0x8DB8A970,0x9FB952DD
+	long		0x3FFF0000,0x99E04593,0x20B7FA64,0x1FE43087
+	long		0x3FFF0000,0x9B8D39B9,0xD54E5538,0x1FA2A818
+	long		0x3FFF0000,0x9D3ED9A7,0x2CFFB750,0x1FDE494D
+	long		0x3FFF0000,0x9EF53260,0x91A111AC,0x20504890
+	long		0x3FFF0000,0xA0B0510F,0xB9714FC4,0xA073691C
+	long		0x3FFF0000,0xA2704303,0x0C496818,0x1F9B7A05
+	long		0x3FFF0000,0xA43515AE,0x09E680A0,0xA0797126
+	long		0x3FFF0000,0xA5FED6A9,0xB15138EC,0xA071A140
+	long		0x3FFF0000,0xA7CD93B4,0xE9653568,0x204F62DA
+	long		0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x1F283C4A
+	long		0x3FFF0000,0xAB7A39B5,0xA93ED338,0x9F9A7FDC
+	long		0x3FFF0000,0xAD583EEA,0x42A14AC8,0xA05B3FAC
+	long		0x3FFF0000,0xAF3B78AD,0x690A4374,0x1FDF2610
+	long		0x3FFF0000,0xB123F581,0xD2AC2590,0x9F705F90
+	long		0x3FFF0000,0xB311C412,0xA9112488,0x201F678A
+	long		0x3FFF0000,0xB504F333,0xF9DE6484,0x1F32FB13
+	long		0x3FFF0000,0xB6FD91E3,0x28D17790,0x20038B30
+	long		0x3FFF0000,0xB8FBAF47,0x62FB9EE8,0x200DC3CC
+	long		0x3FFF0000,0xBAFF5AB2,0x133E45FC,0x9F8B2AE6
+	long		0x3FFF0000,0xBD08A39F,0x580C36C0,0xA02BBF70
+	long		0x3FFF0000,0xBF1799B6,0x7A731084,0xA00BF518
+	long		0x3FFF0000,0xC12C4CCA,0x66709458,0xA041DD41
+	long		0x3FFF0000,0xC346CCDA,0x24976408,0x9FDF137B
+	long		0x3FFF0000,0xC5672A11,0x5506DADC,0x201F1568
+	long		0x3FFF0000,0xC78D74C8,0xABB9B15C,0x1FC13A2E
+	long		0x3FFF0000,0xC9B9BD86,0x6E2F27A4,0xA03F8F03
+	long		0x3FFF0000,0xCBEC14FE,0xF2727C5C,0x1FF4907D
+	long		0x3FFF0000,0xCE248C15,0x1F8480E4,0x9E6E53E4
+	long		0x3FFF0000,0xD06333DA,0xEF2B2594,0x1FD6D45C
+	long		0x3FFF0000,0xD2A81D91,0xF12AE45C,0xA076EDB9
+	long		0x3FFF0000,0xD4F35AAB,0xCFEDFA20,0x9FA6DE21
+	long		0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x1EE69A2F
+	long		0x3FFF0000,0xD99D15C2,0x78AFD7B4,0x207F439F
+	long		0x3FFF0000,0xDBFBB797,0xDAF23754,0x201EC207
+	long		0x3FFF0000,0xDE60F482,0x5E0E9124,0x9E8BE175
+	long		0x3FFF0000,0xE0CCDEEC,0x2A94E110,0x20032C4B
+	long		0x3FFF0000,0xE33F8972,0xBE8A5A50,0x2004DFF5
+	long		0x3FFF0000,0xE5B906E7,0x7C8348A8,0x1E72F47A
+	long		0x3FFF0000,0xE8396A50,0x3C4BDC68,0x1F722F22
+	long		0x3FFF0000,0xEAC0C6E7,0xDD243930,0xA017E945
+	long		0x3FFF0000,0xED4F301E,0xD9942B84,0x1F401A5B
+	long		0x3FFF0000,0xEFE4B99B,0xDCDAF5CC,0x9FB9A9E3
+	long		0x3FFF0000,0xF281773C,0x59FFB138,0x20744C05
+	long		0x3FFF0000,0xF5257D15,0x2486CC2C,0x1F773A19
+	long		0x3FFF0000,0xF7D0DF73,0x0AD13BB8,0x1FFE90D5
+	long		0x3FFF0000,0xFA83B2DB,0x722A033C,0xA041ED22
+	long		0x3FFF0000,0xFD3E0C0C,0xF486C174,0x1F853F3A
+
+	set		ADJFLAG,L_SCR2
+	set		SCALE,FP_SCR0
+	set		ADJSCALE,FP_SCR1
+	set		SC,FP_SCR0
+	set		ONEBYSC,FP_SCR1
+
+	global		setox
+setox:
+#--entry point for EXP(X), here X is finite, non-zero, and not NaN's
+
+#--Step 1.
+	mov.l		(%a0),%d1		# load part of input X
+	and.l		&0x7FFF0000,%d1		# biased expo. of X
+	cmp.l		%d1,&0x3FBE0000		# 2^(-65)
+	bge.b		EXPC1			# normal case
+	bra		EXPSM
+
+EXPC1:
+#--The case |X| >= 2^(-65)
+	mov.w		4(%a0),%d1		# expo. and partial sig. of |X|
+	cmp.l		%d1,&0x400CB167		# 16380 log2 trunc. 16 bits
+	blt.b		EXPMAIN			# normal case
+	bra		EEXPBIG
+
+EXPMAIN:
+#--Step 2.
+#--This is the normal branch:	2^(-65) <= |X| < 16380 log2.
+	fmov.x		(%a0),%fp0		# load input from (a0)
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	mov.l		&0,ADJFLAG(%a6)
+	fmov.l		%fp0,%d1		# N = int( X * 64/log2 )
+	lea		EEXPTBL(%pc),%a1
+	fmov.l		%d1,%fp0		# convert to floating-format
+
+	mov.l		%d1,L_SCR1(%a6)		# save N temporarily
+	and.l		&0x3F,%d1		# D0 is J = N mod 64
+	lsl.l		&4,%d1
+	add.l		%d1,%a1			# address of 2^(J/64)
+	mov.l		L_SCR1(%a6),%d1
+	asr.l		&6,%d1			# D0 is M
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(M)
+	mov.w		L2(%pc),L_SCR1(%a6)	# prefetch L2, no need in CB
+
+EXPCONT1:
+#--Step 3.
+#--fp1,fp2 saved on the stack. fp0 is N, fp1 is X,
+#--a0 points to 2^(J/64), D0 is biased expo. of 2^(M)
+	fmov.x		%fp0,%fp2
+	fmul.s		&0xBC317218,%fp0	# N * L1, L1 = lead(-log2/64)
+	fmul.x		L2(%pc),%fp2		# N * L2, L1+L2 = -log2/64
+	fadd.x		%fp1,%fp0		# X + N*L1
+	fadd.x		%fp2,%fp0		# fp0 is R, reduced arg.
+
+#--Step 4.
+#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
+#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+#--[R+R*S*(A2+S*A4)] + [S*(A1+S*(A3+S*A5))]
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# fp1 IS S = R*R
+
+	fmov.s		&0x3AB60B70,%fp2	# fp2 IS A5
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*A5
+	fmov.x		%fp1,%fp3
+	fmul.s		&0x3C088895,%fp3	# fp3 IS S*A4
+
+	fadd.d		EEXPA3(%pc),%fp2	# fp2 IS A3+S*A5
+	fadd.d		EEXPA2(%pc),%fp3	# fp3 IS A2+S*A4
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A3+S*A5)
+	mov.w		%d1,SCALE(%a6)		# SCALE is 2^(M) in extended
+	mov.l		&0x80000000,SCALE+4(%a6)
+	clr.l		SCALE+8(%a6)
+
+	fmul.x		%fp1,%fp3		# fp3 IS S*(A2+S*A4)
+
+	fadd.s		&0x3F000000,%fp2	# fp2 IS A1+S*(A3+S*A5)
+	fmul.x		%fp0,%fp3		# fp3 IS R*S*(A2+S*A4)
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A1+S*(A3+S*A5))
+	fadd.x		%fp3,%fp0		# fp0 IS R+R*S*(A2+S*A4),
+
+	fmov.x		(%a1)+,%fp1		# fp1 is lead. pt. of 2^(J/64)
+	fadd.x		%fp2,%fp0		# fp0 is EXP(R) - 1
+
+#--Step 5
+#--final reconstruction process
+#--EXP(X) = 2^M * ( 2^(J/64) + 2^(J/64)*(EXP(R)-1) )
+
+	fmul.x		%fp1,%fp0		# 2^(J/64)*(Exp(R)-1)
+	fmovm.x		(%sp)+,&0x30		# fp2 restored {%fp2/%fp3}
+	fadd.s		(%a1),%fp0		# accurate 2^(J/64)
+
+	fadd.x		%fp1,%fp0		# 2^(J/64) + 2^(J/64)*...
+	mov.l		ADJFLAG(%a6),%d1
+
+#--Step 6
+	tst.l		%d1
+	beq.b		NORMAL
+ADJUST:
+	fmul.x		ADJSCALE(%a6),%fp0
+NORMAL:
+	fmov.l		%d0,%fpcr		# restore user FPCR
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		SCALE(%a6),%fp0		# multiply 2^(M)
+	bra		t_catch
+
+EXPSM:
+#--Step 7
+	fmovm.x		(%a0),&0x80		# load X
+	fmov.l		%d0,%fpcr
+	fadd.s		&0x3F800000,%fp0	# 1+X in user mode
+	bra		t_pinx2
+
+EEXPBIG:
+#--Step 8
+	cmp.l		%d1,&0x400CB27C		# 16480 log2
+	bgt.b		EXP2BIG
+#--Steps 8.2 -- 8.6
+	fmov.x		(%a0),%fp0		# load input from (a0)
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	mov.l		&1,ADJFLAG(%a6)
+	fmov.l		%fp0,%d1		# N = int( X * 64/log2 )
+	lea		EEXPTBL(%pc),%a1
+	fmov.l		%d1,%fp0		# convert to floating-format
+	mov.l		%d1,L_SCR1(%a6)		# save N temporarily
+	and.l		&0x3F,%d1		# D0 is J = N mod 64
+	lsl.l		&4,%d1
+	add.l		%d1,%a1			# address of 2^(J/64)
+	mov.l		L_SCR1(%a6),%d1
+	asr.l		&6,%d1			# D0 is K
+	mov.l		%d1,L_SCR1(%a6)		# save K temporarily
+	asr.l		&1,%d1			# D0 is M1
+	sub.l		%d1,L_SCR1(%a6)		# a1 is M
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(M1)
+	mov.w		%d1,ADJSCALE(%a6)	# ADJSCALE := 2^(M1)
+	mov.l		&0x80000000,ADJSCALE+4(%a6)
+	clr.l		ADJSCALE+8(%a6)
+	mov.l		L_SCR1(%a6),%d1		# D0 is M
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(M)
+	bra.w		EXPCONT1		# go back to Step 3
+
+EXP2BIG:
+#--Step 9
+	tst.b		(%a0)			# is X positive or negative?
+	bmi		t_unfl2
+	bra		t_ovfl2
+
+	global		setoxd
+setoxd:
+#--entry point for EXP(X), X is denormalized
+	mov.l		(%a0),-(%sp)
+	andi.l		&0x80000000,(%sp)
+	ori.l		&0x00800000,(%sp)	# sign(X)*2^(-126)
+
+	fmov.s		&0x3F800000,%fp0
+
+	fmov.l		%d0,%fpcr
+	fadd.s		(%sp)+,%fp0
+	bra		t_pinx2
+
+	global		setoxm1
+setoxm1:
+#--entry point for EXPM1(X), here X is finite, non-zero, non-NaN
+
+#--Step 1.
+#--Step 1.1
+	mov.l		(%a0),%d1		# load part of input X
+	and.l		&0x7FFF0000,%d1		# biased expo. of X
+	cmp.l		%d1,&0x3FFD0000		# 1/4
+	bge.b		EM1CON1			# |X| >= 1/4
+	bra		EM1SM
+
+EM1CON1:
+#--Step 1.3
+#--The case |X| >= 1/4
+	mov.w		4(%a0),%d1		# expo. and partial sig. of |X|
+	cmp.l		%d1,&0x4004C215		# 70log2 rounded up to 16 bits
+	ble.b		EM1MAIN			# 1/4 <= |X| <= 70log2
+	bra		EM1BIG
+
+EM1MAIN:
+#--Step 2.
+#--This is the case:	1/4 <= |X| <= 70 log2.
+	fmov.x		(%a0),%fp0		# load input from (a0)
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	fmov.l		%fp0,%d1		# N = int( X * 64/log2 )
+	lea		EEXPTBL(%pc),%a1
+	fmov.l		%d1,%fp0		# convert to floating-format
+
+	mov.l		%d1,L_SCR1(%a6)		# save N temporarily
+	and.l		&0x3F,%d1		# D0 is J = N mod 64
+	lsl.l		&4,%d1
+	add.l		%d1,%a1			# address of 2^(J/64)
+	mov.l		L_SCR1(%a6),%d1
+	asr.l		&6,%d1			# D0 is M
+	mov.l		%d1,L_SCR1(%a6)		# save a copy of M
+
+#--Step 3.
+#--fp1,fp2 saved on the stack. fp0 is N, fp1 is X,
+#--a0 points to 2^(J/64), D0 and a1 both contain M
+	fmov.x		%fp0,%fp2
+	fmul.s		&0xBC317218,%fp0	# N * L1, L1 = lead(-log2/64)
+	fmul.x		L2(%pc),%fp2		# N * L2, L1+L2 = -log2/64
+	fadd.x		%fp1,%fp0		# X + N*L1
+	fadd.x		%fp2,%fp0		# fp0 is R, reduced arg.
+	add.w		&0x3FFF,%d1		# D0 is biased expo. of 2^M
+
+#--Step 4.
+#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*(A5 + R*A6)))))
+#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+#--[R*S*(A2+S*(A4+S*A6))] + [R+S*(A1+S*(A3+S*A5))]
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# fp1 IS S = R*R
+
+	fmov.s		&0x3950097B,%fp2	# fp2 IS a6
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*A6
+	fmov.x		%fp1,%fp3
+	fmul.s		&0x3AB60B6A,%fp3	# fp3 IS S*A5
+
+	fadd.d		EM1A4(%pc),%fp2		# fp2 IS A4+S*A6
+	fadd.d		EM1A3(%pc),%fp3		# fp3 IS A3+S*A5
+	mov.w		%d1,SC(%a6)		# SC is 2^(M) in extended
+	mov.l		&0x80000000,SC+4(%a6)
+	clr.l		SC+8(%a6)
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A4+S*A6)
+	mov.l		L_SCR1(%a6),%d1		# D0 is	M
+	neg.w		%d1			# D0 is -M
+	fmul.x		%fp1,%fp3		# fp3 IS S*(A3+S*A5)
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(-M)
+	fadd.d		EM1A2(%pc),%fp2		# fp2 IS A2+S*(A4+S*A6)
+	fadd.s		&0x3F000000,%fp3	# fp3 IS A1+S*(A3+S*A5)
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A2+S*(A4+S*A6))
+	or.w		&0x8000,%d1		# signed/expo. of -2^(-M)
+	mov.w		%d1,ONEBYSC(%a6)	# OnebySc is -2^(-M)
+	mov.l		&0x80000000,ONEBYSC+4(%a6)
+	clr.l		ONEBYSC+8(%a6)
+	fmul.x		%fp3,%fp1		# fp1 IS S*(A1+S*(A3+S*A5))
+
+	fmul.x		%fp0,%fp2		# fp2 IS R*S*(A2+S*(A4+S*A6))
+	fadd.x		%fp1,%fp0		# fp0 IS R+S*(A1+S*(A3+S*A5))
+
+	fadd.x		%fp2,%fp0		# fp0 IS EXP(R)-1
+
+	fmovm.x		(%sp)+,&0x30		# fp2 restored {%fp2/%fp3}
+
+#--Step 5
+#--Compute 2^(J/64)*p
+
+	fmul.x		(%a1),%fp0		# 2^(J/64)*(Exp(R)-1)
+
+#--Step 6
+#--Step 6.1
+	mov.l		L_SCR1(%a6),%d1		# retrieve M
+	cmp.l		%d1,&63
+	ble.b		MLE63
+#--Step 6.2	M >= 64
+	fmov.s		12(%a1),%fp1		# fp1 is t
+	fadd.x		ONEBYSC(%a6),%fp1	# fp1 is t+OnebySc
+	fadd.x		%fp1,%fp0		# p+(t+OnebySc), fp1 released
+	fadd.x		(%a1),%fp0		# T+(p+(t+OnebySc))
+	bra		EM1SCALE
+MLE63:
+#--Step 6.3	M <= 63
+	cmp.l		%d1,&-3
+	bge.b		MGEN3
+MLTN3:
+#--Step 6.4	M <= -4
+	fadd.s		12(%a1),%fp0		# p+t
+	fadd.x		(%a1),%fp0		# T+(p+t)
+	fadd.x		ONEBYSC(%a6),%fp0	# OnebySc + (T+(p+t))
+	bra		EM1SCALE
+MGEN3:
+#--Step 6.5	-3 <= M <= 63
+	fmov.x		(%a1)+,%fp1		# fp1 is T
+	fadd.s		(%a1),%fp0		# fp0 is p+t
+	fadd.x		ONEBYSC(%a6),%fp1	# fp1 is T+OnebySc
+	fadd.x		%fp1,%fp0		# (T+OnebySc)+(p+t)
+
+EM1SCALE:
+#--Step 6.6
+	fmov.l		%d0,%fpcr
+	fmul.x		SC(%a6),%fp0
+	bra		t_inx2
+
+EM1SM:
+#--Step 7	|X| < 1/4.
+	cmp.l		%d1,&0x3FBE0000		# 2^(-65)
+	bge.b		EM1POLY
+
+EM1TINY:
+#--Step 8	|X| < 2^(-65)
+	cmp.l		%d1,&0x00330000		# 2^(-16312)
+	blt.b		EM12TINY
+#--Step 8.2
+	mov.l		&0x80010000,SC(%a6)	# SC is -2^(-16382)
+	mov.l		&0x80000000,SC+4(%a6)
+	clr.l		SC+8(%a6)
+	fmov.x		(%a0),%fp0
+	fmov.l		%d0,%fpcr
+	mov.b		&FADD_OP,%d1		# last inst is ADD
+	fadd.x		SC(%a6),%fp0
+	bra		t_catch
+
+EM12TINY:
+#--Step 8.3
+	fmov.x		(%a0),%fp0
+	fmul.d		TWO140(%pc),%fp0
+	mov.l		&0x80010000,SC(%a6)
+	mov.l		&0x80000000,SC+4(%a6)
+	clr.l		SC+8(%a6)
+	fadd.x		SC(%a6),%fp0
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.d		TWON140(%pc),%fp0
+	bra		t_catch
+
+EM1POLY:
+#--Step 9	exp(X)-1 by a simple polynomial
+	fmov.x		(%a0),%fp0		# fp0 is X
+	fmul.x		%fp0,%fp0		# fp0 is S := X*X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	fmov.s		&0x2F30CAA8,%fp1	# fp1 is B12
+	fmul.x		%fp0,%fp1		# fp1 is S*B12
+	fmov.s		&0x310F8290,%fp2	# fp2 is B11
+	fadd.s		&0x32D73220,%fp1	# fp1 is B10+S*B12
+
+	fmul.x		%fp0,%fp2		# fp2 is S*B11
+	fmul.x		%fp0,%fp1		# fp1 is S*(B10 + ...
+
+	fadd.s		&0x3493F281,%fp2	# fp2 is B9+S*...
+	fadd.d		EM1B8(%pc),%fp1		# fp1 is B8+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B9+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B8+...
+
+	fadd.d		EM1B7(%pc),%fp2		# fp2 is B7+S*...
+	fadd.d		EM1B6(%pc),%fp1		# fp1 is B6+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B7+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B6+...
+
+	fadd.d		EM1B5(%pc),%fp2		# fp2 is B5+S*...
+	fadd.d		EM1B4(%pc),%fp1		# fp1 is B4+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B5+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B4+...
+
+	fadd.d		EM1B3(%pc),%fp2		# fp2 is B3+S*...
+	fadd.x		EM1B2(%pc),%fp1		# fp1 is B2+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B3+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B2+...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*S*(B3+...)
+	fmul.x		(%a0),%fp1		# fp1 is X*S*(B2...
+
+	fmul.s		&0x3F000000,%fp0	# fp0 is S*B1
+	fadd.x		%fp2,%fp1		# fp1 is Q
+
+	fmovm.x		(%sp)+,&0x30		# fp2 restored {%fp2/%fp3}
+
+	fadd.x		%fp1,%fp0		# fp0 is S*B1+Q
+
+	fmov.l		%d0,%fpcr
+	fadd.x		(%a0),%fp0
+	bra		t_inx2
+
+EM1BIG:
+#--Step 10	|X| > 70 log2
+	mov.l		(%a0),%d1
+	cmp.l		%d1,&0
+	bgt.w		EXPC1
+#--Step 10.2
+	fmov.s		&0xBF800000,%fp0	# fp0 is -1
+	fmov.l		%d0,%fpcr
+	fadd.s		&0x00800000,%fp0	# -1 + 2^(-126)
+	bra		t_minx2
+
+	global		setoxm1d
+setoxm1d:
+#--entry point for EXPM1(X), here X is denormalized
+#--Step 0.
+	bra		t_extdnrm
+
+#########################################################################
+# sgetexp():  returns the exponent portion of the input argument.	#
+#	      The exponent bias is removed and the exponent value is	#
+#	      returned as an extended precision number in fp0.		#
+# sgetexpd(): handles denormalized numbers.				#
+#									#
+# sgetman():  extracts the mantissa of the input argument. The		#
+#	      mantissa is converted to an extended precision number w/	#
+#	      an exponent of $3fff and is returned in fp0. The range of #
+#	      the result is [1.0 - 2.0).				#
+# sgetmand(): handles denormalized numbers.				#
+#									#
+# INPUT *************************************************************** #
+#	a0  = pointer to extended precision input			#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = exponent(X) or mantissa(X)				#
+#									#
+#########################################################################
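+
+# As a rough illustration only: the two operations described above behave
+# much like frexp() on a C double.  The sketch below is hedged -- the
+# "_sketch" names are made up, and it ignores the extended format, the
+# condition-code setting and the denorm entry points.
+#
+#	#include <math.h>
+#
+#	static double getexp_sketch(double x)
+#	{
+#		int e;
+#		(void)frexp(x, &e);	/* x = m * 2^e, 0.5 <= |m| < 1	*/
+#		return (double)(e - 1);	/* unbiased exponent of x	*/
+#	}
+#
+#	static double getman_sketch(double x)
+#	{
+#		int e;
+#		double m = frexp(x, &e);
+#		return 2.0 * m;		/* signed mantissa in [1.0,2.0)	*/
+#	}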
+
+	global		sgetexp
+sgetexp:
+	mov.w		SRC_EX(%a0),%d0		# get the exponent
+	bclr		&0xf,%d0		# clear the sign bit
+	subi.w		&0x3fff,%d0		# subtract off the bias
+	fmov.w		%d0,%fp0		# return exp in fp0
+	blt.b		sgetexpn		# it's negative
+	rts
+
+sgetexpn:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+	global		sgetexpd
+sgetexpd:
+	bsr.l		norm			# normalize
+	neg.w		%d0			# new exp = -(shft amt)
+	subi.w		&0x3fff,%d0		# subtract off the bias
+	fmov.w		%d0,%fp0		# return exp in fp0
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+	global		sgetman
+sgetman:
+	mov.w		SRC_EX(%a0),%d0		# get the exp
+	ori.w		&0x7fff,%d0		# clear old exp
+	bclr		&0xe,%d0		# make it the new exp +-3fff
+
+# here, we build the result in a tmp location so as not to disturb the input
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6) # copy to tmp loc
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6) # copy to tmp loc
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmov.x		FP_SCR0(%a6),%fp0	# put new value back in fp0
+	bmi.b		sgetmann		# it's negative
+	rts
+
+sgetmann:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#
+# For denormalized numbers, shift the mantissa until the j-bit = 1,
+# then load the exponent with +/- $3fff.
+#
+	global		sgetmand
+sgetmand:
+	bsr.l		norm			# normalize exponent
+	bra.b		sgetman
+
+#########################################################################
+# scosh():  computes the hyperbolic cosine of a normalized input	#
+# scoshd(): computes the hyperbolic cosine of a denormalized input	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = cosh(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	COSH								#
+#	1. If |X| > 16380 log2, go to 3.				#
+#									#
+#	2. (|X| <= 16380 log2) Cosh(X) is obtained by the formulae	#
+#		y = |X|, z = exp(Y), and				#
+#		cosh(X) = (1/2)*( z + 1/z ).				#
+#		Exit.							#
+#									#
+#	3. (|X| > 16380 log2). If |X| > 16480 log2, go to 5.		#
+#									#
+#	4. (16380 log2 < |X| <= 16480 log2)				#
+#		cosh(X) = exp(|X|)/2.					#
+#		However, invoking exp(|X|) may cause premature		#
+#		overflow. Thus, we calculate cosh(X) as follows:	#
+#		Y	:= |X|						#
+#		Fact	:=	2**(16380)				#
+#		Y'	:= Y - 16381 log2				#
+#		cosh(X) := Fact * exp(Y').				#
+#		Exit.							#
+#									#
+#	5. (|X| > 16480 log2) cosh(X) must overflow. Return		#
+#		Huge*Huge to generate overflow and an infinity with	#
+#		the appropriate sign. Huge is the largest finite number	#
+#		in extended format. Exit.				#
+#									#
+#########################################################################
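+
+# As a rough illustration of the steps above, a hedged double-precision C
+# sketch ("cosh_sketch" is an illustrative name; the thresholds mirror the
+# extended-precision ones, and a C double would overflow far earlier, so
+# this only shows the structure of the algorithm):
+#
+#	#include <math.h>
+#	#define LN2 0.69314718055994530942
+#
+#	static double cosh_sketch(double x)
+#	{
+#		double y = fabs(x);
+#		if (y <= 16380.0*LN2) {			/* step 2	*/
+#			double z = exp(y);
+#			return 0.5*(z + 1.0/z);
+#		}
+#		if (y <= 16480.0*LN2)			/* step 4	*/
+#			return ldexp(exp(y - 16381.0*LN2), 16380);
+#		return HUGE_VAL*HUGE_VAL;		/* step 5: overflow */
+#	}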
+
+TWO16380:
+	long		0x7FFB0000,0x80000000,0x00000000,0x00000000
+
+	global		scosh
+scosh:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x400CB167
+	bgt.b		COSHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+#--COSH(X) = (1/2) * ( EXP(X) + 1/EXP(X) )
+
+	fabs.x		%fp0			# |X|
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save |X| to stack
+	lea		(%sp),%a0		# pass ptr to |X|
+	bsr		setox			# FP0 IS EXP(|X|)
+	add.l		&0xc,%sp		# erase |X| from stack
+	fmul.s		&0x3F000000,%fp0	# (1/2)EXP(|X|)
+	mov.l		(%sp)+,%d0
+
+	fmov.s		&0x3E800000,%fp1	# (1/4)
+	fdiv.x		%fp0,%fp1		# 1/(2 EXP(|X|))
+
+	fmov.l		%d0,%fpcr
+	mov.b		&FADD_OP,%d1		# last inst is ADD
+	fadd.x		%fp1,%fp0
+	bra		t_catch
+
+COSHBIG:
+	cmp.l		%d1,&0x400CB2B3
+	bgt.b		COSHHUGE
+
+	fabs.x		%fp0
+	fsub.d		T1(%pc),%fp0		# (|X|-16381LOG2_LEAD)
+	fsub.d		T2(%pc),%fp0		# |X| - 16381 LOG2, ACCURATE
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save fp0 to stack
+	lea		(%sp),%a0		# pass ptr to fp0
+	bsr		setox
+	add.l		&0xc,%sp		# clear fp0 from stack
+	mov.l		(%sp)+,%d0
+
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		TWO16380(%pc),%fp0
+	bra		t_catch
+
+COSHHUGE:
+	bra		t_ovfl2
+
+	global		scoshd
+#--COSH(X) = 1 FOR DENORMALIZED X
+scoshd:
+	fmov.s		&0x3F800000,%fp0
+
+	fmov.l		%d0,%fpcr
+	fadd.s		&0x00800000,%fp0
+	bra		t_pinx2
+
+#########################################################################
+# ssinh():  computes the hyperbolic sine of a normalized input		#
+# ssinhd(): computes the hyperbolic sine of a denormalized input	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = sinh(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#       SINH								#
+#       1. If |X| > 16380 log2, go to 3.				#
+#									#
+#       2. (|X| <= 16380 log2) Sinh(X) is obtained by the formula	#
+#               y = |X|, sgn = sign(X), and z = expm1(Y),		#
+#               sinh(X) = sgn*(1/2)*( z + z/(1+z) ).			#
+#          Exit.							#
+#									#
+#       3. If |X| > 16480 log2, go to 5.				#
+#									#
+#       4. (16380 log2 < |X| <= 16480 log2)				#
+#               sinh(X) = sign(X) * exp(|X|)/2.				#
+#          However, invoking exp(|X|) may cause premature overflow.	#
+#          Thus, we calculate sinh(X) as follows:			#
+#             Y       := |X|						#
+#             sgn     := sign(X)					#
+#             sgnFact := sgn * 2**(16380)				#
+#             Y'      := Y - 16381 log2					#
+#             sinh(X) := sgnFact * exp(Y').				#
+#          Exit.							#
+#									#
+#       5. (|X| > 16480 log2) sinh(X) must overflow. Return		#
+#          sign(X)*Huge*Huge to generate overflow and an infinity with	#
+#          the appropriate sign. Huge is the largest finite number in	#
+#          extended format. Exit.					#
+#									#
+#########################################################################
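+
+# A hedged double-precision C sketch of the steps above ("sinh_sketch" is
+# an illustrative name; expm1() stands in for the setoxm1 kernel used by
+# the code below, and the thresholds only show the structure):
+#
+#	#include <math.h>
+#	#define LN2 0.69314718055994530942
+#
+#	static double sinh_sketch(double x)
+#	{
+#		double y = fabs(x), sgn = (x < 0.0) ? -1.0 : 1.0;
+#		if (y <= 16380.0*LN2) {			/* step 2	*/
+#			double z = expm1(y);
+#			return sgn*0.5*(z + z/(1.0 + z));
+#		}
+#		if (y <= 16480.0*LN2)			/* step 4	*/
+#			return sgn*ldexp(exp(y - 16381.0*LN2), 16380);
+#		return sgn*HUGE_VAL*HUGE_VAL;		/* step 5: overflow */
+#	}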
+
+	global		ssinh
+ssinh:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	mov.l		%d1,%a1			# save (compacted) operand
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x400CB167
+	bgt.b		SINHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+#--Y = |X|, Z = EXPM1(Y), SINH(X) = SIGN(X)*(1/2)*( Z + Z/(1+Z) )
+
+	fabs.x		%fp0			# Y = |X|
+
+	movm.l		&0x8040,-(%sp)		# {a1/d0}
+	fmovm.x		&0x01,-(%sp)		# save Y on stack
+	lea		(%sp),%a0		# pass ptr to Y
+	clr.l		%d0
+	bsr		setoxm1			# FP0 IS Z = EXPM1(Y)
+	add.l		&0xc,%sp		# clear Y from stack
+	fmov.l		&0,%fpcr
+	movm.l		(%sp)+,&0x0201		# {a1/d0}
+
+	fmov.x		%fp0,%fp1
+	fadd.s		&0x3F800000,%fp1	# 1+Z
+	fmov.x		%fp0,-(%sp)
+	fdiv.x		%fp1,%fp0		# Z/(1+Z)
+	mov.l		%a1,%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x3F000000,%d1
+	fadd.x		(%sp)+,%fp0
+	mov.l		%d1,-(%sp)
+
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.s		(%sp)+,%fp0		# last fp inst - possible exceptions set
+	bra		t_catch
+
+SINHBIG:
+	cmp.l		%d1,&0x400CB2B3
+	bgt		t_ovfl
+	fabs.x		%fp0
+	fsub.d		T1(%pc),%fp0		# (|X|-16381LOG2_LEAD)
+	mov.l		&0,-(%sp)
+	mov.l		&0x80000000,-(%sp)
+	mov.l		%a1,%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x7FFB0000,%d1
+	mov.l		%d1,-(%sp)		# EXTENDED FMT
+	fsub.d		T2(%pc),%fp0		# |X| - 16381 LOG2, ACCURATE
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save fp0 on stack
+	lea		(%sp),%a0		# pass ptr to fp0
+	bsr		setox
+	add.l		&0xc,%sp		# clear fp0 from stack
+
+	mov.l		(%sp)+,%d0
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		(%sp)+,%fp0		# possible exception
+	bra		t_catch
+
+	global		ssinhd
+#--SINH(X) = X FOR DENORMALIZED X
+ssinhd:
+	bra		t_extdnrm
+
+#########################################################################
+# stanh():  computes the hyperbolic tangent of a normalized input	#
+# stanhd(): computes the hyperbolic tangent of a denormalized input	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = tanh(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	TANH								#
+#	1. If |X| >= (5/2) log2 or |X| <= 2**(-40), go to 3.		#
+#									#
+#	2. (2**(-40) < |X| < (5/2) log2) Calculate tanh(X) by		#
+#		sgn := sign(X), y := 2|X|, z := expm1(Y), and		#
+#		tanh(X) = sgn*( z/(2+z) ).				#
+#		Exit.							#
+#									#
+#	3. (|X| <= 2**(-40) or |X| >= (5/2) log2). If |X| < 1,		#
+#		go to 7.						#
+#									#
+#	4. (|X| >= (5/2) log2) If |X| >= 50 log2, go to 6.		#
+#									#
+#	5. ((5/2) log2 <= |X| < 50 log2) Calculate tanh(X) by		#
+#		sgn := sign(X), y := 2|X|, z := exp(Y),			#
+#		tanh(X) = sgn - [ sgn*2/(1+z) ].			#
+#		Exit.							#
+#									#
+#	6. (|X| >= 50 log2) Tanh(X) = +-1 (round to nearest). Thus, we	#
+#		calculate Tanh(X) by					#
+#		sgn := sign(X), Tiny := 2**(-126),			#
+#		tanh(X) := sgn - sgn*Tiny.				#
+#		Exit.							#
+#									#
+#	7. (|X| < 2**(-40)). Tanh(X) = X.	Exit.			#
+#									#
+#########################################################################
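+
+# A hedged double-precision C sketch of the steps above ("tanh_sketch" is
+# an illustrative name; expm1()/exp() stand in for setoxm1/setox):
+#
+#	#include <math.h>
+#	#define LN2 0.69314718055994530942
+#
+#	static double tanh_sketch(double x)
+#	{
+#		double y = fabs(x), sgn = (x < 0.0) ? -1.0 : 1.0;
+#		if (y <= 0x1p-40)			/* step 7: tanh(X) = X	*/
+#			return x;
+#		if (y < 2.5*LN2) {			/* step 2		*/
+#			double z = expm1(2.0*y);
+#			return sgn*(z/(2.0 + z));
+#		}
+#		if (y < 50.0*LN2)			/* step 5		*/
+#			return sgn - sgn*2.0/(1.0 + exp(2.0*y));
+#		return sgn - sgn*0x1p-126;		/* step 6: +-1 - tiny	*/
+#	}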
+
+	set		X,FP_SCR0
+	set		XFRAC,X+4
+
+	set		SGN,L_SCR3
+
+	set		V,FP_SCR0
+
+	global		stanh
+stanh:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	fmov.x		%fp0,X(%a6)
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	mov.l		%d1,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1, &0x3fd78000	# is |X| < 2^(-40)?
+	blt.w		TANHBORS		# yes
+	cmp.l		%d1, &0x3fffddce	# is |X| > (5/2)LOG2?
+	bgt.w		TANHBORS		# yes
+
+#--THIS IS THE USUAL CASE
+#--Y = 2|X|, Z = EXPM1(Y), TANH(X) = SIGN(X) * Z / (Z+2).
+
+	mov.l		X(%a6),%d1
+	mov.l		%d1,SGN(%a6)
+	and.l		&0x7FFF0000,%d1
+	add.l		&0x00010000,%d1		# EXPONENT OF 2|X|
+	mov.l		%d1,X(%a6)
+	and.l		&0x80000000,SGN(%a6)
+	fmov.x		X(%a6),%fp0		# FP0 IS Y = 2|X|
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x1,-(%sp)		# save Y on stack
+	lea		(%sp),%a0		# pass ptr to Y
+	bsr		setoxm1			# FP0 IS Z = EXPM1(Y)
+	add.l		&0xc,%sp		# clear Y from stack
+	mov.l		(%sp)+,%d0
+
+	fmov.x		%fp0,%fp1
+	fadd.s		&0x40000000,%fp1	# Z+2
+	mov.l		SGN(%a6),%d1
+	fmov.x		%fp1,V(%a6)
+	eor.l		%d1,V(%a6)
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	fdiv.x		V(%a6),%fp0
+	bra		t_inx2
+
+TANHBORS:
+	cmp.l		%d1,&0x3FFF8000
+	blt.w		TANHSM
+
+	cmp.l		%d1,&0x40048AA1
+	bgt.w		TANHHUGE
+
+#-- (5/2) LOG2 < |X| < 50 LOG2,
+#--TANH(X) = 1 - (2/[EXP(2X)+1]). LET Y = 2|X|, SGN = SIGN(X),
+#--TANH(X) = SGN -	SGN*2/[EXP(Y)+1].
+
+	mov.l		X(%a6),%d1
+	mov.l		%d1,SGN(%a6)
+	and.l		&0x7FFF0000,%d1
+	add.l		&0x00010000,%d1		# EXPO OF 2|X|
+	mov.l		%d1,X(%a6)		# Y = 2|X|
+	and.l		&0x80000000,SGN(%a6)
+	mov.l		SGN(%a6),%d1
+	fmov.x		X(%a6),%fp0		# Y = 2|X|
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save Y on stack
+	lea		(%sp),%a0		# pass ptr to Y
+	bsr		setox			# FP0 IS EXP(Y)
+	add.l		&0xc,%sp		# clear Y from stack
+	mov.l		(%sp)+,%d0
+	mov.l		SGN(%a6),%d1
+	fadd.s		&0x3F800000,%fp0	# EXP(Y)+1
+
+	eor.l		&0xC0000000,%d1		# -SIGN(X)*2
+	fmov.s		%d1,%fp1		# -SIGN(X)*2 IN SGL FMT
+	fdiv.x		%fp0,%fp1		# -SIGN(X)2 / [EXP(Y)+1 ]
+
+	mov.l		SGN(%a6),%d1
+	or.l		&0x3F800000,%d1		# SGN
+	fmov.s		%d1,%fp0		# SGN IN SGL FMT
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	mov.b		&FADD_OP,%d1		# last inst is ADD
+	fadd.x		%fp1,%fp0
+	bra		t_inx2
+
+TANHSM:
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0		# last inst - possible exception set
+	bra		t_catch
+
+#---RETURN SGN(X) - SGN(X)EPS
+TANHHUGE:
+	mov.l		X(%a6),%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x3F800000,%d1
+	fmov.s		%d1,%fp0
+	and.l		&0x80000000,%d1
+	eor.l		&0x80800000,%d1		# -SIGN(X)*EPS
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	fadd.s		%d1,%fp0
+	bra		t_inx2
+
+	global		stanhd
+#--TANH(X) = X FOR DENORMALIZED X
+stanhd:
+	bra		t_extdnrm
+
+#########################################################################
+# slogn():    computes the natural logarithm of a normalized input	#
+# slognd():   computes the natural logarithm of a denormalized input	#
+# slognp1():  computes the log(1+X) of a normalized input		#
+# slognp1d(): computes the log(1+X) of a denormalized input		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = log(X) or log(1+X)					#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 2 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	LOGN:								#
+#	Step 1. If |X-1| < 1/16, approximate log(X) by an odd		#
+#		polynomial in u, where u = 2(X-1)/(X+1). Otherwise,	#
+#		move on to Step 2.					#
+#									#
+#	Step 2. X = 2**k * Y where 1 <= Y < 2. Define F to be the first	#
+#		seven significant bits of Y plus 2**(-7), i.e.		#
+#		F = 1.xxxxxx1 in base 2 where the six "x" match those	#
+#		of Y. Note that |Y-F| <= 2**(-7).			#
+#									#
+#	Step 3. Define u = (Y-F)/F. Approximate log(1+u) by a		#
+#		polynomial in u, log(1+u) = poly.			#
+#									#
+#	Step 4. Reconstruct						#
+#		log(X) = log( 2**k * Y ) = k*log(2) + log(F) + log(1+u)	#
+#		by k*log(2) + (log(F) + poly). The values of log(F) are	#
+#		calculated beforehand and stored in the program.	#
+#									#
+#	lognp1:								#
+#	Step 1: If |X| < 1/16, approximate log(1+X) by an odd		#
+#		polynomial in u where u = 2X/(2+X). Otherwise, move on	#
+#		to Step 2.						#
+#									#
+#	Step 2: Let 1+X = 2**k * Y, where 1 <= Y < 2. Define F as done	#
+#		in Step 2 of the algorithm for LOGN and compute		#
+#		log(1+X) as k*log(2) + log(F) + poly where poly		#
+#		approximates log(1+u), u = (Y-F)/F.			#
+#									#
+#	Implementation Notes:						#
+#	Note 1. There are 64 different possible values for F, thus 64	#
+#		log(F)'s need to be tabulated. Moreover, the values of	#
+#		1/F are also tabulated so that the division in (Y-F)/F	#
+#		can be performed by a multiplication.			#
+#									#
+#	Note 2. In Step 2 of lognp1, in order to preserve accuracy,	#
+#		the value Y-F has to be calculated carefully when	#
+#		1/2 <= X < 3/2.						#
+#									#
+#	Note 3. To fully exploit the pipeline, polynomials are usually	#
+#		separated into two parts evaluated independently before	#
+#		being added up.						#
+#									#
+#########################################################################
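+
+# A hedged double-precision C sketch of the scheme above ("logn_sketch" is
+# an illustrative name; libm's log()/log1p()/atanh() stand in for the
+# tabulated log(F) values and the polynomials, so the reconstruction is
+# exact here rather than approximate):
+#
+#	#include <math.h>
+#	#define LN2 0.69314718055994530942
+#
+#	static double logn_sketch(double x)
+#	{
+#		int k;
+#		double y, f, u;
+#		if (fabs(x - 1.0) < 1.0/16.0)		/* step 1: X near 1	*/
+#			return 2.0*atanh((x - 1.0)/(x + 1.0));
+#		y = 2.0*frexp(x, &k); k -= 1;		/* X = 2^k * Y, 1 <= Y < 2 */
+#		f = floor(64.0*y)/64.0 + 1.0/128.0;	/* step 2: F = 1.xxxxxx1 */
+#		u = (y - f)/f;				/* step 3		*/
+#		return k*LN2 + log(f) + log1p(u);	/* step 4		*/
+#	}
+#
+# lognp1 applies the same decomposition to 1+X; in this sketch it would
+# simply collapse to the libm call log1p(x).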
+LOGOF2:
+	long		0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+one:
+	long		0x3F800000
+zero:
+	long		0x00000000
+infty:
+	long		0x7F800000
+negone:
+	long		0xBF800000
+
+LOGA6:
+	long		0x3FC2499A,0xB5E4040B
+LOGA5:
+	long		0xBFC555B5,0x848CB7DB
+
+LOGA4:
+	long		0x3FC99999,0x987D8730
+LOGA3:
+	long		0xBFCFFFFF,0xFF6F7E97
+
+LOGA2:
+	long		0x3FD55555,0x555555A4
+LOGA1:
+	long		0xBFE00000,0x00000008
+
+LOGB5:
+	long		0x3F175496,0xADD7DAD6
+LOGB4:
+	long		0x3F3C71C2,0xFE80C7E0
+
+LOGB3:
+	long		0x3F624924,0x928BCCFF
+LOGB2:
+	long		0x3F899999,0x999995EC
+
+LOGB1:
+	long		0x3FB55555,0x55555555
+TWO:
+	long		0x40000000,0x00000000
+
+LTHOLD:
+	long		0x3f990000,0x80000000,0x00000000,0x00000000
+
+LOGTBL:
+	long		0x3FFE0000,0xFE03F80F,0xE03F80FE,0x00000000
+	long		0x3FF70000,0xFF015358,0x833C47E2,0x00000000
+	long		0x3FFE0000,0xFA232CF2,0x52138AC0,0x00000000
+	long		0x3FF90000,0xBDC8D83E,0xAD88D549,0x00000000
+	long		0x3FFE0000,0xF6603D98,0x0F6603DA,0x00000000
+	long		0x3FFA0000,0x9CF43DCF,0xF5EAFD48,0x00000000
+	long		0x3FFE0000,0xF2B9D648,0x0F2B9D65,0x00000000
+	long		0x3FFA0000,0xDA16EB88,0xCB8DF614,0x00000000
+	long		0x3FFE0000,0xEF2EB71F,0xC4345238,0x00000000
+	long		0x3FFB0000,0x8B29B775,0x1BD70743,0x00000000
+	long		0x3FFE0000,0xEBBDB2A5,0xC1619C8C,0x00000000
+	long		0x3FFB0000,0xA8D839F8,0x30C1FB49,0x00000000
+	long		0x3FFE0000,0xE865AC7B,0x7603A197,0x00000000
+	long		0x3FFB0000,0xC61A2EB1,0x8CD907AD,0x00000000
+	long		0x3FFE0000,0xE525982A,0xF70C880E,0x00000000
+	long		0x3FFB0000,0xE2F2A47A,0xDE3A18AF,0x00000000
+	long		0x3FFE0000,0xE1FC780E,0x1FC780E2,0x00000000
+	long		0x3FFB0000,0xFF64898E,0xDF55D551,0x00000000
+	long		0x3FFE0000,0xDEE95C4C,0xA037BA57,0x00000000
+	long		0x3FFC0000,0x8DB956A9,0x7B3D0148,0x00000000
+	long		0x3FFE0000,0xDBEB61EE,0xD19C5958,0x00000000
+	long		0x3FFC0000,0x9B8FE100,0xF47BA1DE,0x00000000
+	long		0x3FFE0000,0xD901B203,0x6406C80E,0x00000000
+	long		0x3FFC0000,0xA9372F1D,0x0DA1BD17,0x00000000
+	long		0x3FFE0000,0xD62B80D6,0x2B80D62C,0x00000000
+	long		0x3FFC0000,0xB6B07F38,0xCE90E46B,0x00000000
+	long		0x3FFE0000,0xD3680D36,0x80D3680D,0x00000000
+	long		0x3FFC0000,0xC3FD0329,0x06488481,0x00000000
+	long		0x3FFE0000,0xD0B69FCB,0xD2580D0B,0x00000000
+	long		0x3FFC0000,0xD11DE0FF,0x15AB18CA,0x00000000
+	long		0x3FFE0000,0xCE168A77,0x25080CE1,0x00000000
+	long		0x3FFC0000,0xDE1433A1,0x6C66B150,0x00000000
+	long		0x3FFE0000,0xCB8727C0,0x65C393E0,0x00000000
+	long		0x3FFC0000,0xEAE10B5A,0x7DDC8ADD,0x00000000
+	long		0x3FFE0000,0xC907DA4E,0x871146AD,0x00000000
+	long		0x3FFC0000,0xF7856E5E,0xE2C9B291,0x00000000
+	long		0x3FFE0000,0xC6980C69,0x80C6980C,0x00000000
+	long		0x3FFD0000,0x82012CA5,0xA68206D7,0x00000000
+	long		0x3FFE0000,0xC4372F85,0x5D824CA6,0x00000000
+	long		0x3FFD0000,0x882C5FCD,0x7256A8C5,0x00000000
+	long		0x3FFE0000,0xC1E4BBD5,0x95F6E947,0x00000000
+	long		0x3FFD0000,0x8E44C60B,0x4CCFD7DE,0x00000000
+	long		0x3FFE0000,0xBFA02FE8,0x0BFA02FF,0x00000000
+	long		0x3FFD0000,0x944AD09E,0xF4351AF6,0x00000000
+	long		0x3FFE0000,0xBD691047,0x07661AA3,0x00000000
+	long		0x3FFD0000,0x9A3EECD4,0xC3EAA6B2,0x00000000
+	long		0x3FFE0000,0xBB3EE721,0xA54D880C,0x00000000
+	long		0x3FFD0000,0xA0218434,0x353F1DE8,0x00000000
+	long		0x3FFE0000,0xB92143FA,0x36F5E02E,0x00000000
+	long		0x3FFD0000,0xA5F2FCAB,0xBBC506DA,0x00000000
+	long		0x3FFE0000,0xB70FBB5A,0x19BE3659,0x00000000
+	long		0x3FFD0000,0xABB3B8BA,0x2AD362A5,0x00000000
+	long		0x3FFE0000,0xB509E68A,0x9B94821F,0x00000000
+	long		0x3FFD0000,0xB1641795,0xCE3CA97B,0x00000000
+	long		0x3FFE0000,0xB30F6352,0x8917C80B,0x00000000
+	long		0x3FFD0000,0xB7047551,0x5D0F1C61,0x00000000
+	long		0x3FFE0000,0xB11FD3B8,0x0B11FD3C,0x00000000
+	long		0x3FFD0000,0xBC952AFE,0xEA3D13E1,0x00000000
+	long		0x3FFE0000,0xAF3ADDC6,0x80AF3ADE,0x00000000
+	long		0x3FFD0000,0xC2168ED0,0xF458BA4A,0x00000000
+	long		0x3FFE0000,0xAD602B58,0x0AD602B6,0x00000000
+	long		0x3FFD0000,0xC788F439,0xB3163BF1,0x00000000
+	long		0x3FFE0000,0xAB8F69E2,0x8359CD11,0x00000000
+	long		0x3FFD0000,0xCCECAC08,0xBF04565D,0x00000000
+	long		0x3FFE0000,0xA9C84A47,0xA07F5638,0x00000000
+	long		0x3FFD0000,0xD2420487,0x2DD85160,0x00000000
+	long		0x3FFE0000,0xA80A80A8,0x0A80A80B,0x00000000
+	long		0x3FFD0000,0xD7894992,0x3BC3588A,0x00000000
+	long		0x3FFE0000,0xA655C439,0x2D7B73A8,0x00000000
+	long		0x3FFD0000,0xDCC2C4B4,0x9887DACC,0x00000000
+	long		0x3FFE0000,0xA4A9CF1D,0x96833751,0x00000000
+	long		0x3FFD0000,0xE1EEBD3E,0x6D6A6B9E,0x00000000
+	long		0x3FFE0000,0xA3065E3F,0xAE7CD0E0,0x00000000
+	long		0x3FFD0000,0xE70D785C,0x2F9F5BDC,0x00000000
+	long		0x3FFE0000,0xA16B312E,0xA8FC377D,0x00000000
+	long		0x3FFD0000,0xEC1F392C,0x5179F283,0x00000000
+	long		0x3FFE0000,0x9FD809FD,0x809FD80A,0x00000000
+	long		0x3FFD0000,0xF12440D3,0xE36130E6,0x00000000
+	long		0x3FFE0000,0x9E4CAD23,0xDD5F3A20,0x00000000
+	long		0x3FFD0000,0xF61CCE92,0x346600BB,0x00000000
+	long		0x3FFE0000,0x9CC8E160,0xC3FB19B9,0x00000000
+	long		0x3FFD0000,0xFB091FD3,0x8145630A,0x00000000
+	long		0x3FFE0000,0x9B4C6F9E,0xF03A3CAA,0x00000000
+	long		0x3FFD0000,0xFFE97042,0xBFA4C2AD,0x00000000
+	long		0x3FFE0000,0x99D722DA,0xBDE58F06,0x00000000
+	long		0x3FFE0000,0x825EFCED,0x49369330,0x00000000
+	long		0x3FFE0000,0x9868C809,0x868C8098,0x00000000
+	long		0x3FFE0000,0x84C37A7A,0xB9A905C9,0x00000000
+	long		0x3FFE0000,0x97012E02,0x5C04B809,0x00000000
+	long		0x3FFE0000,0x87224C2E,0x8E645FB7,0x00000000
+	long		0x3FFE0000,0x95A02568,0x095A0257,0x00000000
+	long		0x3FFE0000,0x897B8CAC,0x9F7DE298,0x00000000
+	long		0x3FFE0000,0x94458094,0x45809446,0x00000000
+	long		0x3FFE0000,0x8BCF55DE,0xC4CD05FE,0x00000000
+	long		0x3FFE0000,0x92F11384,0x0497889C,0x00000000
+	long		0x3FFE0000,0x8E1DC0FB,0x89E125E5,0x00000000
+	long		0x3FFE0000,0x91A2B3C4,0xD5E6F809,0x00000000
+	long		0x3FFE0000,0x9066E68C,0x955B6C9B,0x00000000
+	long		0x3FFE0000,0x905A3863,0x3E06C43B,0x00000000
+	long		0x3FFE0000,0x92AADE74,0xC7BE59E0,0x00000000
+	long		0x3FFE0000,0x8F1779D9,0xFDC3A219,0x00000000
+	long		0x3FFE0000,0x94E9BFF6,0x15845643,0x00000000
+	long		0x3FFE0000,0x8DDA5202,0x37694809,0x00000000
+	long		0x3FFE0000,0x9723A1B7,0x20134203,0x00000000
+	long		0x3FFE0000,0x8CA29C04,0x6514E023,0x00000000
+	long		0x3FFE0000,0x995899C8,0x90EB8990,0x00000000
+	long		0x3FFE0000,0x8B70344A,0x139BC75A,0x00000000
+	long		0x3FFE0000,0x9B88BDAA,0x3A3DAE2F,0x00000000
+	long		0x3FFE0000,0x8A42F870,0x5669DB46,0x00000000
+	long		0x3FFE0000,0x9DB4224F,0xFFE1157C,0x00000000
+	long		0x3FFE0000,0x891AC73A,0xE9819B50,0x00000000
+	long		0x3FFE0000,0x9FDADC26,0x8B7A12DA,0x00000000
+	long		0x3FFE0000,0x87F78087,0xF78087F8,0x00000000
+	long		0x3FFE0000,0xA1FCFF17,0xCE733BD4,0x00000000
+	long		0x3FFE0000,0x86D90544,0x7A34ACC6,0x00000000
+	long		0x3FFE0000,0xA41A9E8F,0x5446FB9F,0x00000000
+	long		0x3FFE0000,0x85BF3761,0x2CEE3C9B,0x00000000
+	long		0x3FFE0000,0xA633CD7E,0x6771CD8B,0x00000000
+	long		0x3FFE0000,0x84A9F9C8,0x084A9F9D,0x00000000
+	long		0x3FFE0000,0xA8489E60,0x0B435A5E,0x00000000
+	long		0x3FFE0000,0x83993052,0x3FBE3368,0x00000000
+	long		0x3FFE0000,0xAA59233C,0xCCA4BD49,0x00000000
+	long		0x3FFE0000,0x828CBFBE,0xB9A020A3,0x00000000
+	long		0x3FFE0000,0xAC656DAE,0x6BCC4985,0x00000000
+	long		0x3FFE0000,0x81848DA8,0xFAF0D277,0x00000000
+	long		0x3FFE0000,0xAE6D8EE3,0x60BB2468,0x00000000
+	long		0x3FFE0000,0x80808080,0x80808081,0x00000000
+	long		0x3FFE0000,0xB07197A2,0x3C46C654,0x00000000
+
+	set		ADJK,L_SCR1
+
+	set		X,FP_SCR0
+	set		XDCARE,X+2
+	set		XFRAC,X+4
+
+	set		F,FP_SCR1
+	set		FFRAC,F+4
+
+	set		KLOG2,FP_SCR0
+
+	set		SAVEU,FP_SCR0
+
+	global		slogn
+#--ENTRY POINT FOR LOG(X) FOR X FINITE, NON-ZERO, NOT NAN'S
+slogn:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	mov.l		&0x00000000,ADJK(%a6)
+
+LOGBGN:
+#--FPCR SAVED AND CLEARED, INPUT IS 2^(ADJK)*FP0, FP0 CONTAINS
+#--A FINITE, NON-ZERO, NORMALIZED NUMBER.
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+
+	mov.l		(%a0),X(%a6)
+	mov.l		4(%a0),X+4(%a6)
+	mov.l		8(%a0),X+8(%a6)
+
+	cmp.l		%d1,&0			# CHECK IF X IS NEGATIVE
+	blt.w		LOGNEG			# LOG OF NEGATIVE ARGUMENT IS INVALID
+# X IS POSITIVE, CHECK IF X IS NEAR 1
+	cmp.l		%d1,&0x3ffef07d		# IS X < 15/16?
+	blt.b		LOGMAIN			# YES
+	cmp.l		%d1,&0x3fff8841		# IS X > 17/16?
+	ble.w		LOGNEAR1		# NO
+
+LOGMAIN:
+#--THIS SHOULD BE THE USUAL CASE, X NOT VERY CLOSE TO 1
+
+#--X = 2^(K) * Y, 1 <= Y < 2. THUS, Y = 1.XXXXXXXX....XX IN BINARY.
+#--WE DEFINE F = 1.XXXXXX1, I.E. FIRST 7 BITS OF Y AND ATTACH A 1.
+#--THE IDEA IS THAT LOG(X) = K*LOG2 + LOG(Y)
+#--			 = K*LOG2 + LOG(F) + LOG(1 + (Y-F)/F).
+#--NOTE THAT U = (Y-F)/F IS VERY SMALL AND THUS APPROXIMATING
+#--LOG(1+U) CAN BE VERY EFFICIENT.
+#--ALSO NOTE THAT THE VALUE 1/F IS STORED IN A TABLE SO THAT NO
+#--DIVISION IS NEEDED TO CALCULATE (Y-F)/F.
+
+#--GET K, Y, F, AND ADDRESS OF 1/F.
+	asr.l		&8,%d1
+	asr.l		&8,%d1			# SHIFTED 16 BITS, BIASED EXPO. OF X
+	sub.l		&0x3FFF,%d1		# THIS IS K
+	add.l		ADJK(%a6),%d1		# ADJUST K, ORIGINAL INPUT MAY BE  DENORM.
+	lea		LOGTBL(%pc),%a0		# BASE ADDRESS OF 1/F AND LOG(F)
+	fmov.l		%d1,%fp1		# CONVERT K TO FLOATING-POINT FORMAT
+
+#--WHILE THE CONVERSION IS GOING ON, WE GET F AND ADDRESS OF 1/F
+	mov.l		&0x3FFF0000,X(%a6)	# X IS NOW Y, I.E. 2^(-K)*X
+	mov.l		XFRAC(%a6),FFRAC(%a6)
+	and.l		&0xFE000000,FFRAC(%a6)	# FIRST 7 BITS OF Y
+	or.l		&0x01000000,FFRAC(%a6)	# GET F: ATTACH A 1 AT THE EIGHTH BIT
+	mov.l		FFRAC(%a6),%d1	# READY TO GET ADDRESS OF 1/F
+	and.l		&0x7E000000,%d1
+	asr.l		&8,%d1
+	asr.l		&8,%d1
+	asr.l		&4,%d1			# SHIFTED 20, D0 IS THE DISPLACEMENT
+	add.l		%d1,%a0			# A0 IS THE ADDRESS FOR 1/F
+
+	fmov.x		X(%a6),%fp0
+	mov.l		&0x3fff0000,F(%a6)
+	clr.l		F+8(%a6)
+	fsub.x		F(%a6),%fp0		# Y-F
+	fmovm.x		&0xc,-(%sp)		# SAVE FP2-3 WHILE FP0 IS NOT READY
+#--SUMMARY: FP0 IS Y-F, A0 IS ADDRESS OF 1/F, FP1 IS K
+#--REGISTERS SAVED: FPCR, FP1, FP2
+
+LP1CONT1:
+#--A RE-ENTRY POINT FOR LOGNP1
+	fmul.x		(%a0),%fp0		# FP0 IS U = (Y-F)/F
+	fmul.x		LOGOF2(%pc),%fp1	# GET K*LOG2 WHILE FP0 IS NOT READY
+	fmov.x		%fp0,%fp2
+	fmul.x		%fp2,%fp2		# FP2 IS V=U*U
+	fmov.x		%fp1,KLOG2(%a6)		# PUT K*LOG2 IN MEMORY, FREE FP1
+
+#--LOG(1+U) IS APPROXIMATED BY
+#--U + V*(A1+U*(A2+U*(A3+U*(A4+U*(A5+U*A6))))) WHICH IS
+#--[U + V*(A1+V*(A3+V*A5))]  +  [U*V*(A2+V*(A4+V*A6))]
+
+	fmov.x		%fp2,%fp3
+	fmov.x		%fp2,%fp1
+
+	fmul.d		LOGA6(%pc),%fp1		# V*A6
+	fmul.d		LOGA5(%pc),%fp2		# V*A5
+
+	fadd.d		LOGA4(%pc),%fp1		# A4+V*A6
+	fadd.d		LOGA3(%pc),%fp2		# A3+V*A5
+
+	fmul.x		%fp3,%fp1		# V*(A4+V*A6)
+	fmul.x		%fp3,%fp2		# V*(A3+V*A5)
+
+	fadd.d		LOGA2(%pc),%fp1		# A2+V*(A4+V*A6)
+	fadd.d		LOGA1(%pc),%fp2		# A1+V*(A3+V*A5)
+
+	fmul.x		%fp3,%fp1		# V*(A2+V*(A4+V*A6))
+	add.l		&16,%a0			# ADDRESS OF LOG(F)
+	fmul.x		%fp3,%fp2		# V*(A1+V*(A3+V*A5))
+
+	fmul.x		%fp0,%fp1		# U*V*(A2+V*(A4+V*A6))
+	fadd.x		%fp2,%fp0		# U+V*(A1+V*(A3+V*A5))
+
+	fadd.x		(%a0),%fp1		# LOG(F)+U*V*(A2+V*(A4+V*A6))
+	fmovm.x		(%sp)+,&0x30		# RESTORE FP2-3
+	fadd.x		%fp1,%fp0		# FP0 IS LOG(F) + LOG(1+U)
+
+	fmov.l		%d0,%fpcr
+	fadd.x		KLOG2(%a6),%fp0		# FINAL ADD
+	bra		t_inx2
+
+
+LOGNEAR1:
+
+# if the input is exactly equal to one, then exit through ld_pzero.
+# if these 2 lines weren't here, the correct answer would be returned
+# but the INEX2 bit would be set.
+	fcmp.b		%fp0,&0x1		# is it equal to one?
+	fbeq.l		ld_pzero		# yes
+
+#--REGISTERS SAVED: FPCR, FP1. FP0 CONTAINS THE INPUT.
+	fmov.x		%fp0,%fp1
+	fsub.s		one(%pc),%fp1		# FP1 IS X-1
+	fadd.s		one(%pc),%fp0		# FP0 IS X+1
+	fadd.x		%fp1,%fp1		# FP1 IS 2(X-1)
+#--LOG(X) = LOG(1+U/2)-LOG(1-U/2) WHICH IS AN ODD POLYNOMIAL
+#--IN U, U = 2(X-1)/(X+1) = FP1/FP0
+
+LP1CONT2:
+#--THIS IS A RE-ENTRY POINT FOR LOGNP1
+	fdiv.x		%fp0,%fp1		# FP1 IS U
+	fmovm.x		&0xc,-(%sp)		# SAVE FP2-3
+#--REGISTERS SAVED ARE NOW FPCR,FP1,FP2,FP3
+#--LET V=U*U, W=V*V, CALCULATE
+#--U + U*V*(B1 + V*(B2 + V*(B3 + V*(B4 + V*B5)))) BY
+#--U + U*V*(  [B1 + W*(B3 + W*B5)]  +  [V*(B2 + W*B4)]  )
+	fmov.x		%fp1,%fp0
+	fmul.x		%fp0,%fp0		# FP0 IS V
+	fmov.x		%fp1,SAVEU(%a6)		# STORE U IN MEMORY, FREE FP1
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS W
+
+	fmov.d		LOGB5(%pc),%fp3
+	fmov.d		LOGB4(%pc),%fp2
+
+	fmul.x		%fp1,%fp3		# W*B5
+	fmul.x		%fp1,%fp2		# W*B4
+
+	fadd.d		LOGB3(%pc),%fp3		# B3+W*B5
+	fadd.d		LOGB2(%pc),%fp2		# B2+W*B4
+
+	fmul.x		%fp3,%fp1		# W*(B3+W*B5), FP3 RELEASED
+
+	fmul.x		%fp0,%fp2		# V*(B2+W*B4)
+
+	fadd.d		LOGB1(%pc),%fp1		# B1+W*(B3+W*B5)
+	fmul.x		SAVEU(%a6),%fp0		# FP0 IS U*V
+
+	fadd.x		%fp2,%fp1		# B1+W*(B3+W*B5) + V*(B2+W*B4), FP2 RELEASED
+	fmovm.x		(%sp)+,&0x30		# FP2-3 RESTORED
+
+	fmul.x		%fp1,%fp0		# U*V*( [B1+W*(B3+W*B5)] + [V*(B2+W*B4)] )
+
+	fmov.l		%d0,%fpcr
+	fadd.x		SAVEU(%a6),%fp0
+	bra		t_inx2
+
+#--REGISTERS SAVED FPCR. LOG(-VE) IS INVALID
+LOGNEG:
+	bra		t_operr
+
+	global		slognd
+slognd:
+#--ENTRY POINT FOR LOG(X) FOR DENORMALIZED INPUT
+
+	mov.l		&-100,ADJK(%a6)		# INPUT = 2^(ADJK) * FP0
+
+#----normalize the input value by left shifting k bits (k to be determined
+#----below), adjusting exponent and storing -k to  ADJK
+#----the value TWOTO100 is no longer needed.
+#----Note that this code assumes the denormalized input is NON-ZERO.
+
+	movm.l		&0x3f00,-(%sp)		# save some registers  {d2-d7}
+	mov.l		(%a0),%d3		# D3 is exponent of smallest norm. #
+	mov.l		4(%a0),%d4
+	mov.l		8(%a0),%d5		# (D4,D5) is (Hi_X,Lo_X)
+	clr.l		%d2			# D2 used for holding K
+
+	tst.l		%d4
+	bne.b		Hi_not0
+
+Hi_0:
+	mov.l		%d5,%d4
+	clr.l		%d5
+	mov.l		&32,%d2
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6
+	lsl.l		%d6,%d4
+	add.l		%d6,%d2			# (D3,D4,D5) is normalized
+
+	mov.l		%d3,X(%a6)
+	mov.l		%d4,XFRAC(%a6)
+	mov.l		%d5,XFRAC+4(%a6)
+	neg.l		%d2
+	mov.l		%d2,ADJK(%a6)
+	fmov.x		X(%a6),%fp0
+	movm.l		(%sp)+,&0xfc		# restore registers {d2-d7}
+	lea		X(%a6),%a0
+	bra.w		LOGBGN			# begin regular log(X)
+
+Hi_not0:
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6		# find first 1
+	mov.l		%d6,%d2			# get k
+	lsl.l		%d6,%d4
+	mov.l		%d5,%d7			# a copy of D5
+	lsl.l		%d6,%d5
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d4			# (D3,D4,D5) normalized
+
+	mov.l		%d3,X(%a6)
+	mov.l		%d4,XFRAC(%a6)
+	mov.l		%d5,XFRAC+4(%a6)
+	neg.l		%d2
+	mov.l		%d2,ADJK(%a6)
+	fmov.x		X(%a6),%fp0
+	movm.l		(%sp)+,&0xfc		# restore registers {d2-d7}
+	lea		X(%a6),%a0
+	bra.w		LOGBGN			# begin regular log(X)
+
+	global		slognp1
+#--ENTRY POINT FOR LOG(1+X) FOR X FINITE, NON-ZERO, NOT NAN'S
+slognp1:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	fabs.x		%fp0			# test magnitude
+	fcmp.x		%fp0,LTHOLD(%pc)	# compare with min threshold
+	fbgt.w		LP1REAL			# if greater, continue
+	fmov.l		%d0,%fpcr
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		(%a0),%fp0		# return signed argument
+	bra		t_catch
+
+LP1REAL:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	mov.l		&0x00000000,ADJK(%a6)
+	fmov.x		%fp0,%fp1		# FP1 IS INPUT Z
+	fadd.s		one(%pc),%fp0		# X := ROUND(1+Z)
+	fmov.x		%fp0,X(%a6)
+	mov.w		XFRAC(%a6),XDCARE(%a6)
+	mov.l		X(%a6),%d1
+	cmp.l		%d1,&0
+	ble.w		LP1NEG0			# LOG OF ZERO OR -VE
+	cmp.l		%d1,&0x3ffe8000		# IS 1+Z IN [1/2,3/2]?
+	blt.w		LOGMAIN
+	cmp.l		%d1,&0x3fffc000
+	bgt.w		LOGMAIN
+#--IF 1+Z > 3/2 OR 1+Z < 1/2, THEN X, WHICH IS ROUNDING 1+Z,
+#--CONTAINS AT LEAST 63 BITS OF INFORMATION OF Z. IN THAT CASE,
+#--SIMPLY INVOKE LOG(X) FOR LOG(1+Z).
+
+LP1NEAR1:
+#--NEXT SEE IF EXP(-1/16) < X < EXP(1/16)
+	cmp.l		%d1,&0x3ffef07d
+	blt.w		LP1CARE
+	cmp.l		%d1,&0x3fff8841
+	bgt.w		LP1CARE
+
+LP1ONE16:
+#--EXP(-1/16) < X < EXP(1/16). LOG(1+Z) = LOG(1+U/2) - LOG(1-U/2)
+#--WHERE U = 2Z/(2+Z) = 2Z/(1+X).
+	fadd.x		%fp1,%fp1		# FP1 IS 2Z
+	fadd.s		one(%pc),%fp0		# FP0 IS 1+X
+#--U = FP1/FP0
+	bra.w		LP1CONT2
+
+LP1CARE:
+#--HERE WE USE THE USUAL TABLE DRIVEN APPROACH. CARE HAS TO BE
+#--TAKEN BECAUSE 1+Z CAN HAVE 67 BITS OF INFORMATION AND WE MUST
+#--PRESERVE ALL THE INFORMATION. BECAUSE 1+Z IS IN [1/2,3/2],
+#--THERE ARE ONLY TWO CASES.
+#--CASE 1: 1+Z < 1, THEN K = -1 AND Y-F = (2-F) + 2Z
+#--CASE 2: 1+Z > 1, THEN K = 0  AND Y-F = (1-F) + Z
+#--ON RETURNING TO LP1CONT1, WE MUST HAVE K IN FP1, ADDRESS OF
+#--(1/F) IN A0, Y-F IN FP0, AND FP2 SAVED.
+
+	mov.l		XFRAC(%a6),FFRAC(%a6)
+	and.l		&0xFE000000,FFRAC(%a6)
+	or.l		&0x01000000,FFRAC(%a6)	# F OBTAINED
+	cmp.l		%d1,&0x3FFF8000		# SEE IF 1+Z > 1
+	bge.b		KISZERO
+
+KISNEG1:
+	fmov.s		TWO(%pc),%fp0
+	mov.l		&0x3fff0000,F(%a6)
+	clr.l		F+8(%a6)
+	fsub.x		F(%a6),%fp0		# 2-F
+	mov.l		FFRAC(%a6),%d1
+	and.l		&0x7E000000,%d1
+	asr.l		&8,%d1
+	asr.l		&8,%d1
+	asr.l		&4,%d1			# D0 CONTAINS DISPLACEMENT FOR 1/F
+	fadd.x		%fp1,%fp1		# GET 2Z
+	fmovm.x		&0xc,-(%sp)		# SAVE FP2  {%fp2/%fp3}
+	fadd.x		%fp1,%fp0		# FP0 IS Y-F = (2-F)+2Z
+	lea		LOGTBL(%pc),%a0		# A0 IS ADDRESS OF 1/F
+	add.l		%d1,%a0
+	fmov.s		negone(%pc),%fp1	# FP1 IS K = -1
+	bra.w		LP1CONT1
+
+KISZERO:
+	fmov.s		one(%pc),%fp0
+	mov.l		&0x3fff0000,F(%a6)
+	clr.l		F+8(%a6)
+	fsub.x		F(%a6),%fp0		# 1-F
+	mov.l		FFRAC(%a6),%d1
+	and.l		&0x7E000000,%d1
+	asr.l		&8,%d1
+	asr.l		&8,%d1
+	asr.l		&4,%d1
+	fadd.x		%fp1,%fp0		# FP0 IS Y-F
+	fmovm.x		&0xc,-(%sp)		# FP2 SAVED {%fp2/%fp3}
+	lea		LOGTBL(%pc),%a0
+	add.l		%d1,%a0			# A0 IS ADDRESS OF 1/F
+	fmov.s		zero(%pc),%fp1		# FP1 IS K = 0
+	bra.w		LP1CONT1
+
+LP1NEG0:
+#--FPCR SAVED. D0 IS X IN COMPACT FORM.
+	cmp.l		%d1,&0
+	blt.b		LP1NEG
+LP1ZERO:
+	fmov.s		negone(%pc),%fp0
+
+	fmov.l		%d0,%fpcr
+	bra		t_dz
+
+LP1NEG:
+	fmov.s		zero(%pc),%fp0
+
+	fmov.l		%d0,%fpcr
+	bra		t_operr
+
+	global		slognp1d
+#--ENTRY POINT FOR LOG(1+Z) FOR DENORMALIZED INPUT
+# Simply return the denorm
+slognp1d:
+	bra		t_extdnrm
+
+#########################################################################
+# satanh():  computes the inverse hyperbolic tangent of a norm input	#
+# satanhd(): computes the inverse hyperbolic tangent of a denorm input	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = arctanh(X)						#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	ATANH								#
+#	1. If |X| >= 1, go to 3.					#
+#									#
+#	2. (|X| < 1) Calculate atanh(X) by				#
+#		sgn := sign(X)						#
+#		y := |X|						#
+#		z := 2y/(1-y)						#
+#		atanh(X) := sgn * (1/2) * logp1(z)			#
+#		Exit.							#
+#									#
+#	3. If |X| > 1, go to 5.						#
+#									#
+#	4. (|X| = 1) Generate infinity with an appropriate sign and	#
+#		divide-by-zero by					#
+#		sgn := sign(X)						#
+#		atan(X) := sgn / (+0).					#
+#		Exit.							#
+#									#
+#	5. (|X| > 1) Generate an invalid operation by 0 * infinity.	#
+#		Exit.							#
+#									#
+#########################################################################
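+
+# A hedged double-precision C sketch of the steps above ("atanh_sketch" is
+# an illustrative name; log1p() stands in for the slognp1 call made below):
+#
+#	#include <math.h>
+#
+#	static double atanh_sketch(double x)
+#	{
+#		double y = fabs(x), sgn = (x < 0.0) ? -1.0 : 1.0;
+#		if (y < 1.0)			/* step 2		*/
+#			return sgn*0.5*log1p(2.0*y/(1.0 - y));
+#		if (y == 1.0)			/* step 4: +-inf, dz	*/
+#			return sgn/0.0;
+#		return 0.0*HUGE_VAL;		/* step 5: invalid -> NaN */
+#	}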
+
+	global		satanh
+satanh:
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x3FFF8000
+	bge.b		ATANHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--Y = |X|, Z = 2Y/(1-Y), ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z).
+
+	fabs.x		(%a0),%fp0		# Y = |X|
+	fmov.x		%fp0,%fp1
+	fneg.x		%fp1			# -Y
+	fadd.x		%fp0,%fp0		# 2Y
+	fadd.s		&0x3F800000,%fp1	# 1-Y
+	fdiv.x		%fp1,%fp0		# 2Y/(1-Y)
+	mov.l		(%a0),%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x3F000000,%d1		# SIGN(X)*HALF
+	mov.l		%d1,-(%sp)
+
+	mov.l		%d0,-(%sp)		# save rnd prec,mode
+	clr.l		%d0			# pass ext prec,RN
+	fmovm.x		&0x01,-(%sp)		# save Z on stack
+	lea		(%sp),%a0		# pass ptr to Z
+	bsr		slognp1			# LOG1P(Z)
+	add.l		&0xc,%sp		# clear Z from stack
+
+	mov.l		(%sp)+,%d0		# fetch old prec,mode
+	fmov.l		%d0,%fpcr		# load it
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.s		(%sp)+,%fp0
+	bra		t_catch
+
+ATANHBIG:
+	fabs.x		(%a0),%fp0		# |X|
+	fcmp.s		%fp0,&0x3F800000
+	fbgt		t_operr
+	bra		t_dz
+
+	global		satanhd
+#--ATANH(X) = X FOR DENORMALIZED X
+satanhd:
+	bra		t_extdnrm
+
+#########################################################################
+# slog10():  computes the base-10 logarithm of a normalized input	#
+# slog10d(): computes the base-10 logarithm of a denormalized input	#
+# slog2():   computes the base-2 logarithm of a normalized input	#
+# slog2d():  computes the base-2 logarithm of a denormalized input	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = log_10(X) or log_2(X)					#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 1.7 ulps in 64 significant bits,	#
+#	i.e. within 0.5003 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#       slog10d:							#
+#									#
+#       Step 0.	If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FpCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. Call slognd to obtain Y = log(X), the natural log of X.	#
+#       Notes:  Even if X is denormalized, log(X) is always normalized.	#
+#									#
+#       Step 2.  Compute log_10(X) = log(X) * (1/log(10)).		#
+#            2.1 Restore the user FPCR					#
+#            2.2 Return ans := Y * INV_L10.				#
+#									#
+#       slog10:								#
+#									#
+#       Step 0. If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FpCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. Call sLogN to obtain Y = log(X), the natural log of X.	#
+#									#
+#       Step 2.   Compute log_10(X) = log(X) * (1/log(10)).		#
+#            2.1  Restore the user FPCR					#
+#            2.2  Return ans := Y * INV_L10.				#
+#									#
+#       sLog2d:								#
+#									#
+#       Step 0. If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FpCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. Call slognd to obtain Y = log(X), the natural log of X.	#
+#       Notes:  Even if X is denormalized, log(X) is always normalized.	#
+#									#
+#       Step 2.   Compute log_2(X) = log(X) * (1/log(2)).		#
+#            2.1  Restore the user FPCR					#
+#            2.2  Return ans := Y * INV_L2.				#
+#									#
+#       sLog2:								#
+#									#
+#       Step 0. If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FpCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. If X is not an integer power of two, i.e., X != 2^k,	#
+#               go to Step 3.						#
+#									#
+#       Step 2.   Return k.						#
+#            2.1  Get integer k, X = 2^k.				#
+#            2.2  Restore the user FPCR.				#
+#            2.3  Return ans := convert-to-double-extended(k).		#
+#									#
+#       Step 3. Call sLogN to obtain Y = log(X), the natural log of X.	#
+#									#
+#       Step 4.   Compute log_2(X) = log(X) * (1/log(2)).		#
+#            4.1  Restore the user FPCR					#
+#            4.2  Return ans := Y * INV_L2.				#
+#									#
+#########################################################################
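+
+# A hedged double-precision C sketch of the wrappers described above (the
+# "_sketch" names are illustrative; the constants are 1/ln(10) and 1/ln(2),
+# i.e. the INV_L10/INV_L2 values defined just below):
+#
+#	#include <math.h>
+#
+#	static double log10_sketch(double x)
+#	{
+#		if (x == 1.0)
+#			return +0.0;			/* exact zero for log10(1) */
+#		return log(x)*0.43429448190325182765;	/* Y * INV_L10	*/
+#	}
+#
+#	static double log2_sketch(double x)
+#	{
+#		int k;
+#		if (frexp(x, &k) == 0.5)	/* X = 2^(k-1) exactly	*/
+#			return (double)(k - 1);	/* step 2: return k	*/
+#		return log(x)*1.44269504088896340736;	/* Y * INV_L2	*/
+#	}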
+
+INV_L10:
+	long		0x3FFD0000,0xDE5BD8A9,0x37287195,0x00000000
+
+INV_L2:
+	long		0x3FFF0000,0xB8AA3B29,0x5C17F0BC,0x00000000
+
+	global		slog10
+#--entry point for Log10(X), X is normalized
+slog10:
+	fmov.b		&0x1,%fp0
+	fcmp.x		%fp0,(%a0)		# if operand == 1,
+	fbeq.l		ld_pzero		# return an EXACT zero
+
+	mov.l		(%a0),%d1
+	blt.w		invalid
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slogn			# log(X), X normal.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L10(%pc),%fp0
+	bra		t_inx2
+
+	global		slog10d
+#--entry point for Log10(X), X is denormalized
+slog10d:
+	mov.l		(%a0),%d1
+	blt.w		invalid
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slognd			# log(X), X denorm.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L10(%pc),%fp0
+	bra		t_minx2
+
+	global		slog2
+#--entry point for Log2(X), X is normalized
+slog2:
+	mov.l		(%a0),%d1
+	blt.w		invalid
+
+	mov.l		8(%a0),%d1
+	bne.b		continue		# X is not 2^k
+
+	mov.l		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	bne.b		continue
+
+#--X = 2^k.
+	mov.w		(%a0),%d1
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x3FFF,%d1
+	beq.l		ld_pzero
+	fmov.l		%d0,%fpcr
+	fmov.l		%d1,%fp0
+	bra		t_inx2
+
+continue:
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slogn			# log(X), X normal.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L2(%pc),%fp0
+	bra		t_inx2
+
+invalid:
+	bra		t_operr
+
+	global		slog2d
+#--entry point for Log2(X), X is denormalized
+slog2d:
+	mov.l		(%a0),%d1
+	blt.w		invalid
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slognd			# log(X), X denorm.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L2(%pc),%fp0
+	bra		t_minx2
+
+#########################################################################
+# stwotox():  computes 2**X for a normalized input			#
+# stwotoxd(): computes 2**X for a denormalized input			#
+# stentox():  computes 10**X for a normalized input			#
+# stentoxd(): computes 10**X for a denormalized input			#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = 2**X or 10**X						#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 2 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	twotox								#
+#	1. If |X| > 16480, go to ExpBig.				#
+#									#
+#	2. If |X| < 2**(-70), go to ExpSm.				#
+#									#
+#	3. Decompose X as X = N/64 + r where |r| <= 1/128. Furthermore	#
+#		decompose N as						#
+#		 N = 64(M + M') + j,  j = 0,1,2,...,63.			#
+#									#
+#	4. Overwrite r := r * log2. Then				#
+#		2**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).		#
+#		Go to expr to compute that expression.			#
+#									#
+#	tentox								#
+#	1. If |X| > 16480*log_10(2) (base 10 log of 2), go to ExpBig.	#
+#									#
+#	2. If |X| < 2**(-70), go to ExpSm.				#
+#									#
+#	3. Set y := X*log_2(10)*64 (base 2 log of 10). Set		#
+#		N := round-to-int(y). Decompose N as			#
+#		 N = 64(M + M') + j,  j = 0,1,2,...,63.			#
+#									#
+#	4. Define r as							#
+#		r := ((X - N*L1)-N*L2) * L10				#
+#		where L1, L2 are the leading and trailing parts of	#
+#		log_10(2)/64 and L10 is the natural log of 10. Then	#
+#		10**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).		#
+#		Go to expr to compute that expression.			#
+#									#
+#	expr								#
+#	1. Fetch 2**(j/64) from table as Fact1 and Fact2.		#
+#									#
+#	2. Overwrite Fact1 and Fact2 by					#
+#		Fact1 := 2**(M) * Fact1					#
+#		Fact2 := 2**(M) * Fact2					#
+#		Thus Fact1 + Fact2 = 2**(M) * 2**(j/64).		#
+#									#
+#	3. Calculate P where 1 + P approximates exp(r):			#
+#		P = r + r*r*(A1+r*(A2+...+r*A5)).			#
+#									#
+#	4. Let AdjFact := 2**(M'). Return				#
+#		AdjFact * ( Fact1 + ((Fact1*P) + Fact2) ).		#
+#		Exit.							#
+#									#
+#	ExpBig								#
+#	1. Generate overflow by Huge * Huge if X > 0; otherwise,	#
+#	        generate underflow by Tiny * Tiny.			#
+#									#
+#	ExpSm								#
+#	1. Return 1 + X.						#
+#									#
+#########################################################################
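+
+# A hedged double-precision C sketch of the 2**X and 10**X schemes above
+# (illustrative "_sketch" names; exp() stands in for the exp(r) polynomial,
+# ldexp()/pow() for the 2^M, 2^(M') and 2^(j/64) table reconstruction, and
+# the ExpBig/ExpSm boundary cases are omitted):
+#
+#	#include <math.h>
+#	#define LN2  0.69314718055994530942
+#	#define LN10 2.30258509299404568402
+#
+#	static double twotox_sketch(double x)
+#	{
+#		long n = lrint(64.0*x);			/* N = round(64*X)	*/
+#		long j = ((n % 64) + 64) % 64;		/* N = 64*m + j		*/
+#		long m = (n - j)/64;
+#		double r = (x - (double)n/64.0)*LN2;	/* step 4: r := r*log2	*/
+#		return ldexp(pow(2.0, (double)j/64.0)*exp(r), (int)m);
+#	}
+#
+#	static double tentox_sketch(double x)
+#	{
+#		long n = lrint(64.0*x*LN10/LN2);	/* N = round(X*64*log2(10)) */
+#		long j = ((n % 64) + 64) % 64;
+#		long m = (n - j)/64;
+#		double r = (x - (double)n/64.0*LN2/LN10)*LN10;
+#		return ldexp(pow(2.0, (double)j/64.0)*exp(r), (int)m);
+#	}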
+
+L2TEN64:
+	long		0x406A934F,0x0979A371	# 64LOG10/LOG2
+L10TWO1:
+	long		0x3F734413,0x509F8000	# LOG2/64LOG10
+
+L10TWO2:
+	long		0xBFCD0000,0xC0219DC1,0xDA994FD2,0x00000000
+
+LOG10:	long		0x40000000,0x935D8DDD,0xAAA8AC17,0x00000000
+
+LOG2:	long		0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+EXPA5:	long		0x3F56C16D,0x6F7BD0B2
+EXPA4:	long		0x3F811112,0x302C712C
+EXPA3:	long		0x3FA55555,0x55554CC1
+EXPA2:	long		0x3FC55555,0x55554A54
+EXPA1:	long		0x3FE00000,0x00000000,0x00000000,0x00000000
+
+TEXPTBL:
+	long		0x3FFF0000,0x80000000,0x00000000,0x3F738000
+	long		0x3FFF0000,0x8164D1F3,0xBC030773,0x3FBEF7CA
+	long		0x3FFF0000,0x82CD8698,0xAC2BA1D7,0x3FBDF8A9
+	long		0x3FFF0000,0x843A28C3,0xACDE4046,0x3FBCD7C9
+	long		0x3FFF0000,0x85AAC367,0xCC487B15,0xBFBDE8DA
+	long		0x3FFF0000,0x871F6196,0x9E8D1010,0x3FBDE85C
+	long		0x3FFF0000,0x88980E80,0x92DA8527,0x3FBEBBF1
+	long		0x3FFF0000,0x8A14D575,0x496EFD9A,0x3FBB80CA
+	long		0x3FFF0000,0x8B95C1E3,0xEA8BD6E7,0xBFBA8373
+	long		0x3FFF0000,0x8D1ADF5B,0x7E5BA9E6,0xBFBE9670
+	long		0x3FFF0000,0x8EA4398B,0x45CD53C0,0x3FBDB700
+	long		0x3FFF0000,0x9031DC43,0x1466B1DC,0x3FBEEEB0
+	long		0x3FFF0000,0x91C3D373,0xAB11C336,0x3FBBFD6D
+	long		0x3FFF0000,0x935A2B2F,0x13E6E92C,0xBFBDB319
+	long		0x3FFF0000,0x94F4EFA8,0xFEF70961,0x3FBDBA2B
+	long		0x3FFF0000,0x96942D37,0x20185A00,0x3FBE91D5
+	long		0x3FFF0000,0x9837F051,0x8DB8A96F,0x3FBE8D5A
+	long		0x3FFF0000,0x99E04593,0x20B7FA65,0xBFBCDE7B
+	long		0x3FFF0000,0x9B8D39B9,0xD54E5539,0xBFBEBAAF
+	long		0x3FFF0000,0x9D3ED9A7,0x2CFFB751,0xBFBD86DA
+	long		0x3FFF0000,0x9EF53260,0x91A111AE,0xBFBEBEDD
+	long		0x3FFF0000,0xA0B0510F,0xB9714FC2,0x3FBCC96E
+	long		0x3FFF0000,0xA2704303,0x0C496819,0xBFBEC90B
+	long		0x3FFF0000,0xA43515AE,0x09E6809E,0x3FBBD1DB
+	long		0x3FFF0000,0xA5FED6A9,0xB15138EA,0x3FBCE5EB
+	long		0x3FFF0000,0xA7CD93B4,0xE965356A,0xBFBEC274
+	long		0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x3FBEA83C
+	long		0x3FFF0000,0xAB7A39B5,0xA93ED337,0x3FBECB00
+	long		0x3FFF0000,0xAD583EEA,0x42A14AC6,0x3FBE9301
+	long		0x3FFF0000,0xAF3B78AD,0x690A4375,0xBFBD8367
+	long		0x3FFF0000,0xB123F581,0xD2AC2590,0xBFBEF05F
+	long		0x3FFF0000,0xB311C412,0xA9112489,0x3FBDFB3C
+	long		0x3FFF0000,0xB504F333,0xF9DE6484,0x3FBEB2FB
+	long		0x3FFF0000,0xB6FD91E3,0x28D17791,0x3FBAE2CB
+	long		0x3FFF0000,0xB8FBAF47,0x62FB9EE9,0x3FBCDC3C
+	long		0x3FFF0000,0xBAFF5AB2,0x133E45FB,0x3FBEE9AA
+	long		0x3FFF0000,0xBD08A39F,0x580C36BF,0xBFBEAEFD
+	long		0x3FFF0000,0xBF1799B6,0x7A731083,0xBFBCBF51
+	long		0x3FFF0000,0xC12C4CCA,0x66709456,0x3FBEF88A
+	long		0x3FFF0000,0xC346CCDA,0x24976407,0x3FBD83B2
+	long		0x3FFF0000,0xC5672A11,0x5506DADD,0x3FBDF8AB
+	long		0x3FFF0000,0xC78D74C8,0xABB9B15D,0xBFBDFB17
+	long		0x3FFF0000,0xC9B9BD86,0x6E2F27A3,0xBFBEFE3C
+	long		0x3FFF0000,0xCBEC14FE,0xF2727C5D,0xBFBBB6F8
+	long		0x3FFF0000,0xCE248C15,0x1F8480E4,0xBFBCEE53
+	long		0x3FFF0000,0xD06333DA,0xEF2B2595,0xBFBDA4AE
+	long		0x3FFF0000,0xD2A81D91,0xF12AE45A,0x3FBC9124
+	long		0x3FFF0000,0xD4F35AAB,0xCFEDFA1F,0x3FBEB243
+	long		0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x3FBDE69A
+	long		0x3FFF0000,0xD99D15C2,0x78AFD7B6,0xBFB8BC61
+	long		0x3FFF0000,0xDBFBB797,0xDAF23755,0x3FBDF610
+	long		0x3FFF0000,0xDE60F482,0x5E0E9124,0xBFBD8BE1
+	long		0x3FFF0000,0xE0CCDEEC,0x2A94E111,0x3FBACB12
+	long		0x3FFF0000,0xE33F8972,0xBE8A5A51,0x3FBB9BFE
+	long		0x3FFF0000,0xE5B906E7,0x7C8348A8,0x3FBCF2F4
+	long		0x3FFF0000,0xE8396A50,0x3C4BDC68,0x3FBEF22F
+	long		0x3FFF0000,0xEAC0C6E7,0xDD24392F,0xBFBDBF4A
+	long		0x3FFF0000,0xED4F301E,0xD9942B84,0x3FBEC01A
+	long		0x3FFF0000,0xEFE4B99B,0xDCDAF5CB,0x3FBE8CAC
+	long		0x3FFF0000,0xF281773C,0x59FFB13A,0xBFBCBB3F
+	long		0x3FFF0000,0xF5257D15,0x2486CC2C,0x3FBEF73A
+	long		0x3FFF0000,0xF7D0DF73,0x0AD13BB9,0xBFB8B795
+	long		0x3FFF0000,0xFA83B2DB,0x722A033A,0x3FBEF84B
+	long		0x3FFF0000,0xFD3E0C0C,0xF486C175,0xBFBEF581
+
+	set		INT,L_SCR1
+
+	set		X,FP_SCR0
+	set		XDCARE,X+2
+	set		XFRAC,X+4
+
+	set		ADJFACT,FP_SCR0
+
+	set		FACT1,FP_SCR0
+	set		FACT1HI,FACT1+4
+	set		FACT1LOW,FACT1+8
+
+	set		FACT2,FP_SCR1
+	set		FACT2HI,FACT2+4
+	set		FACT2LOW,FACT2+8
+
+	global		stwotox
+#--ENTRY POINT FOR 2**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+stwotox:
+	fmovm.x		(%a0),&0x80		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	fmov.x		%fp0,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FB98000		# |X| >= 2**(-70)?
+	bge.b		TWOOK1
+	bra.w		EXPBORS
+
+TWOOK1:
+	cmp.l		%d1,&0x400D80C0		# |X| > 16480?
+	ble.b		TWOMAIN
+	bra.w		EXPBORS
+
+TWOMAIN:
+#--USUAL CASE, 2^(-70) <= |X| <= 16480
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42800000,%fp1	# 64 * X
+	fmov.l		%fp1,INT(%a6)		# N = ROUND-TO-INT(64 X)
+	mov.l		%d2,-(%sp)
+	lea		TEXPTBL(%pc),%a1	# LOAD ADDRESS OF TABLE OF 2^(J/64)
+	fmov.l		INT(%a6),%fp1		# N --> FLOATING FMT
+	mov.l		INT(%a6),%d1
+	mov.l		%d1,%d2
+	and.l		&0x3F,%d1		# D0 IS J
+	asl.l		&4,%d1			# DISPLACEMENT FOR 2^(J/64)
+	add.l		%d1,%a1			# ADDRESS FOR 2^(J/64)
+	asr.l		&6,%d2			# d2 IS L, N = 64L + J
+	mov.l		%d2,%d1
+	asr.l		&1,%d1			# D0 IS M
+	sub.l		%d1,%d2			# d2 IS M', N = 64(M+M') + J
+	add.l		&0x3FFF,%d2
+
+#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+#--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+#--ADJFACT = 2^(M').
+#--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
+
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmul.s		&0x3C800000,%fp1	# (1/64)*N
+	mov.l		(%a1)+,FACT1(%a6)
+	mov.l		(%a1)+,FACT1HI(%a6)
+	mov.l		(%a1)+,FACT1LOW(%a6)
+	mov.w		(%a1)+,FACT2(%a6)
+
+	fsub.x		%fp1,%fp0		# X - (1/64)*INT(64 X)
+
+	mov.w		(%a1)+,FACT2HI(%a6)
+	clr.w		FACT2HI+2(%a6)
+	clr.l		FACT2LOW(%a6)
+	add.w		%d1,FACT1(%a6)
+	fmul.x		LOG2(%pc),%fp0		# FP0 IS R
+	add.w		%d1,FACT2(%a6)
+
+	bra.w		expr
+
+EXPBORS:
+#--FPCR, D0 SAVED
+	cmp.l		%d1,&0x3FFF8000
+	bgt.b		TEXPBIG
+
+#--|X| IS SMALL, RETURN 1 + X
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	fadd.s		&0x3F800000,%fp0	# RETURN 1 + X
+	bra		t_pinx2
+
+TEXPBIG:
+#--|X| IS LARGE, GENERATE OVERFLOW IF X > 0; ELSE GENERATE UNDERFLOW
+#--REGISTERS SAVED SO FAR ARE FPCR AND D0
+	mov.l		X(%a6),%d1
+	cmp.l		%d1,&0
+	blt.b		EXPNEG
+
+	bra		t_ovfl2			# t_ovfl expects positive value
+
+EXPNEG:
+	bra		t_unfl2			# t_unfl expects positive value
+
+	global		stwotoxd
+stwotoxd:
+#--ENTRY POINT FOR 2**(X) FOR DENORMALIZED ARGUMENT
+
+	fmov.l		%d0,%fpcr		# set user's rounding mode/precision
+	fmov.s		&0x3F800000,%fp0	# RETURN 1 + X
+	mov.l		(%a0),%d1
+	or.l		&0x00800001,%d1
+	fadd.s		%d1,%fp0
+	bra		t_pinx2
+
+	global		stentox
+#--ENTRY POINT FOR 10**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+stentox:
+	fmovm.x		(%a0),&0x80		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	fmov.x		%fp0,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FB98000		# |X| >= 2**(-70)?
+	bge.b		TENOK1
+	bra.w		EXPBORS
+
+TENOK1:
+	cmp.l		%d1,&0x400B9B07		# |X| <= 16480*log2/log10 ?
+	ble.b		TENMAIN
+	bra.w		EXPBORS
+
+TENMAIN:
+#--USUAL CASE, 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10
+
+	fmov.x		%fp0,%fp1
+	fmul.d		L2TEN64(%pc),%fp1	# X*64*LOG10/LOG2
+	fmov.l		%fp1,INT(%a6)		# N=INT(X*64*LOG10/LOG2)
+	mov.l		%d2,-(%sp)
+	lea		TEXPTBL(%pc),%a1	# LOAD ADDRESS OF TABLE OF 2^(J/64)
+	fmov.l		INT(%a6),%fp1		# N --> FLOATING FMT
+	mov.l		INT(%a6),%d1
+	mov.l		%d1,%d2
+	and.l		&0x3F,%d1		# D0 IS J
+	asl.l		&4,%d1			# DISPLACEMENT FOR 2^(J/64)
+	add.l		%d1,%a1			# ADDRESS FOR 2^(J/64)
+	asr.l		&6,%d2			# d2 IS L, N = 64L + J
+	mov.l		%d2,%d1
+	asr.l		&1,%d1			# D0 IS M
+	sub.l		%d1,%d2			# d2 IS M', N = 64(M+M') + J
+	add.l		&0x3FFF,%d2
+
+#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+#--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+#--ADJFACT = 2^(M').
+#--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmov.x		%fp1,%fp2
+
+	fmul.d		L10TWO1(%pc),%fp1	# N*(LOG2/64LOG10)_LEAD
+	mov.l		(%a1)+,FACT1(%a6)
+
+	fmul.x		L10TWO2(%pc),%fp2	# N*(LOG2/64LOG10)_TRAIL
+
+	mov.l		(%a1)+,FACT1HI(%a6)
+	mov.l		(%a1)+,FACT1LOW(%a6)
+	fsub.x		%fp1,%fp0		# X - N L_LEAD
+	mov.w		(%a1)+,FACT2(%a6)
+
+	fsub.x		%fp2,%fp0		# X - N L_TRAIL
+
+	mov.w		(%a1)+,FACT2HI(%a6)
+	clr.w		FACT2HI+2(%a6)
+	clr.l		FACT2LOW(%a6)
+
+	fmul.x		LOG10(%pc),%fp0		# FP0 IS R
+	add.w		%d1,FACT1(%a6)
+	add.w		%d1,FACT2(%a6)
+
+expr:
+#--FPCR, FP2, FP3 ARE SAVED IN ORDER AS SHOWN.
+#--ADJFACT CONTAINS 2**(M'), FACT1 + FACT2 = 2**(M) * 2**(J/64).
+#--FP0 IS R. THE FOLLOWING CODE COMPUTES
+#--	2**(M'+M) * 2**(J/64) * EXP(R)
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS S = R*R
+
+	fmov.d		EXPA5(%pc),%fp2		# FP2 IS A5
+	fmov.d		EXPA4(%pc),%fp3		# FP3 IS A4
+
+	fmul.x		%fp1,%fp2		# FP2 IS S*A5
+	fmul.x		%fp1,%fp3		# FP3 IS S*A4
+
+	fadd.d		EXPA3(%pc),%fp2		# FP2 IS A3+S*A5
+	fadd.d		EXPA2(%pc),%fp3		# FP3 IS A2+S*A4
+
+	fmul.x		%fp1,%fp2		# FP2 IS S*(A3+S*A5)
+	fmul.x		%fp1,%fp3		# FP3 IS S*(A2+S*A4)
+
+	fadd.d		EXPA1(%pc),%fp2		# FP2 IS A1+S*(A3+S*A5)
+	fmul.x		%fp0,%fp3		# FP3 IS R*S*(A2+S*A4)
+
+	fmul.x		%fp1,%fp2		# FP2 IS S*(A1+S*(A3+S*A5))
+	fadd.x		%fp3,%fp0		# FP0 IS R+R*S*(A2+S*A4)
+	fadd.x		%fp2,%fp0		# FP0 IS EXP(R) - 1
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+#--FINAL RECONSTRUCTION PROCESS
+#--EXP(X) = 2^M*2^(J/64) + 2^M*2^(J/64)*(EXP(R)-1)  -  (1 OR 0)
+
+	fmul.x		FACT1(%a6),%fp0
+	fadd.x		FACT2(%a6),%fp0
+	fadd.x		FACT1(%a6),%fp0
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	mov.w		%d2,ADJFACT(%a6)	# INSERT EXPONENT
+	mov.l		(%sp)+,%d2
+	mov.l		&0x80000000,ADJFACT+4(%a6)
+	clr.l		ADJFACT+8(%a6)
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		ADJFACT(%a6),%fp0	# FINAL ADJUSTMENT
+	bra		t_catch
+
+	global		stentoxd
+stentoxd:
+#--ENTRY POINT FOR 10**(X) FOR DENORMALIZED ARGUMENT
+
+	fmov.l		%d0,%fpcr		# set user's rounding mode/precision
+	fmov.s		&0x3F800000,%fp0	# RETURN 1 + X
+	mov.l		(%a0),%d1
+	or.l		&0x00800001,%d1
+	fadd.s		%d1,%fp0
+	bra		t_pinx2
+
+#########################################################################
+# sscale(): computes the destination operand scaled by the source	#
+#	    operand. If the absolute value of the source operand is	#
+#	    >= 2^14, an overflow or underflow is returned.		#
+#									#
+# INPUT *************************************************************** #
+#	a0  = pointer to double-extended source operand X		#
+#	a1  = pointer to double-extended destination operand Y		#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 =  scale(X,Y)						#
+#									#
+#########################################################################
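+
+# A hedged C sketch of what sscale() computes ("scale_sketch" is an
+# illustrative name; scalbn() plays the role of the 2^int(X) multiply built
+# below, while the real routine also handles denormalized operands and
+# reports overflow/underflow through the trap handlers):
+#
+#	#include <math.h>
+#
+#	static double scale_sketch(double x, double y)
+#	{
+#		double n = trunc(x);		/* fintrz: int part of src	*/
+#		if (n <= -16384.0)		/* |int(X)| >= 2^14: underflow	*/
+#			return copysign(0.0, y);
+#		if (n >= 16384.0)		/* |int(X)| >= 2^14: overflow	*/
+#			return copysign(HUGE_VAL, y);
+#		return scalbn(y, (int)n);	/* Y * 2^int(X)			*/
+#	}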
+
+set	SIGN,		L_SCR1
+
+	global		sscale
+sscale:
+	mov.l		%d0,-(%sp)		# store off ctrl bits for now
+
+	mov.w		DST_EX(%a1),%d1		# get dst exponent
+	smi.b		SIGN(%a6)		# use SIGN to hold dst sign
+	andi.l		&0x00007fff,%d1		# strip sign from dst exp
+
+	mov.w		SRC_EX(%a0),%d0		# check src bounds
+	andi.w		&0x7fff,%d0		# clr src sign bit
+	cmpi.w		%d0,&0x3fff		# is src ~ ZERO?
+	blt.w		src_small		# yes
+	cmpi.w		%d0,&0x400c		# no; is src too big?
+	bgt.w		src_out			# yes
+
+#
+# Source is within 2^14 range.
+#
+src_ok:
+	fintrz.x	SRC(%a0),%fp0		# calc int of src
+	fmov.l		%fp0,%d0		# int src to d0
+# don't want any accrued bits from the fintrz showing up later since
+# we may need to read the fpsr for the last fp op in t_catch2().
+	fmov.l		&0x0,%fpsr
+
+	tst.b		DST_HI(%a1)		# is dst denormalized?
+	bmi.b		sok_norm
+
+# the dst is a DENORM. normalize the DENORM and add the adjustment to
+# the src value. then, jump to the norm part of the routine.
+sok_dnrm:
+	mov.l		%d0,-(%sp)		# save src for now
+
+	mov.w		DST_EX(%a1),FP_SCR0_EX(%a6) # make a copy
+	mov.l		DST_HI(%a1),FP_SCR0_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR0_LO(%a6)
+
+	lea		FP_SCR0(%a6),%a0	# pass ptr to DENORM
+	bsr.l		norm			# normalize the DENORM
+	neg.l		%d0
+	add.l		(%sp)+,%d0		# add adjustment to src
+
+	fmovm.x		FP_SCR0(%a6),&0x80	# load normalized DENORM
+
+	cmpi.w		%d0,&-0x3fff		# is the shft amt really low?
+	bge.b		sok_norm2		# thank goodness no
+
+# the multiply factor that we're trying to create should be a denorm
+# for the multiply to work. therefore, we're going to actually do a
+# multiply with a denorm which will cause an unimplemented data type
+# exception to be put into the machine which will be caught and corrected
+# later. we don't do this with the DENORMs above because this method
+# is slower. but, don't fret, I don't see it being used much either.
+	fmov.l		(%sp)+,%fpcr		# restore user fpcr
+	mov.l		&0x80000000,%d1		# load normalized mantissa
+	subi.l		&-0x3fff,%d0		# how many should we shift?
+	neg.l		%d0			# make it positive
+	cmpi.b		%d0,&0x20		# is it > 32?
+	bge.b		sok_dnrm_32		# yes
+	lsr.l		%d0,%d1			# no; bit stays in upper lw
+	clr.l		-(%sp)			# insert zero low mantissa
+	mov.l		%d1,-(%sp)		# insert new high mantissa
+	clr.l		-(%sp)			# make zero exponent
+	bra.b		sok_norm_cont
+sok_dnrm_32:
+	subi.b		&0x20,%d0		# get shift count
+	lsr.l		%d0,%d1			# make low mantissa longword
+	mov.l		%d1,-(%sp)		# insert new low mantissa
+	clr.l		-(%sp)			# insert zero high mantissa
+	clr.l		-(%sp)			# make zero exponent
+	bra.b		sok_norm_cont
+
+# the src will force the dst to a DENORM value or worse. so, let's
+# create an fp multiply that will create the result.
+sok_norm:
+	fmovm.x		DST(%a1),&0x80		# load fp0 with normalized src
+sok_norm2:
+	fmov.l		(%sp)+,%fpcr		# restore user fpcr
+
+	addi.w		&0x3fff,%d0		# turn src amt into exp value
+	swap		%d0			# put exponent in high word
+	clr.l		-(%sp)			# insert new exponent
+	mov.l		&0x80000000,-(%sp)	# insert new high mantissa
+	mov.l		%d0,-(%sp)		# insert new lo mantissa
+
+sok_norm_cont:
+	fmov.l		%fpcr,%d0		# d0 needs fpcr for t_catch2
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		(%sp)+,%fp0		# do the multiply
+	bra		t_catch2		# catch any exceptions
+
+#
+# Source is outside of 2^14 range.  Test the sign and branch
+# to the appropriate exception handler.
+#
+src_out:
+	mov.l		(%sp)+,%d0		# restore ctrl bits
+	exg		%a0,%a1			# swap src,dst ptrs
+	tst.b		SRC_EX(%a1)		# is src negative?
+	bmi		t_unfl			# yes; underflow
+	bra		t_ovfl_sc		# no; overflow
+
+#
+# The source input is below 1, so we check for denormalized numbers
+# and set unfl.
+#
+src_small:
+	tst.b		DST_HI(%a1)		# is dst denormalized?
+	bpl.b		ssmall_done		# yes
+
+	mov.l		(%sp)+,%d0
+	fmov.l		%d0,%fpcr		# no; load control bits
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		DST(%a1),%fp0		# simply return dest
+	bra		t_catch2
+ssmall_done:
+	mov.l		(%sp)+,%d0		# load control bits into d0
+	mov.l		%a1,%a0			# pass ptr to dst
+	bra		t_resdnrm
+
+#########################################################################
+# smod(): computes the fp MOD of the input values X,Y.			#
+# srem(): computes the fp (IEEE) REM of the input values X,Y.		#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input X			#
+#	a1 = pointer to extended precision input Y			#
+#	d0 = round precision,mode					#
+#									#
+#	The input operands X and Y can be either normalized or		#
+#	denormalized.							#
+#									#
+# OUTPUT ************************************************************** #
+#      fp0 = FREM(X,Y) or FMOD(X,Y)					#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#       Step 1.  Save and strip signs of X and Y: signX := sign(X),	#
+#                signY := sign(Y), X := |X|, Y := |Y|,			#
+#                signQ := signX EOR signY. Record whether MOD or REM	#
+#                is requested.						#
+#									#
+#       Step 2.  Set L := expo(X)-expo(Y), k := 0, Q := 0.		#
+#                If (L < 0) then					#
+#                   R := X, go to Step 4.				#
+#                else							#
+#                   R := 2^(-L)X, j := L.				#
+#                endif							#
+#									#
+#       Step 3.  Perform MOD(X,Y)					#
+#            3.1 If R = Y, go to Step 9.				#
+#            3.2 If R > Y, then { R := R - Y, Q := Q + 1}		#
+#            3.3 If j = 0, go to Step 4.				#
+#            3.4 k := k + 1, j := j - 1, Q := 2Q, R := 2R. Go to	#
+#                Step 3.1.						#
+#									#
+#       Step 4.  At this point, R = X - QY = MOD(X,Y). Set		#
+#                Last_Subtract := false (used in Step 7 below). If	#
+#                MOD is requested, go to Step 6.			#
+#									#
+#       Step 5.  R = MOD(X,Y), but REM(X,Y) is requested.		#
+#            5.1 If R < Y/2, then R = MOD(X,Y) = REM(X,Y). Go to	#
+#                Step 6.						#
+#            5.2 If R > Y/2, then { set Last_Subtract := true,		#
+#                Q := Q + 1, Y := signY*Y }. Go to Step 6.		#
+#            5.3 This is the tricky case of R = Y/2. If Q is odd,	#
+#                then { Q := Q + 1, signX := -signX }.			#
+#									#
+#       Step 6.  R := signX*R.						#
+#									#
+#       Step 7.  If Last_Subtract = true, R := R - Y.			#
+#									#
+#       Step 8.  Return signQ, last 7 bits of Q, and R as required.	#
+#									#
+#       Step 9.  At this point, R = 2^(-j)*X - Q Y = Y. Thus,		#
+#                X = 2^(j)*(Q+1)Y. set Q := 2^(j)*(Q+1),		#
+#                R := 0. Return signQ, last 7 bits of Q, and R.		#
+#									#
+#########################################################################
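+
+# The steps above amount to a restoring shift-and-subtract division. As an
+# illustrative C sketch only (finite, nonzero, normalized x and y; NOT the
+# package's bit-exact code), Steps 2 through 7 look like this:
+#
+#	#include <math.h>
+#	double mod_rem_sketch(double x, double y, int is_rem, int *q7)
+#	{
+#		double ax = fabs(x), ay = fabs(y), r;
+#		int j = ilogb(ax) - ilogb(ay);		/* L = expo(X)-expo(Y) */
+#		unsigned long q = 0;
+#		if (j < 0) {
+#			r = ax;				/* X is already MOD(X,Y) */
+#		} else {
+#			r = ldexp(ax, -j);		/* R := 2^(-L)X, so R < 2Y */
+#			for (;;) {			/* Step 3 */
+#				if (r >= ay) { r -= ay; q++; }
+#				if (j-- == 0) break;
+#				q += q; r += r;		/* Q := 2Q, R := 2R */
+#			}
+#		}
+#		if (is_rem &&				/* Step 5: round Q to nearest */
+#		    (r > 0.5*ay || (r == 0.5*ay && (q & 1)))) {
+#			r -= ay; q++;
+#		}
+#		*q7 = (int)(q & 0x7f);			/* low 7 quotient bits */
+#		return signbit(x) ? -r : r;		/* Steps 6-7: sign of X */
+#	}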
+
+	set		Mod_Flag,L_SCR3
+	set		Sc_Flag,L_SCR3+1
+
+	set		SignY,L_SCR2
+	set		SignX,L_SCR2+2
+	set		SignQ,L_SCR3+2
+
+	set		Y,FP_SCR0
+	set		Y_Hi,Y+4
+	set		Y_Lo,Y+8
+
+	set		R,FP_SCR1
+	set		R_Hi,R+4
+	set		R_Lo,R+8
+
+Scale:
+	long		0x00010000,0x80000000,0x00000000,0x00000000
+
+	global		smod
+smod:
+	clr.b		FPSR_QBYTE(%a6)
+	mov.l		%d0,-(%sp)		# save ctrl bits
+	clr.b		Mod_Flag(%a6)
+	bra.b		Mod_Rem
+
+	global		srem
+srem:
+	clr.b		FPSR_QBYTE(%a6)
+	mov.l		%d0,-(%sp)		# save ctrl bits
+	mov.b		&0x1,Mod_Flag(%a6)
+
+Mod_Rem:
+#..Save sign of X and Y
+	movm.l		&0x3f00,-(%sp)		# save data registers
+	mov.w		SRC_EX(%a0),%d3
+	mov.w		%d3,SignY(%a6)
+	and.l		&0x00007FFF,%d3		# Y := |Y|
+
+#
+	mov.l		SRC_HI(%a0),%d4
+	mov.l		SRC_LO(%a0),%d5		# (D3,D4,D5) is |Y|
+
+	tst.l		%d3
+	bne.b		Y_Normal
+
+	mov.l		&0x00003FFE,%d3		# $3FFD + 1
+	tst.l		%d4
+	bne.b		HiY_not0
+
+HiY_0:
+	mov.l		%d5,%d4
+	clr.l		%d5
+	sub.l		&32,%d3
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6
+	lsl.l		%d6,%d4
+	sub.l		%d6,%d3			# (D3,D4,D5) is normalized
+#	                                        ...with bias $7FFD
+	bra.b		Chk_X
+
+HiY_not0:
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6
+	sub.l		%d6,%d3
+	lsl.l		%d6,%d4
+	mov.l		%d5,%d7			# a copy of D5
+	lsl.l		%d6,%d5
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d4			# (D3,D4,D5) normalized
+#                                       ...with bias $7FFD
+	bra.b		Chk_X
+
+Y_Normal:
+	add.l		&0x00003FFE,%d3		# (D3,D4,D5) normalized
+#                                       ...with bias $7FFD
+
+Chk_X:
+	mov.w		DST_EX(%a1),%d0
+	mov.w		%d0,SignX(%a6)
+	mov.w		SignY(%a6),%d1
+	eor.l		%d0,%d1
+	and.l		&0x00008000,%d1
+	mov.w		%d1,SignQ(%a6)		# sign(Q) obtained
+	and.l		&0x00007FFF,%d0
+	mov.l		DST_HI(%a1),%d1
+	mov.l		DST_LO(%a1),%d2		# (D0,D1,D2) is |X|
+	tst.l		%d0
+	bne.b		X_Normal
+	mov.l		&0x00003FFE,%d0
+	tst.l		%d1
+	bne.b		HiX_not0
+
+HiX_0:
+	mov.l		%d2,%d1
+	clr.l		%d2
+	sub.l		&32,%d0
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	lsl.l		%d6,%d1
+	sub.l		%d6,%d0			# (D0,D1,D2) is normalized
+#                                       ...with bias $7FFD
+	bra.b		Init
+
+HiX_not0:
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	sub.l		%d6,%d0
+	lsl.l		%d6,%d1
+	mov.l		%d2,%d7			# a copy of D2
+	lsl.l		%d6,%d2
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d1			# (D0,D1,D2) normalized
+#                                       ...with bias $7FFD
+	bra.b		Init
+
+X_Normal:
+	add.l		&0x00003FFE,%d0		# (D0,D1,D2) normalized
+#                                       ...with bias $7FFD
+
+Init:
+#
+	mov.l		%d3,L_SCR1(%a6)		# save biased exp(Y)
+	mov.l		%d0,-(%sp)		# save biased exp(X)
+	sub.l		%d3,%d0			# L := expo(X)-expo(Y)
+
+	clr.l		%d6			# D6 := carry <- 0
+	clr.l		%d3			# D3 is Q
+	mov.l		&0,%a1			# A1 is k; j+k=L, Q=0
+
+#..(Carry,D1,D2) is R
+	tst.l		%d0
+	bge.b		Mod_Loop_pre
+
+#..expo(X) < expo(Y). Thus X = mod(X,Y)
+#
+	mov.l		(%sp)+,%d0		# restore d0
+	bra.w		Get_Mod
+
+Mod_Loop_pre:
+	addq.l		&0x4,%sp		# erase exp(X)
+#..At this point  R = 2^(-L)X; Q = 0; k = 0; and  k+j = L
+Mod_Loop:
+	tst.l		%d6			# test carry bit
+	bgt.b		R_GT_Y
+
+#..At this point carry = 0, R = (D1,D2), Y = (D4,D5)
+	cmp.l		%d1,%d4			# compare hi(R) and hi(Y)
+	bne.b		R_NE_Y
+	cmp.l		%d2,%d5			# compare lo(R) and lo(Y)
+	bne.b		R_NE_Y
+
+#..At this point, R = Y
+	bra.w		Rem_is_0
+
+R_NE_Y:
+#..use the borrow of the previous compare
+	bcs.b		R_LT_Y			# borrow is set iff R < Y
+
+R_GT_Y:
+#..If Carry is set, then Y < (Carry,D1,D2) < 2Y. Otherwise, Carry = 0
+#..and Y < (D1,D2) < 2Y. Either way, perform R - Y
+	sub.l		%d5,%d2			# lo(R) - lo(Y)
+	subx.l		%d4,%d1			# hi(R) - hi(Y)
+	clr.l		%d6			# clear carry
+	addq.l		&1,%d3			# Q := Q + 1
+
+R_LT_Y:
+#..At this point, Carry=0, R < Y. R = 2^(k-L)X - QY; k+j = L; j >= 0.
+	tst.l		%d0			# see if j = 0.
+	beq.b		PostLoop
+
+	add.l		%d3,%d3			# Q := 2Q
+	add.l		%d2,%d2			# lo(R) = 2lo(R)
+	roxl.l		&1,%d1			# hi(R) = 2hi(R) + carry
+	scs		%d6			# set Carry if 2(R) overflows
+	addq.l		&1,%a1			# k := k+1
+	subq.l		&1,%d0			# j := j - 1
+#..At this point, R=(Carry,D1,D2) = 2^(k-L)X - QY, j+k=L, j >= 0, R < 2Y.
+
+	bra.b		Mod_Loop
+
+PostLoop:
+#..k = L, j = 0, Carry = 0, R = (D1,D2) = X - QY, R < Y.
+
+#..normalize R.
+	mov.l		L_SCR1(%a6),%d0		# new biased expo of R
+	tst.l		%d1
+	bne.b		HiR_not0
+
+HiR_0:
+	mov.l		%d2,%d1
+	clr.l		%d2
+	sub.l		&32,%d0
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	lsl.l		%d6,%d1
+	sub.l		%d6,%d0			# (D0,D1,D2) is normalized
+#                                       ...with bias $7FFD
+	bra.b		Get_Mod
+
+HiR_not0:
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	bmi.b		Get_Mod			# already normalized
+	sub.l		%d6,%d0
+	lsl.l		%d6,%d1
+	mov.l		%d2,%d7			# a copy of D2
+	lsl.l		%d6,%d2
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d1			# (D0,D1,D2) normalized
+
+#
+Get_Mod:
+	cmp.l		%d0,&0x000041FE
+	bge.b		No_Scale
+Do_Scale:
+	mov.w		%d0,R(%a6)
+	mov.l		%d1,R_Hi(%a6)
+	mov.l		%d2,R_Lo(%a6)
+	mov.l		L_SCR1(%a6),%d6
+	mov.w		%d6,Y(%a6)
+	mov.l		%d4,Y_Hi(%a6)
+	mov.l		%d5,Y_Lo(%a6)
+	fmov.x		R(%a6),%fp0		# no exception
+	mov.b		&1,Sc_Flag(%a6)
+	bra.b		ModOrRem
+No_Scale:
+	mov.l		%d1,R_Hi(%a6)
+	mov.l		%d2,R_Lo(%a6)
+	sub.l		&0x3FFE,%d0
+	mov.w		%d0,R(%a6)
+	mov.l		L_SCR1(%a6),%d6
+	sub.l		&0x3FFE,%d6
+	mov.l		%d6,L_SCR1(%a6)
+	fmov.x		R(%a6),%fp0
+	mov.w		%d6,Y(%a6)
+	mov.l		%d4,Y_Hi(%a6)
+	mov.l		%d5,Y_Lo(%a6)
+	clr.b		Sc_Flag(%a6)
+
+#
+ModOrRem:
+	tst.b		Mod_Flag(%a6)
+	beq.b		Fix_Sign
+
+	mov.l		L_SCR1(%a6),%d6		# new biased expo(Y)
+	subq.l		&1,%d6			# biased expo(Y/2)
+	cmp.l		%d0,%d6
+	blt.b		Fix_Sign
+	bgt.b		Last_Sub
+
+	cmp.l		%d1,%d4
+	bne.b		Not_EQ
+	cmp.l		%d2,%d5
+	bne.b		Not_EQ
+	bra.w		Tie_Case
+
+Not_EQ:
+	bcs.b		Fix_Sign
+
+Last_Sub:
+#
+	fsub.x		Y(%a6),%fp0		# no exceptions
+	addq.l		&1,%d3			# Q := Q + 1
+
+#
+Fix_Sign:
+#..Get sign of X
+	mov.w		SignX(%a6),%d6
+	bge.b		Get_Q
+	fneg.x		%fp0
+
+#..Get Q
+#
+Get_Q:
+	clr.l		%d6
+	mov.w		SignQ(%a6),%d6		# D6 is sign(Q)
+	mov.l		&8,%d7
+	lsr.l		%d7,%d6
+	and.l		&0x0000007F,%d3		# 7 bits of Q
+	or.l		%d6,%d3			# sign and bits of Q
+#	swap		%d3
+#	fmov.l		%fpsr,%d6
+#	and.l		&0xFF00FFFF,%d6
+#	or.l		%d3,%d6
+#	fmov.l		%d6,%fpsr		# put Q in fpsr
+	mov.b		%d3,FPSR_QBYTE(%a6)	# put Q in fpsr
+
+#
+Restore:
+	movm.l		(%sp)+,&0xfc		#  {%d2-%d7}
+	mov.l		(%sp)+,%d0
+	fmov.l		%d0,%fpcr
+	tst.b		Sc_Flag(%a6)
+	beq.b		Finish
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		Scale(%pc),%fp0		# may cause underflow
+	bra		t_catch2
+# the '040 package did this apparently to see if the dst operand for the
+# preceding fmul was a denorm. but, it better not have been since the
+# algorithm just got done playing with fp0 and expected no exceptions
+# as a result. trust me...
+#	bra		t_avoid_unsupp		# check for denorm as a
+#						;result of the scaling
+
+Finish:
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		%fp0,%fp0		# capture exceptions & round
+	bra		t_catch2
+
+Rem_is_0:
+#..R = 2^(-j)X - Q Y = Y, thus R = 0 and quotient = 2^j (Q+1)
+	addq.l		&1,%d3
+	cmp.l		%d0,&8			# D0 is j
+	bge.b		Q_Big
+
+	lsl.l		%d0,%d3
+	bra.b		Set_R_0
+
+Q_Big:
+	clr.l		%d3
+
+Set_R_0:
+	fmov.s		&0x00000000,%fp0
+	clr.b		Sc_Flag(%a6)
+	bra.w		Fix_Sign
+
+Tie_Case:
+#..Check parity of Q
+	mov.l		%d3,%d6
+	and.l		&0x00000001,%d6
+	tst.l		%d6
+	beq.w		Fix_Sign		# Q is even
+
+#..Q is odd, Q := Q + 1, signX := -signX
+	addq.l		&1,%d3
+	mov.w		SignX(%a6),%d6
+	eor.l		&0x00008000,%d6
+	mov.w		%d6,SignX(%a6)
+	bra.w		Fix_Sign
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	tag(): return the optype of the input ext fp number		#
+#									#
+#	This routine is used by the 060FPLSP.				#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, ZERO		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#	If it's an unnormalized zero, alter the operand and force it	#
+# to be a normal zero.							#
+#									#
+#########################################################################
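+
+# For reference, the test order below corresponds to this C sketch
+# (illustrative only; the enum members are stand-ins for the package's
+# operand type tag constants, and {exp,hi,lo} is the FTEMP layout):
+#
+#	enum { NORM, ZERO, INF, QNAN, DENORM, UNNORM };	/* stand-in tags */
+#	int tag_sketch(unsigned short exp, unsigned int hi, unsigned int lo)
+#	{
+#		exp &= 0x7fff;				/* strip sign */
+#		if (exp == 0x7fff)			/* max exp: INF or NaN */
+#			return ((hi & 0x7fffffff) | lo) ? QNAN : INF;
+#		if (hi & 0x80000000)			/* j-bit set */
+#			return NORM;
+#		if (exp != 0)				/* j-bit clear, exp != 0 */
+#			return UNNORM;	/* the real routine calls unnorm_fix() */
+#		return (hi | lo) ? DENORM : ZERO;
+#	}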
+
+	global		tag
+tag:
+	mov.w		FTEMP_EX(%a0), %d0	# extract exponent
+	andi.w		&0x7fff, %d0		# strip off sign
+	cmpi.w		%d0, &0x7fff		# is (EXP == MAX)?
+	beq.b		inf_or_nan_x
+not_inf_or_nan_x:
+	btst		&0x7,FTEMP_HI(%a0)
+	beq.b		not_norm_x
+is_norm_x:
+	mov.b		&NORM, %d0
+	rts
+not_norm_x:
+	tst.w		%d0			# is exponent = 0?
+	bne.b		is_unnorm_x
+not_unnorm_x:
+	tst.l		FTEMP_HI(%a0)
+	bne.b		is_denorm_x
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_denorm_x
+is_zero_x:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_x:
+	mov.b		&DENORM, %d0
+	rts
+is_unnorm_x:
+	bsr.l		unnorm_fix		# convert to norm,denorm,or zero
+	rts
+is_unnorm_reg_x:
+	mov.b		&UNNORM, %d0
+	rts
+inf_or_nan_x:
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_nan_x
+	mov.l		FTEMP_HI(%a0), %d0
+	and.l		&0x7fffffff, %d0	# msb is a don't care!
+	bne.b		is_nan_x
+is_inf_x:
+	mov.b		&INF, %d0
+	rts
+is_nan_x:
+	mov.b		&QNAN, %d0
+	rts
+
+#############################################################
+
+qnan:	long		0x7fff0000, 0xffffffff, 0xffffffff
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_dz(): Handle 060FPLSP dz exception for "flogn" emulation.	#
+#	t_dz2(): Handle 060FPLSP dz exception for "fatanh" emulation.	#
+#									#
+#	These routines are used by the 060FPLSP package.		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand.		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default DZ result.					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Transcendental emulation for the 060FPLSP has detected that	#
+# a DZ exception should occur for the instruction. If DZ is disabled,	#
+# return the default result.						#
+#	If DZ is enabled, the dst operand should be returned unscathed	#
+# in fp0 while fp1 is used to create a DZ exception so that the		#
+# operating system can log that such an event occurred.			#
+#									#
+#########################################################################
+
+	global		t_dz
+t_dz:
+	tst.b		SRC_EX(%a0)		# check sign for neg or pos
+	bpl.b		dz_pinf			# branch if pos sign
+
+	global		t_dz2
+t_dz2:
+	ori.l		&dzinf_mask+neg_mask,USER_FPSR(%a6) # set N/I/DZ/ADZ
+
+	btst		&dz_bit,FPCR_ENABLE(%a6)
+	bne.b		dz_minf_ena
+
+# dz is disabled. return a -INF.
+	fmov.s		&0xff800000,%fp0	# return -INF
+	rts
+
+# dz is enabled. create a dz exception so the user can record it
+# but use fp1 instead. return the dst operand unscathed in fp0.
+dz_minf_ena:
+	fmovm.x		EXC_FP0(%a6),&0x80	# return fp0 unscathed
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmov.s		&0xbf800000,%fp1	# load -1
+	fdiv.s		&0x00000000,%fp1	# -1 / 0
+	rts
+
+dz_pinf:
+	ori.l		&dzinf_mask,USER_FPSR(%a6) # set I/DZ/ADZ
+
+	btst		&dz_bit,FPCR_ENABLE(%a6)
+	bne.b		dz_pinf_ena
+
+# dz is disabled. return a +INF.
+	fmov.s		&0x7f800000,%fp0	# return +INF
+	rts
+
+# dz is enabled. create a dz exception so the user can record it
+# but use fp1 instead. return the dst operand unscathed in fp0.
+dz_pinf_ena:
+	fmovm.x		EXC_FP0(%a6),&0x80	# return fp0 unscathed
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmov.s		&0x3f800000,%fp1	# load +1
+	fdiv.s		&0x00000000,%fp1	# +1 / 0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_operr(): Handle 060FPLSP OPERR exception during emulation.	#
+#									#
+#	This routine is used by the 060FPLSP package.			#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	fp1 = source operand						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#	fp1 = unchanged							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	An operand error should occur as the result of transcendental	#
+# emulation in the 060FPLSP. If OPERR is disabled, just return a NAN	#
+# in fp0. If OPERR is enabled, return the dst operand unscathed in fp0	#
+# and the source operand in fp1. Use fp2 to create an OPERR exception	#
+# so that the operating system can log the event.			#
+#									#
+#########################################################################
+
+	global		t_operr
+t_operr:
+	ori.l		&opnan_mask,USER_FPSR(%a6) # set NAN/OPERR/AIOP
+
+	btst		&operr_bit,FPCR_ENABLE(%a6)
+	bne.b		operr_ena
+
+# operr is disabled. return a QNAN in fp0
+	fmovm.x		qnan(%pc),&0x80		# return QNAN
+	rts
+
+# operr is enabled. create an operr exception so the user can record it
+# but use fp2 instead. return the dst operand unscathed in fp0.
+operr_ena:
+	fmovm.x		EXC_FP0(%a6),&0x80	# return fp0 unscathed
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmovm.x		&0x04,-(%sp)		# save fp2
+	fmov.s		&0x7f800000,%fp2	# load +INF
+	fmul.s		&0x00000000,%fp2	# +INF x 0
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+	rts
+
+pls_huge:
+	long		0x7ffe0000,0xffffffff,0xffffffff
+mns_huge:
+	long		0xfffe0000,0xffffffff,0xffffffff
+pls_tiny:
+	long		0x00000000,0x80000000,0x00000000
+mns_tiny:
+	long		0x80000000,0x80000000,0x00000000
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_unfl(): Handle 060FPLSP underflow exception during emulation.	#
+#	t_unfl2(): Handle 060FPLSP underflow exception during		#
+#	           emulation. result always positive.			#
+#									#
+#	This routine is used by the 060FPLSP package.			#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default underflow result					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	An underflow should occur as the result of transcendental	#
+# emulation in the 060FPLSP. Create an underflow by using "fmul"	#
+# and two very small numbers of appropriate sign so the operating	#
+# system can log the event.						#
+#									#
+#########################################################################
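+
+# The same trick can be reproduced on any IEEE machine; as an illustrative
+# C analogue (not part of the package), multiplying two values near the
+# bottom of the range makes the FPU itself record underflow and inexact:
+#
+#	#include <fenv.h>
+#	#include <float.h>
+#	#include <stdio.h>
+#	int main(void)
+#	{
+#		volatile double t = DBL_MIN;	/* plays the role of pls_tiny */
+#		feclearexcept(FE_ALL_EXCEPT);
+#		t = t * DBL_MIN;		/* tiny * tiny -> underflows */
+#		printf("unfl=%d inex=%d\n",
+#		       !!fetestexcept(FE_UNDERFLOW), !!fetestexcept(FE_INEXACT));
+#		return 0;
+#	}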
+
+	global		t_unfl
+t_unfl:
+	tst.b		SRC_EX(%a0)
+	bpl.b		unf_pos
+
+	global		t_unfl2
+t_unfl2:
+	ori.l		&unfinx_mask+neg_mask,USER_FPSR(%a6) # set N/UNFL/INEX2/AUNFL/AINEX
+
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmovm.x		mns_tiny(%pc),&0x80
+	fmul.x		pls_tiny(%pc),%fp0
+
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0
+	mov.b		%d0,FPSR_CC(%a6)
+	rts
+unf_pos:
+	ori.w		&unfinx_mask,FPSR_EXCEPT(%a6) # set UNFL/INEX2/AUNFL/AINEX
+
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmovm.x		pls_tiny(%pc),&0x80
+	fmul.x		%fp0,%fp0
+
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0
+	mov.b		%d0,FPSR_CC(%a6)
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_ovfl(): Handle 060FPLSP overflow exception during emulation.	#
+#		  (monadic)						#
+#	t_ovfl2(): Handle 060FPLSP overflow exception during		#
+#	           emulation. result always positive. (dyadic)		#
+#	t_ovfl_sc(): Handle 060FPLSP overflow exception during		#
+#	             emulation for "fscale".				#
+#									#
+#	This routine is used by the 060FPLSP package.			#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default overflow result					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	An overflow should occur as the result of transcendental	#
+# emulation in the 060FPLSP. Create an overflow by using "fmul"		#
+# and two very large numbers of appropriate sign so the operating	#
+# system can log the event.						#
+#	For t_ovfl_sc() we take special care not to lose the INEX2 bit.	#
+#									#
+#########################################################################
+
+	global		t_ovfl_sc
+t_ovfl_sc:
+	ori.l		&ovfl_inx_mask,USER_FPSR(%a6) # set OVFL/AOVFL/AINEX
+
+	mov.b		%d0,%d1			# fetch rnd prec,mode
+	andi.b		&0xc0,%d1		# extract prec
+	beq.w		ovfl_work
+
+# dst op is a DENORM. we have to normalize the mantissa to see if the
+# result would be inexact for the given precision. make a copy of the
+# dst so we don't screw up the version passed to us.
+	mov.w		LOCAL_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		LOCAL_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		LOCAL_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a0	# pass ptr to FP_SCR0
+	movm.l		&0xc080,-(%sp)		# save d0-d1/a0
+	bsr.l		norm			# normalize mantissa
+	movm.l		(%sp)+,&0x0103		# restore d0-d1/a0
+
+	cmpi.b		%d1,&0x40		# is precision sgl?
+	bne.b		ovfl_sc_dbl		# no; dbl
+ovfl_sc_sgl:
+	tst.l		LOCAL_LO(%a0)		# is lo lw of sgl set?
+	bne.b		ovfl_sc_inx		# yes
+	tst.b		3+LOCAL_HI(%a0)		# is lo byte of hi lw set?
+	bne.b		ovfl_sc_inx		# yes
+	bra.w		ovfl_work		# don't set INEX2
+ovfl_sc_dbl:
+	mov.l		LOCAL_LO(%a0),%d1	# are any of lo 11 bits of
+	andi.l		&0x7ff,%d1		# dbl mantissa set?
+	beq.w		ovfl_work		# no; don't set INEX2
+ovfl_sc_inx:
+	ori.l		&inex2_mask,USER_FPSR(%a6) # set INEX2
+	bra.b		ovfl_work		# continue
+
+	global		t_ovfl
+t_ovfl:
+	ori.w		&ovfinx_mask,FPSR_EXCEPT(%a6) # set OVFL/INEX2/AOVFL/AINEX
+ovfl_work:
+	tst.b		SRC_EX(%a0)
+	bpl.b		ovfl_p
+ovfl_m:
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmovm.x		mns_huge(%pc),&0x80
+	fmul.x		pls_huge(%pc),%fp0
+
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0
+	ori.b		&neg_mask,%d0
+	mov.b		%d0,FPSR_CC(%a6)
+	rts
+ovfl_p:
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmovm.x		pls_huge(%pc),&0x80
+	fmul.x		pls_huge(%pc),%fp0
+
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0
+	mov.b		%d0,FPSR_CC(%a6)
+	rts
+
+	global		t_ovfl2
+t_ovfl2:
+	ori.w		&ovfinx_mask,FPSR_EXCEPT(%a6) # set OVFL/INEX2/AOVFL/AINEX
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmovm.x		pls_huge(%pc),&0x80
+	fmul.x		pls_huge(%pc),%fp0
+
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0
+	mov.b		%d0,FPSR_CC(%a6)
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_catch(): Handle 060FPLSP OVFL,UNFL,or INEX2 exception during	#
+#		   emulation.						#
+#	t_catch2(): Handle 060FPLSP OVFL,UNFL,or INEX2 exception during	#
+#		    emulation.						#
+#									#
+#	These routines are used by the 060FPLSP package.		#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	fp0 = default underflow or overflow result			#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If an overflow or underflow occurred during the last		#
+# instruction of transcendental 060FPLSP emulation, then it has already	#
+# occurred and has been logged. Now we need to see if an inexact	#
+# exception should occur.						#
+#									#
+#########################################################################
+
+	global		t_catch2
+t_catch2:
+	fmov.l		%fpsr,%d0
+	or.l		%d0,USER_FPSR(%a6)
+	bra.b		inx2_work
+
+	global		t_catch
+t_catch:
+	fmov.l		%fpsr,%d0
+	or.l		%d0,USER_FPSR(%a6)
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_inx2(): Handle inexact 060FPLSP exception during emulation.	#
+#	t_pinx2(): Handle inexact 060FPLSP exception for "+" results.	#
+#	t_minx2(): Handle inexact 060FPLSP exception for "-" results.	#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	fp0 = default result						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The last instruction of transcendental emulation for the	#
+# 060FPLSP should be inexact. So, if inexact is enabled, then we create	#
+# the event here by adding a large and very small number together	#
+# so that the operating system can log the event.			#
+#	Must check, too, if the result was zero, in which case we just	#
+# set the FPSR bits and return.						#
+#									#
+#########################################################################
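+
+# In the same spirit as the C analogue shown after t_unfl() above, the
+# inexact event here corresponds to a fragment like (illustrative only):
+#
+#	volatile double one = 1.0;
+#	one = one + DBL_MIN;	/* 1 + tiny: result must round, raising FE_INEXACT */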
+
+	global		t_inx2
+t_inx2:
+	fblt.w		t_minx2
+	fbeq.w		inx2_zero
+
+	global		t_pinx2
+t_pinx2:
+	ori.w		&inx2a_mask,FPSR_EXCEPT(%a6) # set INEX2/AINEX
+	bra.b		inx2_work
+
+	global		t_minx2
+t_minx2:
+	ori.l		&inx2a_mask+neg_mask,USER_FPSR(%a6)
+
+inx2_work:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
+	bne.b		inx2_work_ena		# yes
+	rts
+inx2_work_ena:
+	fmov.l		USER_FPCR(%a6),%fpcr	# insert user's exceptions
+	fmov.s		&0x3f800000,%fp1	# load +1
+	fadd.x		pls_tiny(%pc),%fp1	# cause exception
+	rts
+
+inx2_zero:
+	mov.b		&z_bmask,FPSR_CC(%a6)
+	ori.w		&inx2a_mask,2+USER_FPSR(%a6) # set INEX/AINEX
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_extdnrm(): Handle DENORM inputs in 060FPLSP.			#
+#	t_resdnrm(): Handle DENORM inputs in 060FPLSP for "fscale".	#
+#									#
+#	This routine is used by the 060FPLSP package.			#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	For all functions that have a denormalized input and that	#
+# f(x)=x, this is the entry point.					#
+#	DENORM value is moved using "fmove" which triggers an exception	#
+# if enabled so the operating system can log the event.			#
+#									#
+#########################################################################
+
+	global		t_extdnrm
+t_extdnrm:
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmov.x		SRC_EX(%a0),%fp0
+	fmov.l		%fpsr,%d0
+	ori.l		&unfinx_mask,%d0
+	or.l		%d0,USER_FPSR(%a6)
+	rts
+
+	global		t_resdnrm
+t_resdnrm:
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmov.x		SRC_EX(%a0),%fp0
+	fmov.l		%fpsr,%d0
+	or.l		%d0,USER_FPSR(%a6)
+	rts
+
+##########################################
+
+#
+# sto_cos:
+#	This is used by fsincos library emulation. The correct
+# values are already in fp0 and fp1 so we do nothing here.
+#
+	global		sto_cos
+sto_cos:
+	rts
+
+##########################################
+
+#
+#	dst_qnan --- force result when destination is a NaN
+#
+	global		dst_qnan
+dst_qnan:
+	fmov.x		DST(%a1),%fp0
+	tst.b		DST_EX(%a1)
+	bmi.b		dst_qnan_m
+dst_qnan_p:
+	mov.b		&nan_bmask,FPSR_CC(%a6)
+	rts
+dst_qnan_m:
+	mov.b		&nan_bmask+neg_bmask,FPSR_CC(%a6)
+	rts
+
+#
+#	src_qnan --- force result when source is a NaN
+#
+	global		src_qnan
+src_qnan:
+	fmov.x		SRC(%a0),%fp0
+	tst.b		SRC_EX(%a0)
+	bmi.b		src_qnan_m
+src_qnan_p:
+	mov.b		&nan_bmask,FPSR_CC(%a6)
+	rts
+src_qnan_m:
+	mov.b		&nan_bmask+neg_bmask,FPSR_CC(%a6)
+	rts
+
+##########################################
+
+#
+#	Native instruction support
+#
+#	Some systems may need entry points even for 68060 native
+#	instructions.  These routines are provided for
+#	convenience.
+#
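+#	As a purely hypothetical illustration (the ABI details below are an
+#	assumption, not something this file defines), a C caller on a system
+#	that passes arguments on the stack and returns floating-point results
+#	in fp0 could declare a few of these entry points as:
+#
+#		extern float       _fadds_(float dst, float src);
+#		extern double      _fmuld_(double dst, double src);
+#		extern long double _fsqrtx_(long double src);
+#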
+	global		_fadds_
+_fadds_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.s		0x8(%sp),%fp0		# load sgl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fadd.s		0x8(%sp),%fp0		# fadd w/ sgl src
+	rts
+
+	global		_faddd_
+_faddd_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.d		0x8(%sp),%fp0		# load dbl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fadd.d		0xc(%sp),%fp0		# fadd w/ dbl src
+	rts
+
+	global		_faddx_
+_faddx_:
+	fmovm.x		0x4(%sp),&0x80		# load ext dst
+	fadd.x		0x10(%sp),%fp0		# fadd w/ ext src
+	rts
+
+	global		_fsubs_
+_fsubs_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.s		0x8(%sp),%fp0		# load sgl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fsub.s		0x8(%sp),%fp0		# fsub w/ sgl src
+	rts
+
+	global		_fsubd_
+_fsubd_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.d		0x8(%sp),%fp0		# load dbl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fsub.d		0xc(%sp),%fp0		# fsub w/ dbl src
+	rts
+
+	global		_fsubx_
+_fsubx_:
+	fmovm.x		0x4(%sp),&0x80		# load ext dst
+	fsub.x		0x10(%sp),%fp0		# fsub w/ ext src
+	rts
+
+	global		_fmuls_
+_fmuls_:
+	mov.b		%d0,%d1			# fetch rnd prec,mode
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.s		0x8(%sp),%fp0		# load sgl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fmul.s		0x8(%sp),%fp0		# fmul w/ sgl src
+	rts
+
+	global		_fmuld_
+_fmuld_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.d		0x8(%sp),%fp0		# load dbl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fmul.d		0xc(%sp),%fp0		# fmul w/ dbl src
+	rts
+
+	global		_fmulx_
+_fmulx_:
+	fmovm.x		0x4(%sp),&0x80		# load ext dst
+	fmul.x		0x10(%sp),%fp0		# fmul w/ ext src
+	rts
+
+	global		_fdivs_
+_fdivs_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.s		0x8(%sp),%fp0		# load sgl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fdiv.s		0x8(%sp),%fp0		# fdiv w/ sgl src
+	rts
+
+	global		_fdivd_
+_fdivd_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.d		0x8(%sp),%fp0		# load dbl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fdiv.d		0xc(%sp),%fp0		# fdiv w/ dbl src
+	rts
+
+	global		_fdivx_
+_fdivx_:
+	fmovm.x		0x4(%sp),&0x80		# load ext dst
+	fdiv.x		0x10(%sp),%fp0		# fdiv w/ ext src
+	rts
+
+	global		_fabss_
+_fabss_:
+	fabs.s		0x4(%sp),%fp0		# fabs w/ sgl src
+	rts
+
+	global		_fabsd_
+_fabsd_:
+	fabs.d		0x4(%sp),%fp0		# fabs w/ dbl src
+	rts
+
+	global		_fabsx_
+_fabsx_:
+	fabs.x		0x4(%sp),%fp0		# fabs w/ ext src
+	rts
+
+	global		_fnegs_
+_fnegs_:
+	fneg.s		0x4(%sp),%fp0		# fneg w/ sgl src
+	rts
+
+	global		_fnegd_
+_fnegd_:
+	fneg.d		0x4(%sp),%fp0		# fneg w/ dbl src
+	rts
+
+	global		_fnegx_
+_fnegx_:
+	fneg.x		0x4(%sp),%fp0		# fneg w/ ext src
+	rts
+
+	global		_fsqrts_
+_fsqrts_:
+	fsqrt.s		0x4(%sp),%fp0		# fsqrt w/ sgl src
+	rts
+
+	global		_fsqrtd_
+_fsqrtd_:
+	fsqrt.d		0x4(%sp),%fp0		# fsqrt w/ dbl src
+	rts
+
+	global		_fsqrtx_
+_fsqrtx_:
+	fsqrt.x		0x4(%sp),%fp0		# fsqrt w/ ext src
+	rts
+
+	global		_fints_
+_fints_:
+	fint.s		0x4(%sp),%fp0		# fint w/ sgl src
+	rts
+
+	global		_fintd_
+_fintd_:
+	fint.d		0x4(%sp),%fp0		# fint w/ dbl src
+	rts
+
+	global		_fintx_
+_fintx_:
+	fint.x		0x4(%sp),%fp0		# fint w/ ext src
+	rts
+
+	global		_fintrzs_
+_fintrzs_:
+	fintrz.s	0x4(%sp),%fp0		# fintrz w/ sgl src
+	rts
+
+	global		_fintrzd_
+_fintrzd_:
+	fintrz.d	0x4(%sp),%fp0		# fintrz w/ dbl src
+	rts
+
+	global		_fintrzx_
+_fintrzx_:
+	fintrz.x	0x4(%sp),%fp0		# fintrz w/ ext src
+	rts
+
+########################################################################
+
+#########################################################################
+# src_zero(): Return signed zero according to sign of src operand.	#
+#########################################################################
+	global		src_zero
+src_zero:
+	tst.b		SRC_EX(%a0)		# get sign of src operand
+	bmi.b		ld_mzero		# if neg, load neg zero
+
+#
+# ld_pzero(): return a positive zero.
+#
+	global		ld_pzero
+ld_pzero:
+	fmov.s		&0x00000000,%fp0	# load +0
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+
+# ld_mzero(): return a negative zero.
+	global		ld_mzero
+ld_mzero:
+	fmov.s		&0x80000000,%fp0	# load -0
+	mov.b		&neg_bmask+z_bmask,FPSR_CC(%a6) # set 'N','Z' ccode bits
+	rts
+
+#########################################################################
+# dst_zero(): Return signed zero according to sign of dst operand.	#
+#########################################################################
+	global		dst_zero
+dst_zero:
+	tst.b		DST_EX(%a1)		# get sign of dst operand
+	bmi.b		ld_mzero		# if neg, load neg zero
+	bra.b		ld_pzero		# load positive zero
+
+#########################################################################
+# src_inf(): Return signed inf according to sign of src operand.	#
+#########################################################################
+	global		src_inf
+src_inf:
+	tst.b		SRC_EX(%a0)		# get sign of src operand
+	bmi.b		ld_minf			# if negative branch
+
+#
+# ld_pinf(): return a positive infinity.
+#
+	global		ld_pinf
+ld_pinf:
+	fmov.s		&0x7f800000,%fp0	# load +INF
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'INF' ccode bit
+	rts
+
+#
+# ld_minf(): return a negative infinity.
+#
+	global		ld_minf
+ld_minf:
+	fmov.s		&0xff800000,%fp0	# load -INF
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+	rts
+
+#########################################################################
+# dst_inf(): Return signed inf according to sign of dst operand.	#
+#########################################################################
+	global		dst_inf
+dst_inf:
+	tst.b		DST_EX(%a1)		# get sign of dst operand
+	bmi.b		ld_minf			# if negative branch
+	bra.b		ld_pinf
+
+	global		szr_inf
+#################################################################
+# szr_inf(): Return +ZERO for a negative src operand or		#
+#	            +INF for a positive src operand.		#
+#	     Routine used for fetox, ftwotox, and ftentox.	#
+#################################################################
+szr_inf:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_pzero
+	bra.b		ld_pinf
+
+#########################################################################
+# sopr_inf(): Return +INF for a positive src operand or			#
+#	      jump to operand error routine for a negative src operand.	#
+#	      Routine used for flogn, flognp1, flog10, and flog2.	#
+#########################################################################
+	global		sopr_inf
+sopr_inf:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.w		t_operr
+	bra.b		ld_pinf
+
+#################################################################
+# setoxm1i(): Return minus one for a negative src operand or	#
+#	      positive infinity for a positive src operand.	#
+#	      Routine used for fetoxm1.				#
+#################################################################
+	global		setoxm1i
+setoxm1i:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_mone
+	bra.b		ld_pinf
+
+#########################################################################
+# src_one(): Return signed one according to sign of src operand.	#
+#########################################################################
+	global		src_one
+src_one:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_mone
+
+#
+# ld_pone(): return positive one.
+#
+	global		ld_pone
+ld_pone:
+	fmov.s		&0x3f800000,%fp0	# load +1
+	clr.b		FPSR_CC(%a6)
+	rts
+
+#
+# ld_mone(): return negative one.
+#
+	global		ld_mone
+ld_mone:
+	fmov.s		&0xbf800000,%fp0	# load -1
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+ppiby2:	long		0x3fff0000, 0xc90fdaa2, 0x2168c235
+mpiby2:	long		0xbfff0000, 0xc90fdaa2, 0x2168c235
+
+#################################################################
+# spi_2(): Return signed PI/2 according to sign of src operand.	#
+#################################################################
+	global		spi_2
+spi_2:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_mpi2
+
+#
+# ld_ppi2(): return positive PI/2.
+#
+	global		ld_ppi2
+ld_ppi2:
+	fmov.l		%d0,%fpcr
+	fmov.x		ppiby2(%pc),%fp0	# load +pi/2
+	bra.w		t_pinx2			# set INEX2
+
+#
+# ld_mpi2(): return negative PI/2.
+#
+	global		ld_mpi2
+ld_mpi2:
+	fmov.l		%d0,%fpcr
+	fmov.x		mpiby2(%pc),%fp0	# load -pi/2
+	bra.w		t_minx2			# set INEX2
+
+####################################################
+# The following routines give support for fsincos. #
+####################################################
+
+#
+# ssincosz(): When the src operand is ZERO, store a one in the
+#	      cosine register and return a ZERO in fp0 w/ the same sign
+#	      as the src operand.
+#
+	global		ssincosz
+ssincosz:
+	fmov.s		&0x3f800000,%fp1
+	tst.b		SRC_EX(%a0)		# test sign
+	bpl.b		sincoszp
+	fmov.s		&0x80000000,%fp0	# return sin result in fp0
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)
+	rts
+sincoszp:
+	fmov.s		&0x00000000,%fp0	# return sin result in fp0
+	mov.b		&z_bmask,FPSR_CC(%a6)
+	rts
+
+#
+# ssincosi(): When the src operand is INF, store a QNAN in the cosine
+#	      register and jump to the operand error routine for negative
+#	      src operands.
+#
+	global		ssincosi
+ssincosi:
+	fmov.x		qnan(%pc),%fp1		# load NAN
+	bra.w		t_operr
+
+#
+# ssincosqnan(): When the src operand is a QNAN, store the QNAN in the cosine
+#		 register and branch to the src QNAN routine.
+#
+	global		ssincosqnan
+ssincosqnan:
+	fmov.x		LOCAL_EX(%a0),%fp1
+	bra.w		src_qnan
+
+########################################################################
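+
+# The entry points below are dispatch stubs: DTAG(%a6) holds the destination
+# operand's type tag (NORM is 0, hence the bare "beq"), and each stub steers
+# the NORM/ZERO/INF/DENORM/QNAN combinations of source and destination to
+# the proper routine or default-result handler.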
+
+	global		smod_sdnrm
+	global		smod_snorm
+smod_sdnrm:
+smod_snorm:
+	mov.b		DTAG(%a6),%d1
+	beq.l		smod
+	cmpi.b		%d1,&ZERO
+	beq.w		smod_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		smod
+	bra.l		dst_qnan
+
+	global		smod_szero
+smod_szero:
+	mov.b		DTAG(%a6),%d1
+	beq.l		t_operr
+	cmpi.b		%d1,&ZERO
+	beq.l		t_operr
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		t_operr
+	bra.l		dst_qnan
+
+	global		smod_sinf
+smod_sinf:
+	mov.b		DTAG(%a6),%d1
+	beq.l		smod_fpn
+	cmpi.b		%d1,&ZERO
+	beq.l		smod_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		smod_fpn
+	bra.l		dst_qnan
+
+smod_zro:
+srem_zro:
+	mov.b		SRC_EX(%a0),%d1		# get src sign
+	mov.b		DST_EX(%a1),%d0		# get dst sign
+	eor.b		%d0,%d1			# get qbyte sign
+	andi.b		&0x80,%d1
+	mov.b		%d1,FPSR_QBYTE(%a6)
+	tst.b		%d0
+	bpl.w		ld_pzero
+	bra.w		ld_mzero
+
+smod_fpn:
+srem_fpn:
+	clr.b		FPSR_QBYTE(%a6)
+	mov.l		%d0,-(%sp)
+	mov.b		SRC_EX(%a0),%d1		# get src sign
+	mov.b		DST_EX(%a1),%d0		# get dst sign
+	eor.b		%d0,%d1			# get qbyte sign
+	andi.b		&0x80,%d1
+	mov.b		%d1,FPSR_QBYTE(%a6)
+	cmpi.b		DTAG(%a6),&DENORM
+	bne.b		smod_nrm
+	lea		DST(%a1),%a0
+	mov.l		(%sp)+,%d0
+	bra		t_resdnrm
+smod_nrm:
+	fmov.l		(%sp)+,%fpcr
+	fmov.x		DST(%a1),%fp0
+	tst.b		DST_EX(%a1)
+	bmi.b		smod_nrm_neg
+	rts
+
+smod_nrm_neg:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' code
+	rts
+
+#########################################################################
+	global		srem_snorm
+	global		srem_sdnrm
+srem_sdnrm:
+srem_snorm:
+	mov.b		DTAG(%a6),%d1
+	beq.l		srem
+	cmpi.b		%d1,&ZERO
+	beq.w		srem_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		srem
+	bra.l		dst_qnan
+
+	global		srem_szero
+srem_szero:
+	mov.b		DTAG(%a6),%d1
+	beq.l		t_operr
+	cmpi.b		%d1,&ZERO
+	beq.l		t_operr
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		t_operr
+	bra.l		dst_qnan
+
+	global		srem_sinf
+srem_sinf:
+	mov.b		DTAG(%a6),%d1
+	beq.w		srem_fpn
+	cmpi.b		%d1,&ZERO
+	beq.w		srem_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		srem_fpn
+	bra.l		dst_qnan
+
+#########################################################################
+
+	global		sscale_snorm
+	global		sscale_sdnrm
+sscale_snorm:
+sscale_sdnrm:
+	mov.b		DTAG(%a6),%d1
+	beq.l		sscale
+	cmpi.b		%d1,&ZERO
+	beq.l		dst_zero
+	cmpi.b		%d1,&INF
+	beq.l		dst_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		sscale
+	bra.l		dst_qnan
+
+	global		sscale_szero
+sscale_szero:
+	mov.b		DTAG(%a6),%d1
+	beq.l		sscale
+	cmpi.b		%d1,&ZERO
+	beq.l		dst_zero
+	cmpi.b		%d1,&INF
+	beq.l		dst_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		sscale
+	bra.l		dst_qnan
+
+	global		sscale_sinf
+sscale_sinf:
+	mov.b		DTAG(%a6),%d1
+	beq.l		t_operr
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		t_operr
+
+########################################################################
+
+	global		sop_sqnan
+sop_sqnan:
+	mov.b		DTAG(%a6),%d1
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		src_qnan
+
+#########################################################################
+# norm(): normalize the mantissa of an extended precision input. the	#
+#	  input operand should not be normalized already.		#
+#									#
+# XDEF ****************************************************************	#
+#	norm()								#
+#									#
+# XREF **************************************************************** #
+#	none								#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer fp extended precision operand to normalize		#
+#									#
+# OUTPUT ************************************************************** #
+#	d0 = number of bit positions the mantissa was shifted		#
+#	a0 = the input operand's mantissa is normalized; the exponent	#
+#	     is unchanged.						#
+#									#
+#########################################################################
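+
+# For reference, the normalization below behaves like this C sketch
+# (illustrative only; {hi,lo} is the 64-bit mantissa, which must be
+# nonzero, and the return value is the shift count placed in d0):
+#
+#	#include <stdint.h>
+#	static int norm_sketch(uint32_t *hi, uint32_t *lo)
+#	{
+#		uint64_t man = ((uint64_t)*hi << 32) | *lo;
+#		int shift = 0;
+#		while (!(man & 0x8000000000000000ULL)) {  /* like bfffo */
+#			man <<= 1;
+#			shift++;
+#		}
+#		*hi = (uint32_t)(man >> 32);
+#		*lo = (uint32_t)man;
+#		return shift;
+#	}
+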
+	global		norm
+norm:
+	mov.l		%d2, -(%sp)		# create some temp regs
+	mov.l		%d3, -(%sp)
+
+	mov.l		FTEMP_HI(%a0), %d0	# load hi(mantissa)
+	mov.l		FTEMP_LO(%a0), %d1	# load lo(mantissa)
+
+	bfffo		%d0{&0:&32}, %d2	# how many places to shift?
+	beq.b		norm_lo			# hi(man) is all zeroes!
+
+norm_hi:
+	lsl.l		%d2, %d0		# left shift hi(man)
+	bfextu		%d1{&0:%d2}, %d3	# extract lo bits
+
+	or.l		%d3, %d0		# create hi(man)
+	lsl.l		%d2, %d1		# create lo(man)
+
+	mov.l		%d0, FTEMP_HI(%a0)	# store new hi(man)
+	mov.l		%d1, FTEMP_LO(%a0)	# store new lo(man)
+
+	mov.l		%d2, %d0		# return shift amount
+
+	mov.l		(%sp)+, %d3		# restore temp regs
+	mov.l		(%sp)+, %d2
+
+	rts
+
+norm_lo:
+	bfffo		%d1{&0:&32}, %d2	# how many places to shift?
+	lsl.l		%d2, %d1		# shift lo(man)
+	add.l		&32, %d2		# add 32 to shft amount
+
+	mov.l		%d1, FTEMP_HI(%a0)	# store hi(man)
+	clr.l		FTEMP_LO(%a0)		# lo(man) is now zero
+
+	mov.l		%d2, %d0		# return shift amount
+
+	mov.l		(%sp)+, %d3		# restore temp regs
+	mov.l		(%sp)+, %d2
+
+	rts
+
+#########################################################################
+# unnorm_fix(): - changes an UNNORM to one of NORM, DENORM, or ZERO	#
+#		- returns corresponding optype tag			#
+#									#
+# XDEF ****************************************************************	#
+#	unnorm_fix()							#
+#									#
+# XREF **************************************************************** #
+#	norm() - normalize the mantissa					#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to unnormalized extended precision number		#
+#									#
+# OUTPUT ************************************************************** #
+#	d0 = optype tag - is corrected to one of NORM, DENORM, or ZERO	#
+#	a0 = input operand has been converted to a norm, denorm, or	#
+#	     zero; both the exponent and mantissa are changed.		#
+#									#
+#########################################################################
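+
+# As an illustrative C sketch only (using the same stand-in tag enum as the
+# tag() sketch earlier; exp is the 16-bit sign+exponent word and {hi,lo}
+# the 64-bit mantissa), the conversion below does roughly:
+#
+#	#include <stdint.h>
+#	static int unnorm_fix_sketch(uint16_t *exp, uint32_t *hi, uint32_t *lo)
+#	{
+#		uint64_t man = ((uint64_t)*hi << 32) | *lo;
+#		int e = *exp & 0x7fff, need = 0;
+#
+#		if (man == 0) {			/* mantissa all zero: ZERO */
+#			*exp &= 0x8000;
+#			return ZERO;
+#		}
+#		while (!((man << need) & 0x8000000000000000ULL))
+#			need++;			/* shifts needed to normalize */
+#		if (need <= e) {		/* exponent stays >= 0: NORM */
+#			man <<= need;
+#			*exp = (*exp & 0x8000) | (uint16_t)(e - need);
+#		} else {			/* would go negative: DENORM */
+#			man <<= e;		/* denormalize only until exp = 0 */
+#			*exp &= 0x8000;
+#		}
+#		*hi = (uint32_t)(man >> 32);
+#		*lo = (uint32_t)man;
+#		return (need <= e) ? NORM : DENORM;
+#	}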
+
+	global		unnorm_fix
+unnorm_fix:
+	bfffo		FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
+	bne.b		unnorm_shift		# hi(man) is not all zeroes
+
+#
+# hi(man) is all zeroes so see if any bits in lo(man) are set
+#
+unnorm_chk_lo:
+	bfffo		FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
+	beq.w		unnorm_zero		# yes
+
+	add.w		&32, %d0		# no; fix shift distance
+
+#
+# d0 = # shifts needed for complete normalization
+#
+unnorm_shift:
+	clr.l		%d1			# clear top word
+	mov.w		FTEMP_EX(%a0), %d1	# extract exponent
+	and.w		&0x7fff, %d1		# strip off sgn
+
+	cmp.w		%d0, %d1		# will denorm push exp < 0?
+	bgt.b		unnorm_nrm_zero		# yes; denorm only until exp = 0
+
+#
+# exponent would not go < 0. therefore, number stays normalized
+#
+	sub.w		%d0, %d1		# shift exponent value
+	mov.w		FTEMP_EX(%a0), %d0	# load old exponent
+	and.w		&0x8000, %d0		# save old sign
+	or.w		%d0, %d1		# {sgn,new exp}
+	mov.w		%d1, FTEMP_EX(%a0)	# insert new exponent
+
+	bsr.l		norm			# normalize UNNORM
+
+	mov.b		&NORM, %d0		# return new optype tag
+	rts
+
+#
+# exponent would go < 0, so only denormalize until exp = 0
+#
+unnorm_nrm_zero:
+	cmp.b		%d1, &32		# is exp <= 32?
+	bgt.b		unnorm_nrm_zero_lrg	# no; go handle large exponent
+
+	bfextu		FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
+	mov.l		%d0, FTEMP_HI(%a0)	# save new hi(man)
+
+	mov.l		FTEMP_LO(%a0), %d0	# fetch old lo(man)
+	lsl.l		%d1, %d0		# extract new lo(man)
+	mov.l		%d0, FTEMP_LO(%a0)	# save new lo(man)
+
+	and.w		&0x8000, FTEMP_EX(%a0)	# set exp = 0
+
+	mov.b		&DENORM, %d0		# return new optype tag
+	rts
+
+#
+# only mantissa bits set are in lo(man)
+#
+unnorm_nrm_zero_lrg:
+	sub.w		&32, %d1		# adjust shft amt by 32
+
+	mov.l		FTEMP_LO(%a0), %d0	# fetch old lo(man)
+	lsl.l		%d1, %d0		# left shift lo(man)
+
+	mov.l		%d0, FTEMP_HI(%a0)	# store new hi(man)
+	clr.l		FTEMP_LO(%a0)		# lo(man) = 0
+
+	and.w		&0x8000, FTEMP_EX(%a0)	# set exp = 0
+
+	mov.b		&DENORM, %d0		# return new optype tag
+	rts
+
+#
+# whole mantissa is zero so this UNNORM is actually a zero
+#
+unnorm_zero:
+	and.w		&0x8000, FTEMP_EX(%a0)	# force exponent to zero
+
+	mov.b		&ZERO, %d0		# fix optype tag
+	rts
diff --git a/arch/m68k/ifpsp060/src/fpsp.S b/arch/m68k/ifpsp060/src/fpsp.S
new file mode 100644
index 0000000..3b597a9
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/fpsp.S
@@ -0,0 +1,24785 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# freal.s:
+#	This file is appended to the top of the 060FPSP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located
+# after _060FPSP_TABLE.
+#	Also, subroutine stubs exist in this file (_fpsp_done for
+# example) that are referenced by the FPSP package itself in order
+# to call a given routine. The stub routine actually performs the
+# callout. The FPSP code does a "bsr" to the stub routine. This
+# extra layer of hierarchy adds a slight performance penalty but
+# it makes the FPSP code easier to read and more maintainable.
+#
+
+set	_off_bsun,	0x00
+set	_off_snan,	0x04
+set	_off_operr,	0x08
+set	_off_ovfl,	0x0c
+set	_off_unfl,	0x10
+set	_off_dz,	0x14
+set	_off_inex,	0x18
+set	_off_fline,	0x1c
+set	_off_fpu_dis,	0x20
+set	_off_trap,	0x24
+set	_off_trace,	0x28
+set	_off_access,	0x2c
+set	_off_done,	0x30
+
+set	_off_imr,	0x40
+set	_off_dmr,	0x44
+set	_off_dmw,	0x48
+set	_off_irw,	0x4c
+set	_off_irl,	0x50
+set	_off_drb,	0x54
+set	_off_drw,	0x58
+set	_off_drl,	0x5c
+set	_off_dwb,	0x60
+set	_off_dww,	0x64
+set	_off_dwl,	0x68
+
+_060FPSP_TABLE:
+
+###############################################################
+
+# Here's the table of ENTRY POINTS for those linking the package.
+	bra.l		_fpsp_snan
+	short		0x0000
+	bra.l		_fpsp_operr
+	short		0x0000
+	bra.l		_fpsp_ovfl
+	short		0x0000
+	bra.l		_fpsp_unfl
+	short		0x0000
+	bra.l		_fpsp_dz
+	short		0x0000
+	bra.l		_fpsp_inex
+	short		0x0000
+	bra.l		_fpsp_fline
+	short		0x0000
+	bra.l		_fpsp_unsupp
+	short		0x0000
+	bra.l		_fpsp_effadd
+	short		0x0000
+
+	space		56
+
+###############################################################
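+
+# Each stub below performs a callout through the table that the host
+# operating system fills in immediately before _060FPSP_TABLE (hence the
+# -0x80 adjustments): the stub saves d0, fetches the 32-bit callout offset
+# from (_060FPSP_TABLE-0x80+_off_xxx), adds it to the same base with pea to
+# form the callout address, reloads the caller's d0 from 0x4(%sp), and then
+# "rtd &0x4" pops the pushed address into the PC while discarding the saved
+# d0 - so control reaches the callout with all registers intact.
+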
+	global		_fpsp_done
+_fpsp_done:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_done,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_ovfl
+_real_ovfl:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_ovfl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_unfl
+_real_unfl:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_unfl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_inex
+_real_inex:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_inex,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_bsun
+_real_bsun:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_bsun,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_operr
+_real_operr:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_operr,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_snan
+_real_snan:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_snan,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_dz
+_real_dz:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dz,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_fline
+_real_fline:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_fline,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_fpu_disabled
+_real_fpu_disabled:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_fpu_dis,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_trap
+_real_trap:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_trap,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_trace
+_real_trace:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_trace,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_access
+_real_access:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_access,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#######################################
+
+	global		_imem_read
+_imem_read:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_imr,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read
+_dmem_read:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dmr,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write
+_dmem_write:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dmw,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_imem_read_word
+_imem_read_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_irw,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_imem_read_long
+_imem_read_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_irl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_byte
+_dmem_read_byte:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_drb,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_word
+_dmem_read_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_drw,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_long
+_dmem_read_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_drl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_byte
+_dmem_write_byte:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dwb,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_word
+_dmem_write_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dww,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_long
+_dmem_write_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dwl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#
+# This file contains a set of define statements for constants
+# in order to promote readability within the corecode itself.
+#
+
+set LOCAL_SIZE,		192			# stack frame size(bytes)
+set LV,			-LOCAL_SIZE		# stack offset
+
+set EXC_SR,		0x4			# stack status register
+set EXC_PC,		0x6			# stack pc
+set EXC_VOFF,		0xa			# stacked vector offset
+set EXC_EA,		0xc			# stacked <ea>
+
+set EXC_FP,		0x0			# frame pointer
+
+set EXC_AREGS,		-68			# offset of all address regs
+set EXC_DREGS,		-100			# offset of all data regs
+set EXC_FPREGS,		-36			# offset of all fp regs
+
+set EXC_A7,		EXC_AREGS+(7*4)		# offset of saved a7
+set OLD_A7,		EXC_AREGS+(6*4)		# extra copy of saved a7
+set EXC_A6,		EXC_AREGS+(6*4)		# offset of saved a6
+set EXC_A5,		EXC_AREGS+(5*4)
+set EXC_A4,		EXC_AREGS+(4*4)
+set EXC_A3,		EXC_AREGS+(3*4)
+set EXC_A2,		EXC_AREGS+(2*4)
+set EXC_A1,		EXC_AREGS+(1*4)
+set EXC_A0,		EXC_AREGS+(0*4)
+set EXC_D7,		EXC_DREGS+(7*4)
+set EXC_D6,		EXC_DREGS+(6*4)
+set EXC_D5,		EXC_DREGS+(5*4)
+set EXC_D4,		EXC_DREGS+(4*4)
+set EXC_D3,		EXC_DREGS+(3*4)
+set EXC_D2,		EXC_DREGS+(2*4)
+set EXC_D1,		EXC_DREGS+(1*4)
+set EXC_D0,		EXC_DREGS+(0*4)
+
+set EXC_FP0,		EXC_FPREGS+(0*12)	# offset of saved fp0
+set EXC_FP1,		EXC_FPREGS+(1*12)	# offset of saved fp1
+set EXC_FP2,		EXC_FPREGS+(2*12)	# offset of saved fp2 (not used)
+
+set FP_SCR1,		LV+80			# fp scratch 1
+set FP_SCR1_EX,		FP_SCR1+0
+set FP_SCR1_SGN,	FP_SCR1+2
+set FP_SCR1_HI,		FP_SCR1+4
+set FP_SCR1_LO,		FP_SCR1+8
+
+set FP_SCR0,		LV+68			# fp scratch 0
+set FP_SCR0_EX,		FP_SCR0+0
+set FP_SCR0_SGN,	FP_SCR0+2
+set FP_SCR0_HI,		FP_SCR0+4
+set FP_SCR0_LO,		FP_SCR0+8
+
+set FP_DST,		LV+56			# fp destination operand
+set FP_DST_EX,		FP_DST+0
+set FP_DST_SGN,		FP_DST+2
+set FP_DST_HI,		FP_DST+4
+set FP_DST_LO,		FP_DST+8
+
+set FP_SRC,		LV+44			# fp source operand
+set FP_SRC_EX,		FP_SRC+0
+set FP_SRC_SGN,		FP_SRC+2
+set FP_SRC_HI,		FP_SRC+4
+set FP_SRC_LO,		FP_SRC+8
+
+set USER_FPIAR,		LV+40			# FP instr address register
+
+set USER_FPSR,		LV+36			# FP status register
+set FPSR_CC,		USER_FPSR+0		# FPSR condition codes
+set FPSR_QBYTE,		USER_FPSR+1		# FPSR quotient byte
+set FPSR_EXCEPT,	USER_FPSR+2		# FPSR exception status byte
+set FPSR_AEXCEPT,	USER_FPSR+3		# FPSR accrued exception byte
+
+set USER_FPCR,		LV+32			# FP control register
+set FPCR_ENABLE,	USER_FPCR+2		# FPCR exception enable
+set FPCR_MODE,		USER_FPCR+3		# FPCR rounding mode control
+
+set L_SCR3,		LV+28			# integer scratch 3
+set L_SCR2,		LV+24			# integer scratch 2
+set L_SCR1,		LV+20			# integer scratch 1
+
+set STORE_FLG,		LV+19			# flag: operand store (ie. not fcmp/ftst)
+
+set EXC_TEMP2,		LV+24			# temporary space
+set EXC_TEMP,		LV+16			# temporary space
+
+set DTAG,		LV+15			# destination operand type
+set STAG,		LV+14			# source operand type
+
+set SPCOND_FLG,		LV+10			# flag: special case (see below)
+
+set EXC_CC,		LV+8			# saved condition codes
+set EXC_EXTWPTR,	LV+4			# saved current PC (active)
+set EXC_EXTWORD,	LV+2			# saved extension word
+set EXC_CMDREG,		LV+2			# saved extension word
+set EXC_OPWORD,		LV+0			# saved operation word
+
+################################
+
+# Helpful macros
+
+set FTEMP,		0			# offsets within an
+set FTEMP_EX,		0			# extended precision
+set FTEMP_SGN,		2			# value saved in memory.
+set FTEMP_HI,		4
+set FTEMP_LO,		8
+set FTEMP_GRS,		12
+
+set LOCAL,		0			# offsets within an
+set LOCAL_EX,		0			# extended precision
+set LOCAL_SGN,		2			# value saved in memory.
+set LOCAL_HI,		4
+set LOCAL_LO,		8
+set LOCAL_GRS,		12
+
+set DST,		0			# offsets within an
+set DST_EX,		0			# extended precision
+set DST_HI,		4			# value saved in memory.
+set DST_LO,		8
+
+set SRC,		0			# offsets within an
+set SRC_EX,		0			# extended precision
+set SRC_HI,		4			# value saved in memory.
+set SRC_LO,		8
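+# As a quick sketch of the layout these offsets describe (one extended
+# precision operand staged in memory):
+#
+#	bytes  0-1 : _EX  - sign and 15-bit biased exponent
+#	bytes  2-3 : _SGN - scratch word the package uses to hold the sign
+#	bytes  4-7 : _HI  - upper longword of the mantissa (j-bit in bit 31)
+#	bytes  8-11: _LO  - lower longword of the mantissa
+#	bytes 12-15: _GRS - guard/round/sticky longword (FTEMP/LOCAL only)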
+
+set SGL_LO,		0x3f81			# min sgl prec exponent
+set SGL_HI,		0x407e			# max sgl prec exponent
+set DBL_LO,		0x3c01			# min dbl prec exponent
+set DBL_HI,		0x43fe			# max dbl prec exponent
+set EXT_LO,		0x0			# min ext prec exponent
+set EXT_HI,		0x7ffe			# max ext prec exponent
+
+set EXT_BIAS,		0x3fff			# extended precision bias
+set SGL_BIAS,		0x007f			# single precision bias
+set DBL_BIAS,		0x03ff			# double precision bias
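+# As a worked check of the values above: re-biasing the IEEE single precision
+# exponent range (-126..+127) into the extended precision field gives
+#	SGL_LO = 0x3fff - 126  = 0x3f81,   SGL_HI = 0x3fff + 127  = 0x407e
+# and the double precision range (-1022..+1023) gives
+#	DBL_LO = 0x3fff - 1022 = 0x3c01,   DBL_HI = 0x3fff + 1023 = 0x43fe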
+
+set NORM,		0x00			# operand type for STAG/DTAG
+set ZERO,		0x01			# operand type for STAG/DTAG
+set INF,		0x02			# operand type for STAG/DTAG
+set QNAN,		0x03			# operand type for STAG/DTAG
+set DENORM,		0x04			# operand type for STAG/DTAG
+set SNAN,		0x05			# operand type for STAG/DTAG
+set UNNORM,		0x06			# operand type for STAG/DTAG
+
+##################
+# FPSR/FPCR bits #
+##################
+set neg_bit,		0x3			# negative result
+set z_bit,		0x2			# zero result
+set inf_bit,		0x1			# infinite result
+set nan_bit,		0x0			# NAN result
+
+set q_sn_bit,		0x7			# sign bit of quotient byte
+
+set bsun_bit,		7			# branch on unordered
+set snan_bit,		6			# signalling NAN
+set operr_bit,		5			# operand error
+set ovfl_bit,		4			# overflow
+set unfl_bit,		3			# underflow
+set dz_bit,		2			# divide by zero
+set inex2_bit,		1			# inexact result 2
+set inex1_bit,		0			# inexact result 1
+
+set aiop_bit,		7			# accrued invalid operation bit
+set aovfl_bit,		6			# accrued overflow bit
+set aunfl_bit,		5			# accrued underflow bit
+set adz_bit,		4			# accrued dz bit
+set ainex_bit,		3			# accrued inexact bit
+
+#############################
+# FPSR individual bit masks #
+#############################
+set neg_mask,		0x08000000		# negative bit mask (lw)
+set inf_mask,		0x02000000		# infinity bit mask (lw)
+set z_mask,		0x04000000		# zero bit mask (lw)
+set nan_mask,		0x01000000		# nan bit mask (lw)
+
+set neg_bmask,		0x08			# negative bit mask (byte)
+set inf_bmask,		0x02			# infinity bit mask (byte)
+set z_bmask,		0x04			# zero bit mask (byte)
+set nan_bmask,		0x01			# nan bit mask (byte)
+
+set bsun_mask,		0x00008000		# bsun exception mask
+set snan_mask,		0x00004000		# snan exception mask
+set operr_mask,		0x00002000		# operr exception mask
+set ovfl_mask,		0x00001000		# overflow exception mask
+set unfl_mask,		0x00000800		# underflow exception mask
+set dz_mask,		0x00000400		# dz exception mask
+set inex2_mask,		0x00000200		# inex2 exception mask
+set inex1_mask,		0x00000100		# inex1 exception mask
+
+set aiop_mask,		0x00000080		# accrued invalid operation
+set aovfl_mask,		0x00000040		# accrued overflow
+set aunfl_mask,		0x00000020		# accrued underflow
+set adz_mask,		0x00000010		# accrued divide by zero
+set ainex_mask,		0x00000008		# accrued inexact
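+# A short worked example: USER_FPSR is kept as a longword, so each byte mask
+# above is just the longword mask seen through a byte access to its field,
+# e.g.
+#	neg_mask  = 0x08000000 -> bit 27 of the FPSR longword (N)
+#	neg_bmask = 0x08       -> the same bit accessed via FPSR_CC
+# and bsun_mask = 0x00008000 is bit 15, i.e. bit 7 of FPSR_EXCEPT.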
+
+######################################
+# FPSR combinations used in the FPSP #
+######################################
+set dzinf_mask,		inf_mask+dz_mask+adz_mask
+set opnan_mask,		nan_mask+operr_mask+aiop_mask
+set nzi_mask,		0x01ffffff		# clears N, Z, and I
+set unfinx_mask,	unfl_mask+inex2_mask+aunfl_mask+ainex_mask
+set unf2inx_mask,	unfl_mask+inex2_mask+ainex_mask
+set ovfinx_mask,	ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
+set inx1a_mask,		inex1_mask+ainex_mask
+set inx2a_mask,		inex2_mask+ainex_mask
+set snaniop_mask,	nan_mask+snan_mask+aiop_mask
+set snaniop2_mask,	snan_mask+aiop_mask
+set naniop_mask,	nan_mask+aiop_mask
+set neginf_mask,	neg_mask+inf_mask
+set infaiop_mask,	inf_mask+aiop_mask
+set negz_mask,		neg_mask+z_mask
+set opaop_mask,		operr_mask+aiop_mask
+set unfl_inx_mask,	unfl_mask+aunfl_mask+ainex_mask
+set ovfl_inx_mask,	ovfl_mask+aovfl_mask+ainex_mask
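+# For example, working out the first combination from the masks above:
+#	dzinf_mask = 0x02000000 + 0x00000400 + 0x00000010 = 0x02000410
+# so a single OR into USER_FPSR sets the I condition code, the DZ exception
+# bit and the accrued-DZ bit at once.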
+
+#########
+# misc. #
+#########
+set rnd_stky_bit,	29			# stky bit pos in longword
+
+set sign_bit,		0x7			# sign bit
+set signan_bit,		0x6			# signalling nan bit
+
+set sgl_thresh,		0x3f81			# minimum sgl exponent
+set dbl_thresh,		0x3c01			# minimum dbl exponent
+
+set x_mode,		0x0			# extended precision
+set s_mode,		0x4			# single precision
+set d_mode,		0x8			# double precision
+
+set rn_mode,		0x0			# round-to-nearest
+set rz_mode,		0x1			# round-to-zero
+set rm_mode,		0x2			# round-to-minus-infinity
+set rp_mode,		0x3			# round-to-plus-infinity
+
+set mantissalen,	64			# length of mantissa in bits
+
+set BYTE,		1			# len(byte) == 1 byte
+set WORD,		2			# len(word) == 2 bytes
+set LONG,		4			# len(longword) == 4 bytes
+
+set BSUN_VEC,		0xc0			# bsun    vector offset
+set INEX_VEC,		0xc4			# inexact vector offset
+set DZ_VEC,		0xc8			# dz      vector offset
+set UNFL_VEC,		0xcc			# unfl    vector offset
+set OPERR_VEC,		0xd0			# operr   vector offset
+set OVFL_VEC,		0xd4			# ovfl    vector offset
+set SNAN_VEC,		0xd8			# snan    vector offset
+
+###########################
+# SPecial CONDition FLaGs #
+###########################
+set ftrapcc_flg,	0x01			# flag bit: ftrapcc exception
+set fbsun_flg,		0x02			# flag bit: bsun exception
+set mia7_flg,		0x04			# flag bit: (a7)+ <ea>
+set mda7_flg,		0x08			# flag bit: -(a7) <ea>
+set fmovm_flg,		0x40			# flag bit: fmovm instruction
+set immed_flg,		0x80			# flag bit: &<data> <ea>
+
+set ftrapcc_bit,	0x0
+set fbsun_bit,		0x1
+set mia7_bit,		0x2
+set mda7_bit,		0x3
+set immed_bit,		0x7
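+# Note: each *_flg value with a *_bit counterpart above is simply 1 shifted
+# left by that bit number (e.g. mda7_flg = 0x08 = 1 << mda7_bit), so the
+# special condition can be tested either with cmpi.b against the whole flag
+# byte or with btst on the individual bit, as the code below does.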
+
+##################################
+# TRANSCENDENTAL "LAST-OP" FLAGS #
+##################################
+set FMUL_OP,		0x0			# fmul instr performed last
+set FDIV_OP,		0x1			# fdiv performed last
+set FADD_OP,		0x2			# fadd performed last
+set FMOV_OP,		0x3			# fmov performed last
+
+#############
+# CONSTANTS #
+#############
+T1:	long		0x40C62D38,0xD3D64634	# 16381 LOG2 LEAD
+T2:	long		0x3D6F90AE,0xB1E75CC7	# 16381 LOG2 TRAIL
+
+PI:	long		0x40000000,0xC90FDAA2,0x2168C235,0x00000000
+PIBY2:	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+TWOBYPI:
+	long		0x3FE45F30,0x6DC9C883
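+# Reading these constants (assuming the standard 68k extended and double
+# formats): PI and PIBY2 are extended precision values - a sign/exponent
+# word, a pad word, then a 64-bit mantissa with an explicit integer bit.
+# PIBY2 has exponent 0x3fff (the bias), so it encodes
+# 0xC90FDAA22168C235 / 2^63 ~= 1.570796..., i.e. pi/2; PI uses exponent
+# 0x4000 and so encodes twice that.  TWOBYPI is a double precision
+# constant ~= 0.636619..., i.e. 2/pi.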
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_ovfl(): 060FPSP entry point for FP Overflow exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Overflow exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_fpsp_done() - "callout" for 060FPSP exit (all work done!)	#
+#	_real_ovfl() - "callout" for Overflow exception enabled code	#
+#	_real_inex() - "callout" for Inexact exception enabled code	#
+#	_real_trace() - "callout" for Trace exception code		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Ovfl exception stack frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	Overflow Exception enabled:					#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#	Overflow Exception disabled:					#
+#	- The system stack is unchanged					#
+#	- The "exception present" flag in the fsave frame is cleared	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	On the 060, if an FP overflow is present as the result of any	#
+# instruction, the 060 will take an overflow exception whether the	#
+# exception is enabled or disabled in the FPCR. For the disabled case,	#
+# this handler emulates the instruction to determine what the correct	#
+# default result should be for the operation. This default result is	#
+# then stored in either the FP regfile, data regfile, or memory.	#
+# Finally, the handler exits through the "callout" _fpsp_done()		#
+# denoting that no exceptional conditions exist within the machine.	#
+#	If the exception is enabled, then this handler must create the	#
+# exceptional operand and place it in the fsave state frame, and store	#
+# the default result (only if the instruction is opclass 3). For	#
+# exceptions enabled, this handler must exit through the "callout"	#
+# _real_ovfl() so that the operating system enabled overflow handler	#
+# can handle this case.							#
+#	Two other conditions exist. First, if overflow was disabled	#
+# but the inexact exception was enabled, this handler must exit		#
+# through the "callout" _real_inex() regardless of whether the result	#
+# was inexact.								#
+#	Also, in the case of an opclass three instruction where		#
+# overflow was disabled and the trace exception was enabled, this	#
+# handler must exit through the "callout" _real_trace().		#
+#									#
+#########################################################################
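+
+# A condensed reading of the algorithm above, in the same C-like pseudocode
+# style used further down in this file:
+#
+#	emulate the instruction and store the default result;
+#	if (ovfl_enabled) {
+#	    put the exceptional operand (EXOP) into the fsave frame;
+#	    exit through _real_ovfl();
+#	} else if (inex_enabled) {
+#	    exit through _real_inex();	/* even if the result was exact */
+#	} else if (opclass3 && trace_enabled) {
+#	    exit through _real_trace();
+#	} else {
+#	    exit through _fpsp_done();
+#	}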
+
+	global		_fpsp_ovfl
+_fpsp_ovfl:
+
+#$#	sub.l		&24,%sp			# make room for src/dst
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&0x5,EXC_CMDREG(%a6)	# is instr an fmove out?
+	bne.w		fovfl_out
+
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+# since, I believe, only NORMs and DENORMs can come through here,
+# maybe we can avoid the subroutine call.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# maybe NORM,DENORM
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# that can pass through fpsp_ovfl(). remember that fcmp, ftst, and fsincos
+# will never take this exception.
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		fovfl_extract		# monadic
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fovfl_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+fovfl_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+fovfl_extract:
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+#$#	mov.l		FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
+#$#	mov.l		FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
+#$#	mov.l		FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	andi.l		&0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+# maybe we can make these entry points ONLY the OVFL entry points of each routine.
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+# the operation has been emulated. the result is in fp0.
+# the EXOP, if an exception occurred, is in fp1.
+# we must save the default result regardless of whether
+# traps are enabled or disabled.
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		store_fpreg
+
+# the exceptional possibilities we have left ourselves with are ONLY overflow
+# and inexact. and, the inexact is such that overflow occurred and was disabled
+# but inexact was enabled.
+	btst		&ovfl_bit,FPCR_ENABLE(%a6)
+	bne.b		fovfl_ovfl_on
+
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.b		fovfl_inex_on
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+	bra.l		_fpsp_done
+
+# overflow is enabled AND overflow, of course, occurred. so, we have the EXOP
+# in fp1. now, simply jump to _real_ovfl()!
+fovfl_ovfl_on:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP (fp1) to stack
+
+	mov.w		&0xe005,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_ovfl
+
+# overflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
+# we must jump to real_inex().
+fovfl_inex_on:
+
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP (fp1) to stack
+
+	mov.b		&0xc4,1+EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_inex
+
+########################################################################
+fovfl_out:
+
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+
+# the src operand is definitely a NORM(!), so tag it as such
+	mov.b		&NORM,STAG(%a6)		# set src optype tag
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	bsr.l		fout
+
+	btst		&ovfl_bit,FPCR_ENABLE(%a6)
+	bne.w		fovfl_ovfl_on
+
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.w		fovfl_inex_on
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	beq.l		_fpsp_done		# no
+
+	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
+	bra.l		_real_trace
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_unfl(): 060FPSP entry point for FP Underflow exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Underflow exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_fpsp_done() - "callout" for 060FPSP exit (all work done!)	#
+#	_real_unfl() - "callout" for Underflow exception enabled code	#
+#	_real_inex() - "callout" for Inexact exception enabled code	#
+#	_real_trace() - "callout" for Trace exception code		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Unfl exception stack frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	Underflow Exception enabled:					#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#	Underflow Exception disabled:					#
+#	- The system stack is unchanged					#
+#	- The "exception present" flag in the fsave frame is cleared	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	On the 060, if an FP underflow is present as the result of any	#
+# instruction, the 060 will take an underflow exception whether the	#
+# exception is enabled or disabled in the FPCR. For the disabled case,	#
+# this handler emulates the instruction to determine what the correct	#
+# default result should be for the operation. This default result is	#
+# then stored in either the FP regfile, data regfile, or memory.	#
+# Finally, the handler exits through the "callout" _fpsp_done()		#
+# denoting that no exceptional conditions exist within the machine.	#
+#	If the exception is enabled, then this handler must create the	#
+# exceptional operand and place it in the fsave state frame, and store	#
+# the default result (only if the instruction is opclass 3). For	#
+# exceptions enabled, this handler must exit through the "callout"	#
+# _real_unfl() so that the operating system enabled underflow handler	#
+# can handle this case.							#
+#	Two other conditions exist. First, if underflow was disabled	#
+# but the inexact exception was enabled and the result was inexact,	#
+# this handler must exit through the "callout" _real_inex().		#
+#	Also, in the case of an opclass three instruction where		#
+# underflow was disabled and the trace exception was enabled, this	#
+# handler must exit through the "callout" _real_trace().		#
+#									#
+#########################################################################
+
+	global		_fpsp_unfl
+_fpsp_unfl:
+
+#$#	sub.l		&24,%sp			# make room for src/dst
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&0x5,EXC_CMDREG(%a6)	# is instr an fmove out?
+	bne.w		funfl_out
+
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# maybe NORM,DENORM
+
+# bit five of the fp ext word separates the monadic and dyadic operations
+# that can pass through fpsp_unfl(). remember that fcmp, and ftst
+# will never take this exception.
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is op monadic or dyadic?
+	beq.b		funfl_extract		# monadic
+
+# now, what's left that's not dyadic is fsincos. we can distinguish it
+# from all dyadics by the '0110xxx pattern
+	btst		&0x4,1+EXC_CMDREG(%a6)	# is op an fsincos?
+	bne.b		funfl_extract		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		funfl_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+funfl_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+funfl_extract:
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+#$#	mov.l		FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
+#$#	mov.l		FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
+#$#	mov.l		FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	andi.l		&0x00ff01ff,USER_FPSR(%a6)
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+# maybe we can make these entry points ONLY the UNFL entry points of each routine.
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		store_fpreg
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception. Since this is incorrect, we need to check
+# if our emulation, after re-doing the operation, decided that
+# no underflow was called for. We do these checks only in
+# funfl_{unfl,inex}_on() because w/ both exceptions disabled, this
+# special case will simply exit gracefully with the correct result.
+
+# the exceptional possibilities we have left ourselves with are ONLY underflow
+# and inexact. and, the inexact is such that underflow occurred and was disabled
+# but inexact was enabled.
+	btst		&unfl_bit,FPCR_ENABLE(%a6)
+	bne.b		funfl_unfl_on
+
+funfl_chkinex:
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.b		funfl_inex_on
+
+funfl_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+	bra.l		_fpsp_done
+
+# underflow is enabled AND underflow, of course, occurred. so, we have the EXOP
+# in fp1 (don't forget to save fp0). what to do now?
+# well, we simply have to go to _real_unfl()!
+funfl_unfl_on:
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception. Since this is incorrect, we check here to see
+# if our emulation, after re-doing the operation, decided that
+# no underflow was called for.
+	btst		&unfl_bit,FPSR_EXCEPT(%a6)
+	beq.w		funfl_chkinex
+
+funfl_unfl_on2:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP (fp1) to stack
+
+	mov.w		&0xe003,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_unfl
+
+# underflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
+# we must jump to real_inex().
+funfl_inex_on:
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception.
+# But, whether bogus or not, if inexact is enabled AND it occurred,
+# then we have to branch to real_inex.
+
+	btst		&inex2_bit,FPSR_EXCEPT(%a6)
+	beq.w		funfl_exit
+
+funfl_inex_on2:
+
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to stack
+
+	mov.b		&0xc4,1+EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_inex
+
+#######################################################################
+funfl_out:
+
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+
+# the src operand is definitely a NORM(!), so tag it as such
+	mov.b		&NORM,STAG(%a6)		# set src optype tag
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	bsr.l		fout
+
+	btst		&unfl_bit,FPCR_ENABLE(%a6)
+	bne.w		funfl_unfl_on2
+
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.w		funfl_inex_on2
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	beq.l		_fpsp_done		# no
+
+	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
+	bra.l		_real_trace
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_unsupp(): 060FPSP entry point for FP "Unimplemented	#
+#		        Data Type" exception.				#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Unimplemented Data Type exception in an operating system.	#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_{word,long}() - read instruction word/longword	#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	load_fpn1() - load src operand from FP regfile			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_real_inex() - "callout" to operating system inexact handler	#
+#	_fpsp_done() - "callout" for exit; work all done		#
+#	_real_trace() - "callout" for Trace enabled exception		#
+#	funimp_skew() - adjust fsave src ops to "incorrect" value	#
+#	_real_snan() - "callout" for SNAN exception			#
+#	_real_operr() - "callout" for OPERR exception			#
+#	_real_ovfl() - "callout" for OVFL exception			#
+#	_real_unfl() - "callout" for UNFL exception			#
+#	get_packed() - fetch packed operand from memory			#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the "Unimp Data Type" stk frame	#
+#	- The fsave frame contains the src op (for UNNORM/DENORM)	#
+#									#
+# OUTPUT **************************************************************	#
+#	If Inexact exception (opclass 3):				#
+#	- The system stack is changed to an Inexact exception stk frame	#
+#	If SNAN exception (opclass 3):					#
+#	- The system stack is changed to an SNAN exception stk frame	#
+#	If OPERR exception (opclass 3):					#
+#	- The system stack is changed to an OPERR exception stk frame	#
+#	If OVFL exception (opclass 3):					#
+#	- The system stack is changed to an OVFL exception stk frame	#
+#	If UNFL exception (opclass 3):					#
+#	- The system stack is changed to an UNFL exception stack frame	#
+#	If Trace exception enabled:					#
+#	- The system stack is changed to a Trace exception stack frame	#
+#	Else: (normal case)						#
+#	- Correct result has been stored as appropriate			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Two main instruction types can enter here: (1) DENORM or UNNORM	#
+# unimplemented data types. These can be either opclass 0,2 or 3	#
+# instructions, and (2) PACKED unimplemented data format instructions	#
+# also of opclasses 0,2, or 3.						#
+#	For UNNORM/DENORM opclass 0 and 2, the handler fetches the src	#
+# operand from the fsave state frame and the dst operand (if dyadic)	#
+# from the FP register file. The instruction is then emulated by	#
+# choosing an emulation routine from a table of routines indexed by	#
+# instruction type. Once the instruction has been emulated and result	#
+# saved, then we check to see if any enabled exceptions resulted from	#
+# instruction emulation. If none, then we exit through the "callout"	#
+# _fpsp_done(). If there is an enabled FP exception, then we insert	#
+# this exception into the FPU in the fsave state frame and then exit	#
+# through _fpsp_done().							#
+#	PACKED opclass 0 and 2 is similar in how the instruction is	#
+# emulated and exceptions handled. The differences occur in how the	#
+# handler loads the packed op (by calling get_packed() routine) and	#
+# by the fact that a Trace exception could be pending for PACKED ops.	#
+# If a Trace exception is pending, then the current exception stack	#
+# frame is changed to a Trace exception stack frame and an exit is	#
+# made through _real_trace().						#
+#	For UNNORM/DENORM opclass 3, the actual move out to memory is	#
+# performed by calling the routine fout(). If no exception should occur	#
+# as the result of emulation, then an exit either occurs through	#
+# _fpsp_done() or through _real_trace() if a Trace exception is pending	#
+# (a Trace stack frame must be created here, too). If an FP exception	#
+# should occur, then we must create an exception stack frame of that	#
+# type and jump to either _real_snan(), _real_operr(), _real_inex(),	#
+# _real_unfl(), or _real_ovfl() as appropriate. PACKED opclass 3	#
+# emulation is performed in a similar manner.				#
+#									#
+#########################################################################
+
+#
+# (1) DENORM and UNNORM (unimplemented) data types:
+#
+#				post-instruction
+#				*****************
+#				*      EA	*
+#	 pre-instruction	*		*
+#	*****************	*****************
+#	* 0x0 *  0x0dc  *	* 0x3 *  0x0dc  *
+#	*****************	*****************
+#	*     Next	*	*     Next	*
+#	*      PC	*	*      PC	*
+#	*****************	*****************
+#	*      SR	*	*      SR	*
+#	*****************	*****************
+#
+# (2) PACKED format (unsupported) opclasses two and three:
+#	*****************
+#	*      EA	*
+#	*		*
+#	*****************
+#	* 0x2 *  0x0dc	*
+#	*****************
+#	*     Next	*
+#	*      PC	*
+#	*****************
+#	*      SR	*
+#	*****************
+#
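+# A condensed reading of the algorithm above, in C-like pseudocode:
+#
+#	if (opclass 3)			/* fmove out */
+#	    goto fu_out;		/* fout() emulates the store   */
+#	else if (packed opclass 0,2)
+#	    goto fu_in_pack;		/* get_packed() fetches src    */
+#	else {				/* UNNORM/DENORM opclass 0,2   */
+#	    fix and tag src (and dst if dyadic);
+#	    emulate through tbl_unsupp;
+#	    if (no enabled exception resulted)
+#		store the result and exit through _fpsp_done();
+#	    else
+#		insert the exception into the fsave frame or take the callout;
+#	}
+#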
+	global		_fpsp_unsupp
+_fpsp_unsupp:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# save fp state
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode?
+	bne.b		fu_s
+fu_u:
+	mov.l		%usp,%a0		# fetch user stack pointer
+	mov.l		%a0,EXC_A7(%a6)		# save on stack
+	bra.b		fu_cont
+# if the exception is an opclass zero or two unimplemented data type
+# exception, then the a7' calculated here is wrong since it doesn't
+# stack an ea. however, we don't need an a7' for this case anyways.
+fu_s:
+	lea		0x4+EXC_EA(%a6),%a0	# load old a7'
+	mov.l		%a0,EXC_A7(%a6)		# save on stack
+
+fu_cont:
+
+# the FPIAR holds the "current PC" of the faulting instruction
+# the FPIAR should be set correctly for ALL exceptions passing through
+# this point.
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)	# store OPWORD and EXTWORD
+
+############################
+
+	clr.b		SPCOND_FLG(%a6)		# clear special condition flag
+
+# Separate opclass three (fpn-to-mem) ops since they have a different
+# stack frame and protocol.
+	btst		&0x5,EXC_CMDREG(%a6)	# is it an fmove out?
+	bne.w		fu_out			# yes
+
+# Separate packed opclass two instructions.
+	bfextu		EXC_CMDREG(%a6){&0:&6},%d0
+	cmpi.b		%d0,&0x13
+	beq.w		fu_in_pack
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field
+	andi.l		&0x00ff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+# Opclass two w/ memory-to-fpn operation will have an incorrect extended
+# precision format if the src format was single or double and the
+# source data type was an INF, NAN, DENORM, or UNNORM
+	lea		FP_SRC(%a6),%a0		# pass ptr to input
+	bsr.l		fix_skewed_ops
+
+# we don't know whether the src operand or the dst operand (or both) is the
+# UNNORM or DENORM. call the function that tags the operand type. if the
+# input is an UNNORM, then convert it to a NORM, DENORM, or ZERO.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2			# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+
+fu_op2:
+	mov.b		%d0,STAG(%a6)		# save src optype tag
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# at this point
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		fu_extract		# monadic
+	cmpi.b		1+EXC_CMDREG(%a6),&0x3a	# is operation an ftst?
+	beq.b		fu_extract		# yes, so it's monadic, too
+
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+fu_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+fu_extract:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	bfextu		1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: all dyadic ops
+#	OPERR	: fsqrt(-NORM)
+#	OVFL	: all except ftst,fcmp
+#	UNFL	: all except ftst,fcmp
+#	DZ	: fdiv
+#	INEX2	: all except ftst,fcmp
+#	INEX1	: none (packed doesn't go through here)
+#
+
+# we determine the highest priority exception(if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions set
+	bne.b		fu_in_ena		# some are enabled
+
+fu_in_cont:
+# fcmp and ftst do not store any result.
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch extension
+	andi.b		&0x38,%d0		# extract bits 3-5
+	cmpi.b		%d0,&0x38		# is instr fcmp or ftst?
+	beq.b		fu_in_exit		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		store_fpreg		# store the result
+
+fu_in_exit:
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	bra.l		_fpsp_done
+
+fu_in_ena:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		fu_in_exc		# there is at least one set
+
+#
+# No exceptions occurred that were also enabled. Now:
+#
+#	if (OVFL && ovfl_disabled && inexact_enabled) {
+#	    branch to _real_inex() (even if the result was exact!);
+#	} else {
+#	    save the result in the proper fp reg (unless the op is fcmp or ftst);
+#	    return;
+#	}
+#
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+	beq.b		fu_in_cont		# no
+
+fu_in_ovflchk:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+	beq.b		fu_in_cont		# no
+	bra.w		fu_in_exc_ovfl		# go insert overflow frame
+
+#
+# An exception occurred and that exception was enabled:
+#
+#	shift enabled exception field into lo byte of d0;
+#	if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
+#	    ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
+#		/*
+#		 * this is the case where we must call _real_inex() now or else
+#		 * there will be no other way to pass it the exceptional operand
+#		 */
+#		call _real_inex();
+#	} else {
+#		restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
+#	}
+#
+fu_in_exc:
+	subi.l		&24,%d0			# fix offset to be 0-8
+	cmpi.b		%d0,&0x6		# is exception INEX? (6)
+	bne.b		fu_in_exc_exit		# no
+
+# the enabled exception was inexact
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
+	bne.w		fu_in_exc_unfl		# yes
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
+	bne.w		fu_in_exc_ovfl		# yes
+
+# here, we insert the correct fsave status value into the fsave frame for the
+# corresponding exception. the operand in the fsave frame should be the original
+# src operand.
+fu_in_exc_exit:
+	mov.l		%d0,-(%sp)		# save d0
+	bsr.l		funimp_skew		# skew sgl or dbl inputs
+	mov.l		(%sp)+,%d0		# restore d0
+
+	mov.w		(tbl_except.b,%pc,%d0.w*2),2+FP_SRC(%a6) # create exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore src op
+
+	unlk		%a6
+
+	bra.l		_fpsp_done
+
+tbl_except:
+	short		0xe000,0xe006,0xe004,0xe005
+	short		0xe003,0xe002,0xe001,0xe001
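+# (The index into tbl_except is the bfffo result minus 24, so entries 0-7
+# correspond to BSUN, SNAN, OPERR, OVFL, UNFL, DZ, INEX2, INEX1; the 0xe0xx
+# words are the fsave status values written into the frame, matching the
+# ones used for the same exceptions elsewhere in this file.)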
+
+fu_in_exc_unfl:
+	mov.w		&0x4,%d0
+	bra.b		fu_in_exc_exit
+fu_in_exc_ovfl:
+	mov.w		&0x03,%d0
+	bra.b		fu_in_exc_exit
+
+# If the input operand to this operation was opclass two and a single
+# or double precision denorm, inf, or nan, the operand needs to be
+# "corrected" in order to have the proper equivalent extended precision
+# number.
+	global		fix_skewed_ops
+fix_skewed_ops:
+	bfextu		EXC_CMDREG(%a6){&0:&6},%d0 # extract opclass,src fmt
+	cmpi.b		%d0,&0x11		# is class = 2 & fmt = sgl?
+	beq.b		fso_sgl			# yes
+	cmpi.b		%d0,&0x15		# is class = 2 & fmt = dbl?
+	beq.b		fso_dbl			# yes
+	rts					# no
+
+fso_sgl:
+	mov.w		LOCAL_EX(%a0),%d0	# fetch src exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	cmpi.w		%d0,&0x3f80		# is |exp| == $3f80?
+	beq.b		fso_sgl_dnrm_zero	# yes
+	cmpi.w		%d0,&0x407f		# no; is |exp| == $407f?
+	beq.b		fso_infnan		# yes
+	rts					# no
+
+fso_sgl_dnrm_zero:
+	andi.l		&0x7fffffff,LOCAL_HI(%a0) # clear j-bit
+	beq.b		fso_zero		# it's a skewed zero
+fso_sgl_dnrm:
+# here, we count on norm not to alter a0...
+	bsr.l		norm			# normalize mantissa
+	neg.w		%d0			# -shft amt
+	addi.w		&0x3f81,%d0		# adjust new exponent
+	andi.w		&0x8000,LOCAL_EX(%a0)	# clear old exponent
+	or.w		%d0,LOCAL_EX(%a0)	# insert new exponent
+	rts
+
+fso_zero:
+	andi.w		&0x8000,LOCAL_EX(%a0)	# clear bogus exponent
+	rts
+
+fso_infnan:
+	andi.b		&0x7f,LOCAL_HI(%a0)	# clear j-bit
+	ori.w		&0x7fff,LOCAL_EX(%a0)	# make exponent = $7fff
+	rts
+
+fso_dbl:
+	mov.w		LOCAL_EX(%a0),%d0	# fetch src exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	cmpi.w		%d0,&0x3c00		# is |exp| == $3c00?
+	beq.b		fso_dbl_dnrm_zero	# yes
+	cmpi.w		%d0,&0x43ff		# no; is |exp| == $43ff?
+	beq.b		fso_infnan		# yes
+	rts					# no
+
+fso_dbl_dnrm_zero:
+	andi.l		&0x7fffffff,LOCAL_HI(%a0) # clear j-bit
+	bne.b		fso_dbl_dnrm		# it's a skewed denorm
+	tst.l		LOCAL_LO(%a0)		# is it a zero?
+	beq.b		fso_zero		# yes
+fso_dbl_dnrm:
+# here, we count on norm not to alter a0...
+	bsr.l		norm			# normalize mantissa
+	neg.w		%d0			# -shft amt
+	addi.w		&0x3c01,%d0		# adjust new exponent
+	andi.w		&0x8000,LOCAL_EX(%a0)	# clear old exponent
+	or.w		%d0,LOCAL_EX(%a0)	# insert new exponent
+	rts
+
+#################################################################
+
+# fmove out took an unimplemented data type exception.
+# the src operand is in FP_SRC. Call fout() to write out the result and
+# to determine which exceptions, if any, to take.
+fu_out:
+
+# Separate packed move outs from the UNNORM and DENORM move outs.
+	bfextu		EXC_CMDREG(%a6){&3:&3},%d0
+	cmpi.b		%d0,&0x3
+	beq.w		fu_out_pack
+	cmpi.b		%d0,&0x7
+	beq.w		fu_out_pack
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field.
+# fmove out doesn't affect ccodes.
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+# the src can ONLY be a DENORM or an UNNORM! so, don't make any big subroutine
+# call here. just figure out what it is...
+	mov.w		FP_SRC_EX(%a6),%d0	# get exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	beq.b		fu_out_denorm		# it's a DENORM
+
+	lea		FP_SRC(%a6),%a0
+	bsr.l		unnorm_fix		# yes; fix it
+
+	mov.b		%d0,STAG(%a6)
+
+	bra.b		fu_out_cont
+fu_out_denorm:
+	mov.b		&DENORM,STAG(%a6)
+fu_out_cont:
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	mov.l		(%a6),EXC_A6(%a6)	# in case a6 changes
+	bsr.l		fout			# call fmove out routine
+
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: none
+#	OPERR	: fmove.{b,w,l} out of large UNNORM
+#	OVFL	: fmove.{s,d}
+#	UNFL	: fmove.{s,d,x}
+#	DZ	: none
+#	INEX2	: all
+#	INEX1	: none (packed doesn't travel through here)
+
+# determine the highest priority exception(if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		fu_out_ena		# some are enabled
+
+fu_out_done:
+
+	mov.l		EXC_A6(%a6),(%a6)	# in case a6 changed
+
+# on extended precision opclass three instructions using pre-decrement or
+# post-increment addressing mode, the address register is not updated. is the
+# address register was the stack pointer used from user mode, then let's update
+# it here. if it was used from supervisor mode, then we have to handle this
+# as a special case.
+	btst		&0x5,EXC_SR(%a6)
+	bne.b		fu_out_done_s
+
+	mov.l		EXC_A7(%a6),%a0		# restore a7
+	mov.l		%a0,%usp
+
+fu_out_done_cont:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.b		fu_out_trace		# yes
+
+	bra.l		_fpsp_done
+
+# is the ea mode pre-decrement of the stack pointer from supervisor mode?
+# ("fmov.x fpm,-(a7)") if so, the 12-byte result has to land on the
+# supervisor stack itself, so the exception frame is shifted down and the
+# result written where it used to sit.
+fu_out_done_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.b		fu_out_done_cont
+
+# the extended precision result is still in fp0. but, we need to save it
+# somewhere on the stack until we can copy it to its final resting place.
+# here, we're counting on the top of the stack to be the old place-holders
+# for fp0/fp1 which have already been restored. that way, we can write
+# over those destinations with the shifted stack frame.
+	fmovm.x		&0x80,FP_SRC(%a6)	# put answer on stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+	mov.l		LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	btst		&0x7,(%sp)
+	bne.b		fu_out_trace
+
+	bra.l		_fpsp_done
+
+fu_out_ena:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		fu_out_exc		# there is at least one set
+
+# no exceptions were set.
+# if a disabled overflow occurred and inexact was enabled but the result
+# was exact, then a branch to _real_inex() is made.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+	beq.w		fu_out_done		# no
+
+fu_out_ovflchk:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+	beq.w		fu_out_done		# no
+	bra.w		fu_inex			# yes
+
+#
+# The fp move out that took the "Unimplemented Data Type" exception was
+# being traced. Since the stack frames are similar, get the "current" PC
+# from FPIAR and put it in the trace stack frame then jump to _real_trace().
+#
+#		  UNSUPP FRAME		   TRACE FRAME
+#		*****************	*****************
+#		*      EA	*	*    Current	*
+#		*		*	*      PC	*
+#		*****************	*****************
+#		* 0x3 *  0x0dc	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*     Next	*	*     Next	*
+#		*      PC	*	*      PC	*
+#		*****************	*****************
+#		*      SR	*	*      SR	*
+#		*****************	*****************
+#
+fu_out_trace:
+	mov.w		&0x2024,0x6(%sp)
+	fmov.l		%fpiar,0x8(%sp)
+	bra.l		_real_trace
+
+# an exception occurred and that exception was enabled.
+fu_out_exc:
+	subi.l		&24,%d0			# fix offset to be 0-8
+
+# we don't mess with the existing fsave frame. just re-insert it and
+# jump to the "_real_{}()" handler...
+	mov.w		(tbl_fu_out.b,%pc,%d0.w*2),%d0
+	jmp		(tbl_fu_out.b,%pc,%d0.w*1)
+
+	swbeg		&0x8
+tbl_fu_out:
+	short		tbl_fu_out	- tbl_fu_out	# BSUN can't happen
+	short		tbl_fu_out	- tbl_fu_out	# SNAN can't happen
+	short		fu_operr	- tbl_fu_out	# OPERR
+	short		fu_ovfl		- tbl_fu_out	# OVFL
+	short		fu_unfl		- tbl_fu_out	# UNFL
+	short		tbl_fu_out	- tbl_fu_out	# DZ can't happen
+	short		fu_inex		- tbl_fu_out	# INEX2
+	short		tbl_fu_out	- tbl_fu_out	# INEX1 won't make it here
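+# (tbl_fu_out holds 16-bit offsets relative to the table itself: the pair of
+# instructions above first indexes the table by exception number to fetch an
+# offset, then does a pc-relative jmp through it, which is why the "can't
+# happen" slots simply encode an offset of zero.)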
+
+# for snan,operr,ovfl,unfl, src op is still in FP_SRC so just
+# frestore it.
+fu_snan:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d8,EXC_VOFF(%a6)	# vector offset = 0xd8
+	mov.w		&0xe006,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+
+
+	bra.l		_real_snan
+
+fu_operr:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d0,EXC_VOFF(%a6)	# vector offset = 0xd0
+	mov.w		&0xe004,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+
+
+	bra.l		_real_operr
+
+fu_ovfl:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to the stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d4,EXC_VOFF(%a6)	# vector offset = 0xd4
+	mov.w		&0xe005,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)		# restore EXOP
+
+	unlk		%a6
+
+	bra.l		_real_ovfl
+
+# underflow can happen for extended precision. extended precision opclass
+# three instruction exceptions don't update the stack pointer. so, if the
+# exception occurred from user mode, then simply update a7 and exit normally.
+# if the exception occurred from supervisor mode, check if the <ea> mode was
+# -(a7); that case needs the special frame handling done below.
+fu_unfl:
+	mov.l		EXC_A6(%a6),(%a6)	# restore a6
+
+	btst		&0x5,EXC_SR(%a6)
+	bne.w		fu_unfl_s
+
+	mov.l		EXC_A7(%a6),%a0		# restore a7 whether we need
+	mov.l		%a0,%usp		# to or not...
+
+fu_unfl_cont:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to the stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30cc,EXC_VOFF(%a6)	# vector offset = 0xcc
+	mov.w		&0xe003,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)		# restore EXOP
+
+	unlk		%a6
+
+	bra.l		_real_unfl
+
+fu_unfl_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # was the <ea> mode -(sp)?
+	bne.b		fu_unfl_cont
+
+# the extended precision result is still in fp0. but, we need to save it
+# somewhere on the stack until we can copy it to its final resting place
+# (where the exc frame is currently). make sure it's not at the top of the
+# frame or it will get overwritten when the exc stack frame is shifted "down".
+	fmovm.x		&0x80,FP_SRC(%a6)	# put answer on stack
+	fmovm.x		&0x40,FP_DST(%a6)	# put EXOP on stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30cc,EXC_VOFF(%a6)	# vector offset = 0xcc
+	mov.w		&0xe003,2+FP_DST(%a6)
+
+	frestore	FP_DST(%a6)		# restore EXOP
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+	mov.l		LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	bra.l		_real_unfl
+
+# fmove in and out enter here.
+fu_inex:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to the stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30c4,EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)		# restore EXOP
+
+	unlk		%a6
+
+
+	bra.l		_real_inex
+
+#########################################################################
+#########################################################################
+fu_in_pack:
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field
+	andi.l		&0x0ff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	bsr.l		get_packed		# fetch packed src operand
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	bsr.l		set_tag_x		# set src optype tag
+
+	mov.b		%d0,STAG(%a6)		# save src optype tag
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# at this point
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		fu_extract_p		# monadic
+	cmpi.b		1+EXC_CMDREG(%a6),&0x3a	# is operation an ftst?
+	beq.b		fu_extract_p		# yes, so it's monadic, too
+
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2_done_p		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+fu_op2_done_p:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+fu_extract_p:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	bfextu		1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: all dyadic ops
+#	OPERR	: fsqrt(-NORM)
+#	OVFL	: all except ftst,fcmp
+#	UNFL	: all except ftst,fcmp
+#	DZ	: fdiv
+#	INEX2	: all except ftst,fcmp
+#	INEX1	: all
+#
+
+# we determine the highest priority exception(if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		fu_in_ena_p		# some are enabled
+
+fu_in_cont_p:
+# fcmp and ftst do not store any result.
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch extension
+	andi.b		&0x38,%d0		# extract bits 3-5
+	cmpi.b		%d0,&0x38		# is instr fcmp or ftst?
+	beq.b		fu_in_exit_p		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		store_fpreg		# store the result
+
+fu_in_exit_p:
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.w		fu_in_exit_s_p		# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# update user a7
+	mov.l		%a0,%usp
+
+fu_in_exit_cont_p:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel stack frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+# the exception occurred in supervisor mode. check to see if the
+# addressing mode was (a7)+. if so, we'll need to shift the
+# stack frame "up".
+fu_in_exit_s_p:
+	btst		&mia7_bit,SPCOND_FLG(%a6) # was ea mode (a7)+
+	beq.b		fu_in_exit_cont_p	# no
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel stack frame
+
+# shift the stack frame "up". we don't really care about the <ea> field.
+	mov.l		0x4(%sp),0x10(%sp)
+	mov.l		0x0(%sp),0xc(%sp)
+	add.l		&0xc,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+fu_in_ena_p:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled & set
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		fu_in_exc_p		# at least one was set
+
+#
+# No exceptions occurred that were also enabled. Now:
+#
+#	if (OVFL && ovfl_disabled && inexact_enabled) {
+#	    branch to _real_inex() (even if the result was exact!);
+#	} else {
+#	    save the result in the proper fp reg (unless the op is fcmp or ftst);
+#	    return;
+#	}
+#
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+	beq.w		fu_in_cont_p		# no
+
+fu_in_ovflchk_p:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+	beq.w		fu_in_cont_p		# no
+	bra.w		fu_in_exc_ovfl_p	# do _real_inex() now
+
+#
+# An exception occurred and that exception was enabled:
+#
+#	shift enabled exception field into lo byte of d0;
+#	if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
+#	    ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
+#		/*
+#		 * this is the case where we must call _real_inex() now or else
+#		 * there will be no other way to pass it the exceptional operand
+#		 */
+#		call _real_inex();
+#	} else {
+#		restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
+#	}
+#
+fu_in_exc_p:
+	subi.l		&24,%d0			# fix offset to be 0-7
+	cmpi.b		%d0,&0x6		# is exception INEX? (6 or 7)
+	blt.b		fu_in_exc_exit_p	# no
+
+# the enabled exception was inexact
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
+	bne.w		fu_in_exc_unfl_p	# yes
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
+	bne.w		fu_in_exc_ovfl_p	# yes
+
+# here, we insert the correct fsave status value into the fsave frame for the
+# corresponding exception. the operand in the fsave frame should be the original
+# src operand.
+# as a reminder for future predicted pain and agony, we are passing in the fsave
+# frame the "non-skewed" operand for cases of sgl and dbl src INFs, NANs, and DENORMs.
+# this is INCORRECT for an enabled SNAN, which would give the user the skewed SNAN!!!
+fu_in_exc_exit_p:
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.w		fu_in_exc_exit_s_p	# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# update user a7
+	mov.l		%a0,%usp
+
+fu_in_exc_exit_cont_p:
+	mov.w		(tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore src op
+
+	unlk		%a6
+
+	btst		&0x7,(%sp)		# is trace enabled?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done
+
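+# tbl_except_p: one fsave status word per exception, indexed by the bfffo
+# result above (BSUN,SNAN,OPERR,OVFL,UNFL,DZ,INEX2,INEX1); the selected
+# word is stuffed into 2+FP_SRC before the frestore. (descriptive summary;
+# the values match the per-exception status words used by the handlers below.)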
+tbl_except_p:
+	short		0xe000,0xe006,0xe004,0xe005
+	short		0xe003,0xe002,0xe001,0xe001
+
+fu_in_exc_ovfl_p:
+	mov.w		&0x3,%d0
+	bra.w		fu_in_exc_exit_p
+
+fu_in_exc_unfl_p:
+	mov.w		&0x4,%d0
+	bra.w		fu_in_exc_exit_p
+
+fu_in_exc_exit_s_p:
+	btst		&mia7_bit,SPCOND_FLG(%a6)
+	beq.b		fu_in_exc_exit_cont_p
+
+	mov.w		(tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore src op
+
+	unlk		%a6			# unravel stack frame
+
+# shift stack frame "up". who cares about <ea> field.
+	mov.l		0x4(%sp),0x10(%sp)
+	mov.l		0x0(%sp),0xc(%sp)
+	add.l		&0xc,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.b		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+#
+# The opclass two PACKED instruction that took an "Unimplemented Data Type"
+# exception was being traced. Make the "current" PC the FPIAR and put it in the
+# trace stack frame then jump to _real_trace().
+#
+#		  UNSUPP FRAME		   TRACE FRAME
+#		*****************	*****************
+#		*      EA	*	*    Current	*
+#		*		*	*      PC	*
+#		*****************	*****************
+#		* 0x2 *	0x0dc	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*     Next	*	*     Next	*
+#		*      PC	*	*      PC	*
+#		*****************	*****************
+#		*      SR	*	*      SR	*
+#		*****************	*****************
+fu_trace_p:
+	mov.w		&0x2024,0x6(%sp)
+	fmov.l		%fpiar,0x8(%sp)
+
+	bra.l		_real_trace
+
+#########################################################
+#########################################################
+fu_out_pack:
+
+
+# I'm not sure at this point which FPSR bits are valid for this instruction,
+# so, since the emulation routines re-create them anyway, zero the exception field.
+# fmove out doesn't affect the ccodes.
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		load_fpn1
+
+# unlike other opclass 3, unimplemented data type exceptions, packed must be
+# able to detect all operand types.
+	lea		FP_SRC(%a6),%a0
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2_p		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+
+fu_op2_p:
+	mov.b		%d0,STAG(%a6)		# save src optype tag
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	mov.l		(%a6),EXC_A6(%a6)	# in case a6 changes
+	bsr.l		fout			# call fmove out routine
+
+# Exceptions in order of precedence:
+#	BSUN	: no
+#	SNAN	: yes
+#	OPERR	: if ((k_factor > +17) || (dec. exp exceeds 3 digits))
+#	OVFL	: no
+#	UNFL	: no
+#	DZ	: no
+#	INEX2	: yes
+#	INEX1	: no
+
+# determine the highest priority exception(if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		fu_out_ena_p		# some are enabled
+
+fu_out_exit_p:
+	mov.l		EXC_A6(%a6),(%a6)	# restore a6
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.b		fu_out_exit_s_p		# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# update user a7
+	mov.l		%a0,%usp
+
+fu_out_exit_cont_p:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel stack frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+# the exception occurred in supervisor mode. check to see if the
+# addressing mode was -(a7). if so, we'll need to shift the
+# stack frame "down".
+fu_out_exit_s_p:
+	btst		&mda7_bit,SPCOND_FLG(%a6) # was ea mode -(a7)
+	beq.b		fu_out_exit_cont_p	# no
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	btst		&0x7,(%sp)
+	bne.w		fu_trace_p
+
+	bra.l		_fpsp_done
+
+fu_out_ena_p:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	beq.w		fu_out_exit_p
+
+	mov.l		EXC_A6(%a6),(%a6)	# restore a6
+
+# an exception occurred and that exception was enabled.
+# the only exceptions possible on a packed move out are INEX, OPERR, and SNAN.
+fu_out_exc_p:
+	cmpi.b		%d0,&0x1a
+	bgt.w		fu_inex_p2
+	beq.w		fu_operr_p
+
+fu_snan_p:
+	btst		&0x5,EXC_SR(%a6)
+	bne.b		fu_snan_s_p
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	bra.w		fu_snan
+
+fu_snan_s_p:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.w		fu_snan
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d8,EXC_VOFF(%a6)	# vector offset = 0xd8
+	mov.w		&0xe006,2+FP_SRC(%a6)	# set fsave status
+
+	frestore	FP_SRC(%a6)		# restore src operand
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+
+	bra.l		_real_snan
+
+fu_operr_p:
+	btst		&0x5,EXC_SR(%a6)
+	bne.w		fu_operr_p_s
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	bra.w		fu_operr
+
+fu_operr_p_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.w		fu_operr
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d0,EXC_VOFF(%a6)	# vector offset = 0xd0
+	mov.w		&0xe004,2+FP_SRC(%a6)	# set fsave status
+
+	frestore	FP_SRC(%a6)		# restore src operand
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+
+	bra.l		_real_operr
+
+fu_inex_p2:
+	btst		&0x5,EXC_SR(%a6)
+	bne.w		fu_inex_s_p2
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	bra.w		fu_inex
+
+fu_inex_s_p2:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.w		fu_inex
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30c4,EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)	# set fsave status
+
+	frestore	FP_SRC(%a6)		# restore src operand
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+
+	bra.l		_real_inex
+
+#########################################################################
+
+#
+# if we're stuffing a source operand back into an fsave frame then we
+# have to make sure that for single or double source operands that the
+# format stuffed is as weird as the hardware usually makes it.
+#
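+# roughly, for the single-precision case (an illustrative sketch; the
+# double case is analogous, handing the operand to dnrm_lp with a
+# threshold of 0x3c01):
+#
+#	if (0 < exp && exp <= 0x3f80) {		/* sgl denorm/unnorm range */
+#		shift = 0x3f81 - exp;
+#		hi(man) = (hi(man) >> shift) | j_bit;
+#		exp = 0x3f80;			/* the "skewed" exponent */
+#	}
+#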
+	global		funimp_skew
+funimp_skew:
+	bfextu		EXC_EXTWORD(%a6){&3:&3},%d0 # extract src specifier
+	cmpi.b		%d0,&0x1		# was src sgl?
+	beq.b		funimp_skew_sgl		# yes
+	cmpi.b		%d0,&0x5		# was src dbl?
+	beq.b		funimp_skew_dbl		# yes
+	rts
+
+funimp_skew_sgl:
+	mov.w		FP_SRC_EX(%a6),%d0	# fetch DENORM exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	beq.b		funimp_skew_sgl_not
+	cmpi.w		%d0,&0x3f80
+	bgt.b		funimp_skew_sgl_not
+	neg.w		%d0			# make exponent negative
+	addi.w		&0x3f81,%d0		# find amt to shift
+	mov.l		FP_SRC_HI(%a6),%d1	# fetch DENORM hi(man)
+	lsr.l		%d0,%d1			# shift it
+	bset		&31,%d1			# set j-bit
+	mov.l		%d1,FP_SRC_HI(%a6)	# insert new hi(man)
+	andi.w		&0x8000,FP_SRC_EX(%a6)	# clear old exponent
+	ori.w		&0x3f80,FP_SRC_EX(%a6)	# insert new "skewed" exponent
+funimp_skew_sgl_not:
+	rts
+
+funimp_skew_dbl:
+	mov.w		FP_SRC_EX(%a6),%d0	# fetch DENORM exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	beq.b		funimp_skew_dbl_not
+	cmpi.w		%d0,&0x3c00
+	bgt.b		funimp_skew_dbl_not
+
+	tst.b		FP_SRC_EX(%a6)		# make "internal format"
+	smi.b		0x2+FP_SRC(%a6)
+	mov.w		%d0,FP_SRC_EX(%a6)	# insert exponent with cleared sign
+	clr.l		%d0			# clear g,r,s
+	lea		FP_SRC(%a6),%a0		# pass ptr to src op
+	mov.w		&0x3c01,%d1		# pass denorm threshold
+	bsr.l		dnrm_lp			# denorm it
+	mov.w		&0x3c00,%d0		# new exponent
+	tst.b		0x2+FP_SRC(%a6)		# is sign set?
+	beq.b		fss_dbl_denorm_done	# no
+	bset		&15,%d0			# set sign
+fss_dbl_denorm_done:
+	bset		&0x7,FP_SRC_HI(%a6)	# set j-bit
+	mov.w		%d0,FP_SRC_EX(%a6)	# insert new exponent
+funimp_skew_dbl_not:
+	rts
+
+#########################################################################
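+#
+# _mem_write2() (descriptive summary of the routine below):
+# in user mode, the write is passed straight through to _dmem_write().
+# in supervisor mode, the 12-byte result at (%a0) is copied into FP_DST
+# instead (the -(a7) exit paths above copy FP_DST onto the system stack),
+# and d1 is cleared to signal success.
+#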
+	global		_mem_write2
+_mem_write2:
+	btst		&0x5,EXC_SR(%a6)
+	beq.l		_dmem_write
+	mov.l		0x0(%a0),FP_DST_EX(%a6)
+	mov.l		0x4(%a0),FP_DST_HI(%a6)
+	mov.l		0x8(%a0),FP_DST_LO(%a6)
+	clr.l		%d1
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_effadd(): 060FPSP entry point for FP "Unimplemented	#
+#			effective address" exception.			#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Unimplemented Effective Address exception in an operating	#
+#	system.								#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	decbin() - convert packed data to FP binary data		#
+#	_real_fpu_disabled() - "callout" for "FPU disabled" exception	#
+#	_real_access() - "callout" for access error exception		#
+#	_mem_read() - read extended immediate operand from memory	#
+#	_fpsp_done() - "callout" for exit; work all done		#
+#	_real_trace() - "callout" for Trace enabled exception		#
+#	fmovm_dynamic() - emulate dynamic fmovm instruction		#
+#	fmovm_ctrl() - emulate fmovm control instruction		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the "Unimplemented <ea>" stk frame	#
+#									#
+# OUTPUT **************************************************************	#
+#	If access error:						#
+#	- The system stack is changed to an access error stack frame	#
+#	If FPU disabled:						#
+#	- The system stack is changed to an FPU disabled stack frame	#
+#	If Trace exception enabled:					#
+#	- The system stack is changed to a Trace exception stack frame	#
+#	Else: (normal case)						#
+#	- None (correct result has been stored as appropriate)		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This exception handles 3 types of operations:			#
+# (1) FP Instructions using extended precision or packed immediate	#
+#     addressing mode.							#
+# (2) The "fmovm.x" instruction w/ dynamic register specification.	#
+# (3) The "fmovm.l" instruction w/ 2 or 3 control registers.		#
+#									#
+#	For immediate data operations, the data is read in w/ a		#
+# _mem_read() "callout", converted to FP binary (if packed), and used	#
+# as the source operand to the instruction specified by the instruction	#
+# word. If no FP exception should be reported as a result of the	#
+# emulation, then the result is stored to the destination register and	#
+# the handler exits through _fpsp_done(). If an enabled exc has been	#
+# signalled as a result of emulation, then an fsave state frame		#
+# corresponding to the FP exception type must be entered into the 060	#
+# FPU before exiting. In either the enabled or disabled cases, we	#
+# must also check if a Trace exception is pending, in which case, we	#
+# must create a Trace exception stack frame from the current exception	#
+# stack frame. If no Trace is pending, we simply exit through		#
+# _fpsp_done().								#
+#	For "fmovm.x", call the routine fmovm_dynamic() which will	#
+# decode and emulate the instruction. No FP exceptions can be pending	#
+# as a result of this operation emulation. A Trace exception can be	#
+# pending, though, which means the current stack frame must be changed	#
+# to a Trace stack frame and an exit made through _real_trace().	#
+# For the case of "fmovm.x Dn,-(a7)", where the offending instruction	#
+# was executed from supervisor mode, this handler must store the FP	#
+# register file values to the system stack by itself since		#
+# fmovm_dynamic() can't handle this. A normal exit is made through	#
+# _fpsp_done().								#
+#	For "fmovm.l", fmovm_ctrl() is used to emulate the instruction.	#
+# Again, a Trace exception may be pending and an exit made through	#
+# _real_trace(). Else, a normal exit is made through _fpsp_done().	#
+#									#
+#	Before any of the above is attempted, it must be checked to	#
+# see if the FPU is disabled. Since the "Unimp <ea>" exception is taken	#
+# before the "FPU disabled" exception, but the "FPU disabled" exception	#
+# has higher priority, we check the disabled bit in the PCR. If set,	#
+# then we must create an 8 word "FPU disabled" exception stack frame	#
+# from the current 4 word exception stack frame. This includes		#
+# reproducing the effective address of the instruction to put on the	#
+# new stack frame.							#
+#									#
+#	In the process of all emulation work, if a _mem_read()		#
+# "callout" returns a failing result indicating an access error, then	#
+# we must create an access error stack frame from the current stack	#
+# frame. This information includes a faulting address and a fault-	#
+# status-longword. These are created within this handler.		#
+#									#
+#########################################################################
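+
+# in rough outline (an illustrative sketch of the flow below, not the
+# exact control flow):
+#
+#	if (FPU disabled)
+#		build an 8-word "FPU disabled" frame; exit via _real_fpu_disabled();
+#	else if (instr is fmovm)
+#		emulate it via fmovm_dynamic()/fmovm_ctrl();
+#		exit via _fpsp_done() (or _real_trace() if Trace is pending);
+#	else				/* extended or packed immediate operand */
+#		read the operand (_imem_read), emulate the op, stuff any
+#		enabled exception back into the FPU; exit as above;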
+
+	global		_fpsp_effadd
+_fpsp_effadd:
+
+# This exception type takes priority over the "Line F Emulator"
+# exception. Therefore, the FPU could be disabled when entering here.
+# So, we must check to see if it's disabled and handle that case separately.
+	mov.l		%d0,-(%sp)		# save d0
+	movc		%pcr,%d0		# load proc cr
+	btst		&0x1,%d0		# is FPU disabled?
+	bne.w		iea_disabled		# yes
+	mov.l		(%sp)+,%d0		# restore d0
+
+	link		%a6,&-LOCAL_SIZE	# init stack frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# PC of instruction that took the exception is the PC in the frame
+	mov.l		EXC_PC(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)	# store OPWORD and EXTWORD
+
+#########################################################################
+
+	tst.w		%d0			# is operation fmovem?
+	bmi.w		iea_fmovm		# yes
+
+#
+# here, we will have:
+#	fabs	fdabs	fsabs		facos		fmod
+#	fadd	fdadd	fsadd		fasin		frem
+#	fcmp				fatan		fscale
+#	fdiv	fddiv	fsdiv		fatanh		fsin
+#	fint				fcos		fsincos
+#	fintrz				fcosh		fsinh
+#	fmove	fdmove	fsmove		fetox		ftan
+#	fmul	fdmul	fsmul		fetoxm1		ftanh
+#	fneg	fdneg	fsneg		fgetexp		ftentox
+#	fsgldiv				fgetman		ftwotox
+#	fsglmul				flog10
+#	fsqrt				flog2
+#	fsub	fdsub	fssub		flogn
+#	ftst				flognp1
+# which can all use f<op>.{x,p}
+# so, now it's immediate data extended precision AND PACKED FORMAT!
+#
+iea_op:
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	btst		&0xa,%d0		# is src fmt x or p?
+	bne.b		iea_op_pack		# packed
+
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# pass: ptr to #<data>
+	lea		FP_SRC(%a6),%a1		# pass: ptr to super addr
+	mov.l		&0xc,%d0		# pass: 12 bytes
+	bsr.l		_imem_read		# read extended immediate
+
+	tst.l		%d1			# did ifetch fail?
+	bne.w		iea_iacc		# yes
+
+	bra.b		iea_op_setsrc
+
+iea_op_pack:
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# pass: ptr to #<data>
+	lea		FP_SRC(%a6),%a1		# pass: ptr to super dst
+	mov.l		&0xc,%d0		# pass: 12 bytes
+	bsr.l		_imem_read		# read packed operand
+
+	tst.l		%d1			# did ifetch fail?
+	bne.w		iea_iacc		# yes
+
+# The packed operand is an INF or a NAN if the exponent field is all ones.
+	bfextu		FP_SRC(%a6){&1:&15},%d0	# get exp
+	cmpi.w		%d0,&0x7fff		# INF or NAN?
+	beq.b		iea_op_setsrc		# operand is an INF or NAN
+
+# The packed operand is a zero if the mantissa is all zero, else it's
+# a normal packed op.
+	mov.b		3+FP_SRC(%a6),%d0	# get byte 4
+	andi.b		&0x0f,%d0		# clear all but last nybble
+	bne.b		iea_op_gp_not_spec	# not a zero
+	tst.l		FP_SRC_HI(%a6)		# is lw 2 zero?
+	bne.b		iea_op_gp_not_spec	# not a zero
+	tst.l		FP_SRC_LO(%a6)		# is lw 3 zero?
+	beq.b		iea_op_setsrc		# operand is a ZERO
+iea_op_gp_not_spec:
+	lea		FP_SRC(%a6),%a0		# pass: ptr to packed op
+	bsr.l		decbin			# convert to extended
+	fmovm.x		&0x80,FP_SRC(%a6)	# make this the srcop
+
+iea_op_setsrc:
+	addi.l		&0xc,EXC_EXTWPTR(%a6)	# update extension word pointer
+
+# FP_SRC now holds the src operand.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# could be ANYTHING!!!
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		iea_op_getdst		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM/DENORM/ZERO
+	mov.b		%d0,STAG(%a6)		# set new optype tag
+iea_op_getdst:
+	clr.b		STORE_FLG(%a6)		# clear "store result" boolean
+
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		iea_op_extract		# monadic
+	btst		&0x4,1+EXC_CMDREG(%a6)	# is operation fsincos,ftst,fcmp?
+	bne.b		iea_op_spec		# yes
+
+iea_op_loaddst:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
+	bsr.l		load_fpn2		# load dst operand
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,DTAG(%a6)		# could be ANYTHING!!!
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		iea_op_extract		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM/DENORM/ZERO
+	mov.b		%d0,DTAG(%a6)		# set new optype tag
+	bra.b		iea_op_extract
+
+# the operation is fsincos, ftst, or fcmp. only fcmp is dyadic
+iea_op_spec:
+	btst		&0x3,1+EXC_CMDREG(%a6)	# is operation fsincos?
+	beq.b		iea_op_extract		# yes
+# now, we're left with ftst and fcmp. so, first let's tag them so that they don't
+# store a result. then, only fcmp will branch back and pick up a dst operand.
+	st		STORE_FLG(%a6)		# don't store a final result
+	btst		&0x1,1+EXC_CMDREG(%a6)	# is operation fcmp?
+	beq.b		iea_op_loaddst		# yes
+
+iea_op_extract:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass: rnd mode,prec
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	fmov.l		&0x0,%fpcr
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: all operations
+#	OPERR	: all reg-reg or mem-reg operations that can normally operr
+#	OVFL	: same as OPERR
+#	UNFL	: same as OPERR
+#	DZ	: same as OPERR
+#	INEX2	: same as OPERR
+#	INEX1	: all packed immediate operations
+#
+
+# we determine the highest priority exception(if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.b		iea_op_ena		# some are enabled
+
+# now, we save the result, unless, of course, the operation was ftst or fcmp.
+# these don't save results.
+iea_op_save:
+	tst.b		STORE_FLG(%a6)		# does this op store a result?
+	bne.b		iea_op_exit1		# exit with no frestore
+
+iea_op_store:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
+	bsr.l		store_fpreg		# store the result
+
+iea_op_exit1:
+	mov.l		EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
+	mov.l		EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel the frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		iea_op_trace		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+iea_op_ena:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled & set
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		iea_op_exc		# at least one was set
+
+# no exception occurred. now, did a disabled, exact overflow occur with inexact
+# enabled? if so, then we have to stuff an overflow frame into the FPU.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+	beq.b		iea_op_save
+
+iea_op_ovfl:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
+	beq.b		iea_op_store		# no
+	bra.b		iea_op_exc_ovfl		# yes
+
+# an enabled exception occurred. we have to insert the exception type back into
+# the machine.
+iea_op_exc:
+	subi.l		&24,%d0			# fix offset to be 0-7
+	cmpi.b		%d0,&0x6		# is exception INEX?
+	bne.b		iea_op_exc_force	# no
+
+# the enabled exception was inexact. so, if it occurs with an overflow
+# or underflow that was disabled, then we have to force an overflow or
+# underflow frame.
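+# (i.e., roughly: the OVFL (0xe005) or UNFL (0xe003) status word is stuffed
+# into the frame below instead of the INEX status.)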
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+	bne.b		iea_op_exc_ovfl		# yes
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
+	bne.b		iea_op_exc_unfl		# yes
+
+iea_op_exc_force:
+	mov.w		(tbl_iea_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+	bra.b		iea_op_exit2		# exit with frestore
+
+tbl_iea_except:
+	short		0xe002, 0xe006, 0xe004, 0xe005
+	short		0xe003, 0xe002, 0xe001, 0xe001
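+# (one fsave status word per exception index; same indexing as
+# tbl_except_p in the packed-operand handler above.)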
+
+iea_op_exc_ovfl:
+	mov.w		&0xe005,2+FP_SRC(%a6)
+	bra.b		iea_op_exit2
+
+iea_op_exc_unfl:
+	mov.w		&0xe003,2+FP_SRC(%a6)
+
+iea_op_exit2:
+	mov.l		EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
+	mov.l		EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore exceptional state
+
+	unlk		%a6			# unravel the frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.b		iea_op_trace		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+#
+# The opclass two instruction that took an "Unimplemented Effective Address"
+# exception was being traced. Make the "current" PC the FPIAR and put it in
+# the trace stack frame then jump to _real_trace().
+#
+#		 UNIMP EA FRAME		   TRACE FRAME
+#		*****************	*****************
+#		* 0x0 *  0x0f0	*	*    Current	*
+#		*****************	*      PC	*
+#		*    Current	*	*****************
+#		*      PC	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*      SR	*	*     Next	*
+#		*****************	*      PC	*
+#					*****************
+#					*      SR	*
+#					*****************
+iea_op_trace:
+	mov.l		(%sp),-(%sp)		# shift stack frame "down"
+	mov.w		0x8(%sp),0x4(%sp)
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
+	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
+
+	bra.l		_real_trace
+
+#########################################################################
+iea_fmovm:
+	btst		&14,%d0			# ctrl or data reg
+	beq.w		iea_fmovm_ctrl
+
+iea_fmovm_data:
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode
+	bne.b		iea_fmovm_data_s
+
+iea_fmovm_data_u:
+	mov.l		%usp,%a0
+	mov.l		%a0,EXC_A7(%a6)		# store current a7
+	bsr.l		fmovm_dynamic		# do dynamic fmovm
+	mov.l		EXC_A7(%a6),%a0		# load possibly new a7
+	mov.l		%a0,%usp		# update usp
+	bra.w		iea_fmovm_exit
+
+iea_fmovm_data_s:
+	clr.b		SPCOND_FLG(%a6)
+	lea		0x2+EXC_VOFF(%a6),%a0
+	mov.l		%a0,EXC_A7(%a6)
+	bsr.l		fmovm_dynamic		# do dynamic fmovm
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	beq.w		iea_fmovm_data_predec
+	cmpi.b		SPCOND_FLG(%a6),&mia7_flg
+	bne.w		iea_fmovm_exit
+
+# right now, d0 = the size.
+# the data has been fetched from the supervisor stack, but we have not
+# incremented the stack pointer by the appropriate number of bytes.
+# do it here.
+iea_fmovm_data_postinc:
+	btst		&0x7,EXC_SR(%a6)
+	bne.b		iea_fmovm_data_pi_trace
+
+	mov.w		EXC_SR(%a6),(EXC_SR,%a6,%d0)
+	mov.l		EXC_EXTWPTR(%a6),(EXC_PC,%a6,%d0)
+	mov.w		&0x00f0,(EXC_VOFF,%a6,%d0)
+
+	lea		(EXC_SR,%a6,%d0),%a0
+	mov.l		%a0,EXC_SR(%a6)
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+	mov.l		(%sp)+,%sp
+	bra.l		_fpsp_done
+
+iea_fmovm_data_pi_trace:
+	mov.w		EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
+	mov.l		EXC_EXTWPTR(%a6),(EXC_PC-0x4,%a6,%d0)
+	mov.w		&0x2024,(EXC_VOFF-0x4,%a6,%d0)
+	mov.l		EXC_PC(%a6),(EXC_VOFF+0x2-0x4,%a6,%d0)
+
+	lea		(EXC_SR-0x4,%a6,%d0),%a0
+	mov.l		%a0,EXC_SR(%a6)
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+	mov.l		(%sp)+,%sp
+	bra.l		_real_trace
+
+# right now, d0 = the size and d1 = the strg.
+iea_fmovm_data_predec:
+	mov.b		%d1,EXC_VOFF(%a6)	# store strg
+	mov.b		%d0,0x1+EXC_VOFF(%a6)	# store size
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.l		(%a6),-(%sp)		# make a copy of a6
+	mov.l		%d0,-(%sp)		# save d0
+	mov.l		%d1,-(%sp)		# save d1
+	mov.l		EXC_EXTWPTR(%a6),-(%sp)	# make a copy of Next PC
+
+	clr.l		%d0
+	mov.b		0x1+EXC_VOFF(%a6),%d0	# fetch size
+	neg.l		%d0			# get negative of size
+
+	btst		&0x7,EXC_SR(%a6)	# is trace enabled?
+	beq.b		iea_fmovm_data_p2
+
+	mov.w		EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
+	mov.l		EXC_PC(%a6),(EXC_VOFF-0x2,%a6,%d0)
+	mov.l		(%sp)+,(EXC_PC-0x4,%a6,%d0)
+	mov.w		&0x2024,(EXC_VOFF-0x4,%a6,%d0)
+
+	pea		(%a6,%d0)		# create final sp
+	bra.b		iea_fmovm_data_p3
+
+iea_fmovm_data_p2:
+	mov.w		EXC_SR(%a6),(EXC_SR,%a6,%d0)
+	mov.l		(%sp)+,(EXC_PC,%a6,%d0)
+	mov.w		&0x00f0,(EXC_VOFF,%a6,%d0)
+
+	pea		(0x4,%a6,%d0)		# create final sp
+
+iea_fmovm_data_p3:
+	clr.l		%d1
+	mov.b		EXC_VOFF(%a6),%d1	# fetch strg
+
+	tst.b		%d1
+	bpl.b		fm_1
+	fmovm.x		&0x80,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_1:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_2
+	fmovm.x		&0x40,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_2:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_3
+	fmovm.x		&0x20,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_3:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_4
+	fmovm.x		&0x10,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_4:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_5
+	fmovm.x		&0x08,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_5:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_6
+	fmovm.x		&0x04,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_6:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_7
+	fmovm.x		&0x02,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_7:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_end
+	fmovm.x		&0x01,(0x4+0x8,%a6,%d0)
+fm_end:
+	mov.l		0x4(%sp),%d1
+	mov.l		0x8(%sp),%d0
+	mov.l		0xc(%sp),%a6
+	mov.l		(%sp)+,%sp
+
+	btst		&0x7,(%sp)		# is trace enabled?
+	beq.l		_fpsp_done
+	bra.l		_real_trace
+
+#########################################################################
+iea_fmovm_ctrl:
+
+	bsr.l		fmovm_ctrl		# load ctrl regs
+
+iea_fmovm_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	btst		&0x7,EXC_SR(%a6)	# is trace on?
+	bne.b		iea_fmovm_trace		# yes
+
+	mov.l		EXC_EXTWPTR(%a6),EXC_PC(%a6) # set Next PC
+
+	unlk		%a6			# unravel the frame
+
+	bra.l		_fpsp_done		# exit to os
+
+#
+# The control reg instruction that took an "Unimplemented Effective Address"
+# exception was being traced. The "Current PC" for the trace frame is the
+# PC stacked for Unimp EA. The "Next PC" is in EXC_EXTWPTR.
+# After fixing the stack frame, jump to _real_trace().
+#
+#		 UNIMP EA FRAME		   TRACE FRAME
+#		*****************	*****************
+#		* 0x0 *  0x0f0	*	*    Current	*
+#		*****************	*      PC	*
+#		*    Current	*	*****************
+#		*      PC	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*      SR	*	*     Next	*
+#		*****************	*      PC	*
+#					*****************
+#					*      SR	*
+#					*****************
+# this ain't a pretty solution, but it works:
+# -restore a6 (not with unlk)
+# -shift stack frame down over where old a6 used to be
+# -add LOCAL_SIZE to stack pointer
+iea_fmovm_trace:
+	mov.l		(%a6),%a6		# restore frame pointer
+	mov.w		EXC_SR+LOCAL_SIZE(%sp),0x0+LOCAL_SIZE(%sp)
+	mov.l		EXC_PC+LOCAL_SIZE(%sp),0x8+LOCAL_SIZE(%sp)
+	mov.l		EXC_EXTWPTR+LOCAL_SIZE(%sp),0x2+LOCAL_SIZE(%sp)
+	mov.w		&0x2024,0x6+LOCAL_SIZE(%sp) # stk fmt = 0x2; voff = 0x024
+	add.l		&LOCAL_SIZE,%sp		# clear stack frame
+
+	bra.l		_real_trace
+
+#########################################################################
+# The FPU is disabled and so we should really have taken the "Line
+# F Emulator" exception. So, here we create an 8-word stack frame
+# from our 4-word stack frame. This means we must calculate the length
+# of the faulting instruction to get the "next PC". This is trivial for
+# immediate operands but requires some extra work for fmovm dynamic
+# which can use most addressing modes.
+iea_disabled:
+	mov.l		(%sp)+,%d0		# restore d0
+
+	link		%a6,&-LOCAL_SIZE	# init stack frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+
+# PC of instruction that took the exception is the PC in the frame
+	mov.l		EXC_PC(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)	# store OPWORD and EXTWORD
+
+	tst.w		%d0			# is instr fmovm?
+	bmi.b		iea_dis_fmovm		# yes
+# instruction is using an extended precision immediate operand. therefore,
+# the total instruction length is 16 bytes.
+iea_dis_immed:
+	mov.l		&0x10,%d0		# 16 bytes of instruction
+	bra.b		iea_dis_cont
+iea_dis_fmovm:
+	btst		&0xe,%d0		# is instr fmovm ctrl
+	bne.b		iea_dis_fmovm_data	# no
+# the instruction is a fmovm.l with 2 or 3 registers.
+	bfextu		%d0{&19:&3},%d1
+	mov.l		&0xc,%d0
+	cmpi.b		%d1,&0x7		# move all regs?
+	bne.b		iea_dis_cont
+	addq.l		&0x4,%d0
+	bra.b		iea_dis_cont
+# the instruction is an fmovm.x dynamic which can use many addressing
+# modes and thus can have several different total instruction lengths.
+# call fmovm_calc_ea which will go through the ea calc process and,
+# as a by-product, will tell us how long the instruction is.
+iea_dis_fmovm_data:
+	clr.l		%d0
+	bsr.l		fmovm_calc_ea
+	mov.l		EXC_EXTWPTR(%a6),%d0
+	sub.l		EXC_PC(%a6),%d0
+iea_dis_cont:
+	mov.w		%d0,EXC_VOFF(%a6)	# store stack shift value
+
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+# here, we actually create the 8-word frame from the 4-word frame,
+# with the "next PC" as additional info.
+# the <ea> field is left undefined.
+	subq.l		&0x8,%sp		# make room for new stack
+	mov.l		%d0,-(%sp)		# save d0
+	mov.w		0xc(%sp),0x4(%sp)	# move SR
+	mov.l		0xe(%sp),0x6(%sp)	# move Current PC
+	clr.l		%d0
+	mov.w		0x12(%sp),%d0
+	mov.l		0x6(%sp),0x10(%sp)	# move Current PC
+	add.l		%d0,0x6(%sp)		# make Next PC
+	mov.w		&0x402c,0xa(%sp)	# insert offset,frame format
+	mov.l		(%sp)+,%d0		# restore d0
+
+	bra.l		_real_fpu_disabled
+
+##########
+
+iea_iacc:
+	movc		%pcr,%d0
+	btst		&0x1,%d0
+	bne.b		iea_iacc_cont
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1 on stack
+iea_iacc_cont:
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	subq.w		&0x8,%sp		# make stack frame bigger
+	mov.l		0x8(%sp),(%sp)		# store SR,hi(PC)
+	mov.w		0xc(%sp),0x4(%sp)	# store lo(PC)
+	mov.w		&0x4008,0x6(%sp)	# store voff
+	mov.l		0x2(%sp),0x8(%sp)	# store ea
+	mov.l		&0x09428001,0xc(%sp)	# store fslw
+
+iea_acc_done:
+	btst		&0x5,(%sp)		# user or supervisor mode?
+	beq.b		iea_acc_done2		# user
+	bset		&0x2,0xd(%sp)		# set supervisor TM bit
+
+iea_acc_done2:
+	bra.l		_real_access
+
+iea_dacc:
+	lea		-LOCAL_SIZE(%a6),%sp
+
+	movc		%pcr,%d1
+	btst		&0x1,%d1
+	bne.b		iea_dacc_cont
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1 on stack
+	fmovm.l		LOCAL_SIZE+USER_FPCR(%sp),%fpcr,%fpsr,%fpiar # restore ctrl regs
+iea_dacc_cont:
+	mov.l		(%a6),%a6
+
+	mov.l		0x4+LOCAL_SIZE(%sp),-0x8+0x4+LOCAL_SIZE(%sp)
+	mov.w		0x8+LOCAL_SIZE(%sp),-0x8+0x8+LOCAL_SIZE(%sp)
+	mov.w		&0x4008,-0x8+0xa+LOCAL_SIZE(%sp)
+	mov.l		%a0,-0x8+0xc+LOCAL_SIZE(%sp)
+	mov.w		%d0,-0x8+0x10+LOCAL_SIZE(%sp)
+	mov.w		&0x0001,-0x8+0x12+LOCAL_SIZE(%sp)
+
+	movm.l		LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1
+	add.w		&LOCAL_SIZE-0x4,%sp
+
+	bra.b		iea_acc_done
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_operr(): 060FPSP entry point for FP Operr exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Operand Error exception in an operating system.		#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	_real_operr() - "callout" to operating system operr handler	#
+#	_dmem_write_{byte,word,long}() - store data to mem (opclass 3)	#
+#	store_dreg_{b,w,l}() - store data to data regfile (opclass 3)	#
+#	facc_out_{b,w,l}() - store to memory took access error (opcl 3)	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Operr exception frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	No access error:						#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the FP Operr exception is enabled, the goal	#
+# is to get to the handler specified at _real_operr(). But, on the 060,	#
+# for opclass zero and two instruction taking this exception, the	#
+# input operand in the fsave frame may be incorrect for some cases	#
+# and needs to be corrected. This handler calls fix_skewed_ops() to	#
+# do just this and then exits through _real_operr().			#
+#	For opclass 3 instructions, the 060 doesn't store the default	#
+# operr result out to memory or data register file as it should.	#
+# This code must emulate the move out before finally exiting through	#
+# _real_operr(). The move out, if to memory, is performed using		#
+# _mem_write() "callout" routines that may return a failing result.	#
+# In this special case, the handler must exit through facc_out()	#
+# which creates an access error stack frame from the current operr	#
+# stack frame.								#
+#									#
+#########################################################################
+
+	global		_fpsp_operr
+_fpsp_operr:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&13,%d0			# is instr an fmove out?
+	bne.b		foperr_out		# fmove out
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source infinity or
+# denorm operand in the sgl or dbl format. NANs also become skewed, but can't
+# cause an operr so we don't need to check for them here.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+foperr_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_operr
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# operand error exceptions. we do this here before passing control to
+# the user operand error handler.
+#
+# byte, word, and long destination format operations can pass
+# through here. we simply need to test the sign of the src
+# operand and save the appropriate minimum or maximum integer value
+# to the effective address as pointed to by the stacked effective address.
+#
+# although packed opclass three operations can take operand error
+# exceptions, they won't pass through here since they are caught
+# first by the unsupported data format exception handler. that handler
+# sends them directly to _real_operr() if necessary.
+#
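+# roughly (an illustrative sketch of the code below):
+#
+#	if (src is a NAN)
+#		default = hi(man);		/* pass the NAN bits along */
+#	else					/* INF or any other operand */
+#		default = sign(src) ? 0x80000000 : 0x7fffffff;
+#	write the upper byte/word (or the whole longword) of default
+#	to <ea> or to Dn;
+#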
+foperr_out:
+
+	mov.w		FP_SRC_EX(%a6),%d1	# fetch exponent
+	andi.w		&0x7fff,%d1
+	cmpi.w		%d1,&0x7fff
+	bne.b		foperr_out_not_qnan
+# the operand is either an infinity or a QNAN.
+	tst.l		FP_SRC_LO(%a6)
+	bne.b		foperr_out_qnan
+	mov.l		FP_SRC_HI(%a6),%d1
+	andi.l		&0x7fffffff,%d1
+	beq.b		foperr_out_not_qnan
+foperr_out_qnan:
+	mov.l		FP_SRC_HI(%a6),L_SCR1(%a6)
+	bra.b		foperr_out_jmp
+
+foperr_out_not_qnan:
+	mov.l		&0x7fffffff,%d1
+	tst.b		FP_SRC_EX(%a6)
+	bpl.b		foperr_out_not_qnan2
+	addq.l		&0x1,%d1
+foperr_out_not_qnan2:
+	mov.l		%d1,L_SCR1(%a6)
+
+foperr_out_jmp:
+	bfextu		%d0{&19:&3},%d0		# extract dst format field
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract <ea> mode,reg
+	mov.w		(tbl_operr.b,%pc,%d0.w*2),%a0
+	jmp		(tbl_operr.b,%pc,%a0)
+
+tbl_operr:
+	short		foperr_out_l - tbl_operr # long word integer
+	short		tbl_operr    - tbl_operr # sgl prec shouldn't happen
+	short		tbl_operr    - tbl_operr # ext prec shouldn't happen
+	short		foperr_exit  - tbl_operr # packed won't enter here
+	short		foperr_out_w - tbl_operr # word integer
+	short		tbl_operr    - tbl_operr # dbl prec shouldn't happen
+	short		foperr_out_b - tbl_operr # byte integer
+	short		tbl_operr    - tbl_operr # packed won't enter here
+
+foperr_out_b:
+	mov.b		L_SCR1(%a6),%d0		# load positive default result
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		foperr_out_b_save_dn	# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_byte	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_b		# yes
+
+	bra.w		foperr_exit
+foperr_out_b_save_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_b		# store result to regfile
+	bra.w		foperr_exit
+
+foperr_out_w:
+	mov.w		L_SCR1(%a6),%d0		# load positive default result
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		foperr_out_w_save_dn	# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_word	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_w		# yes
+
+	bra.w		foperr_exit
+foperr_out_w_save_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_w		# store result to regfile
+	bra.w		foperr_exit
+
+foperr_out_l:
+	mov.l		L_SCR1(%a6),%d0		# load positive default result
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		foperr_out_l_save_dn	# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_long	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.w		foperr_exit
+foperr_out_l_save_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_l		# store result to regfile
+	bra.w		foperr_exit
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_snan(): 060FPSP entry point for FP SNAN exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Signalling NAN exception in an operating system.		#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	_real_snan() - "callout" to operating system SNAN handler	#
+#	_dmem_write_{byte,word,long}() - store data to mem (opclass 3)	#
+#	store_dreg_{b,w,l}() - store data to data regfile (opclass 3)	#
+#	facc_out_{b,w,l,d,x}() - store to mem took acc error (opcl 3)	#
+#	_calc_ea_fout() - fix An if <ea> is -() or ()+; also get <ea>	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP SNAN exception frame		#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	No access error:						#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the FP SNAN exception is enabled, the goal	#
+# is to get to the handler specified at _real_snan(). But, on the 060,	#
+# for opclass zero and two instructions taking this exception, the	#
+# input operand in the fsave frame may be incorrect for some cases	#
+# and needs to be corrected. This handler calls fix_skewed_ops() to	#
+# do just this and then exits through _real_snan().			#
+#	For opclass 3 instructions, the 060 doesn't store the default	#
+# SNAN result out to memory or data register file as it should.		#
+# This code must emulate the move out before finally exiting through	#
+# _real_snan(). The move out, if to memory, is performed using		#
+# _mem_write() "callout" routines that may return a failing result.	#
+# In this special case, the handler must exit through facc_out()	#
+# which creates an access error stack frame from the current SNAN	#
+# stack frame.								#
+#	For the case of an extended precision opclass 3 instruction,	#
+# if the effective addressing mode was -() or ()+, then the address	#
+# register must get updated by calling _calc_ea_fout(). If the <ea>	#
+# was -(a7) from supervisor mode, then the exception frame currently	#
+# on the system stack must be carefully moved "down" to make room	#
+# for the operand being moved.						#
+#									#
+#########################################################################
+
+	global		_fpsp_snan
+_fpsp_snan:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&13,%d0			# is instr an fmove out?
+	bne.w		fsnan_out		# fmove out
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source infinity or
+# denorm operand in the sgl or dbl format. NANs also become skewed and must be
+# fixed here.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+fsnan_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_snan
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# snan exceptions. we do this here before passing control to
+# the user snan handler.
+#
+# byte, word, long, single, double, and extended destination format
+# operations can pass through here. packed format operations were already
+# handled by _fpsp_unsupp(), so we need to do nothing else for them here.
+# for the rest, we simply set the SNAN bit of the source NAN to quiet it,
+# convert it to the destination format, and save it to the effective
+# address as pointed to by the stacked effective address (or to Dn).
+#
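+# roughly (an illustrative sketch of the handlers below):
+#
+#	qnan = src NAN with its SNAN bit set (i.e. quieted);
+#	convert qnan to the destination format (b,w,l,s,d,x);
+#	write it to <ea> or to Dn;
+#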
+fsnan_out:
+
+	bfextu		%d0{&19:&3},%d0		# extract dst format field
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract <ea> mode,reg
+	mov.w		(tbl_snan.b,%pc,%d0.w*2),%a0
+	jmp		(tbl_snan.b,%pc,%a0)
+
+tbl_snan:
+	short		fsnan_out_l - tbl_snan # long word integer
+	short		fsnan_out_s - tbl_snan # sgl prec
+	short		fsnan_out_x - tbl_snan # ext prec
+	short		tbl_snan    - tbl_snan # packed needs no help
+	short		fsnan_out_w - tbl_snan # word integer
+	short		fsnan_out_d - tbl_snan # dbl prec
+	short		fsnan_out_b - tbl_snan # byte integer
+	short		tbl_snan    - tbl_snan # packed needs no help
+
+fsnan_out_b:
+	mov.b		FP_SRC_HI(%a6),%d0	# load upper byte of SNAN
+	bset		&6,%d0			# set SNAN bit
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_b_dn		# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_byte	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_b		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_b_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_b		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_w:
+	mov.w		FP_SRC_HI(%a6),%d0	# load upper word of SNAN
+	bset		&14,%d0			# set SNAN bit
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_w_dn		# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_word	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_w		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_w_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_w		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_l:
+	mov.l		FP_SRC_HI(%a6),%d0	# load upper longword of SNAN
+	bset		&30,%d0			# set SNAN bit
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_l_dn		# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_long	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_l_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_l		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_s:
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_d_dn		# yes
+	mov.l		FP_SRC_EX(%a6),%d0	# fetch SNAN sign
+	andi.l		&0x80000000,%d0		# keep sign
+	ori.l		&0x7fc00000,%d0		# insert new exponent,SNAN bit
+	mov.l		FP_SRC_HI(%a6),%d1	# load mantissa
+	lsr.l		&0x8,%d1		# shift mantissa for sgl
+	or.l		%d1,%d0			# create sgl SNAN
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_long	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_d_dn:
+	mov.l		FP_SRC_EX(%a6),%d0	# fetch SNAN sign
+	andi.l		&0x80000000,%d0		# keep sign
+	ori.l		&0x7fc00000,%d0		# insert new exponent,SNAN bit
+	mov.l		%d1,-(%sp)
+	mov.l		FP_SRC_HI(%a6),%d1	# load mantissa
+	lsr.l		&0x8,%d1		# shift mantissa for sgl
+	or.l		%d1,%d0			# create sgl SNAN
+	mov.l		(%sp)+,%d1
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_l		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_d:
+	mov.l		FP_SRC_EX(%a6),%d0	# fetch SNAN sign
+	andi.l		&0x80000000,%d0		# keep sign
+	ori.l		&0x7ff80000,%d0		# insert new exponent,SNAN bit
+	mov.l		FP_SRC_HI(%a6),%d1	# load hi mantissa
+	mov.l		%d0,FP_SCR0_EX(%a6)	# store to temp space
+	mov.l		&11,%d0			# load shift amt
+	lsr.l		%d0,%d1
+	or.l		%d1,FP_SCR0_EX(%a6)	# create dbl hi
+	mov.l		FP_SRC_HI(%a6),%d1	# load hi mantissa
+	andi.l		&0x000007ff,%d1
+	ror.l		%d0,%d1
+	mov.l		%d1,FP_SCR0_HI(%a6)	# store to temp space
+	mov.l		FP_SRC_LO(%a6),%d1	# load lo mantissa
+	lsr.l		%d0,%d1
+	or.l		%d1,FP_SCR0_HI(%a6)	# create dbl lo
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	movq.l		&0x8,%d0		# pass: size of 8 bytes
+	bsr.l		_dmem_write		# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	bra.w		fsnan_exit
+
+# for extended precision, if the addressing mode is pre-decrement or
+# post-increment, then the address register did not get updated.
+# in addition, for pre-decrement, the stacked <ea> is incorrect.
+fsnan_out_x:
+	clr.b		SPCOND_FLG(%a6)		# clear special case flag
+
+	mov.w		FP_SRC_EX(%a6),FP_SCR0_EX(%a6)
+	clr.w		2+FP_SCR0(%a6)
+	mov.l		FP_SRC_HI(%a6),%d0
+	bset		&30,%d0
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		FP_SRC_LO(%a6),FP_SCR0_LO(%a6)
+
+	btst		&0x5,EXC_SR(%a6)	# supervisor mode exception?
+	bne.b		fsnan_out_x_s		# yes
+
+	mov.l		%usp,%a0		# fetch user stack pointer
+	mov.l		%a0,EXC_A7(%a6)		# save on stack for calc_ea()
+	mov.l		(%a6),EXC_A6(%a6)
+
+	bsr.l		_calc_ea_fout		# find the correct ea,update An
+	mov.l		%a0,%a1
+	mov.l		%a0,EXC_EA(%a6)		# stack correct <ea>
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp		# restore user stack pointer
+	mov.l		EXC_A6(%a6),(%a6)
+
+fsnan_out_x_save:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	movq.l		&0xc,%d0		# pass: size of extended
+	bsr.l		_dmem_write		# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_x		# yes
+
+	bra.w		fsnan_exit
+
+fsnan_out_x_s:
+	mov.l		(%a6),EXC_A6(%a6)
+
+	bsr.l		_calc_ea_fout		# find the correct ea,update An
+	mov.l		%a0,%a1
+	mov.l		%a0,EXC_EA(%a6)		# stack correct <ea>
+
+	mov.l		EXC_A6(%a6),(%a6)
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
+	bne.b		fsnan_out_x_save	# no
+
+# the operation was "fmove.x SNAN,-(a7)" from supervisor mode.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	mov.l		EXC_A6(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_PC+0x2(%sp),LOCAL_SIZE+EXC_PC+0x2-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+	mov.l		LOCAL_SIZE+FP_SCR0_EX(%sp),LOCAL_SIZE+EXC_SR(%sp)
+	mov.l		LOCAL_SIZE+FP_SCR0_HI(%sp),LOCAL_SIZE+EXC_PC+0x2(%sp)
+	mov.l		LOCAL_SIZE+FP_SCR0_LO(%sp),LOCAL_SIZE+EXC_EA(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	bra.l		_real_snan
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_inex(): 060FPSP entry point for FP Inexact exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Inexact exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	smovcr() - emulate an "fmovcr" instruction			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_real_inex() - "callout" to operating system inexact handler	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Inexact exception frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the FP Inexact exception is enabled, the goal	#
+# is to get to the handler specified at _real_inex(). But, on the 060,	#
+# for opclass zero and two instruction taking this exception, the	#
+# hardware doesn't store the correct result to the destination FP	#
+# register as did the '040 and '881/2. This handler must emulate the	#
+# instruction in order to get this value and then store it to the	#
+# correct register before calling _real_inex().				#
+#	For opclass 3 instructions, the 060 doesn't store the default	#
+# inexact result out to memory or data register file as it should.	#
+# This code must emulate the move out by calling fout() before finally	#
+# exiting through _real_inex().						#
+#									#
+#########################################################################
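+
+# in rough outline (an illustrative sketch, not the exact control flow):
+#
+#	if (opclass 3)			/* fmove out */
+#		emulate the move-out (fout());
+#	else
+#		emulate the op (tbl_unsupp, or smovcr() for fmovcr) and
+#		store the result to the destination FP register;
+#	exit through _real_inex();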
+
+	global		_fpsp_inex
+_fpsp_inex:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&13,%d0			# is instr an fmove out?
+	bne.w		finex_out		# fmove out
+
+
+# the hardware, for "fabs" and "fneg" w/ a long source format, puts the
+# longword integer directly into the upper longword of the mantissa along
+# w/ an exponent value of 0x401e. we convert this to extended precision here.
+	bfextu		%d0{&19:&3},%d0		# fetch instr size
+	bne.b		finex_cont		# instr size is not long
+	cmpi.w		FP_SRC_EX(%a6),&0x401e	# is exponent 0x401e?
+	bne.b		finex_cont		# no
+	fmov.l		&0x0,%fpcr
+	fmov.l		FP_SRC_HI(%a6),%fp0	# load integer src
+	fmov.x		%fp0,FP_SRC(%a6)	# store integer as extended precision
+	mov.w		&0xe001,0x2+FP_SRC(%a6)
+
+finex_cont:
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+# Here, we zero the ccode and exception byte field since we're going to
+# emulate the whole instruction. Notice, though, that we don't kill the
+# INEX1 bit. This is because a packed op has long since been converted
+# to extended before arriving here. Therefore, we need to retain the
+# INEX1 bit from when the operand was first converted.
+	andi.l		&0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	bfextu		EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg
+	cmpi.b		%d1,&0x17		# is op an fmovecr?
+	beq.w		finex_fmovcr		# yes
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# maybe NORM,DENORM
+
+# bits four and five of the fp extension word separate the monadic and dyadic
+# operations that can pass through fpsp_inex(). remember that fcmp and ftst
+# will never take this exception, but fsincos will.
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		finex_extract		# monadic
+
+	btst		&0x4,1+EXC_CMDREG(%a6)	# is operation an fsincos?
+	bne.b		finex_extract		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		finex_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+finex_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+finex_extract:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+# the operation has been emulated. the result is in fp0.
+finex_save:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		store_fpreg
+
+finex_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_inex
+
+finex_fmovcr:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec,mode
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.l		&0x0000007f,%d1		# pass rom offset
+	bsr.l		smovcr
+	bra.b		finex_save
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# inexact exceptions. we do this here before passing control to
+# the user inexact handler.
+#
+# byte, word, and long destination format operations can pass
+# through here. so can double and single precision.
+# although packed opclass three operations can take inexact
+# exceptions, they won't pass through here since they are caught
+# first by the unsupported data format exception handler. that handler
+# sends them directly to _real_inex() if necessary.
+#
+finex_out:
+
+	mov.b		&NORM,STAG(%a6)		# src is a NORM
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec,mode
+
+	andi.l		&0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	bsr.l		fout			# store the default result
+
+	bra.b		finex_exit
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_dz(): 060FPSP entry point for FP DZ exception.		#
+#									#
+#	This handler should be the first code executed upon taking	#
+#	the FP DZ exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword from memory	#
+#	fix_skewed_ops() - adjust fsave operand				#
+#	_real_dz() - "callout" exit point from FP DZ handler		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP DZ exception stack.		#
+#	- The fsave frame contains the source operand.			#
+#									#
+# OUTPUT **************************************************************	#
+#	- The system stack contains the FP DZ exception stack.		#
+#	- The fsave frame contains the adjusted source operand.		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the DZ exception is enabled, the goal is to	#
+# get to the handler specified at _real_dz(). But, on the 060, when the	#
+# exception is taken, the input operand in the fsave state frame may	#
+# be incorrect for some cases and need to be adjusted. So, this package	#
+# adjusts the operand using fix_skewed_ops() and then branches to	#
+# _real_dz().								#
+#									#
+#########################################################################
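+#
+# A minimal C-style sketch of the same flow (the names below are just
+# placeholders for the routines listed above):
+#
+#	void fpsp_dz(void)
+#	{
+#		fix_skewed_ops();	/* unskew the fsave source operand */
+#		real_dz();		/* then hand off to the OS handler */
+#	}
+#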
+
+	global		_fpsp_dz
+_fpsp_dz:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source zero
+# in the sgl or dbl format.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+fdz_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_dz
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_fline(): 060FPSP entry point for "Line F emulator" exc.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	"Line F Emulator" exception in an operating system.		#
+#									#
+# XREF ****************************************************************	#
+#	_fpsp_unimp() - handle "FP Unimplemented" exceptions		#
+#	_real_fpu_disabled() - handle "FPU disabled" exceptions		#
+#	_real_fline() - handle "FLINE" exceptions			#
+#	_imem_read_long() - read instruction longword			#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains a "Line F Emulator" exception	#
+#	  stack frame.							#
+#									#
+# OUTPUT **************************************************************	#
+#	- The system stack is unchanged					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	When a "Line F Emulator" exception occurs, there are 3 possible	#
+# exception types, denoted by the exception stack frame format number:	#
+#	(1) FPU unimplemented instruction (6 word stack frame)		#
+#	(2) FPU disabled (8 word stack frame)				#
+#	(3) Line F (4 word stack frame)					#
+#									#
+#	This module determines which and forks the flow off to the	#
+# appropriate "callout" (for "disabled" and "Line F") or to the		#
+# correct emulation code (for "FPU unimplemented").			#
+#	This code also must check for "fmovecr" instructions w/ a	#
+# non-zero <ea> field. These may get flagged as "Line F" but should	#
+# really be flagged as "FPU Unimplemented". (This is a "feature" on	#
+# the '060.)								#
+#									#
+#########################################################################
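+#
+# The three-way dispatch (plus the fmovecr special case) can be pictured
+# with the C-style sketch below; 'fmt_voff' stands for the format/vector
+# offset word at offset 6 on the exception stack, and the helpers are
+# hypothetical names for the checks performed in the code that follows.
+#
+#	void fpsp_fline(unsigned short fmt_voff)
+#	{
+#		if (fmt_voff == 0x202c)			/* FP unimplemented */
+#			fpsp_unimp();
+#		else if (fmt_voff == 0x402c)		/* FPU disabled */
+#			real_fpu_disabled();
+#		else if (is_fmovecr_nonzero_ea()) {	/* '060 quirk */
+#			if (fpu_enabled())
+#				fpsp_unimp();
+#			else
+#				real_fpu_disabled();
+#		} else
+#			real_fline();			/* true F-line illegal */
+#	}
+#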
+
+	global		_fpsp_fline
+_fpsp_fline:
+
+# check to see if this exception is a "FP Unimplemented Instruction"
+# exception. if so, branch directly to that handler's entry point.
+	cmpi.w		0x6(%sp),&0x202c
+	beq.l		_fpsp_unimp
+
+# check to see if the FPU is disabled. if so, jump to the OS entry
+# point for that condition.
+	cmpi.w		0x6(%sp),&0x402c
+	beq.l		_real_fpu_disabled
+
+# the exception was an "F-Line Illegal" exception. we check to see
+# if the F-Line instruction is an "fmovecr" w/ a non-zero <ea>. if
+# so, convert the F-Line exception stack frame to an FP Unimplemented
+# Instruction exception stack frame else branch to the OS entry
+# point for the F-Line exception handler.
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+
+	mov.l		EXC_PC(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch instruction words
+
+	bfextu		%d0{&0:&10},%d1		# is it an fmovecr?
+	cmpi.w		%d1,&0x03c8
+	bne.b		fline_fline		# no
+
+	bfextu		%d0{&16:&6},%d1		# is it an fmovecr?
+	cmpi.b		%d1,&0x17
+	bne.b		fline_fline		# no
+
+# it's an fmovecr w/ a non-zero <ea> that has entered through
+# the F-Line Illegal exception.
+# so, we need to convert the F-Line exception stack frame into an
+# FP Unimplemented Instruction stack frame and jump to that entry
+# point.
+#
+# but, if the FPU is disabled, then we need to jump to the FPU disabled
+# entry point.
+	movc		%pcr,%d0
+	btst		&0x1,%d0
+	beq.b		fline_fmovcr
+
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	sub.l		&0x8,%sp		# make room for "Next PC", <ea>
+	mov.w		0x8(%sp),(%sp)
+	mov.l		0xa(%sp),0x2(%sp)	# move "Current PC"
+	mov.w		&0x402c,0x6(%sp)
+	mov.l		0x2(%sp),0xc(%sp)
+	addq.l		&0x4,0x2(%sp)		# set "Next PC"
+
+	bra.l		_real_fpu_disabled
+
+fline_fmovcr:
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	fmov.l		0x2(%sp),%fpiar		# set current PC
+	addq.l		&0x4,0x2(%sp)		# set Next PC
+
+	mov.l		(%sp),-(%sp)
+	mov.l		0x8(%sp),0x4(%sp)
+	mov.b		&0x20,0x6(%sp)
+
+	bra.l		_fpsp_unimp
+
+fline_fline:
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	bra.l		_real_fline
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_unimp(): 060FPSP entry point for FP "Unimplemented	#
+#		       Instruction" exception.				#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Unimplemented Instruction exception in an operating system.	#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_{word,long}() - read instruction word/longword	#
+#	load_fop() - load src/dst ops from memory and/or FP regfile	#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	tbl_trans - addr of table of emulation routines for trnscndls	#
+#	_real_access() - "callout" for access error exception		#
+#	_fpsp_done() - "callout" for exit; work all done		#
+#	_real_trace() - "callout" for Trace enabled exception		#
+#	smovcr() - emulate "fmovecr" instruction			#
+#	funimp_skew() - adjust fsave src ops to "incorrect" value	#
+#	_ftrapcc() - emulate an "ftrapcc" instruction			#
+#	_fdbcc() - emulate an "fdbcc" instruction			#
+#	_fscc() - emulate an "fscc" instruction				#
+#	_real_trap() - "callout" for Trap exception			#
+#	_real_bsun() - "callout" for enabled Bsun exception		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the "Unimplemented Instr" stk frame	#
+#									#
+# OUTPUT **************************************************************	#
+#	If access error:						#
+#	- The system stack is changed to an access error stack frame	#
+#	If Trace exception enabled:					#
+#	- The system stack is changed to a Trace exception stack frame	#
+#	Else: (normal case)						#
+#	- Correct result has been stored as appropriate			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	There are two main cases of instructions that may enter here to	#
+# be emulated: (1) the FPgen instructions, most of which were also	#
+# unimplemented on the 040, and (2) "ftrapcc", "fscc", and "fdbcc".	#
+#	For the first set, this handler calls the routine load_fop()	#
+# to load the source and destination (for dyadic) operands to be used	#
+# for instruction emulation. The correct emulation routine is then	#
+# chosen by decoding the instruction type and indexing into an		#
+# emulation subroutine index table. After emulation returns, this	#
+# handler checks to see if an exception should occur as a result of the #
+# FP instruction emulation. If so, then an FP exception of the correct	#
+# type is inserted into the FPU state frame using the "frestore"	#
+# instruction before exiting through _fpsp_done(). In either the	#
+# exceptional or non-exceptional cases, we must check to see if the	#
+# Trace exception is enabled. If so, then we must create a Trace	#
+# exception frame from the current exception frame and exit through	#
+# _real_trace().							#
+#	For "fdbcc", "ftrapcc", and "fscc", the emulation subroutines	#
+# _fdbcc(), _ftrapcc(), and _fscc() respectively are used. All three	#
+# may flag that a BSUN exception should be taken. If so, then the	#
+# current exception stack frame is converted into a BSUN exception	#
+# stack frame and an exit is made through _real_bsun(). If the		#
+# instruction was "ftrapcc" and a Trap exception should result, a Trap	#
+# exception stack frame is created from the current frame and an exit	#
+# is made through _real_trap(). If a Trace exception is pending, then	#
+# a Trace exception frame is created from the current frame and a jump	#
+# is made to _real_trace(). Finally, if none of these conditions exist,	#
+# then the handler exits through the callout _fpsp_done().		#
+#									#
+#	In any of the above scenarios, if a _mem_read() or _mem_write()	#
+# "callout" returns a failing value, then an access error stack frame	#
+# is created from the current stack frame and an exit is made through	#
+# _real_access().							#
+#									#
+#########################################################################
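+#
+# Heavily simplified, the handler behaves like the C-style sketch below.
+# The helper names are placeholders for the XREF routines; the access
+# error, bsun, and trap side exits are omitted for brevity.
+#
+#	void fpsp_unimp(void)
+#	{
+#		if (is_fpgen()) {		/* TYPE == 0 */
+#			load_fop();		/* fetch src/dst operands */
+#			emulate_via_tbl_trans();
+#			if (enabled_exception_set())
+#				frestore_exception(); /* re-insert into FPU */
+#			else
+#				store_fpreg();
+#		} else {			/* fdbcc/fscc/ftrapcc */
+#			emulate_cc_op();
+#		}
+#		if (trace_pending())
+#			real_trace();
+#		else
+#			fpsp_done();
+#	}
+#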
+
+#
+# FP UNIMPLEMENTED INSTRUCTION STACK FRAME:
+#
+#	*****************
+#	*		* => <ea> of fp unimp instr.
+#	-      EA	-
+#	*		*
+#	*****************
+#	* 0x2 *  0x02c	* => frame format and vector offset(vector #11)
+#	*****************
+#	*		*
+#	-    Next PC	- => PC of instr to execute after exc handling
+#	*		*
+#	*****************
+#	*      SR	* => SR at the time the exception was taken
+#	*****************
+#
+# Note: the !NULL bit does not get set in the fsave frame when the
+# machine encounters an fp unimp exception. Therefore, it must be set
+# before leaving this handler.
+#
+	global		_fpsp_unimp
+_fpsp_unimp:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1
+
+	btst		&0x5,EXC_SR(%a6)	# user mode exception?
+	bne.b		funimp_s		# no; supervisor mode
+
+# save the value of the user stack pointer onto the stack frame
+funimp_u:
+	mov.l		%usp,%a0		# fetch user stack pointer
+	mov.l		%a0,EXC_A7(%a6)		# store in stack frame
+	bra.b		funimp_cont
+
+# store the value of the supervisor stack pointer BEFORE the exc occurred.
+# old_sp is address just above stacked effective address.
+funimp_s:
+	lea		4+EXC_EA(%a6),%a0	# load old a7'
+	mov.l		%a0,EXC_A7(%a6)		# store a7'
+	mov.l		%a0,OLD_A7(%a6)		# make a copy
+
+funimp_cont:
+
+# the FPIAR holds the "current PC" of the faulting instruction.
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+############################################################################
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	clr.b		SPCOND_FLG(%a6)		# clear "special case" flag
+
+# Divide the fp instructions into 8 types based on the TYPE field in
+# bits 6-8 of the opword (classes 6,7 are undefined).
+# (for the '060, only two types can take this exception)
+#	bftst		%d0{&7:&3}		# test TYPE
+	btst		&22,%d0			# type 0 or 1 ?
+	bne.w		funimp_misc		# type 1
+
+#########################################
+# TYPE == 0: General instructions	#
+#########################################
+funimp_gen:
+
+	clr.b		STORE_FLG(%a6)		# clear "store result" flag
+
+# clear the ccode byte and exception status byte
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	bfextu		%d0{&16:&6},%d1		# extract upper 6 of cmdreg
+	cmpi.b		%d1,&0x17		# is op an fmovecr?
+	beq.w		funimp_fmovcr		# yes
+
+funimp_gen_op:
+	bsr.l		_load_fop		# load
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x003f,%d1		# extract extension bits
+	lsl.w		&0x3,%d1		# shift left 3 bits
+	or.b		STAG(%a6),%d1		# insert src optag bits
+
+	lea		FP_DST(%a6),%a1		# pass dst ptr in a1
+	lea		FP_SRC(%a6),%a0		# pass src ptr in a0
+
+	mov.w		(tbl_trans.w,%pc,%d1.w*2),%d1
+	jsr		(tbl_trans.w,%pc,%d1.w*1) # emulate
+
+funimp_fsave:
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		funimp_ena		# some are enabled
+
+funimp_store:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # fetch Dn
+	bsr.l		store_fpreg		# store result to fp regfile
+
+funimp_gen_exit:
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+funimp_gen_exit_cmp:
+	cmpi.b		SPCOND_FLG(%a6),&mia7_flg # was the ea mode (sp)+ ?
+	beq.b		funimp_gen_exit_a7	# yes
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # was the ea mode -(sp) ?
+	beq.b		funimp_gen_exit_a7	# yes
+
+funimp_gen_exit_cont:
+	unlk		%a6
+
+funimp_gen_exit_cont2:
+	btst		&0x7,(%sp)		# is trace on?
+	beq.l		_fpsp_done		# no
+
+# this catches a problem with the case where an exception will be re-inserted
+# into the machine. the frestore has already been executed...so, the fmov.l
+# alone of the control register would trigger an unwanted exception.
+# until I feel like fixing this, we'll sidestep the exception.
+	fsave		-(%sp)
+	fmov.l		%fpiar,0x14(%sp)	# "Current PC" is in FPIAR
+	frestore	(%sp)+
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x24
+	bra.l		_real_trace
+
+funimp_gen_exit_a7:
+	btst		&0x5,EXC_SR(%a6)	# supervisor or user mode?
+	bne.b		funimp_gen_exit_a7_s	# supervisor
+
+	mov.l		%a0,-(%sp)
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	mov.l		(%sp)+,%a0
+	bra.b		funimp_gen_exit_cont
+
+# if the instruction was executed from supervisor mode and the addressing
+# mode was (a7)+, then the stack frame for the rte must be shifted "up"
+# "n" bytes where "n" is the size of the src operand type.
+# f<op>.{b,w,l,s,d,x,p}
+funimp_gen_exit_a7_s:
+	mov.l		%d0,-(%sp)		# save d0
+	mov.l		EXC_A7(%a6),%d0		# load new a7'
+	sub.l		OLD_A7(%a6),%d0		# subtract old a7'
+	mov.l		0x2+EXC_PC(%a6),(0x2+EXC_PC,%a6,%d0) # shift stack frame
+	mov.l		EXC_SR(%a6),(EXC_SR,%a6,%d0) # shift stack frame
+	mov.w		%d0,EXC_SR(%a6)		# store incr number
+	mov.l		(%sp)+,%d0		# restore d0
+
+	unlk		%a6
+
+	add.w		(%sp),%sp		# stack frame shifted
+	bra.b		funimp_gen_exit_cont2
+
+######################
+# fmovecr.x #ccc,fpn #
+######################
+funimp_fmovcr:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.l		&0x0000007f,%d1		# pass rom offset in d1
+	bsr.l		smovcr
+	bra.w		funimp_fsave
+
+#########################################################################
+
+#
+# the user has enabled some exceptions. we figure not to see this too
+# often so that's why it gets lower priority.
+#
+funimp_ena:
+
+# was an exception set that was also enabled?
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled and set
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		funimp_exc		# at least one was set
+
+# no exception that was enabled was set BUT if we got an exact overflow
+# and overflow wasn't enabled but inexact was (yech!) then this is
+# an inexact exception; otherwise, return to normal non-exception flow.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+	beq.w		funimp_store		# no; return to normal flow
+
+# the overflow w/ exact result happened but was inexact set in the FPCR?
+funimp_ovfl:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
+	beq.w		funimp_store		# no; return to normal flow
+	bra.b		funimp_exc_ovfl		# yes
+
+# some exception happened that was actually enabled.
+# we'll insert this new exception into the FPU and then return.
+funimp_exc:
+	subi.l		&24,%d0			# fix offset to be 0-7
+	cmpi.b		%d0,&0x6		# is exception INEX?
+	bne.b		funimp_exc_force	# no
+
+# the enabled exception was inexact. so, if it occurs with an overflow
+# or underflow that was disabled, then we have to force an overflow or
+# underflow frame. the eventual overflow or underflow handler will see that
+# it's actually an inexact and act appropriately. this is the only easy
+# way to have the EXOP available for the enabled inexact handler when
+# a disabled overflow or underflow has also happened.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+	bne.b		funimp_exc_ovfl		# yes
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
+	bne.b		funimp_exc_unfl		# yes
+
+# force the fsave exception status bits to signal an exception of the
+# appropriate type. don't forget to "skew" the source operand in case we
+# "unskewed" the one the hardware initially gave us.
+funimp_exc_force:
+	mov.l		%d0,-(%sp)		# save d0
+	bsr.l		funimp_skew		# check for special case
+	mov.l		(%sp)+,%d0		# restore d0
+	mov.w		(tbl_funimp_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+	bra.b		funimp_gen_exit2	# exit with frestore
+
+tbl_funimp_except:
+	short		0xe002, 0xe006, 0xe004, 0xe005
+	short		0xe003, 0xe002, 0xe001, 0xe001
+
+# insert an overflow frame
+funimp_exc_ovfl:
+	bsr.l		funimp_skew		# check for special case
+	mov.w		&0xe005,2+FP_SRC(%a6)
+	bra.b		funimp_gen_exit2
+
+# insert an underflow frame
+funimp_exc_unfl:
+	bsr.l		funimp_skew		# check for special case
+	mov.w		&0xe003,2+FP_SRC(%a6)
+
+# this is the general exit point for an enabled exception that will be
+# restored into the machine for the instruction just emulated.
+funimp_gen_exit2:
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# insert exceptional status
+
+	bra.w		funimp_gen_exit_cmp
+
+############################################################################
+
+#
+# TYPE == 1: FDB<cc>, FS<cc>, FTRAP<cc>
+#
+# These instructions were implemented on the '881/2 and '040 in hardware but
+# are emulated in software on the '060.
+#
+funimp_misc:
+	bfextu		%d0{&10:&3},%d1		# extract mode field
+	cmpi.b		%d1,&0x1		# is it an fdb<cc>?
+	beq.w		funimp_fdbcc		# yes
+	cmpi.b		%d1,&0x7		# is it an fs<cc>?
+	bne.w		funimp_fscc		# yes
+	bfextu		%d0{&13:&3},%d1
+	cmpi.b		%d1,&0x2		# is it an fs<cc>?
+	blt.w		funimp_fscc		# yes
+
+#########################
+# ftrap<cc>		#
+# ftrap<cc>.w #<data>	#
+# ftrap<cc>.l #<data>	#
+#########################
+funimp_ftrapcc:
+
+	bsr.l		_ftrapcc		# FTRAP<cc>()
+
+	cmpi.b		SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
+	beq.w		funimp_bsun		# yes
+
+	cmpi.b		SPCOND_FLG(%a6),&ftrapcc_flg # should a trap occur?
+	bne.w		funimp_done		# no
+
+#	 FP UNIMP FRAME		   TRAP  FRAME
+#	*****************	*****************
+#	**    <EA>     **	**  Current PC **
+#	*****************	*****************
+#	* 0x2 *  0x02c	*	* 0x2 *  0x01c  *
+#	*****************	*****************
+#	**   Next PC   **	**   Next PC   **
+#	*****************	*****************
+#	*      SR	*	*      SR	*
+#	*****************	*****************
+#	    (6 words)		    (6 words)
+#
+# the ftrapcc instruction should take a trap. so, here we must create a
+# trap stack frame from an unimplemented fp instruction stack frame and
+# jump to the user supplied entry point for the trap exception
+funimp_ftrapcc_tp:
+	mov.l		USER_FPIAR(%a6),EXC_EA(%a6) # Address = Current PC
+	mov.w		&0x201c,EXC_VOFF(%a6)	# Vector Offset = 0x01c
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+	bra.l		_real_trap
+
+#########################
+# fdb<cc> Dn,<label>	#
+#########################
+funimp_fdbcc:
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# read displacement
+
+	tst.l		%d1			# did ifetch fail?
+	bne.w		funimp_iacc		# yes
+
+	ext.l		%d0			# sign extend displacement
+
+	bsr.l		_fdbcc			# FDB<cc>()
+
+	cmpi.b		SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
+	beq.w		funimp_bsun
+
+	bra.w		funimp_done		# branch to finish
+
+#################
+# fs<cc>.b <ea>	#
+#################
+funimp_fscc:
+
+	bsr.l		_fscc			# FS<cc>()
+
+# I am assuming here that an "fs<cc>.b -(An)" or "fs<cc>.b (An)+" instruction
+# does not need to update "An" before taking a bsun exception.
+	cmpi.b		SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
+	beq.w		funimp_bsun
+
+	btst		&0x5,EXC_SR(%a6)	# yes; is it a user mode exception?
+	bne.b		funimp_fscc_s		# no
+
+funimp_fscc_u:
+	mov.l		EXC_A7(%a6),%a0		# yes; set new USP
+	mov.l		%a0,%usp
+	bra.w		funimp_done		# branch to finish
+
+# remember, I'm assuming that post-increment is bogus...(it IS!!!)
+# so, the least significant WORD of the stacked effective address got
+# overwritten by the "fs<cc> -(An)". We must shift the stack frame "down"
+# so that the rte will work correctly without destroying the result.
+# even though the operation size is byte, the stack ptr is decr by 2.
+#
+# remember, also, this instruction may be traced.
+funimp_fscc_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # was a7 modified?
+	bne.w		funimp_done		# no
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	btst		&0x7,(%sp)		# is trace enabled?
+	bne.b		funimp_fscc_s_trace	# yes
+
+	subq.l		&0x2,%sp
+	mov.l		0x2(%sp),(%sp)		# shift SR,hi(PC) "down"
+	mov.l		0x6(%sp),0x4(%sp)	# shift lo(PC),voff "down"
+	bra.l		_fpsp_done
+
+funimp_fscc_s_trace:
+	subq.l		&0x2,%sp
+	mov.l		0x2(%sp),(%sp)		# shift SR,hi(PC) "down"
+	mov.w		0x6(%sp),0x4(%sp)	# shift lo(PC)
+	mov.w		&0x2024,0x6(%sp)	# fmt/voff = $2024
+	fmov.l		%fpiar,0x8(%sp)		# insert "current PC"
+
+	bra.l		_real_trace
+
+#
+# The ftrap<cc>, fs<cc>, or fdb<cc> is to take an enabled bsun. we must convert
+# the fp unimplemented instruction exception stack frame into a bsun stack frame,
+# restore a bsun exception into the machine, and branch to the user
+# supplied bsun hook.
+#
+#	 FP UNIMP FRAME		   BSUN FRAME
+#	*****************	*****************
+#	**    <EA>     **	* 0x0 * 0x0c0	*
+#	*****************	*****************
+#	* 0x2 *  0x02c  *	** Current PC  **
+#	*****************	*****************
+#	**   Next PC   **	*      SR	*
+#	*****************	*****************
+#	*      SR	*	    (4 words)
+#	*****************
+#	    (6 words)
+#
+funimp_bsun:
+	mov.w		&0x00c0,2+EXC_EA(%a6)	# Fmt = 0x0; Vector Offset = 0x0c0
+	mov.l		USER_FPIAR(%a6),EXC_VOFF(%a6) # PC = Current PC
+	mov.w		EXC_SR(%a6),2+EXC_PC(%a6) # shift SR "up"
+
+	mov.w		&0xe000,2+FP_SRC(%a6)	# bsun exception enabled
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore bsun exception
+
+	unlk		%a6
+
+	addq.l		&0x4,%sp		# erase sludge
+
+	bra.l		_real_bsun		# branch to user bsun hook
+
+#
+# all ftrapcc/fscc/fdbcc processing has been completed. unwind the stack frame
+# and return.
+#
+# as usual, we have to check for trace mode being on here. since instructions
+# modifying the supervisor stack frame don't pass through here, this is a
+# relatively easy task.
+#
+funimp_done:
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	btst		&0x7,(%sp)		# is trace enabled?
+	bne.b		funimp_trace		# yes
+
+	bra.l		_fpsp_done
+
+#	 FP UNIMP FRAME		  TRACE  FRAME
+#	*****************	*****************
+#	**    <EA>     **	**  Current PC **
+#	*****************	*****************
+#	* 0x2 *  0x02c	*	* 0x2 *  0x024  *
+#	*****************	*****************
+#	**   Next PC   **	**   Next PC   **
+#	*****************	*****************
+#	*      SR	*	*      SR	*
+#	*****************	*****************
+#	    (6 words)		    (6 words)
+#
+# the fscc instruction should take a trace trap. so, here we must create a
+# trace stack frame from an unimplemented fp instruction stack frame and
+# jump to the user supplied entry point for the trace exception
+funimp_trace:
+	fmov.l		%fpiar,0x8(%sp)		# current PC is in fpiar
+	mov.b		&0x24,0x7(%sp)		# vector offset = 0x024
+
+	bra.l		_real_trace
+
+################################################################
+
+	global		tbl_trans
+	swbeg		&0x1c0
+tbl_trans:
+	short		tbl_trans - tbl_trans	# $00-0 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-1 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-2 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-3 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-4 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-5 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-6 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-7 fmovecr all
+
+	short		tbl_trans - tbl_trans	# $01-0 fint norm
+	short		tbl_trans - tbl_trans	# $01-1 fint zero
+	short		tbl_trans - tbl_trans	# $01-2 fint inf
+	short		tbl_trans - tbl_trans	# $01-3 fint qnan
+	short		tbl_trans - tbl_trans	# $01-5 fint denorm
+	short		tbl_trans - tbl_trans	# $01-4 fint snan
+	short		tbl_trans - tbl_trans	# $01-6 fint unnorm
+	short		tbl_trans - tbl_trans	# $01-7 ERROR
+
+	short		ssinh	 - tbl_trans	# $02-0 fsinh norm
+	short		src_zero - tbl_trans	# $02-1 fsinh zero
+	short		src_inf	 - tbl_trans	# $02-2 fsinh inf
+	short		src_qnan - tbl_trans	# $02-3 fsinh qnan
+	short		ssinhd	 - tbl_trans	# $02-5 fsinh denorm
+	short		src_snan - tbl_trans	# $02-4 fsinh snan
+	short		tbl_trans - tbl_trans	# $02-6 fsinh unnorm
+	short		tbl_trans - tbl_trans	# $02-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $03-0 fintrz norm
+	short		tbl_trans - tbl_trans	# $03-1 fintrz zero
+	short		tbl_trans - tbl_trans	# $03-2 fintrz inf
+	short		tbl_trans - tbl_trans	# $03-3 fintrz qnan
+	short		tbl_trans - tbl_trans	# $03-5 fintrz denorm
+	short		tbl_trans - tbl_trans	# $03-4 fintrz snan
+	short		tbl_trans - tbl_trans	# $03-6 fintrz unnorm
+	short		tbl_trans - tbl_trans	# $03-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $04-0 fsqrt norm
+	short		tbl_trans - tbl_trans	# $04-1 fsqrt zero
+	short		tbl_trans - tbl_trans	# $04-2 fsqrt inf
+	short		tbl_trans - tbl_trans	# $04-3 fsqrt qnan
+	short		tbl_trans - tbl_trans	# $04-5 fsqrt denorm
+	short		tbl_trans - tbl_trans	# $04-4 fsqrt snan
+	short		tbl_trans - tbl_trans	# $04-6 fsqrt unnorm
+	short		tbl_trans - tbl_trans	# $04-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $05-0 ERROR
+	short		tbl_trans - tbl_trans	# $05-1 ERROR
+	short		tbl_trans - tbl_trans	# $05-2 ERROR
+	short		tbl_trans - tbl_trans	# $05-3 ERROR
+	short		tbl_trans - tbl_trans	# $05-4 ERROR
+	short		tbl_trans - tbl_trans	# $05-5 ERROR
+	short		tbl_trans - tbl_trans	# $05-6 ERROR
+	short		tbl_trans - tbl_trans	# $05-7 ERROR
+
+	short		slognp1	 - tbl_trans	# $06-0 flognp1 norm
+	short		src_zero - tbl_trans	# $06-1 flognp1 zero
+	short		sopr_inf - tbl_trans	# $06-2 flognp1 inf
+	short		src_qnan - tbl_trans	# $06-3 flognp1 qnan
+	short		slognp1d - tbl_trans	# $06-5 flognp1 denorm
+	short		src_snan - tbl_trans	# $06-4 flognp1 snan
+	short		tbl_trans - tbl_trans	# $06-6 flognp1 unnorm
+	short		tbl_trans - tbl_trans	# $06-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $07-0 ERROR
+	short		tbl_trans - tbl_trans	# $07-1 ERROR
+	short		tbl_trans - tbl_trans	# $07-2 ERROR
+	short		tbl_trans - tbl_trans	# $07-3 ERROR
+	short		tbl_trans - tbl_trans	# $07-4 ERROR
+	short		tbl_trans - tbl_trans	# $07-5 ERROR
+	short		tbl_trans - tbl_trans	# $07-6 ERROR
+	short		tbl_trans - tbl_trans	# $07-7 ERROR
+
+	short		setoxm1	 - tbl_trans	# $08-0 fetoxm1 norm
+	short		src_zero - tbl_trans	# $08-1 fetoxm1 zero
+	short		setoxm1i - tbl_trans	# $08-2 fetoxm1 inf
+	short		src_qnan - tbl_trans	# $08-3 fetoxm1 qnan
+	short		setoxm1d - tbl_trans	# $08-5 fetoxm1 denorm
+	short		src_snan - tbl_trans	# $08-4 fetoxm1 snan
+	short		tbl_trans - tbl_trans	# $08-6 fetoxm1 unnorm
+	short		tbl_trans - tbl_trans	# $08-7 ERROR
+
+	short		stanh	 - tbl_trans	# $09-0 ftanh norm
+	short		src_zero - tbl_trans	# $09-1 ftanh zero
+	short		src_one	 - tbl_trans	# $09-2 ftanh inf
+	short		src_qnan - tbl_trans	# $09-3 ftanh qnan
+	short		stanhd	 - tbl_trans	# $09-5 ftanh denorm
+	short		src_snan - tbl_trans	# $09-4 ftanh snan
+	short		tbl_trans - tbl_trans	# $09-6 ftanh unnorm
+	short		tbl_trans - tbl_trans	# $09-7 ERROR
+
+	short		satan	 - tbl_trans	# $0a-0 fatan norm
+	short		src_zero - tbl_trans	# $0a-1 fatan zero
+	short		spi_2	 - tbl_trans	# $0a-2 fatan inf
+	short		src_qnan - tbl_trans	# $0a-3 fatan qnan
+	short		satand	 - tbl_trans	# $0a-5 fatan denorm
+	short		src_snan - tbl_trans	# $0a-4 fatan snan
+	short		tbl_trans - tbl_trans	# $0a-6 fatan unnorm
+	short		tbl_trans - tbl_trans	# $0a-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $0b-0 ERROR
+	short		tbl_trans - tbl_trans	# $0b-1 ERROR
+	short		tbl_trans - tbl_trans	# $0b-2 ERROR
+	short		tbl_trans - tbl_trans	# $0b-3 ERROR
+	short		tbl_trans - tbl_trans	# $0b-4 ERROR
+	short		tbl_trans - tbl_trans	# $0b-5 ERROR
+	short		tbl_trans - tbl_trans	# $0b-6 ERROR
+	short		tbl_trans - tbl_trans	# $0b-7 ERROR
+
+	short		sasin	 - tbl_trans	# $0c-0 fasin norm
+	short		src_zero - tbl_trans	# $0c-1 fasin zero
+	short		t_operr	 - tbl_trans	# $0c-2 fasin inf
+	short		src_qnan - tbl_trans	# $0c-3 fasin qnan
+	short		sasind	 - tbl_trans	# $0c-5 fasin denorm
+	short		src_snan - tbl_trans	# $0c-4 fasin snan
+	short		tbl_trans - tbl_trans	# $0c-6 fasin unnorm
+	short		tbl_trans - tbl_trans	# $0c-7 ERROR
+
+	short		satanh	 - tbl_trans	# $0d-0 fatanh norm
+	short		src_zero - tbl_trans	# $0d-1 fatanh zero
+	short		t_operr	 - tbl_trans	# $0d-2 fatanh inf
+	short		src_qnan - tbl_trans	# $0d-3 fatanh qnan
+	short		satanhd	 - tbl_trans	# $0d-5 fatanh denorm
+	short		src_snan - tbl_trans	# $0d-4 fatanh snan
+	short		tbl_trans - tbl_trans	# $0d-6 fatanh unnorm
+	short		tbl_trans - tbl_trans	# $0d-7 ERROR
+
+	short		ssin	 - tbl_trans	# $0e-0 fsin norm
+	short		src_zero - tbl_trans	# $0e-1 fsin zero
+	short		t_operr	 - tbl_trans	# $0e-2 fsin inf
+	short		src_qnan - tbl_trans	# $0e-3 fsin qnan
+	short		ssind	 - tbl_trans	# $0e-5 fsin denorm
+	short		src_snan - tbl_trans	# $0e-4 fsin snan
+	short		tbl_trans - tbl_trans	# $0e-6 fsin unnorm
+	short		tbl_trans - tbl_trans	# $0e-7 ERROR
+
+	short		stan	 - tbl_trans	# $0f-0 ftan norm
+	short		src_zero - tbl_trans	# $0f-1 ftan zero
+	short		t_operr	 - tbl_trans	# $0f-2 ftan inf
+	short		src_qnan - tbl_trans	# $0f-3 ftan qnan
+	short		stand	 - tbl_trans	# $0f-5 ftan denorm
+	short		src_snan - tbl_trans	# $0f-4 ftan snan
+	short		tbl_trans - tbl_trans	# $0f-6 ftan unnorm
+	short		tbl_trans - tbl_trans	# $0f-7 ERROR
+
+	short		setox	 - tbl_trans	# $10-0 fetox norm
+	short		ld_pone	 - tbl_trans	# $10-1 fetox zero
+	short		szr_inf	 - tbl_trans	# $10-2 fetox inf
+	short		src_qnan - tbl_trans	# $10-3 fetox qnan
+	short		setoxd	 - tbl_trans	# $10-5 fetox denorm
+	short		src_snan - tbl_trans	# $10-4 fetox snan
+	short		tbl_trans - tbl_trans	# $10-6 fetox unnorm
+	short		tbl_trans - tbl_trans	# $10-7 ERROR
+
+	short		stwotox	 - tbl_trans	# $11-0 ftwotox norm
+	short		ld_pone	 - tbl_trans	# $11-1 ftwotox zero
+	short		szr_inf	 - tbl_trans	# $11-2 ftwotox inf
+	short		src_qnan - tbl_trans	# $11-3 ftwotox qnan
+	short		stwotoxd - tbl_trans	# $11-5 ftwotox denorm
+	short		src_snan - tbl_trans	# $11-4 ftwotox snan
+	short		tbl_trans - tbl_trans	# $11-6 ftwotox unnorm
+	short		tbl_trans - tbl_trans	# $11-7 ERROR
+
+	short		stentox	 - tbl_trans	# $12-0 ftentox norm
+	short		ld_pone	 - tbl_trans	# $12-1 ftentox zero
+	short		szr_inf	 - tbl_trans	# $12-2 ftentox inf
+	short		src_qnan - tbl_trans	# $12-3 ftentox qnan
+	short		stentoxd - tbl_trans	# $12-5 ftentox denorm
+	short		src_snan - tbl_trans	# $12-4 ftentox snan
+	short		tbl_trans - tbl_trans	# $12-6 ftentox unnorm
+	short		tbl_trans - tbl_trans	# $12-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $13-0 ERROR
+	short		tbl_trans - tbl_trans	# $13-1 ERROR
+	short		tbl_trans - tbl_trans	# $13-2 ERROR
+	short		tbl_trans - tbl_trans	# $13-3 ERROR
+	short		tbl_trans - tbl_trans	# $13-4 ERROR
+	short		tbl_trans - tbl_trans	# $13-5 ERROR
+	short		tbl_trans - tbl_trans	# $13-6 ERROR
+	short		tbl_trans - tbl_trans	# $13-7 ERROR
+
+	short		slogn	 - tbl_trans	# $14-0 flogn norm
+	short		t_dz2	 - tbl_trans	# $14-1 flogn zero
+	short		sopr_inf - tbl_trans	# $14-2 flogn inf
+	short		src_qnan - tbl_trans	# $14-3 flogn qnan
+	short		slognd	 - tbl_trans	# $14-5 flogn denorm
+	short		src_snan - tbl_trans	# $14-4 flogn snan
+	short		tbl_trans - tbl_trans	# $14-6 flogn unnorm
+	short		tbl_trans - tbl_trans	# $14-7 ERROR
+
+	short		slog10	 - tbl_trans	# $15-0 flog10 norm
+	short		t_dz2	 - tbl_trans	# $15-1 flog10 zero
+	short		sopr_inf - tbl_trans	# $15-2 flog10 inf
+	short		src_qnan - tbl_trans	# $15-3 flog10 qnan
+	short		slog10d	 - tbl_trans	# $15-5 flog10 denorm
+	short		src_snan - tbl_trans	# $15-4 flog10 snan
+	short		tbl_trans - tbl_trans	# $15-6 flog10 unnorm
+	short		tbl_trans - tbl_trans	# $15-7 ERROR
+
+	short		slog2	 - tbl_trans	# $16-0 flog2 norm
+	short		t_dz2	 - tbl_trans	# $16-1 flog2 zero
+	short		sopr_inf - tbl_trans	# $16-2 flog2 inf
+	short		src_qnan - tbl_trans	# $16-3 flog2 qnan
+	short		slog2d	 - tbl_trans	# $16-5 flog2 denorm
+	short		src_snan - tbl_trans	# $16-4 flog2 snan
+	short		tbl_trans - tbl_trans	# $16-6 flog2 unnorm
+	short		tbl_trans - tbl_trans	# $16-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $17-0 ERROR
+	short		tbl_trans - tbl_trans	# $17-1 ERROR
+	short		tbl_trans - tbl_trans	# $17-2 ERROR
+	short		tbl_trans - tbl_trans	# $17-3 ERROR
+	short		tbl_trans - tbl_trans	# $17-4 ERROR
+	short		tbl_trans - tbl_trans	# $17-5 ERROR
+	short		tbl_trans - tbl_trans	# $17-6 ERROR
+	short		tbl_trans - tbl_trans	# $17-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $18-0 fabs norm
+	short		tbl_trans - tbl_trans	# $18-1 fabs zero
+	short		tbl_trans - tbl_trans	# $18-2 fabs inf
+	short		tbl_trans - tbl_trans	# $18-3 fabs qnan
+	short		tbl_trans - tbl_trans	# $18-5 fabs denorm
+	short		tbl_trans - tbl_trans	# $18-4 fabs snan
+	short		tbl_trans - tbl_trans	# $18-6 fabs unnorm
+	short		tbl_trans - tbl_trans	# $18-7 ERROR
+
+	short		scosh	 - tbl_trans	# $19-0 fcosh norm
+	short		ld_pone	 - tbl_trans	# $19-1 fcosh zero
+	short		ld_pinf	 - tbl_trans	# $19-2 fcosh inf
+	short		src_qnan - tbl_trans	# $19-3 fcosh qnan
+	short		scoshd	 - tbl_trans	# $19-5 fcosh denorm
+	short		src_snan - tbl_trans	# $19-4 fcosh snan
+	short		tbl_trans - tbl_trans	# $19-6 fcosh unnorm
+	short		tbl_trans - tbl_trans	# $19-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $1a-0 fneg norm
+	short		tbl_trans - tbl_trans	# $1a-1 fneg zero
+	short		tbl_trans - tbl_trans	# $1a-2 fneg inf
+	short		tbl_trans - tbl_trans	# $1a-3 fneg qnan
+	short		tbl_trans - tbl_trans	# $1a-5 fneg denorm
+	short		tbl_trans - tbl_trans	# $1a-4 fneg snan
+	short		tbl_trans - tbl_trans	# $1a-6 fneg unnorm
+	short		tbl_trans - tbl_trans	# $1a-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $1b-0 ERROR
+	short		tbl_trans - tbl_trans	# $1b-1 ERROR
+	short		tbl_trans - tbl_trans	# $1b-2 ERROR
+	short		tbl_trans - tbl_trans	# $1b-3 ERROR
+	short		tbl_trans - tbl_trans	# $1b-4 ERROR
+	short		tbl_trans - tbl_trans	# $1b-5 ERROR
+	short		tbl_trans - tbl_trans	# $1b-6 ERROR
+	short		tbl_trans - tbl_trans	# $1b-7 ERROR
+
+	short		sacos	 - tbl_trans	# $1c-0 facos norm
+	short		ld_ppi2	 - tbl_trans	# $1c-1 facos zero
+	short		t_operr	 - tbl_trans	# $1c-2 facos inf
+	short		src_qnan - tbl_trans	# $1c-3 facos qnan
+	short		sacosd	 - tbl_trans	# $1c-5 facos denorm
+	short		src_snan - tbl_trans	# $1c-4 facos snan
+	short		tbl_trans - tbl_trans	# $1c-6 facos unnorm
+	short		tbl_trans - tbl_trans	# $1c-7 ERROR
+
+	short		scos	 - tbl_trans	# $1d-0 fcos norm
+	short		ld_pone	 - tbl_trans	# $1d-1 fcos zero
+	short		t_operr	 - tbl_trans	# $1d-2 fcos inf
+	short		src_qnan - tbl_trans	# $1d-3 fcos qnan
+	short		scosd	 - tbl_trans	# $1d-5 fcos denorm
+	short		src_snan - tbl_trans	# $1d-4 fcos snan
+	short		tbl_trans - tbl_trans	# $1d-6 fcos unnorm
+	short		tbl_trans - tbl_trans	# $1d-7 ERROR
+
+	short		sgetexp	 - tbl_trans	# $1e-0 fgetexp norm
+	short		src_zero - tbl_trans	# $1e-1 fgetexp zero
+	short		t_operr	 - tbl_trans	# $1e-2 fgetexp inf
+	short		src_qnan - tbl_trans	# $1e-3 fgetexp qnan
+	short		sgetexpd - tbl_trans	# $1e-5 fgetexp denorm
+	short		src_snan - tbl_trans	# $1e-4 fgetexp snan
+	short		tbl_trans - tbl_trans	# $1e-6 fgetexp unnorm
+	short		tbl_trans - tbl_trans	# $1e-7 ERROR
+
+	short		sgetman	 - tbl_trans	# $1f-0 fgetman norm
+	short		src_zero - tbl_trans	# $1f-1 fgetman zero
+	short		t_operr	 - tbl_trans	# $1f-2 fgetman inf
+	short		src_qnan - tbl_trans	# $1f-3 fgetman qnan
+	short		sgetmand - tbl_trans	# $1f-5 fgetman denorm
+	short		src_snan - tbl_trans	# $1f-4 fgetman snan
+	short		tbl_trans - tbl_trans	# $1f-6 fgetman unnorm
+	short		tbl_trans - tbl_trans	# $1f-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $20-0 fdiv norm
+	short		tbl_trans - tbl_trans	# $20-1 fdiv zero
+	short		tbl_trans - tbl_trans	# $20-2 fdiv inf
+	short		tbl_trans - tbl_trans	# $20-3 fdiv qnan
+	short		tbl_trans - tbl_trans	# $20-5 fdiv denorm
+	short		tbl_trans - tbl_trans	# $20-4 fdiv snan
+	short		tbl_trans - tbl_trans	# $20-6 fdiv unnorm
+	short		tbl_trans - tbl_trans	# $20-7 ERROR
+
+	short		smod_snorm - tbl_trans	# $21-0 fmod norm
+	short		smod_szero - tbl_trans	# $21-1 fmod zero
+	short		smod_sinf - tbl_trans	# $21-2 fmod inf
+	short		sop_sqnan - tbl_trans	# $21-3 fmod qnan
+	short		smod_sdnrm - tbl_trans	# $21-5 fmod denorm
+	short		sop_ssnan - tbl_trans	# $21-4 fmod snan
+	short		tbl_trans - tbl_trans	# $21-6 fmod unnorm
+	short		tbl_trans - tbl_trans	# $21-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $22-0 fadd norm
+	short		tbl_trans - tbl_trans	# $22-1 fadd zero
+	short		tbl_trans - tbl_trans	# $22-2 fadd inf
+	short		tbl_trans - tbl_trans	# $22-3 fadd qnan
+	short		tbl_trans - tbl_trans	# $22-5 fadd denorm
+	short		tbl_trans - tbl_trans	# $22-4 fadd snan
+	short		tbl_trans - tbl_trans	# $22-6 fadd unnorm
+	short		tbl_trans - tbl_trans	# $22-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $23-0 fmul norm
+	short		tbl_trans - tbl_trans	# $23-1 fmul zero
+	short		tbl_trans - tbl_trans	# $23-2 fmul inf
+	short		tbl_trans - tbl_trans	# $23-3 fmul qnan
+	short		tbl_trans - tbl_trans	# $23-5 fmul denorm
+	short		tbl_trans - tbl_trans	# $23-4 fmul snan
+	short		tbl_trans - tbl_trans	# $23-6 fmul unnorm
+	short		tbl_trans - tbl_trans	# $23-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $24-0 fsgldiv norm
+	short		tbl_trans - tbl_trans	# $24-1 fsgldiv zero
+	short		tbl_trans - tbl_trans	# $24-2 fsgldiv inf
+	short		tbl_trans - tbl_trans	# $24-3 fsgldiv qnan
+	short		tbl_trans - tbl_trans	# $24-5 fsgldiv denorm
+	short		tbl_trans - tbl_trans	# $24-4 fsgldiv snan
+	short		tbl_trans - tbl_trans	# $24-6 fsgldiv unnorm
+	short		tbl_trans - tbl_trans	# $24-7 ERROR
+
+	short		srem_snorm - tbl_trans	# $25-0 frem norm
+	short		srem_szero - tbl_trans	# $25-1 frem zero
+	short		srem_sinf - tbl_trans	# $25-2 frem inf
+	short		sop_sqnan - tbl_trans	# $25-3 frem qnan
+	short		srem_sdnrm - tbl_trans	# $25-5 frem denorm
+	short		sop_ssnan - tbl_trans	# $25-4 frem snan
+	short		tbl_trans - tbl_trans	# $25-6 frem unnorm
+	short		tbl_trans - tbl_trans	# $25-7 ERROR
+
+	short		sscale_snorm - tbl_trans # $26-0 fscale norm
+	short		sscale_szero - tbl_trans # $26-1 fscale zero
+	short		sscale_sinf - tbl_trans	# $26-2 fscale inf
+	short		sop_sqnan - tbl_trans	# $26-3 fscale qnan
+	short		sscale_sdnrm - tbl_trans # $26-5 fscale denorm
+	short		sop_ssnan - tbl_trans	# $26-4 fscale snan
+	short		tbl_trans - tbl_trans	# $26-6 fscale unnorm
+	short		tbl_trans - tbl_trans	# $26-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $27-0 fsglmul norm
+	short		tbl_trans - tbl_trans	# $27-1 fsglmul zero
+	short		tbl_trans - tbl_trans	# $27-2 fsglmul inf
+	short		tbl_trans - tbl_trans	# $27-3 fsglmul qnan
+	short		tbl_trans - tbl_trans	# $27-5 fsglmul denorm
+	short		tbl_trans - tbl_trans	# $27-4 fsglmul snan
+	short		tbl_trans - tbl_trans	# $27-6 fsglmul unnorm
+	short		tbl_trans - tbl_trans	# $27-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $28-0 fsub norm
+	short		tbl_trans - tbl_trans	# $28-1 fsub zero
+	short		tbl_trans - tbl_trans	# $28-2 fsub inf
+	short		tbl_trans - tbl_trans	# $28-3 fsub qnan
+	short		tbl_trans - tbl_trans	# $28-5 fsub denorm
+	short		tbl_trans - tbl_trans	# $28-4 fsub snan
+	short		tbl_trans - tbl_trans	# $28-6 fsub unnorm
+	short		tbl_trans - tbl_trans	# $28-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $29-0 ERROR
+	short		tbl_trans - tbl_trans	# $29-1 ERROR
+	short		tbl_trans - tbl_trans	# $29-2 ERROR
+	short		tbl_trans - tbl_trans	# $29-3 ERROR
+	short		tbl_trans - tbl_trans	# $29-4 ERROR
+	short		tbl_trans - tbl_trans	# $29-5 ERROR
+	short		tbl_trans - tbl_trans	# $29-6 ERROR
+	short		tbl_trans - tbl_trans	# $29-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $2a-0 ERROR
+	short		tbl_trans - tbl_trans	# $2a-1 ERROR
+	short		tbl_trans - tbl_trans	# $2a-2 ERROR
+	short		tbl_trans - tbl_trans	# $2a-3 ERROR
+	short		tbl_trans - tbl_trans	# $2a-4 ERROR
+	short		tbl_trans - tbl_trans	# $2a-5 ERROR
+	short		tbl_trans - tbl_trans	# $2a-6 ERROR
+	short		tbl_trans - tbl_trans	# $2a-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $2b-0 ERROR
+	short		tbl_trans - tbl_trans	# $2b-1 ERROR
+	short		tbl_trans - tbl_trans	# $2b-2 ERROR
+	short		tbl_trans - tbl_trans	# $2b-3 ERROR
+	short		tbl_trans - tbl_trans	# $2b-4 ERROR
+	short		tbl_trans - tbl_trans	# $2b-5 ERROR
+	short		tbl_trans - tbl_trans	# $2b-6 ERROR
+	short		tbl_trans - tbl_trans	# $2b-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $2c-0 ERROR
+	short		tbl_trans - tbl_trans	# $2c-1 ERROR
+	short		tbl_trans - tbl_trans	# $2c-2 ERROR
+	short		tbl_trans - tbl_trans	# $2c-3 ERROR
+	short		tbl_trans - tbl_trans	# $2c-4 ERROR
+	short		tbl_trans - tbl_trans	# $2c-5 ERROR
+	short		tbl_trans - tbl_trans	# $2c-6 ERROR
+	short		tbl_trans - tbl_trans	# $2c-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $2d-0 ERROR
+	short		tbl_trans - tbl_trans	# $2d-1 ERROR
+	short		tbl_trans - tbl_trans	# $2d-2 ERROR
+	short		tbl_trans - tbl_trans	# $2d-3 ERROR
+	short		tbl_trans - tbl_trans	# $2d-4 ERROR
+	short		tbl_trans - tbl_trans	# $2d-5 ERROR
+	short		tbl_trans - tbl_trans	# $2d-6 ERROR
+	short		tbl_trans - tbl_trans	# $2d-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $2e-0 ERROR
+	short		tbl_trans - tbl_trans	# $2e-1 ERROR
+	short		tbl_trans - tbl_trans	# $2e-2 ERROR
+	short		tbl_trans - tbl_trans	# $2e-3 ERROR
+	short		tbl_trans - tbl_trans	# $2e-4 ERROR
+	short		tbl_trans - tbl_trans	# $2e-5 ERROR
+	short		tbl_trans - tbl_trans	# $2e-6 ERROR
+	short		tbl_trans - tbl_trans	# $2e-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $2f-0 ERROR
+	short		tbl_trans - tbl_trans	# $2f-1 ERROR
+	short		tbl_trans - tbl_trans	# $2f-2 ERROR
+	short		tbl_trans - tbl_trans	# $2f-3 ERROR
+	short		tbl_trans - tbl_trans	# $2f-4 ERROR
+	short		tbl_trans - tbl_trans	# $2f-5 ERROR
+	short		tbl_trans - tbl_trans	# $2f-6 ERROR
+	short		tbl_trans - tbl_trans	# $2f-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $30-0 fsincos norm
+	short		ssincosz - tbl_trans	# $30-1 fsincos zero
+	short		ssincosi - tbl_trans	# $30-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $30-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $30-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $30-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $30-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $30-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $31-0 fsincos norm
+	short		ssincosz - tbl_trans	# $31-1 fsincos zero
+	short		ssincosi - tbl_trans	# $31-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $31-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $31-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $31-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $31-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $31-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $32-0 fsincos norm
+	short		ssincosz - tbl_trans	# $32-1 fsincos zero
+	short		ssincosi - tbl_trans	# $32-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $32-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $32-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $32-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $32-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $32-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $33-0 fsincos norm
+	short		ssincosz - tbl_trans	# $33-1 fsincos zero
+	short		ssincosi - tbl_trans	# $33-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $33-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $33-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $33-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $33-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $33-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $34-0 fsincos norm
+	short		ssincosz - tbl_trans	# $34-1 fsincos zero
+	short		ssincosi - tbl_trans	# $34-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $34-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $34-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $34-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $34-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $34-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $35-0 fsincos norm
+	short		ssincosz - tbl_trans	# $35-1 fsincos zero
+	short		ssincosi - tbl_trans	# $35-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $35-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $35-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $35-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $35-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $35-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $36-0 fsincos norm
+	short		ssincosz - tbl_trans	# $36-1 fsincos zero
+	short		ssincosi - tbl_trans	# $36-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $36-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $36-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $36-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $36-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $36-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $37-0 fsincos norm
+	short		ssincosz - tbl_trans	# $37-1 fsincos zero
+	short		ssincosi - tbl_trans	# $37-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $37-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $37-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $37-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $37-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $37-7 ERROR
+
+##########
+
+# the instruction fetch access for the displacement word for the
+# fdbcc emulation failed. here, we create an access error frame
+# from the current frame and branch to _real_access().
+funimp_iacc:
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+
+	mov.l		USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
+
+	unlk		%a6
+
+	mov.l		(%sp),-(%sp)		# store SR,hi(PC)
+	mov.w		0x8(%sp),0x4(%sp)	# store lo(PC)
+	mov.w		&0x4008,0x6(%sp)	# store voff
+	mov.l		0x2(%sp),0x8(%sp)	# store EA
+	mov.l		&0x09428001,0xc(%sp)	# store FSLW
+
+	btst		&0x5,(%sp)		# user or supervisor mode?
+	beq.b		funimp_iacc_end		# user
+	bset		&0x2,0xd(%sp)		# set supervisor TM bit
+
+funimp_iacc_end:
+	bra.l		_real_access
+
+#########################################################################
+# ssin():     computes the sine of a normalized input			#
+# ssind():    computes the sine of a denormalized input			#
+# scos():     computes the cosine of a normalized input			#
+# scosd():    computes the cosine of a denormalized input		#
+# ssincos():  computes the sine and cosine of a normalized input	#
+# ssincosd(): computes the sine and cosine of a denormalized input	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = sin(X) or cos(X)						#
+#									#
+#    For ssincos(X):							#
+#	fp0 = sin(X)							#
+#	fp1 = cos(X)							#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 1 ulp in 64 significant bits, i.e.	#
+#	within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	SIN and COS:							#
+#	1. If SIN is invoked, set AdjN := 0; otherwise, set AdjN := 1.	#
+#									#
+#	2. If |X| >= 15Pi or |X| < 2**(-40), go to 7.			#
+#									#
+#	3. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let	#
+#		k = N mod 4, so in particular, k = 0,1,2,or 3.		#
+#		Overwrite k by k := k + AdjN.				#
+#									#
+#	4. If k is even, go to 6.					#
+#									#
+#	5. (k is odd) Set j := (k-1)/2, sgn := (-1)**j.			#
+#		Return sgn*cos(r) where cos(r) is approximated by an	#
+#		even polynomial in r, 1 + r*r*(B1+s*(B2+ ... + s*B8)),	#
+#		s = r*r.						#
+#		Exit.							#
+#									#
+#	6. (k is even) Set j := k/2, sgn := (-1)**j. Return sgn*sin(r)	#
+#		where sin(r) is approximated by an odd polynomial in r	#
+#		r + r*s*(A1+s*(A2+ ... + s*A7)),	s = r*r.	#
+#		Exit.							#
+#									#
+#	7. If |X| > 1, go to 9.						#
+#									#
+#	8. (|X|<2**(-40)) If SIN is invoked, return X;			#
+#		otherwise return 1.					#
+#									#
+#	9. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi,		#
+#		go back to 3.						#
+#									#
+#	SINCOS:								#
+#	1. If |X| >= 15Pi or |X| < 2**(-40), go to 6.			#
+#									#
+#	2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let	#
+#		k = N mod 4, so in particular, k = 0,1,2,or 3.		#
+#									#
+#	3. If k is even, go to 5.					#
+#									#
+#	4. (k is odd) Set j1 := (k-1)/2, j2 := j1 (EOR) (k mod 2), ie.	#
+#		j1 exclusive or with the l.s.b. of k.			#
+#		sgn1 := (-1)**j1, sgn2 := (-1)**j2.			#
+#		SIN(X) = sgn1 * cos(r) and COS(X) = sgn2*sin(r) where	#
+#		sin(r) and cos(r) are computed as odd and even		#
+#		polynomials in r, respectively. Exit			#
+#									#
+#	5. (k is even) Set j1 := k/2, sgn1 := (-1)**j1.			#
+#		SIN(X) = sgn1 * sin(r) and COS(X) = sgn1*cos(r) where	#
+#		sin(r) and cos(r) are computed as odd and even		#
+#		polynomials in r, respectively. Exit			#
+#									#
+#	6. If |X| > 1, go to 8.						#
+#									#
+#	7. (|X|<2**(-40)) SIN(X) = X and COS(X) = 1. Exit.		#
+#									#
+#	8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi,		#
+#		go back to 2.						#
+#									#
+#########################################################################
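
A rough C rendering of the SIN/COS selection logic described above may help when reading the assembly. This is only a sketch: the low-order Taylor coefficients stand in for the minimax sets SINA1..SINA7 and COSB1..COSB8 defined below, the general "X rem 2Pi" fallback (REDUCEX) for huge arguments is omitted, and names such as sincos_kernel are illustrative only.

#include <math.h>

static const double PIO2    = 1.57079632679489661923;	/* Pi/2 */
static const double TWOBYPI = 0.63661977236758134308;	/* 2/Pi */

static double poly_sin(double r)		/* odd polynomial in r  */
{
	double s = r * r;
	return r + r * s * (-1.0/6 + s * (1.0/120 + s * (-1.0/5040)));
}

static double poly_cos(double r)		/* even polynomial in r */
{
	double s = r * r;
	return 1.0 + s * (-1.0/2 + s * (1.0/24 + s * (-1.0/720)));
}

/* adjn = 0 computes sin(x), adjn = 1 computes cos(x), mirroring ADJN */
static double sincos_kernel(double x, int adjn)
{
	double n = nearbyint(x * TWOBYPI);	/* N = round(X * 2/Pi)  */
	double r = x - n * PIO2;		/* r = X - N*Pi/2       */
	long   k = ((long)n + adjn) & 3;	/* k = (N + AdjN) mod 4 */
	double sgn = (k & 2) ? -1.0 : 1.0;	/* sgn = (-1)**j        */

	return (k & 1) ? sgn * poly_cos(r) : sgn * poly_sin(r);
}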
+
+SINA7:	long		0xBD6AAA77,0xCCC994F5
+SINA6:	long		0x3DE61209,0x7AAE8DA1
+SINA5:	long		0xBE5AE645,0x2A118AE4
+SINA4:	long		0x3EC71DE3,0xA5341531
+SINA3:	long		0xBF2A01A0,0x1A018B59,0x00000000,0x00000000
+SINA2:	long		0x3FF80000,0x88888888,0x888859AF,0x00000000
+SINA1:	long		0xBFFC0000,0xAAAAAAAA,0xAAAAAA99,0x00000000
+
+COSB8:	long		0x3D2AC4D0,0xD6011EE3
+COSB7:	long		0xBDA9396F,0x9F45AC19
+COSB6:	long		0x3E21EED9,0x0612C972
+COSB5:	long		0xBE927E4F,0xB79D9FCF
+COSB4:	long		0x3EFA01A0,0x1A01D423,0x00000000,0x00000000
+COSB3:	long		0xBFF50000,0xB60B60B6,0x0B61D438,0x00000000
+COSB2:	long		0x3FFA0000,0xAAAAAAAA,0xAAAAAB5E
+COSB1:	long		0xBF000000
+
+	set		INARG,FP_SCR0
+
+	set		X,FP_SCR0
+#	set		XDCARE,X+2
+	set		XFRAC,X+4
+
+	set		RPRIME,FP_SCR0
+	set		SPRIME,FP_SCR1
+
+	set		POSNEG1,L_SCR1
+	set		TWOTO63,L_SCR1
+
+	set		ENDFLAG,L_SCR2
+	set		INT,L_SCR2
+
+	set		ADJN,L_SCR3
+
+############################################
+	global		ssin
+ssin:
+	mov.l		&0,ADJN(%a6)		# yes; SET ADJN TO 0
+	bra.b		SINBGN
+
+############################################
+	global		scos
+scos:
+	mov.l		&1,ADJN(%a6)		# yes; SET ADJN TO 1
+
+############################################
+SINBGN:
+#--SAVE FPCR, FP1. CHECK IF |X| IS TOO SMALL OR LARGE
+
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	fmov.x		%fp0,X(%a6)		# save input at X
+
+# "COMPACTIFY" X
+	mov.l		(%a0),%d1		# put exp in hi word
+	mov.w		4(%a0),%d1		# fetch hi(man)
+	and.l		&0x7FFFFFFF,%d1		# strip sign
+
+	cmpi.l		%d1,&0x3FD78000		# is |X| >= 2**(-40)?
+	bge.b		SOK1			# yes
+	bra.w		SINSM			# no; input is very small
+
+SOK1:
+	cmp.l		%d1,&0x4004BC7E		# is |X| < 15 PI?
+	blt.b		SINMAIN			# yes
+	bra.w		SREDUCEX		# no; input is very large
+
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+SINMAIN:
+	fmov.x		%fp0,%fp1
+	fmul.d		TWOBYPI(%pc),%fp1	# X*2/PI
+
+	lea		PITBL+0x200(%pc),%a1	# TABLE OF N*PI/2, N = -32,...,32
+
+	fmov.l		%fp1,INT(%a6)		# CONVERT TO INTEGER
+
+	mov.l		INT(%a6),%d1		# make a copy of N
+	asl.l		&4,%d1			# N *= 16
+	add.l		%d1,%a1			# tbl_addr = a1 + (N*16)
+
+# A1 IS THE ADDRESS OF N*PIBY2
+# ...WHICH IS IN TWO PIECES Y1 & Y2
+	fsub.x		(%a1)+,%fp0		# X-Y1
+	fsub.s		(%a1),%fp0		# fp0 = R = (X-Y1)-Y2
+
+SINCONT:
+#--continuation from REDUCEX
+
+#--GET N+ADJN AND SEE IF SIN(R) OR COS(R) IS NEEDED
+	mov.l		INT(%a6),%d1
+	add.l		ADJN(%a6),%d1		# SEE IF D0 IS ODD OR EVEN
+	ror.l		&1,%d1			# D0 WAS ODD IFF D0 IS NEGATIVE
+	cmp.l		%d1,&0
+	blt.w		COSPOLY
+
+#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
+#--THEN WE RETURN	SGN*SIN(R). SGN*SIN(R) IS COMPUTED BY
+#--R' + R'*S*(A1 + S(A2 + S(A3 + S(A4 + ... + SA7)))), WHERE
+#--R' = SGN*R, S=R*R. THIS CAN BE REWRITTEN AS
+#--R' + R'*S*( [A1+T(A3+T(A5+TA7))] + [S(A2+T(A4+TA6))])
+#--WHERE T=S*S.
+#--NOTE THAT A3 THROUGH A7 ARE STORED IN DOUBLE PRECISION
+#--WHILE A1 AND A2 ARE IN DOUBLE-EXTENDED FORMAT.
+SINPOLY:
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmov.x		%fp0,X(%a6)		# X IS R
+	fmul.x		%fp0,%fp0		# FP0 IS S
+
+	fmov.d		SINA7(%pc),%fp3
+	fmov.d		SINA6(%pc),%fp2
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS T
+
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+# ...LEAST SIG. BIT OF D0 IN SIGN POSITION
+	eor.l		%d1,X(%a6)		# X IS NOW R'= SGN*R
+
+	fmul.x		%fp1,%fp3		# TA7
+	fmul.x		%fp1,%fp2		# TA6
+
+	fadd.d		SINA5(%pc),%fp3		# A5+TA7
+	fadd.d		SINA4(%pc),%fp2		# A4+TA6
+
+	fmul.x		%fp1,%fp3		# T(A5+TA7)
+	fmul.x		%fp1,%fp2		# T(A4+TA6)
+
+	fadd.d		SINA3(%pc),%fp3		# A3+T(A5+TA7)
+	fadd.x		SINA2(%pc),%fp2		# A2+T(A4+TA6)
+
+	fmul.x		%fp3,%fp1		# T(A3+T(A5+TA7))
+
+	fmul.x		%fp0,%fp2		# S(A2+T(A4+TA6))
+	fadd.x		SINA1(%pc),%fp1		# A1+T(A3+T(A5+TA7))
+	fmul.x		X(%a6),%fp0		# R'*S
+
+	fadd.x		%fp2,%fp1		# [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]
+
+	fmul.x		%fp1,%fp0		# SIN(R')-R'
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fadd.x		X(%a6),%fp0		# last inst - possible exception set
+	bra		t_inx2
+
+#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
+#--THEN WE RETURN	SGN*COS(R). SGN*COS(R) IS COMPUTED BY
+#--SGN + S'*(B1 + S(B2 + S(B3 + S(B4 + ... + SB8)))), WHERE
+#--S=R*R AND S'=SGN*S. THIS CAN BE REWRITTEN AS
+#--SGN + S'*([B1+T(B3+T(B5+TB7))] + [S(B2+T(B4+T(B6+TB8)))])
+#--WHERE T=S*S.
+#--NOTE THAT B4 THROUGH B8 ARE STORED IN DOUBLE PRECISION
+#--WHILE B2 AND B3 ARE IN DOUBLE-EXTENDED FORMAT, B1 IS -1/2
+#--AND IS THEREFORE STORED AS SINGLE PRECISION.
+COSPOLY:
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmul.x		%fp0,%fp0		# FP0 IS S
+
+	fmov.d		COSB8(%pc),%fp2
+	fmov.d		COSB7(%pc),%fp3
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS T
+
+	fmov.x		%fp0,X(%a6)		# X IS S
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+# ...LEAST SIG. BIT OF D0 IN SIGN POSITION
+
+	fmul.x		%fp1,%fp2		# TB8
+
+	eor.l		%d1,X(%a6)		# X IS NOW S'= SGN*S
+	and.l		&0x80000000,%d1
+
+	fmul.x		%fp1,%fp3		# TB7
+
+	or.l		&0x3F800000,%d1		# D0 IS SGN IN SINGLE
+	mov.l		%d1,POSNEG1(%a6)
+
+	fadd.d		COSB6(%pc),%fp2		# B6+TB8
+	fadd.d		COSB5(%pc),%fp3		# B5+TB7
+
+	fmul.x		%fp1,%fp2		# T(B6+TB8)
+	fmul.x		%fp1,%fp3		# T(B5+TB7)
+
+	fadd.d		COSB4(%pc),%fp2		# B4+T(B6+TB8)
+	fadd.x		COSB3(%pc),%fp3		# B3+T(B5+TB7)
+
+	fmul.x		%fp1,%fp2		# T(B4+T(B6+TB8))
+	fmul.x		%fp3,%fp1		# T(B3+T(B5+TB7))
+
+	fadd.x		COSB2(%pc),%fp2		# B2+T(B4+T(B6+TB8))
+	fadd.s		COSB1(%pc),%fp1		# B1+T(B3+T(B5+TB7))
+
+	fmul.x		%fp2,%fp0		# S(B2+T(B4+T(B6+TB8)))
+
+	fadd.x		%fp1,%fp0
+
+	fmul.x		X(%a6),%fp0
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fadd.s		POSNEG1(%a6),%fp0	# last inst - possible exception set
+	bra		t_inx2
+
+##############################################
+
+# SINe: Big OR Small?
+#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+#--IF |X| < 2**(-40), RETURN X OR 1.
+SINBORS:
+	cmp.l		%d1,&0x3FFF8000
+	bgt.l		SREDUCEX
+
+SINSM:
+	mov.l		ADJN(%a6),%d1
+	cmp.l		%d1,&0
+	bgt.b		COSTINY
+
+# here, the operation may underflow iff the precision is sgl or dbl.
+# extended denorms are handled through another entry point.
+SINTINY:
+#	mov.w		&0x0000,XDCARE(%a6)	# JUST IN CASE
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0		# last inst - possible exception set
+	bra		t_catch
+
+COSTINY:
+	fmov.s		&0x3F800000,%fp0	# fp0 = 1.0
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fadd.s		&0x80800000,%fp0	# last inst - possible exception set
+	bra		t_pinx2
+
+################################################
+	global		ssind
+#--SIN(X) = X FOR DENORMALIZED X
+ssind:
+	bra		t_extdnrm
+
+############################################
+	global		scosd
+#--COS(X) = 1 FOR DENORMALIZED X
+scosd:
+	fmov.s		&0x3F800000,%fp0	# fp0 = 1.0
+	bra		t_pinx2
+
+##################################################
+
+	global		ssincos
+ssincos:
+#--SET ADJN TO 4
+	mov.l		&4,ADJN(%a6)
+
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	fmov.x		%fp0,X(%a6)
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1		# COMPACTIFY X
+
+	cmp.l		%d1,&0x3FD78000		# |X| >= 2**(-40)?
+	bge.b		SCOK1
+	bra.w		SCSM
+
+SCOK1:
+	cmp.l		%d1,&0x4004BC7E		# |X| < 15 PI?
+	blt.b		SCMAIN
+	bra.w		SREDUCEX
+
+
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+SCMAIN:
+	fmov.x		%fp0,%fp1
+
+	fmul.d		TWOBYPI(%pc),%fp1	# X*2/PI
+
+	lea		PITBL+0x200(%pc),%a1	# TABLE OF N*PI/2, N = -32,...,32
+
+	fmov.l		%fp1,INT(%a6)		# CONVERT TO INTEGER
+
+	mov.l		INT(%a6),%d1
+	asl.l		&4,%d1
+	add.l		%d1,%a1			# ADDRESS OF N*PIBY2, IN Y1, Y2
+
+	fsub.x		(%a1)+,%fp0		# X-Y1
+	fsub.s		(%a1),%fp0		# FP0 IS R = (X-Y1)-Y2
+
+SCCONT:
+#--continuation point from REDUCEX
+
+	mov.l		INT(%a6),%d1
+	ror.l		&1,%d1
+	cmp.l		%d1,&0			# D0 < 0 IFF N IS ODD
+	bge.w		NEVEN
+
+SNODD:
+#--REGISTERS SAVED SO FAR: D0, A0, FP2.
+	fmovm.x		&0x04,-(%sp)		# save fp2
+
+	fmov.x		%fp0,RPRIME(%a6)
+	fmul.x		%fp0,%fp0		# FP0 IS S = R*R
+	fmov.d		SINA7(%pc),%fp1		# A7
+	fmov.d		COSB8(%pc),%fp2		# B8
+	fmul.x		%fp0,%fp1		# SA7
+	fmul.x		%fp0,%fp2		# SB8
+
+	mov.l		%d2,-(%sp)
+	mov.l		%d1,%d2
+	ror.l		&1,%d2
+	and.l		&0x80000000,%d2
+	eor.l		%d1,%d2
+	and.l		&0x80000000,%d2
+
+	fadd.d		SINA6(%pc),%fp1		# A6+SA7
+	fadd.d		COSB7(%pc),%fp2		# B7+SB8
+
+	fmul.x		%fp0,%fp1		# S(A6+SA7)
+	eor.l		%d2,RPRIME(%a6)
+	mov.l		(%sp)+,%d2
+	fmul.x		%fp0,%fp2		# S(B7+SB8)
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+	mov.l		&0x3F800000,POSNEG1(%a6)
+	eor.l		%d1,POSNEG1(%a6)
+
+	fadd.d		SINA5(%pc),%fp1		# A5+S(A6+SA7)
+	fadd.d		COSB6(%pc),%fp2		# B6+S(B7+SB8)
+
+	fmul.x		%fp0,%fp1		# S(A5+S(A6+SA7))
+	fmul.x		%fp0,%fp2		# S(B6+S(B7+SB8))
+	fmov.x		%fp0,SPRIME(%a6)
+
+	fadd.d		SINA4(%pc),%fp1		# A4+S(A5+S(A6+SA7))
+	eor.l		%d1,SPRIME(%a6)
+	fadd.d		COSB5(%pc),%fp2		# B5+S(B6+S(B7+SB8))
+
+	fmul.x		%fp0,%fp1		# S(A4+...)
+	fmul.x		%fp0,%fp2		# S(B5+...)
+
+	fadd.d		SINA3(%pc),%fp1		# A3+S(A4+...)
+	fadd.d		COSB4(%pc),%fp2		# B4+S(B5+...)
+
+	fmul.x		%fp0,%fp1		# S(A3+...)
+	fmul.x		%fp0,%fp2		# S(B4+...)
+
+	fadd.x		SINA2(%pc),%fp1		# A2+S(A3+...)
+	fadd.x		COSB3(%pc),%fp2		# B3+S(B4+...)
+
+	fmul.x		%fp0,%fp1		# S(A2+...)
+	fmul.x		%fp0,%fp2		# S(B3+...)
+
+	fadd.x		SINA1(%pc),%fp1		# A1+S(A2+...)
+	fadd.x		COSB2(%pc),%fp2		# B2+S(B3+...)
+
+	fmul.x		%fp0,%fp1		# S(A1+...)
+	fmul.x		%fp2,%fp0		# S(B2+...)
+
+	fmul.x		RPRIME(%a6),%fp1	# R'S(A1+...)
+	fadd.s		COSB1(%pc),%fp0		# B1+S(B2...)
+	fmul.x		SPRIME(%a6),%fp0	# S'(B1+S(B2+...))
+
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+
+	fmov.l		%d0,%fpcr
+	fadd.x		RPRIME(%a6),%fp1	# COS(X)
+	bsr		sto_cos			# store cosine result
+	fadd.s		POSNEG1(%a6),%fp0	# SIN(X)
+	bra		t_inx2
+
+NEVEN:
+#--REGISTERS SAVED SO FAR: FP2.
+	fmovm.x		&0x04,-(%sp)		# save fp2
+
+	fmov.x		%fp0,RPRIME(%a6)
+	fmul.x		%fp0,%fp0		# FP0 IS S = R*R
+
+	fmov.d		COSB8(%pc),%fp1		# B8
+	fmov.d		SINA7(%pc),%fp2		# A7
+
+	fmul.x		%fp0,%fp1		# SB8
+	fmov.x		%fp0,SPRIME(%a6)
+	fmul.x		%fp0,%fp2		# SA7
+
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+
+	fadd.d		COSB7(%pc),%fp1		# B7+SB8
+	fadd.d		SINA6(%pc),%fp2		# A6+SA7
+
+	eor.l		%d1,RPRIME(%a6)
+	eor.l		%d1,SPRIME(%a6)
+
+	fmul.x		%fp0,%fp1		# S(B7+SB8)
+
+	or.l		&0x3F800000,%d1
+	mov.l		%d1,POSNEG1(%a6)
+
+	fmul.x		%fp0,%fp2		# S(A6+SA7)
+
+	fadd.d		COSB6(%pc),%fp1		# B6+S(B7+SB8)
+	fadd.d		SINA5(%pc),%fp2		# A5+S(A6+SA7)
+
+	fmul.x		%fp0,%fp1		# S(B6+S(B7+SB8))
+	fmul.x		%fp0,%fp2		# S(A5+S(A6+SA7))
+
+	fadd.d		COSB5(%pc),%fp1		# B5+S(B6+S(B7+SB8))
+	fadd.d		SINA4(%pc),%fp2		# A4+S(A5+S(A6+SA7))
+
+	fmul.x		%fp0,%fp1		# S(B5+...)
+	fmul.x		%fp0,%fp2		# S(A4+...)
+
+	fadd.d		COSB4(%pc),%fp1		# B4+S(B5+...)
+	fadd.d		SINA3(%pc),%fp2		# A3+S(A4+...)
+
+	fmul.x		%fp0,%fp1		# S(B4+...)
+	fmul.x		%fp0,%fp2		# S(A3+...)
+
+	fadd.x		COSB3(%pc),%fp1		# B3+S(B4+...)
+	fadd.x		SINA2(%pc),%fp2		# A2+S(A3+...)
+
+	fmul.x		%fp0,%fp1		# S(B3+...)
+	fmul.x		%fp0,%fp2		# S(A2+...)
+
+	fadd.x		COSB2(%pc),%fp1		# B2+S(B3+...)
+	fadd.x		SINA1(%pc),%fp2		# A1+S(A2+...)
+
+	fmul.x		%fp0,%fp1		# S(B2+...)
+	fmul.x		%fp2,%fp0		# S(A1+...)
+
+	fadd.s		COSB1(%pc),%fp1		# B1+S(B2...)
+	fmul.x		RPRIME(%a6),%fp0	# R'S(A1+...)
+	fmul.x		SPRIME(%a6),%fp1	# S'(B1+S(B2+...))
+
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+
+	fmov.l		%d0,%fpcr
+	fadd.s		POSNEG1(%a6),%fp1	# COS(X)
+	bsr		sto_cos			# store cosine result
+	fadd.x		RPRIME(%a6),%fp0	# SIN(X)
+	bra		t_inx2
+
+################################################
+
+SCBORS:
+	cmp.l		%d1,&0x3FFF8000
+	bgt.w		SREDUCEX
+
+################################################
+
+SCSM:
+#	mov.w		&0x0000,XDCARE(%a6)
+	fmov.s		&0x3F800000,%fp1
+
+	fmov.l		%d0,%fpcr
+	fsub.s		&0x00800000,%fp1
+	bsr		sto_cos			# store cosine result
+	fmov.l		%fpcr,%d0		# d0 must have fpcr,too
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0
+	bra		t_catch
+
+##############################################
+
+	global		ssincosd
+#--SIN AND COS OF X FOR DENORMALIZED X
+ssincosd:
+	mov.l		%d0,-(%sp)		# save d0
+	fmov.s		&0x3F800000,%fp1
+	bsr		sto_cos			# store cosine result
+	mov.l		(%sp)+,%d0		# restore d0
+	bra		t_extdnrm
+
+############################################
+
+#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
+SREDUCEX:
+	fmovm.x		&0x3c,-(%sp)		# save {fp2-fp5}
+	mov.l		%d2,-(%sp)		# save d2
+	fmov.s		&0x00000000,%fp1	# fp1 = 0
+
+#--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
+#--there is a danger of unwanted overflow in first LOOP iteration.  In this
+#--case, reduce argument by one remainder step to make subsequent reduction
+#--safe.
+	cmp.l		%d1,&0x7ffeffff		# is arg dangerously large?
+	bne.b		SLOOP			# no
+
+# yes; create 2**16383*PI/2
+	mov.w		&0x7ffe,FP_SCR0_EX(%a6)
+	mov.l		&0xc90fdaa2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+
+# create low half of 2**16383*PI/2 at FP_SCR1
+	mov.w		&0x7fdc,FP_SCR1_EX(%a6)
+	mov.l		&0x85a308d3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+
+	ftest.x		%fp0			# test sign of argument
+	fblt.w		sred_neg
+
+	or.b		&0x80,FP_SCR0_EX(%a6)	# positive arg
+	or.b		&0x80,FP_SCR1_EX(%a6)
+sred_neg:
+	fadd.x		FP_SCR0(%a6),%fp0	# high part of reduction is exact
+	fmov.x		%fp0,%fp1		# save high result in fp1
+	fadd.x		FP_SCR1(%a6),%fp0	# low part of reduction
+	fsub.x		%fp0,%fp1		# determine low component of result
+	fadd.x		FP_SCR1(%a6),%fp1	# fp0/fp1 are reduced argument.
+
+#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+#--integer quotient will be stored in N
+#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
+SLOOP:
+	fmov.x		%fp0,INARG(%a6)		# +-2**K * F, 1 <= F < 2
+	mov.w		INARG(%a6),%d1
+	mov.l		%d1,%a1			# save a copy of D0
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x00003FFF,%d1		# d0 = K
+	cmp.l		%d1,&28
+	ble.b		SLASTLOOP
+SCONTLOOP:
+	sub.l		&27,%d1			# d0 = L := K-27
+	mov.b		&0,ENDFLAG(%a6)
+	bra.b		SWORK
+SLASTLOOP:
+	clr.l		%d1			# d0 = L := 0
+	mov.b		&1,ENDFLAG(%a6)
+
+SWORK:
+#--FIND THE REMAINDER OF (R,r) W.R.T.	2**L * (PI/2). L IS SO CHOSEN
+#--THAT	INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+#--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+	mov.l		&0x00003FFE,%d2		# BIASED EXP OF 2/PI
+	sub.l		%d1,%d2			# BIASED EXP OF 2**(-L)*(2/PI)
+
+	mov.l		&0xA2F9836E,FP_SCR0_HI(%a6)
+	mov.l		&0x4E44152A,FP_SCR0_LO(%a6)
+	mov.w		%d2,FP_SCR0_EX(%a6)	# FP_SCR0 = 2**(-L)*(2/PI)
+
+	fmov.x		%fp0,%fp2
+	fmul.x		FP_SCR0(%a6),%fp2	# fp2 = X * 2**(-L)*(2/PI)
+
+#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+#--FLOATING POINT FORMAT, THE TWO FMOVE'S	FMOVE.L FP <--> N
+#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+#--(SIGN(INARG)*2**63	+	FP2) - SIGN(INARG)*2**63 WILL GIVE
+#--US THE DESIRED VALUE IN FLOATING POINT.
+	mov.l		%a1,%d2
+	swap		%d2
+	and.l		&0x80000000,%d2
+	or.l		&0x5F000000,%d2		# d2 = SIGN(INARG)*2**63 IN SGL
+	mov.l		%d2,TWOTO63(%a6)
+	fadd.s		TWOTO63(%a6),%fp2	# THE FRACTIONAL PART OF FP2 IS ROUNDED
+	fsub.s		TWOTO63(%a6),%fp2	# fp2 = N
+#	fint.x		%fp2
+
+#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
+	mov.l		%d1,%d2			# d2 = L
+
+	add.l		&0x00003FFF,%d2		# BIASED EXP OF 2**L * (PI/2)
+	mov.w		%d2,FP_SCR0_EX(%a6)
+	mov.l		&0xC90FDAA2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)		# FP_SCR0 = 2**(L) * Piby2_1
+
+	add.l		&0x00003FDD,%d1
+	mov.w		%d1,FP_SCR1_EX(%a6)
+	mov.l		&0x85A308D3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)		# FP_SCR1 = 2**(L) * Piby2_2
+
+	mov.b		ENDFLAG(%a6),%d1
+
+#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+#--P2 = 2**(L) * Piby2_2
+	fmov.x		%fp2,%fp4		# fp4 = N
+	fmul.x		FP_SCR0(%a6),%fp4	# fp4 = W = N*P1
+	fmov.x		%fp2,%fp5		# fp5 = N
+	fmul.x		FP_SCR1(%a6),%fp5	# fp5 = w = N*P2
+	fmov.x		%fp4,%fp3		# fp3 = W = N*P1
+
+#--we want P+p = W+w  but  |p| <= half ulp of P
+#--Then, we need to compute  A := R-P   and  a := r-p
+	fadd.x		%fp5,%fp3		# fp3 = P
+	fsub.x		%fp3,%fp4		# fp4 = W-P
+
+	fsub.x		%fp3,%fp0		# fp0 = A := R - P
+	fadd.x		%fp5,%fp4		# fp4 = p = (W-P)+w
+
+	fmov.x		%fp0,%fp3		# fp3 = A
+	fsub.x		%fp4,%fp1		# fp1 = a := r - p
+
+#--Now we need to normalize (A,a) to  "new (R,r)" where R+r = A+a but
+#--|r| <= half ulp of R.
+	fadd.x		%fp1,%fp0		# fp0 = R := A+a
+#--No need to calculate r if this is the last loop
+	cmp.b		%d1,&0
+	bgt.w		SRESTORE
+
+#--Need to calculate r
+	fsub.x		%fp0,%fp3		# fp3 = A-R
+	fadd.x		%fp3,%fp1		# fp1 = r := (A-R)+a
+	bra.w		SLOOP
+
+SRESTORE:
+	fmov.l		%fp2,INT(%a6)
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		(%sp)+,&0x3c		# restore {fp2-fp5}
+
+	mov.l		ADJN(%a6),%d1
+	cmp.l		%d1,&4
+
+	blt.w		SINCONT
+	bra.w		SCCONT
+
+#########################################################################
+# stan():  computes the tangent of a normalized input			#
+# stand(): computes the tangent of a denormalized input			#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = tan(X)							#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 3 ulp in 64 significant bit, i.e. #
+#	within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#	1. If |X| >= 15Pi or |X| < 2**(-40), go to 6.			#
+#									#
+#	2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let	#
+#		k = N mod 2, so in particular, k = 0 or 1.		#
+#									#
+#	3. If k is odd, go to 5.					#
+#									#
+#	4. (k is even) Tan(X) = tan(r) and tan(r) is approximated by a	#
+#		rational function U/V where				#
+#		U = r + r*s*(P1 + s*(P2 + s*P3)), and			#
+#		V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))),  s = r*r.	#
+#		Exit.							#
+#									#
+#	5. (k is odd) Tan(X) = -cot(r). Since tan(r) is approximated by #
+#		a rational function U/V where				#
+#		U = r + r*s*(P1 + s*(P2 + s*P3)), and			#
+#		V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r,	#
+#		-Cot(r) = -V/U. Exit.					#
+#									#
+#	6. If |X| > 1, go to 8.						#
+#									#
+#	7. (|X|<2**(-40)) Tan(X) = X. Exit.				#
+#									#
+#	8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back	#
+#		to 2.							#
+#									#
+#########################################################################
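
The same kind of sketch for the tangent path: the [3/2] Pade-style ratio below only illustrates the odd-numerator/even-denominator U/V structure, whereas the assembly evaluates the TANP1..TANP3 and TANQ1..TANQ4 coefficients defined below; the REDUCEX path for |X| >= 15Pi is again omitted.

#include <math.h>

static const double PIO2    = 1.57079632679489661923;	/* Pi/2 */
static const double TWOBYPI = 0.63661977236758134308;	/* 2/Pi */

static double tan_kernel(double x)
{
	double n = nearbyint(x * TWOBYPI);	/* N = round(X * 2/Pi) */
	double r = x - n * PIO2;		/* |r| <= Pi/4         */
	double s = r * r;
	double u = r * (15.0 - s);		/* U: odd in r  (illustrative) */
	double v = 15.0 - 6.0 * s;		/* V: even in r (illustrative) */

	/* even N: tan(X) = tan(r) = U/V;  odd N: tan(X) = -cot(r) = -V/U */
	return ((long)n & 1) ? -v / u : u / v;
}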
+
+TANQ4:
+	long		0x3EA0B759,0xF50F8688
+TANP3:
+	long		0xBEF2BAA5,0xA8924F04
+
+TANQ3:
+	long		0xBF346F59,0xB39BA65F,0x00000000,0x00000000
+
+TANP2:
+	long		0x3FF60000,0xE073D3FC,0x199C4A00,0x00000000
+
+TANQ2:
+	long		0x3FF90000,0xD23CD684,0x15D95FA1,0x00000000
+
+TANP1:
+	long		0xBFFC0000,0x8895A6C5,0xFB423BCA,0x00000000
+
+TANQ1:
+	long		0xBFFD0000,0xEEF57E0D,0xA84BC8CE,0x00000000
+
+INVTWOPI:
+	long		0x3FFC0000,0xA2F9836E,0x4E44152A,0x00000000
+
+TWOPI1:
+	long		0x40010000,0xC90FDAA2,0x00000000,0x00000000
+TWOPI2:
+	long		0x3FDF0000,0x85A308D4,0x00000000,0x00000000
+
+#--N*PI/2, -32 <= N <= 32, IN A LEADING TERM IN EXT. AND TRAILING
+#--TERM IN SGL. NOTE THAT PI IS 64-BIT LONG, THUS N*PI/2 IS AT
+#--MOST 69 BITS LONG.
+#	global		PITBL
+PITBL:
+	long		0xC0040000,0xC90FDAA2,0x2168C235,0x21800000
+	long		0xC0040000,0xC2C75BCD,0x105D7C23,0xA0D00000
+	long		0xC0040000,0xBC7EDCF7,0xFF523611,0xA1E80000
+	long		0xC0040000,0xB6365E22,0xEE46F000,0x21480000
+	long		0xC0040000,0xAFEDDF4D,0xDD3BA9EE,0xA1200000
+	long		0xC0040000,0xA9A56078,0xCC3063DD,0x21FC0000
+	long		0xC0040000,0xA35CE1A3,0xBB251DCB,0x21100000
+	long		0xC0040000,0x9D1462CE,0xAA19D7B9,0xA1580000
+	long		0xC0040000,0x96CBE3F9,0x990E91A8,0x21E00000
+	long		0xC0040000,0x90836524,0x88034B96,0x20B00000
+	long		0xC0040000,0x8A3AE64F,0x76F80584,0xA1880000
+	long		0xC0040000,0x83F2677A,0x65ECBF73,0x21C40000
+	long		0xC0030000,0xFB53D14A,0xA9C2F2C2,0x20000000
+	long		0xC0030000,0xEEC2D3A0,0x87AC669F,0x21380000
+	long		0xC0030000,0xE231D5F6,0x6595DA7B,0xA1300000
+	long		0xC0030000,0xD5A0D84C,0x437F4E58,0x9FC00000
+	long		0xC0030000,0xC90FDAA2,0x2168C235,0x21000000
+	long		0xC0030000,0xBC7EDCF7,0xFF523611,0xA1680000
+	long		0xC0030000,0xAFEDDF4D,0xDD3BA9EE,0xA0A00000
+	long		0xC0030000,0xA35CE1A3,0xBB251DCB,0x20900000
+	long		0xC0030000,0x96CBE3F9,0x990E91A8,0x21600000
+	long		0xC0030000,0x8A3AE64F,0x76F80584,0xA1080000
+	long		0xC0020000,0xFB53D14A,0xA9C2F2C2,0x1F800000
+	long		0xC0020000,0xE231D5F6,0x6595DA7B,0xA0B00000
+	long		0xC0020000,0xC90FDAA2,0x2168C235,0x20800000
+	long		0xC0020000,0xAFEDDF4D,0xDD3BA9EE,0xA0200000
+	long		0xC0020000,0x96CBE3F9,0x990E91A8,0x20E00000
+	long		0xC0010000,0xFB53D14A,0xA9C2F2C2,0x1F000000
+	long		0xC0010000,0xC90FDAA2,0x2168C235,0x20000000
+	long		0xC0010000,0x96CBE3F9,0x990E91A8,0x20600000
+	long		0xC0000000,0xC90FDAA2,0x2168C235,0x1F800000
+	long		0xBFFF0000,0xC90FDAA2,0x2168C235,0x1F000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x9F000000
+	long		0x40000000,0xC90FDAA2,0x2168C235,0x9F800000
+	long		0x40010000,0x96CBE3F9,0x990E91A8,0xA0600000
+	long		0x40010000,0xC90FDAA2,0x2168C235,0xA0000000
+	long		0x40010000,0xFB53D14A,0xA9C2F2C2,0x9F000000
+	long		0x40020000,0x96CBE3F9,0x990E91A8,0xA0E00000
+	long		0x40020000,0xAFEDDF4D,0xDD3BA9EE,0x20200000
+	long		0x40020000,0xC90FDAA2,0x2168C235,0xA0800000
+	long		0x40020000,0xE231D5F6,0x6595DA7B,0x20B00000
+	long		0x40020000,0xFB53D14A,0xA9C2F2C2,0x9F800000
+	long		0x40030000,0x8A3AE64F,0x76F80584,0x21080000
+	long		0x40030000,0x96CBE3F9,0x990E91A8,0xA1600000
+	long		0x40030000,0xA35CE1A3,0xBB251DCB,0xA0900000
+	long		0x40030000,0xAFEDDF4D,0xDD3BA9EE,0x20A00000
+	long		0x40030000,0xBC7EDCF7,0xFF523611,0x21680000
+	long		0x40030000,0xC90FDAA2,0x2168C235,0xA1000000
+	long		0x40030000,0xD5A0D84C,0x437F4E58,0x1FC00000
+	long		0x40030000,0xE231D5F6,0x6595DA7B,0x21300000
+	long		0x40030000,0xEEC2D3A0,0x87AC669F,0xA1380000
+	long		0x40030000,0xFB53D14A,0xA9C2F2C2,0xA0000000
+	long		0x40040000,0x83F2677A,0x65ECBF73,0xA1C40000
+	long		0x40040000,0x8A3AE64F,0x76F80584,0x21880000
+	long		0x40040000,0x90836524,0x88034B96,0xA0B00000
+	long		0x40040000,0x96CBE3F9,0x990E91A8,0xA1E00000
+	long		0x40040000,0x9D1462CE,0xAA19D7B9,0x21580000
+	long		0x40040000,0xA35CE1A3,0xBB251DCB,0xA1100000
+	long		0x40040000,0xA9A56078,0xCC3063DD,0xA1FC0000
+	long		0x40040000,0xAFEDDF4D,0xDD3BA9EE,0x21200000
+	long		0x40040000,0xB6365E22,0xEE46F000,0xA1480000
+	long		0x40040000,0xBC7EDCF7,0xFF523611,0x21E80000
+	long		0x40040000,0xC2C75BCD,0x105D7C23,0x20D00000
+	long		0x40040000,0xC90FDAA2,0x2168C235,0xA1800000
+
+	set		INARG,FP_SCR0
+
+	set		TWOTO63,L_SCR1
+	set		INT,L_SCR1
+	set		ENDFLAG,L_SCR2
+
+	global		stan
+stan:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FD78000		# |X| >= 2**(-40)?
+	bge.b		TANOK1
+	bra.w		TANSM
+TANOK1:
+	cmp.l		%d1,&0x4004BC7E		# |X| < 15 PI?
+	blt.b		TANMAIN
+	bra.w		REDUCEX
+
+TANMAIN:
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+	fmov.x		%fp0,%fp1
+	fmul.d		TWOBYPI(%pc),%fp1	# X*2/PI
+
+	lea.l		PITBL+0x200(%pc),%a1	# TABLE OF N*PI/2, N = -32,...,32
+
+	fmov.l		%fp1,%d1		# CONVERT TO INTEGER
+
+	asl.l		&4,%d1
+	add.l		%d1,%a1			# ADDRESS N*PIBY2 IN Y1, Y2
+
+	fsub.x		(%a1)+,%fp0		# X-Y1
+
+	fsub.s		(%a1),%fp0		# FP0 IS R = (X-Y1)-Y2
+
+	ror.l		&5,%d1
+	and.l		&0x80000000,%d1		# D0 WAS ODD IFF D0 < 0
+
+TANCONT:
+	fmovm.x		&0x0c,-(%sp)		# save fp2,fp3
+
+	cmp.l		%d1,&0
+	blt.w		NODD
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# S = R*R
+
+	fmov.d		TANQ4(%pc),%fp3
+	fmov.d		TANP3(%pc),%fp2
+
+	fmul.x		%fp1,%fp3		# SQ4
+	fmul.x		%fp1,%fp2		# SP3
+
+	fadd.d		TANQ3(%pc),%fp3		# Q3+SQ4
+	fadd.x		TANP2(%pc),%fp2		# P2+SP3
+
+	fmul.x		%fp1,%fp3		# S(Q3+SQ4)
+	fmul.x		%fp1,%fp2		# S(P2+SP3)
+
+	fadd.x		TANQ2(%pc),%fp3		# Q2+S(Q3+SQ4)
+	fadd.x		TANP1(%pc),%fp2		# P1+S(P2+SP3)
+
+	fmul.x		%fp1,%fp3		# S(Q2+S(Q3+SQ4))
+	fmul.x		%fp1,%fp2		# S(P1+S(P2+SP3))
+
+	fadd.x		TANQ1(%pc),%fp3		# Q1+S(Q2+S(Q3+SQ4))
+	fmul.x		%fp0,%fp2		# RS(P1+S(P2+SP3))
+
+	fmul.x		%fp3,%fp1		# S(Q1+S(Q2+S(Q3+SQ4)))
+
+	fadd.x		%fp2,%fp0		# R+RS(P1+S(P2+SP3))
+
+	fadd.s		&0x3F800000,%fp1	# 1+S(Q1+...)
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2,fp3
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fdiv.x		%fp1,%fp0		# last inst - possible exception set
+	bra		t_inx2
+
+NODD:
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp0,%fp0		# S = R*R
+
+	fmov.d		TANQ4(%pc),%fp3
+	fmov.d		TANP3(%pc),%fp2
+
+	fmul.x		%fp0,%fp3		# SQ4
+	fmul.x		%fp0,%fp2		# SP3
+
+	fadd.d		TANQ3(%pc),%fp3		# Q3+SQ4
+	fadd.x		TANP2(%pc),%fp2		# P2+SP3
+
+	fmul.x		%fp0,%fp3		# S(Q3+SQ4)
+	fmul.x		%fp0,%fp2		# S(P2+SP3)
+
+	fadd.x		TANQ2(%pc),%fp3		# Q2+S(Q3+SQ4)
+	fadd.x		TANP1(%pc),%fp2		# P1+S(P2+SP3)
+
+	fmul.x		%fp0,%fp3		# S(Q2+S(Q3+SQ4))
+	fmul.x		%fp0,%fp2		# S(P1+S(P2+SP3))
+
+	fadd.x		TANQ1(%pc),%fp3		# Q1+S(Q2+S(Q3+SQ4))
+	fmul.x		%fp1,%fp2		# RS(P1+S(P2+SP3))
+
+	fmul.x		%fp3,%fp0		# S(Q1+S(Q2+S(Q3+SQ4)))
+
+	fadd.x		%fp2,%fp1		# R+RS(P1+S(P2+SP3))
+	fadd.s		&0x3F800000,%fp0	# 1+S(Q1+...)
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2,fp3
+
+	fmov.x		%fp1,-(%sp)
+	eor.l		&0x80000000,(%sp)
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fdiv.x		(%sp)+,%fp0		# last inst - possible exception set
+	bra		t_inx2
+
+TANBORS:
+#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+#--IF |X| < 2**(-40), RETURN X OR 1.
+	cmp.l		%d1,&0x3FFF8000
+	bgt.b		REDUCEX
+
+TANSM:
+	fmov.x		%fp0,-(%sp)
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		(%sp)+,%fp0		# last inst - possible exception set
+	bra		t_catch
+
+	global		stand
+#--TAN(X) = X FOR DENORMALIZED X
+stand:
+	bra		t_extdnrm
+
+#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
+REDUCEX:
+	fmovm.x		&0x3c,-(%sp)		# save {fp2-fp5}
+	mov.l		%d2,-(%sp)		# save d2
+	fmov.s		&0x00000000,%fp1	# fp1 = 0
+
+#--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
+#--there is a danger of unwanted overflow in first LOOP iteration.  In this
+#--case, reduce argument by one remainder step to make subsequent reduction
+#--safe.
+	cmp.l		%d1,&0x7ffeffff		# is arg dangerously large?
+	bne.b		LOOP			# no
+
+# yes; create 2**16383*PI/2
+	mov.w		&0x7ffe,FP_SCR0_EX(%a6)
+	mov.l		&0xc90fdaa2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+
+# create low half of 2**16383*PI/2 at FP_SCR1
+	mov.w		&0x7fdc,FP_SCR1_EX(%a6)
+	mov.l		&0x85a308d3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+
+	ftest.x		%fp0			# test sign of argument
+	fblt.w		red_neg
+
+	or.b		&0x80,FP_SCR0_EX(%a6)	# positive arg
+	or.b		&0x80,FP_SCR1_EX(%a6)
+red_neg:
+	fadd.x		FP_SCR0(%a6),%fp0	# high part of reduction is exact
+	fmov.x		%fp0,%fp1		# save high result in fp1
+	fadd.x		FP_SCR1(%a6),%fp0	# low part of reduction
+	fsub.x		%fp0,%fp1		# determine low component of result
+	fadd.x		FP_SCR1(%a6),%fp1	# fp0/fp1 are reduced argument.
+
+#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+#--integer quotient will be stored in N
+#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
+LOOP:
+	fmov.x		%fp0,INARG(%a6)		# +-2**K * F, 1 <= F < 2
+	mov.w		INARG(%a6),%d1
+	mov.l		%d1,%a1			# save a copy of D0
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x00003FFF,%d1		# d0 = K
+	cmp.l		%d1,&28
+	ble.b		LASTLOOP
+CONTLOOP:
+	sub.l		&27,%d1			# d0 = L := K-27
+	mov.b		&0,ENDFLAG(%a6)
+	bra.b		WORK
+LASTLOOP:
+	clr.l		%d1			# d0 = L := 0
+	mov.b		&1,ENDFLAG(%a6)
+
+WORK:
+#--FIND THE REMAINDER OF (R,r) W.R.T.	2**L * (PI/2). L IS SO CHOSEN
+#--THAT	INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+#--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+	mov.l		&0x00003FFE,%d2		# BIASED EXP OF 2/PI
+	sub.l		%d1,%d2			# BIASED EXP OF 2**(-L)*(2/PI)
+
+	mov.l		&0xA2F9836E,FP_SCR0_HI(%a6)
+	mov.l		&0x4E44152A,FP_SCR0_LO(%a6)
+	mov.w		%d2,FP_SCR0_EX(%a6)	# FP_SCR0 = 2**(-L)*(2/PI)
+
+	fmov.x		%fp0,%fp2
+	fmul.x		FP_SCR0(%a6),%fp2	# fp2 = X * 2**(-L)*(2/PI)
+
+#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+#--FLOATING POINT FORMAT, THE TWO FMOVE'S	FMOVE.L FP <--> N
+#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+#--(SIGN(INARG)*2**63	+	FP2) - SIGN(INARG)*2**63 WILL GIVE
+#--US THE DESIRED VALUE IN FLOATING POINT.
+	mov.l		%a1,%d2
+	swap		%d2
+	and.l		&0x80000000,%d2
+	or.l		&0x5F000000,%d2		# d2 = SIGN(INARG)*2**63 IN SGL
+	mov.l		%d2,TWOTO63(%a6)
+	fadd.s		TWOTO63(%a6),%fp2	# THE FRACTIONAL PART OF FP2 IS ROUNDED
+	fsub.s		TWOTO63(%a6),%fp2	# fp2 = N
+#	fintrz.x	%fp2,%fp2
+
+#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
+	mov.l		%d1,%d2			# d2 = L
+
+	add.l		&0x00003FFF,%d2		# BIASED EXP OF 2**L * (PI/2)
+	mov.w		%d2,FP_SCR0_EX(%a6)
+	mov.l		&0xC90FDAA2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)		# FP_SCR0 = 2**(L) * Piby2_1
+
+	add.l		&0x00003FDD,%d1
+	mov.w		%d1,FP_SCR1_EX(%a6)
+	mov.l		&0x85A308D3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)		# FP_SCR1 = 2**(L) * Piby2_2
+
+	mov.b		ENDFLAG(%a6),%d1
+
+#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+#--P2 = 2**(L) * Piby2_2
+	fmov.x		%fp2,%fp4		# fp4 = N
+	fmul.x		FP_SCR0(%a6),%fp4	# fp4 = W = N*P1
+	fmov.x		%fp2,%fp5		# fp5 = N
+	fmul.x		FP_SCR1(%a6),%fp5	# fp5 = w = N*P2
+	fmov.x		%fp4,%fp3		# fp3 = W = N*P1
+
+#--we want P+p = W+w  but  |p| <= half ulp of P
+#--Then, we need to compute  A := R-P   and  a := r-p
+	fadd.x		%fp5,%fp3		# fp3 = P
+	fsub.x		%fp3,%fp4		# fp4 = W-P
+
+	fsub.x		%fp3,%fp0		# fp0 = A := R - P
+	fadd.x		%fp5,%fp4		# fp4 = p = (W-P)+w
+
+	fmov.x		%fp0,%fp3		# fp3 = A
+	fsub.x		%fp4,%fp1		# fp1 = a := r - p
+
+#--Now we need to normalize (A,a) to  "new (R,r)" where R+r = A+a but
+#--|r| <= half ulp of R.
+	fadd.x		%fp1,%fp0		# fp0 = R := A+a
+#--No need to calculate r if this is the last loop
+	cmp.b		%d1,&0
+	bgt.w		RESTORE
+
+#--Need to calculate r
+	fsub.x		%fp0,%fp3		# fp3 = A-R
+	fadd.x		%fp3,%fp1		# fp1 = r := (A-R)+a
+	bra.w		LOOP
+
+RESTORE:
+	fmov.l		%fp2,INT(%a6)
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		(%sp)+,&0x3c		# restore {fp2-fp5}
+
+	mov.l		INT(%a6),%d1
+	ror.l		&1,%d1
+
+	bra.w		TANCONT
+
+#########################################################################
+# satan():  computes the arctangent of a normalized number		#
+# satand(): computes the arctangent of a denormalized number		#
+#									#
+# INPUT	*************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = arctan(X)							#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 2 ulps in	64 significant bit,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#	Step 1. If |X| >= 16 or |X| < 1/16, go to Step 5.		#
+#									#
+#	Step 2. Let X = sgn * 2**k * 1.xxxxxxxx...x.			#
+#		Note that k = -4, -3,..., or 3.				#
+#		Define F = sgn * 2**k * 1.xxxx1, i.e. the first 5	#
+#		significant bits of X with a bit-1 attached at the 6-th	#
+#		bit position. Define u to be u = (X-F) / (1 + X*F).	#
+#									#
+#	Step 3. Approximate arctan(u) by a polynomial poly.		#
+#									#
+#	Step 4. Return arctan(F) + poly, arctan(F) is fetched from a	#
+#		table of values calculated beforehand. Exit.		#
+#									#
+#	Step 5. If |X| >= 16, go to Step 7.				#
+#									#
+#	Step 6. Approximate arctan(X) by an odd polynomial in X. Exit.	#
+#									#
+#	Step 7. Define X' = -1/X. Approximate arctan(X') by an odd	#
+#		polynomial in X'.					#
+#		Arctan(X) = sign(X)*Pi/2 + arctan(X'). Exit.		#
+#									#
+#########################################################################
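
A C sketch of the table-driven reduction (Steps 2-4) may also help. The bit twiddling below builds F from a double's exponent and first five fraction bits, whereas the assembly works on the extended-precision fraction; libm's atan() stands in for the 128-entry ATANTBL lookup, and the short odd polynomial uses illustrative Taylor coefficients rather than ATANA1..ATANA3.

#include <math.h>
#include <stdint.h>
#include <string.h>

/* Assumes 1/16 <= |x| < 16; the small and large ranges are not handled here. */
static double atan_kernel(double x)
{
	uint64_t bits;
	double f, u, v, poly;

	/* F: same sign/exponent as X, first 5 fraction bits kept,
	 * 6th fraction bit forced to 1, remaining bits cleared.   */
	memcpy(&bits, &x, sizeof(bits));
	bits &= 0xFFFF800000000000ULL;
	bits |= 0x0000400000000000ULL;
	memcpy(&f, &bits, sizeof(f));

	u = (x - f) / (1.0 + x * f);			/* small, since F is close to X */
	v = u * u;
	poly = u - u * v * (1.0/3 - v * (1.0/5));	/* approximates atan(u) */

	return atan(f) + poly;		/* atan(f) stands in for the ATANTBL lookup */
}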
+
+ATANA3:	long		0xBFF6687E,0x314987D8
+ATANA2:	long		0x4002AC69,0x34A26DB3
+ATANA1:	long		0xBFC2476F,0x4E1DA28E
+
+ATANB6:	long		0x3FB34444,0x7F876989
+ATANB5:	long		0xBFB744EE,0x7FAF45DB
+ATANB4:	long		0x3FBC71C6,0x46940220
+ATANB3:	long		0xBFC24924,0x921872F9
+ATANB2:	long		0x3FC99999,0x99998FA9
+ATANB1:	long		0xBFD55555,0x55555555
+
+ATANC5:	long		0xBFB70BF3,0x98539E6A
+ATANC4:	long		0x3FBC7187,0x962D1D7D
+ATANC3:	long		0xBFC24924,0x827107B8
+ATANC2:	long		0x3FC99999,0x9996263E
+ATANC1:	long		0xBFD55555,0x55555536
+
+PPIBY2:	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+NPIBY2:	long		0xBFFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+PTINY:	long		0x00010000,0x80000000,0x00000000,0x00000000
+NTINY:	long		0x80010000,0x80000000,0x00000000,0x00000000
+
+ATANTBL:
+	long		0x3FFB0000,0x83D152C5,0x060B7A51,0x00000000
+	long		0x3FFB0000,0x8BC85445,0x65498B8B,0x00000000
+	long		0x3FFB0000,0x93BE4060,0x17626B0D,0x00000000
+	long		0x3FFB0000,0x9BB3078D,0x35AEC202,0x00000000
+	long		0x3FFB0000,0xA3A69A52,0x5DDCE7DE,0x00000000
+	long		0x3FFB0000,0xAB98E943,0x62765619,0x00000000
+	long		0x3FFB0000,0xB389E502,0xF9C59862,0x00000000
+	long		0x3FFB0000,0xBB797E43,0x6B09E6FB,0x00000000
+	long		0x3FFB0000,0xC367A5C7,0x39E5F446,0x00000000
+	long		0x3FFB0000,0xCB544C61,0xCFF7D5C6,0x00000000
+	long		0x3FFB0000,0xD33F62F8,0x2488533E,0x00000000
+	long		0x3FFB0000,0xDB28DA81,0x62404C77,0x00000000
+	long		0x3FFB0000,0xE310A407,0x8AD34F18,0x00000000
+	long		0x3FFB0000,0xEAF6B0A8,0x188EE1EB,0x00000000
+	long		0x3FFB0000,0xF2DAF194,0x9DBE79D5,0x00000000
+	long		0x3FFB0000,0xFABD5813,0x61D47E3E,0x00000000
+	long		0x3FFC0000,0x8346AC21,0x0959ECC4,0x00000000
+	long		0x3FFC0000,0x8B232A08,0x304282D8,0x00000000
+	long		0x3FFC0000,0x92FB70B8,0xD29AE2F9,0x00000000
+	long		0x3FFC0000,0x9ACF476F,0x5CCD1CB4,0x00000000
+	long		0x3FFC0000,0xA29E7630,0x4954F23F,0x00000000
+	long		0x3FFC0000,0xAA68C5D0,0x8AB85230,0x00000000
+	long		0x3FFC0000,0xB22DFFFD,0x9D539F83,0x00000000
+	long		0x3FFC0000,0xB9EDEF45,0x3E900EA5,0x00000000
+	long		0x3FFC0000,0xC1A85F1C,0xC75E3EA5,0x00000000
+	long		0x3FFC0000,0xC95D1BE8,0x28138DE6,0x00000000
+	long		0x3FFC0000,0xD10BF300,0x840D2DE4,0x00000000
+	long		0x3FFC0000,0xD8B4B2BA,0x6BC05E7A,0x00000000
+	long		0x3FFC0000,0xE0572A6B,0xB42335F6,0x00000000
+	long		0x3FFC0000,0xE7F32A70,0xEA9CAA8F,0x00000000
+	long		0x3FFC0000,0xEF888432,0x64ECEFAA,0x00000000
+	long		0x3FFC0000,0xF7170A28,0xECC06666,0x00000000
+	long		0x3FFD0000,0x812FD288,0x332DAD32,0x00000000
+	long		0x3FFD0000,0x88A8D1B1,0x218E4D64,0x00000000
+	long		0x3FFD0000,0x9012AB3F,0x23E4AEE8,0x00000000
+	long		0x3FFD0000,0x976CC3D4,0x11E7F1B9,0x00000000
+	long		0x3FFD0000,0x9EB68949,0x3889A227,0x00000000
+	long		0x3FFD0000,0xA5EF72C3,0x4487361B,0x00000000
+	long		0x3FFD0000,0xAD1700BA,0xF07A7227,0x00000000
+	long		0x3FFD0000,0xB42CBCFA,0xFD37EFB7,0x00000000
+	long		0x3FFD0000,0xBB303A94,0x0BA80F89,0x00000000
+	long		0x3FFD0000,0xC22115C6,0xFCAEBBAF,0x00000000
+	long		0x3FFD0000,0xC8FEF3E6,0x86331221,0x00000000
+	long		0x3FFD0000,0xCFC98330,0xB4000C70,0x00000000
+	long		0x3FFD0000,0xD6807AA1,0x102C5BF9,0x00000000
+	long		0x3FFD0000,0xDD2399BC,0x31252AA3,0x00000000
+	long		0x3FFD0000,0xE3B2A855,0x6B8FC517,0x00000000
+	long		0x3FFD0000,0xEA2D764F,0x64315989,0x00000000
+	long		0x3FFD0000,0xF3BF5BF8,0xBAD1A21D,0x00000000
+	long		0x3FFE0000,0x801CE39E,0x0D205C9A,0x00000000
+	long		0x3FFE0000,0x8630A2DA,0xDA1ED066,0x00000000
+	long		0x3FFE0000,0x8C1AD445,0xF3E09B8C,0x00000000
+	long		0x3FFE0000,0x91DB8F16,0x64F350E2,0x00000000
+	long		0x3FFE0000,0x97731420,0x365E538C,0x00000000
+	long		0x3FFE0000,0x9CE1C8E6,0xA0B8CDBA,0x00000000
+	long		0x3FFE0000,0xA22832DB,0xCADAAE09,0x00000000
+	long		0x3FFE0000,0xA746F2DD,0xB7602294,0x00000000
+	long		0x3FFE0000,0xAC3EC0FB,0x997DD6A2,0x00000000
+	long		0x3FFE0000,0xB110688A,0xEBDC6F6A,0x00000000
+	long		0x3FFE0000,0xB5BCC490,0x59ECC4B0,0x00000000
+	long		0x3FFE0000,0xBA44BC7D,0xD470782F,0x00000000
+	long		0x3FFE0000,0xBEA94144,0xFD049AAC,0x00000000
+	long		0x3FFE0000,0xC2EB4ABB,0x661628B6,0x00000000
+	long		0x3FFE0000,0xC70BD54C,0xE602EE14,0x00000000
+	long		0x3FFE0000,0xCD000549,0xADEC7159,0x00000000
+	long		0x3FFE0000,0xD48457D2,0xD8EA4EA3,0x00000000
+	long		0x3FFE0000,0xDB948DA7,0x12DECE3B,0x00000000
+	long		0x3FFE0000,0xE23855F9,0x69E8096A,0x00000000
+	long		0x3FFE0000,0xE8771129,0xC4353259,0x00000000
+	long		0x3FFE0000,0xEE57C16E,0x0D379C0D,0x00000000
+	long		0x3FFE0000,0xF3E10211,0xA87C3779,0x00000000
+	long		0x3FFE0000,0xF919039D,0x758B8D41,0x00000000
+	long		0x3FFE0000,0xFE058B8F,0x64935FB3,0x00000000
+	long		0x3FFF0000,0x8155FB49,0x7B685D04,0x00000000
+	long		0x3FFF0000,0x83889E35,0x49D108E1,0x00000000
+	long		0x3FFF0000,0x859CFA76,0x511D724B,0x00000000
+	long		0x3FFF0000,0x87952ECF,0xFF8131E7,0x00000000
+	long		0x3FFF0000,0x89732FD1,0x9557641B,0x00000000
+	long		0x3FFF0000,0x8B38CAD1,0x01932A35,0x00000000
+	long		0x3FFF0000,0x8CE7A8D8,0x301EE6B5,0x00000000
+	long		0x3FFF0000,0x8F46A39E,0x2EAE5281,0x00000000
+	long		0x3FFF0000,0x922DA7D7,0x91888487,0x00000000
+	long		0x3FFF0000,0x94D19FCB,0xDEDF5241,0x00000000
+	long		0x3FFF0000,0x973AB944,0x19D2A08B,0x00000000
+	long		0x3FFF0000,0x996FF00E,0x08E10B96,0x00000000
+	long		0x3FFF0000,0x9B773F95,0x12321DA7,0x00000000
+	long		0x3FFF0000,0x9D55CC32,0x0F935624,0x00000000
+	long		0x3FFF0000,0x9F100575,0x006CC571,0x00000000
+	long		0x3FFF0000,0xA0A9C290,0xD97CC06C,0x00000000
+	long		0x3FFF0000,0xA22659EB,0xEBC0630A,0x00000000
+	long		0x3FFF0000,0xA388B4AF,0xF6EF0EC9,0x00000000
+	long		0x3FFF0000,0xA4D35F10,0x61D292C4,0x00000000
+	long		0x3FFF0000,0xA60895DC,0xFBE3187E,0x00000000
+	long		0x3FFF0000,0xA72A51DC,0x7367BEAC,0x00000000
+	long		0x3FFF0000,0xA83A5153,0x0956168F,0x00000000
+	long		0x3FFF0000,0xA93A2007,0x7539546E,0x00000000
+	long		0x3FFF0000,0xAA9E7245,0x023B2605,0x00000000
+	long		0x3FFF0000,0xAC4C84BA,0x6FE4D58F,0x00000000
+	long		0x3FFF0000,0xADCE4A4A,0x606B9712,0x00000000
+	long		0x3FFF0000,0xAF2A2DCD,0x8D263C9C,0x00000000
+	long		0x3FFF0000,0xB0656F81,0xF22265C7,0x00000000
+	long		0x3FFF0000,0xB1846515,0x0F71496A,0x00000000
+	long		0x3FFF0000,0xB28AAA15,0x6F9ADA35,0x00000000
+	long		0x3FFF0000,0xB37B44FF,0x3766B895,0x00000000
+	long		0x3FFF0000,0xB458C3DC,0xE9630433,0x00000000
+	long		0x3FFF0000,0xB525529D,0x562246BD,0x00000000
+	long		0x3FFF0000,0xB5E2CCA9,0x5F9D88CC,0x00000000
+	long		0x3FFF0000,0xB692CADA,0x7ACA1ADA,0x00000000
+	long		0x3FFF0000,0xB736AEA7,0xA6925838,0x00000000
+	long		0x3FFF0000,0xB7CFAB28,0x7E9F7B36,0x00000000
+	long		0x3FFF0000,0xB85ECC66,0xCB219835,0x00000000
+	long		0x3FFF0000,0xB8E4FD5A,0x20A593DA,0x00000000
+	long		0x3FFF0000,0xB99F41F6,0x4AFF9BB5,0x00000000
+	long		0x3FFF0000,0xBA7F1E17,0x842BBE7B,0x00000000
+	long		0x3FFF0000,0xBB471285,0x7637E17D,0x00000000
+	long		0x3FFF0000,0xBBFABE8A,0x4788DF6F,0x00000000
+	long		0x3FFF0000,0xBC9D0FAD,0x2B689D79,0x00000000
+	long		0x3FFF0000,0xBD306A39,0x471ECD86,0x00000000
+	long		0x3FFF0000,0xBDB6C731,0x856AF18A,0x00000000
+	long		0x3FFF0000,0xBE31CAC5,0x02E80D70,0x00000000
+	long		0x3FFF0000,0xBEA2D55C,0xE33194E2,0x00000000
+	long		0x3FFF0000,0xBF0B10B7,0xC03128F0,0x00000000
+	long		0x3FFF0000,0xBF6B7A18,0xDACB778D,0x00000000
+	long		0x3FFF0000,0xBFC4EA46,0x63FA18F6,0x00000000
+	long		0x3FFF0000,0xC0181BDE,0x8B89A454,0x00000000
+	long		0x3FFF0000,0xC065B066,0xCFBF6439,0x00000000
+	long		0x3FFF0000,0xC0AE345F,0x56340AE6,0x00000000
+	long		0x3FFF0000,0xC0F22291,0x9CB9E6A7,0x00000000
+
+	set		X,FP_SCR0
+	set		XDCARE,X+2
+	set		XFRAC,X+4
+	set		XFRACLO,X+8
+
+	set		ATANF,FP_SCR1
+	set		ATANFHI,ATANF+4
+	set		ATANFLO,ATANF+8
+
+	global		satan
+#--ENTRY POINT FOR ATAN(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+satan:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	fmov.x		%fp0,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FFB8000		# |X| >= 1/16?
+	bge.b		ATANOK1
+	bra.w		ATANSM
+
+ATANOK1:
+	cmp.l		%d1,&0x4002FFFF		# |X| < 16 ?
+	ble.b		ATANMAIN
+	bra.w		ATANBIG
+
+#--THE MOST LIKELY CASE, |X| IN [1/16, 16). WE USE TABLE TECHNIQUE
+#--THE IDEA IS ATAN(X) = ATAN(F) + ATAN( [X-F] / [1+XF] ).
+#--SO IF F IS CHOSEN TO BE CLOSE TO X AND ATAN(F) IS STORED IN
+#--A TABLE, ALL WE NEED IS TO APPROXIMATE ATAN(U) WHERE
+#--U = (X-F)/(1+XF) IS SMALL (REMEMBER F IS CLOSE TO X). IT IS
+#--TRUE THAT A DIVIDE IS NOW NEEDED, BUT THE APPROXIMATION FOR
+#--ATAN(U) IS A VERY SHORT POLYNOMIAL AND THE INDEXING TO
+#--FETCH F AND SAVING OF REGISTERS CAN ALL BE HIDDEN UNDER THE
+#--DIVIDE. IN THE END THIS METHOD IS MUCH FASTER THAN A TRADITIONAL
+#--ONE. NOTE ALSO THAT THE TRADITIONAL SCHEME THAT APPROXIMATES
+#--ATAN(X) DIRECTLY WILL NEED TO USE A RATIONAL APPROXIMATION
+#--(DIVISION NEEDED) ANYWAY BECAUSE A POLYNOMIAL APPROXIMATION
+#--WILL INVOLVE A VERY LONG POLYNOMIAL.
+
+#--NOW WE SEE X AS +-2^K * 1.BBBBBBB....B <- 1. + 63 BITS
+#--WE CHOSE F TO BE +-2^K * 1.BBBB1
+#--THAT IS, IT MATCHES THE EXPONENT AND FIRST 5 BITS OF X; THE
+#--SIXTH BIT IS SET TO 1. SINCE K = -4, -3, ..., 3, THERE
+#--ARE ONLY 8 TIMES 16 = 2^7 = 128 |F|'S. SINCE ATAN(-|F|) IS
+#-- -ATAN(|F|), WE NEED TO STORE ONLY ATAN(|F|).
+
+ATANMAIN:
+
+	and.l		&0xF8000000,XFRAC(%a6)	# FIRST 5 BITS
+	or.l		&0x04000000,XFRAC(%a6)	# SET 6-TH BIT TO 1
+	mov.l		&0x00000000,XFRACLO(%a6) # LOCATION OF X IS NOW F
+
+	fmov.x		%fp0,%fp1		# FP1 IS X
+	fmul.x		X(%a6),%fp1		# FP1 IS X*F, NOTE THAT X*F > 0
+	fsub.x		X(%a6),%fp0		# FP0 IS X-F
+	fadd.s		&0x3F800000,%fp1	# FP1 IS 1 + X*F
+	fdiv.x		%fp1,%fp0		# FP0 IS U = (X-F)/(1+X*F)
+
+#--WHILE THE DIVISION IS TAKING ITS TIME, WE FETCH ATAN(|F|)
+#--CREATE ATAN(F) AND STORE IT IN ATANF, AND
+#--SAVE REGISTERS FP2.
+
+	mov.l		%d2,-(%sp)		# SAVE d2 TEMPORARILY
+	mov.l		%d1,%d2			# THE EXP AND 16 BITS OF X
+	and.l		&0x00007800,%d1		# 4 VARYING BITS OF F'S FRACTION
+	and.l		&0x7FFF0000,%d2		# EXPONENT OF F
+	sub.l		&0x3FFB0000,%d2		# K+4
+	asr.l		&1,%d2
+	add.l		%d2,%d1			# THE 7 BITS IDENTIFYING F
+	asr.l		&7,%d1			# INDEX INTO TBL OF ATAN(|F|)
+	lea		ATANTBL(%pc),%a1
+	add.l		%d1,%a1			# ADDRESS OF ATAN(|F|)
+	mov.l		(%a1)+,ATANF(%a6)
+	mov.l		(%a1)+,ATANFHI(%a6)
+	mov.l		(%a1)+,ATANFLO(%a6)	# ATANF IS NOW ATAN(|F|)
+	mov.l		X(%a6),%d1		# LOAD SIGN AND EXPO. AGAIN
+	and.l		&0x80000000,%d1		# SIGN(F)
+	or.l		%d1,ATANF(%a6)		# ATANF IS NOW SIGN(F)*ATAN(|F|)
+	mov.l		(%sp)+,%d2		# RESTORE d2
+
+#--THAT'S ALL I HAVE TO DO FOR NOW,
+#--BUT ALAS, THE DIVIDE IS STILL CRANKING!
+
+#--U IN FP0, WE ARE NOW READY TO COMPUTE ATAN(U) AS
+#--U + A1*U*V*(A2 + V*(A3 + V)), V = U*U
+#--THE POLYNOMIAL MAY LOOK STRANGE, BUT IS NEVERTHELESS CORRECT.
+#--THE NATURAL FORM IS U + U*V*(A1 + V*(A2 + V*A3))
+#--WHAT WE HAVE HERE IS MERELY	A1 = A3, A2 = A1/A3, A3 = A2/A3.
+#--THE REASON FOR THIS REARRANGEMENT IS TO MAKE THE INDEPENDENT
+#--PARTS A1*U*V AND (A2 + ... STUFF) MORE LOAD-BALANCED
+
+	fmovm.x		&0x04,-(%sp)		# save fp2
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1
+	fmov.d		ATANA3(%pc),%fp2
+	fadd.x		%fp1,%fp2		# A3+V
+	fmul.x		%fp1,%fp2		# V*(A3+V)
+	fmul.x		%fp0,%fp1		# U*V
+	fadd.d		ATANA2(%pc),%fp2	# A2+V*(A3+V)
+	fmul.d		ATANA1(%pc),%fp1	# A1*U*V
+	fmul.x		%fp2,%fp1		# A1*U*V*(A2+V*(A3+V))
+	fadd.x		%fp1,%fp0		# ATAN(U), FP1 RELEASED
+
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	fadd.x		ATANF(%a6),%fp0		# ATAN(X)
+	bra		t_inx2
+
+ATANBORS:
+#--|X| IS IN d0 IN COMPACT FORM. FP1, d0 SAVED.
+#--FP0 IS X AND |X| <= 1/16 OR |X| >= 16.
+	cmp.l		%d1,&0x3FFF8000
+	bgt.w		ATANBIG			# I.E. |X| >= 16
+
+ATANSM:
+#--|X| <= 1/16
+#--IF |X| < 2^(-40), RETURN X AS ANSWER. OTHERWISE, APPROXIMATE
+#--ATAN(X) BY X + X*Y*(B1+Y*(B2+Y*(B3+Y*(B4+Y*(B5+Y*B6)))))
+#--WHICH IS X + X*Y*( [B1+Z*(B3+Z*B5)] + [Y*(B2+Z*(B4+Z*B6)] )
+#--WHERE Y = X*X, AND Z = Y*Y.
+
+	cmp.l		%d1,&0x3FD78000
+	blt.w		ATANTINY
+
+#--COMPUTE POLYNOMIAL
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmul.x		%fp0,%fp0		# FP0 IS Y = X*X
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS Z = Y*Y
+
+	fmov.d		ATANB6(%pc),%fp2
+	fmov.d		ATANB5(%pc),%fp3
+
+	fmul.x		%fp1,%fp2		# Z*B6
+	fmul.x		%fp1,%fp3		# Z*B5
+
+	fadd.d		ATANB4(%pc),%fp2	# B4+Z*B6
+	fadd.d		ATANB3(%pc),%fp3	# B3+Z*B5
+
+	fmul.x		%fp1,%fp2		# Z*(B4+Z*B6)
+	fmul.x		%fp3,%fp1		# Z*(B3+Z*B5)
+
+	fadd.d		ATANB2(%pc),%fp2	# B2+Z*(B4+Z*B6)
+	fadd.d		ATANB1(%pc),%fp1	# B1+Z*(B3+Z*B5)
+
+	fmul.x		%fp0,%fp2		# Y*(B2+Z*(B4+Z*B6))
+	fmul.x		X(%a6),%fp0		# X*Y
+
+	fadd.x		%fp2,%fp1		# [B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]
+
+	fmul.x		%fp1,%fp0		# X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))])
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	fadd.x		X(%a6),%fp0
+	bra		t_inx2
+
+ATANTINY:
+#--|X| < 2^(-40), ATAN(X) = X
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0		# last inst - possible exception set
+
+	bra		t_catch
+
+ATANBIG:
+#--IF |X| > 2^(100), RETURN	SIGN(X)*(PI/2 - TINY). OTHERWISE,
+#--RETURN SIGN(X)*PI/2 + ATAN(-1/X).
+	cmp.l		%d1,&0x40638000
+	bgt.w		ATANHUGE
+
+#--APPROXIMATE ATAN(-1/X) BY
+#--X'+X'*Y*(C1+Y*(C2+Y*(C3+Y*(C4+Y*C5)))), X' = -1/X, Y = X'*X'
+#--THIS CAN BE RE-WRITTEN AS
+#--X'+X'*Y*( [C1+Z*(C3+Z*C5)] + [Y*(C2+Z*C4)] ), Z = Y*Y.
+
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmov.s		&0xBF800000,%fp1	# LOAD -1
+	fdiv.x		%fp0,%fp1		# FP1 IS -1/X
+
+#--DIVIDE IS STILL CRANKING
+
+	fmov.x		%fp1,%fp0		# FP0 IS X'
+	fmul.x		%fp0,%fp0		# FP0 IS Y = X'*X'
+	fmov.x		%fp1,X(%a6)		# X IS REALLY X'
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS Z = Y*Y
+
+	fmov.d		ATANC5(%pc),%fp3
+	fmov.d		ATANC4(%pc),%fp2
+
+	fmul.x		%fp1,%fp3		# Z*C5
+	fmul.x		%fp1,%fp2		# Z*C4
+
+	fadd.d		ATANC3(%pc),%fp3	# C3+Z*C5
+	fadd.d		ATANC2(%pc),%fp2	# C2+Z*C4
+
+	fmul.x		%fp3,%fp1		# Z*(C3+Z*C5), FP3 RELEASED
+	fmul.x		%fp0,%fp2		# Y*(C2+Z*C4)
+
+	fadd.d		ATANC1(%pc),%fp1	# C1+Z*(C3+Z*C5)
+	fmul.x		X(%a6),%fp0		# X'*Y
+
+	fadd.x		%fp2,%fp1		# [Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)]
+
+	fmul.x		%fp1,%fp0		# X'*Y*([C1+Z*(C3+Z*C5)]
+#					...	+[Y*(C2+Z*C4)])
+	fadd.x		X(%a6),%fp0
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	tst.b		(%a0)
+	bpl.b		pos_big
+
+neg_big:
+	fadd.x		NPIBY2(%pc),%fp0
+	bra		t_minx2
+
+pos_big:
+	fadd.x		PPIBY2(%pc),%fp0
+	bra		t_pinx2
+
+ATANHUGE:
+#--RETURN SIGN(X)*(PIBY2 - TINY) = SIGN(X)*PIBY2 - SIGN(X)*TINY
+	tst.b		(%a0)
+	bpl.b		pos_huge
+
+neg_huge:
+	fmov.x		NPIBY2(%pc),%fp0
+	fmov.l		%d0,%fpcr
+	fadd.x		PTINY(%pc),%fp0
+	bra		t_minx2
+
+pos_huge:
+	fmov.x		PPIBY2(%pc),%fp0
+	fmov.l		%d0,%fpcr
+	fadd.x		NTINY(%pc),%fp0
+	bra		t_pinx2
+
+	global		satand
+#--ENTRY POINT FOR ATAN(X) FOR DENORMALIZED ARGUMENT
+satand:
+	bra		t_extdnrm
+
+#########################################################################
+# sasin():  computes the inverse sine of a normalized input		#
+# sasind(): computes the inverse sine of a denormalized input		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = arcsin(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in	64 significant bit,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	ASIN								#
+#	1. If |X| >= 1, go to 3.					#
+#									#
+#	2. (|X| < 1) Calculate asin(X) by				#
+#		z := sqrt( [1-X][1+X] )					#
+#		asin(X) = atan( x / z ).				#
+#		Exit.							#
+#									#
+#	3. If |X| > 1, go to 5.						#
+#									#
+#	4. (|X| = 1) sgn := sign(X), return asin(X) := sgn * Pi/2. Exit.#
+#									#
+#	5. (|X| > 1) Generate an invalid operation by 0 * infinity.	#
+#		Exit.							#
+#									#
+#########################################################################
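
In C terms the three cases above amount to the sketch below; libm's atan(), sqrt() and copysign() stand in for the call into satan() and the on-chip instructions, and asin_kernel is an illustrative name only.

#include <math.h>

static const double PIO2 = 1.57079632679489661923;	/* Pi/2 */

static double asin_kernel(double x)
{
	if (fabs(x) < 1.0)		/* Step 2: atan( X / sqrt([1-X][1+X]) ) */
		return atan(x / sqrt((1.0 - x) * (1.0 + x)));
	if (fabs(x) == 1.0)		/* Step 4: sign(X) * Pi/2 */
		return copysign(PIO2, x);
	return 0.0 * INFINITY;		/* Step 5: raise invalid, result is NaN */
}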
+
+	global		sasin
+sasin:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x3FFF8000
+	bge.b		ASINBIG
+
+# This catch is added here for the '060 QSP. Originally, the call to
+# satan() would handle this case by causing the exception which would
+# not be caught until gen_except(). Now, with the exceptions being
+# detected inside of satan(), the exception would have been handled there
+# instead of inside sasin() as expected.
+	cmp.l		%d1,&0x3FD78000
+	blt.w		ASINTINY
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--ASIN(X) = ATAN( X / SQRT( (1-X)(1+X) ) )
+
+ASINMAIN:
+	fmov.s		&0x3F800000,%fp1
+	fsub.x		%fp0,%fp1		# 1-X
+	fmovm.x		&0x4,-(%sp)		#  {fp2}
+	fmov.s		&0x3F800000,%fp2
+	fadd.x		%fp0,%fp2		# 1+X
+	fmul.x		%fp2,%fp1		# (1+X)(1-X)
+	fmovm.x		(%sp)+,&0x20		#  {fp2}
+	fsqrt.x		%fp1			# SQRT([1-X][1+X])
+	fdiv.x		%fp1,%fp0		# X/SQRT([1-X][1+X])
+	fmovm.x		&0x01,-(%sp)		# save X/SQRT(...)
+	lea		(%sp),%a0		# pass ptr to X/SQRT(...)
+	bsr		satan
+	add.l		&0xc,%sp		# clear X/SQRT(...) from stack
+	bra		t_inx2
+
+ASINBIG:
+	fabs.x		%fp0			# |X|
+	fcmp.s		%fp0,&0x3F800000
+	fbgt		t_operr			# cause an operr exception
+
+#--|X| = 1, ASIN(X) = +- PI/2.
+ASINONE:
+	fmov.x		PIBY2(%pc),%fp0
+	mov.l		(%a0),%d1
+	and.l		&0x80000000,%d1		# SIGN BIT OF X
+	or.l		&0x3F800000,%d1		# +-1 IN SGL FORMAT
+	mov.l		%d1,-(%sp)		# push SIGN(X) IN SGL-FMT
+	fmov.l		%d0,%fpcr
+	fmul.s		(%sp)+,%fp0
+	bra		t_inx2
+
+#--|X| < 2^(-40), ASIN(X) = X
+ASINTINY:
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		(%a0),%fp0		# last inst - possible exception
+	bra		t_catch
+
+	global		sasind
+#--ASIN(X) = X FOR DENORMALIZED X
+sasind:
+	bra		t_extdnrm
+
+#########################################################################
+# sacos():  computes the inverse cosine of a normalized input		#
+# sacosd(): computes the inverse cosine of a denormalized input		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = arccos(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in	64 significant bit,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#	ACOS								#
+#	1. If |X| >= 1, go to 3.					#
+#									#
+#	2. (|X| < 1) Calculate acos(X) by				#
+#		z := (1-X) / (1+X)					#
+#		acos(X) = 2 * atan( sqrt(z) ).				#
+#		Exit.							#
+#									#
+#	3. If |X| > 1, go to 5.						#
+#									#
+#	4. (|X| = 1) If X > 0, return 0. Otherwise, return Pi. Exit.	#
+#									#
+#	5. (|X| > 1) Generate an invalid operation by 0 * infinity.	#
+#		Exit.							#
+#									#
+#########################################################################
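
The matching sketch for acos, under the same assumptions (libm stands in for satan() and the hardware square root; acos_kernel is an illustrative name):

#include <math.h>

static const double PI = 3.14159265358979323846;

static double acos_kernel(double x)
{
	if (fabs(x) < 1.0)		/* Step 2: 2 * atan( sqrt([1-X]/[1+X]) ) */
		return 2.0 * atan(sqrt((1.0 - x) / (1.0 + x)));
	if (x == 1.0)			/* Step 4 */
		return 0.0;
	if (x == -1.0)
		return PI;
	return 0.0 * INFINITY;		/* Step 5: invalid operation */
}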
+
+	global		sacos
+sacos:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1		# pack exp w/ upper 16 fraction
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x3FFF8000
+	bge.b		ACOSBIG
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--ACOS(X) = 2 * ATAN(	SQRT( (1-X)/(1+X) ) )
+
+ACOSMAIN:
+	fmov.s		&0x3F800000,%fp1
+	fadd.x		%fp0,%fp1		# 1+X
+	fneg.x		%fp0			# -X
+	fadd.s		&0x3F800000,%fp0	# 1-X
+	fdiv.x		%fp1,%fp0		# (1-X)/(1+X)
+	fsqrt.x		%fp0			# SQRT((1-X)/(1+X))
+	mov.l		%d0,-(%sp)		# save original users fpcr
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save SQRT(...) to stack
+	lea		(%sp),%a0		# pass ptr to sqrt
+	bsr		satan			# ATAN(SQRT([1-X]/[1+X]))
+	add.l		&0xc,%sp		# clear SQRT(...) from stack
+
+	fmov.l		(%sp)+,%fpcr		# restore users round prec,mode
+	fadd.x		%fp0,%fp0		# 2 * ATAN( STUFF )
+	bra		t_pinx2
+
+ACOSBIG:
+	fabs.x		%fp0
+	fcmp.s		%fp0,&0x3F800000
+	fbgt		t_operr			# cause an operr exception
+
+#--|X| = 1, ACOS(X) = 0 OR PI
+	tst.b		(%a0)			# is X positive or negative?
+	bpl.b		ACOSP1
+
+#--X = -1
+# Returns PI and inexact exception
+ACOSM1:
+	fmov.x		PI(%pc),%fp0		# load PI
+	fmov.l		%d0,%fpcr		# load round mode,prec
+	fadd.s		&0x00800000,%fp0	# add a small value
+	bra		t_pinx2
+
+ACOSP1:
+	bra		ld_pzero		# answer is positive zero
+
+	global		sacosd
+#--ACOS(X) = PI/2 FOR DENORMALIZED X
+sacosd:
+	fmov.l		%d0,%fpcr		# load user's rnd mode/prec
+	fmov.x		PIBY2(%pc),%fp0
+	bra		t_pinx2
+
+#########################################################################
+# setox():    computes the exponential for a normalized input		#
+# setoxd():   computes the exponential for a denormalized input		#
+# setoxm1():  computes the exponential minus 1 for a normalized input	#
+# setoxm1d(): computes the exponential minus 1 for a denormalized input	#
+#									#
+# INPUT	*************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = exp(X) or exp(X)-1					#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 0.85 ulps in 64 significant bit,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM and IMPLEMENTATION **************************************** #
+#									#
+#	setoxd								#
+#	------								#
+#	Step 1.	Set ans := 1.0						#
+#									#
+#	Step 2.	Return	ans := ans + sign(X)*2^(-126). Exit.		#
+#	Notes:	This will always generate one exception -- inexact.	#
+#									#
+#									#
+#	setox								#
+#	-----								#
+#									#
+#	Step 1.	Filter out extreme cases of input argument.		#
+#		1.1	If |X| >= 2^(-65), go to Step 1.3.		#
+#		1.2	Go to Step 7.					#
+#		1.3	If |X| < 16380 log(2), go to Step 2.		#
+#		1.4	Go to Step 8.					#
+#	Notes:	The usual case should take the branches 1.1 -> 1.3 -> 2.#
+#		To avoid the use of floating-point comparisons, a	#
+#		compact representation of |X| is used. This format is a	#
+#		32-bit integer, the upper (more significant) 16 bits	#
+#		are the sign and biased exponent field of |X|; the	#
+#		lower 16 bits are the 16 most significant fraction	#
+#		(including the explicit bit) bits of |X|. Consequently,	#
+#		the comparisons in Steps 1.1 and 1.3 can be performed	#
+#		by integer comparison. Note also that the constant	#
+#		16380 log(2) used in Step 1.3 is also in the compact	#
+#		form. Thus taking the branch to Step 2 guarantees	#
+#		|X| < 16380 log(2). There is no harm in having a small	#
+#		number of cases where |X| is less than, but close to,	#
+#		16380 log(2) and the branch to Step 8 is taken.		#
+#									#
+#	Step 2.	Calculate N = round-to-nearest-int( X * 64/log2 ).	#
+#		2.1	Set AdjFlag := 0 (indicates the branch 1.3 -> 2 #
+#			was taken)					#
+#		2.2	N := round-to-nearest-integer( X * 64/log2 ).	#
+#		2.3	Calculate	J = N mod 64; so J = 0,1,2,..., #
+#			or 63.						#
+#		2.4	Calculate	M = (N - J)/64; so N = 64M + J.	#
+#		2.5	Calculate the address of the stored value of	#
+#			2^(J/64).					#
+#		2.6	Create the value Scale = 2^M.			#
+#	Notes:	The calculation in 2.2 is really performed by		#
+#			Z := X * constant				#
+#			N := round-to-nearest-integer(Z)		#
+#		where							#
+#			constant := single-precision( 64/log 2 ).	#
+#									#
+#		Using a single-precision constant avoids memory		#
+#		access. Another effect of using a single-precision	#
+#		"constant" is that the calculated value Z is		#
+#									#
+#			Z = X*(64/log2)*(1+eps), |eps| <= 2^(-24).	#
+#									#
+#		This error has to be considered later in Steps 3 and 4.	#
+#									#
+#	Step 3.	Calculate X - N*log2/64.				#
+#		3.1	R := X + N*L1,					#
+#				where L1 := single-precision(-log2/64).	#
+#		3.2	R := R + N*L2,					#
+#				L2 := extended-precision(-log2/64 - L1).#
+#	Notes:	a) The way L1 and L2 are chosen ensures L1+L2		#
+#		approximate the value -log2/64 to 88 bits of accuracy.	#
+#		b) N*L1 is exact because N is no longer than 22 bits	#
+#		and L1 is no longer than 24 bits.			#
+#		c) The calculation X+N*L1 is also exact due to		#
+#		cancellation. Thus, R is practically X+N(L1+L2) to full	#
+#		64 bits.						#
+#		d) It is important to estimate how large can |R| be	#
+#		after Step 3.2.						#
+#									#
+#		N = rnd-to-int( X*64/log2 (1+eps) ), |eps|<=2^(-24)	#
+#		X*64/log2 (1+eps)	=	N + f,	|f| <= 0.5	#
+#		X*64/log2 - N	=	f - eps*X 64/log2		#
+#		X - N*log2/64	=	f*log2/64 - eps*X		#
+#									#
+#									#
+#		Now |X| <= 16446 log2, thus				#
+#									#
+#			|X - N*log2/64| <= (0.5 + 16446/2^(18))*log2/64	#
+#					<= 0.57 log2/64.		#
+#		 This bound will be used in Step 4.			#
+#									#
+#	Step 4.	Approximate exp(R)-1 by a polynomial			#
+#		p = R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))	#
+#	Notes:	a) In order to reduce memory access, the coefficients	#
+#		are made as "short" as possible: A1 (which is 1/2), A4	#
+#		and A5 are single precision; A2 and A3 are double	#
+#		precision.						#
+#		b) Even with the restrictions above,			#
+#		   |p - (exp(R)-1)| < 2^(-68.8) for all |R| <= 0.0062.	#
+#		Note that 0.0062 is slightly bigger than 0.57 log2/64.	#
+#		c) To fully utilize the pipeline, p is separated into	#
+#		two independent pieces of roughly equal complexities	#
+#			p = [ R + R*S*(A2 + S*A4) ]	+		#
+#				[ S*(A1 + S*(A3 + S*A5)) ]		#
+#		where S = R*R.						#
+#									#
+#	Step 5.	Compute 2^(J/64)*exp(R) = 2^(J/64)*(1+p) by		#
+#				ans := T + ( T*p + t)			#
+#		where T and t are the stored values for 2^(J/64).	#
+#	Notes:	2^(J/64) is stored as T and t where T+t approximates	#
+#		2^(J/64) to roughly 85 bits; T is in extended precision	#
+#		and t is in single precision. Note also that T is	#
+#		rounded to 62 bits so that the last two bits of T are	#
+#		zero. The reason for such a special form is that T-1,	#
+#		T-2, and T-8 will all be exact --- a property that will	#
+#		give much more accurate computation of the function	#
+#		EXPM1.							#
+#									#
+#	Step 6.	Reconstruction of exp(X)				#
+#			exp(X) = 2^M * 2^(J/64) * exp(R).		#
+#		6.1	If AdjFlag = 0, go to 6.3			#
+#		6.2	ans := ans * AdjScale				#
+#		6.3	Restore the user FPCR				#
+#		6.4	Return ans := ans * Scale. Exit.		#
+#	Notes:	If AdjFlag = 0, we have X = Mlog2 + Jlog2/64 + R,	#
+#		|M| <= 16380, and Scale = 2^M. Moreover, exp(X) will	#
+#		neither overflow nor underflow. If AdjFlag = 1, that	#
+#		means that						#
+#			X = (M1+M)log2 + Jlog2/64 + R, |M1+M| >= 16380.	#
+#		Hence, exp(X) may overflow or underflow or neither.	#
+#		When that is the case, AdjScale = 2^(M1) where M1 is	#
+#		approximately M. Thus 6.2 will never cause		#
+#		over/underflow. Possible exception in 6.4 is overflow	#
+#		or underflow. The inexact exception is not generated in	#
+#		6.4. Although one can argue that the inexact flag	#
+#		should always be raised, simulating that exception	#
+#		would cost more than the flag is worth in practical uses.	#
+#									#
+#	Step 7.	Return 1 + X.						#
+#		7.1	ans := X					#
+#		7.2	Restore user FPCR.				#
+#		7.3	Return ans := 1 + ans. Exit			#
+#	Notes:	For non-zero X, the inexact exception will always be	#
+#		raised by 7.3. That is the only exception raised by 7.3.#
+#		Note also that we use the FMOVEM instruction to move X	#
+#		in Step 7.1 to avoid unnecessary trapping. (Although	#
+#		the FMOVEM may not seem relevant since X is normalized,	#
+#		the precaution will be useful in the library version of	#
+#		this code where the separate entry for denormalized	#
+#		inputs will be done away with.)				#
+#									#
+#	Step 8.	Handle exp(X) where |X| >= 16380log2.			#
+#		8.1	If |X| > 16480 log2, go to Step 9.		#
+#		(mimic 2.2 - 2.6)					#
+#		8.2	N := round-to-integer( X * 64/log2 )		#
+#		8.3	Calculate J = N mod 64, J = 0,1,...,63		#
+#		8.4	K := (N-J)/64, M1 := truncate(K/2), M = K-M1,	#
+#			AdjFlag := 1.					#
+#		8.5	Calculate the address of the stored value	#
+#			2^(J/64).					#
+#		8.6	Create the values Scale = 2^M, AdjScale = 2^M1.	#
+#		8.7	Go to Step 3.					#
+#	Notes:	Refer to notes for 2.2 - 2.6.				#
+#									#
+#	Step 9.	Handle exp(X), |X| > 16480 log2.			#
+#		9.1	If X < 0, go to 9.3				#
+#		9.2	ans := Huge, go to 9.4				#
+#		9.3	ans := Tiny.					#
+#		9.4	Restore user FPCR.				#
+#		9.5	Return ans := ans * ans. Exit.			#
+#	Notes:	Exp(X) will surely overflow or underflow, depending on	#
+#		X's sign. "Huge" and "Tiny" are respectively large/tiny	#
+#		extended-precision numbers whose square over/underflow	#
+#		with an inexact result. Thus, 9.5 always raises the	#
+#		inexact together with either overflow or underflow.	#
+#									#
+#	setoxm1d							#
+#	--------							#
+#									#
+#	Step 1.	Set ans := 0						#
+#									#
+#	Step 2.	Return	ans := X + ans. Exit.				#
+#	Notes:	This will return X with the appropriate rounding	#
+#		 precision prescribed by the user FPCR.			#
+#									#
+#	setoxm1								#
+#	-------								#
+#									#
+#	Step 1.	Check |X|						#
+#		1.1	If |X| >= 1/4, go to Step 1.3.			#
+#		1.2	Go to Step 7.					#
+#		1.3	If |X| < 70 log(2), go to Step 2.		#
+#		1.4	Go to Step 10.					#
+#	Notes:	The usual case should take the branches 1.1 -> 1.3 -> 2.#
+#		However, it is conceivable |X| can be small very often	#
+#		because EXPM1 is intended to evaluate exp(X)-1		#
+#		accurately when |X| is small. For further details on	#
+#		the comparisons, see the notes on Step 1 of setox.	#
+#									#
+#	Step 2.	Calculate N = round-to-nearest-int( X * 64/log2 ).	#
+#		2.1	N := round-to-nearest-integer( X * 64/log2 ).	#
+#		2.2	Calculate	J = N mod 64; so J = 0,1,2,..., #
+#			or 63.						#
+#		2.3	Calculate	M = (N - J)/64; so N = 64M + J.	#
+#		2.4	Calculate the address of the stored value of	#
+#			2^(J/64).					#
+#		2.5	Create the values Sc = 2^M and			#
+#			OnebySc := -2^(-M).				#
+#	Notes:	See the notes on Step 2 of setox.			#
+#									#
+#	Step 3.	Calculate X - N*log2/64.				#
+#		3.1	R := X + N*L1,					#
+#				where L1 := single-precision(-log2/64).	#
+#		3.2	R := R + N*L2,					#
+#				L2 := extended-precision(-log2/64 - L1).#
+#	Notes:	Applying the analysis of Step 3 of setox in this case	#
+#		shows that |R| <= 0.0055 (note that |X| <= 70 log2 in	#
+#		this case).						#
+#									#
+#	Step 4.	Approximate exp(R)-1 by a polynomial			#
+#			p = R+R*R*(A1+R*(A2+R*(A3+R*(A4+R*(A5+R*A6)))))	#
+#	Notes:	a) In order to reduce memory access, the coefficients	#
+#		are made as "short" as possible: A1 (which is 1/2), A5	#
+#		and A6 are single precision; A2, A3 and A4 are double	#
+#		precision.						#
+#		b) Even with the restriction above,			#
+#			|p - (exp(R)-1)| <	|R| * 2^(-72.7)		#
+#		for all |R| <= 0.0055.					#
+#		c) To fully utilize the pipeline, p is separated into	#
+#		two independent pieces of roughly equal complexity	#
+#			p = [ R*S*(A2 + S*(A4 + S*A6)) ]	+	#
+#				[ R + S*(A1 + S*(A3 + S*A5)) ]		#
+#		where S = R*R.						#
+#									#
+#	Step 5.	Compute 2^(J/64)*p by					#
+#				p := T*p				#
+#		where T and t are the stored values for 2^(J/64).	#
+#	Notes:	2^(J/64) is stored as T and t where T+t approximates	#
+#		2^(J/64) to roughly 85 bits; T is in extended precision	#
+#		and t is in single precision. Note also that T is	#
+#		rounded to 62 bits so that the last two bits of T are	#
+#		zero. The reason for such a special form is that T-1,	#
+#		T-2, and T-8 will all be exact --- a property that will	#
+#		be exploited in Step 6 below. The total relative error	#
+#		in p is no bigger than 2^(-67.7) compared to the final	#
+#		result.							#
+#									#
+#	Step 6.	Reconstruction of exp(X)-1				#
+#			exp(X)-1 = 2^M * ( 2^(J/64) + p - 2^(-M) ).	#
+#		6.1	If M <= 63, go to Step 6.3.			#
+#		6.2	ans := T + (p + (t + OnebySc)). Go to 6.6	#
+#		6.3	If M >= -3, go to 6.5.				#
+#		6.4	ans := (T + (p + t)) + OnebySc. Go to 6.6	#
+#		6.5	ans := (T + OnebySc) + (p + t).			#
+#		6.6	Restore user FPCR.				#
+#		6.7	Return ans := Sc * ans. Exit.			#
+#	Notes:	The various arrangements of the expressions give	#
+#		accurate evaluations.					#
+#									#
+#	Step 7.	exp(X)-1 for |X| < 1/4.					#
+#		7.1	If |X| >= 2^(-65), go to Step 9.		#
+#		7.2	Go to Step 8.					#
+#									#
+#	Step 8.	Calculate exp(X)-1, |X| < 2^(-65).			#
+#		8.1	If |X| < 2^(-16312), goto 8.3			#
+#		8.2	Restore FPCR; return ans := X - 2^(-16382).	#
+#			Exit.						#
+#		8.3	X := X * 2^(140).				#
+#		8.4	ans := X - 2^(-16382).				#
+#			Restore FPCR; return ans := ans*2^(-140). Exit.	#
+#	Notes:	The idea is to return "X - tiny" under the user		#
+#		precision and rounding modes. To avoid unnecessary	#
+#		inefficiency, we stay away from denormalized numbers	#
+#		as best we can. For |X| >= 2^(-16312), the		#
+#		straightforward 8.2 generates the inexact exception as	#
+#		the case warrants.					#
+#									#
+#	Step 9.	Calculate exp(X)-1, |X| < 1/4, by a polynomial		#
+#			p = X + X*X*(B1 + X*(B2 + ... + X*B12))		#
+#	Notes:	a) In order to reduce memory access, the coefficients	#
+#		are made as "short" as possible: B1 (which is 1/2), B9	#
+#		to B12 are single precision; B3 to B8 are double	#
+#		precision; and B2 is double extended.			#
+#		b) Even with the restriction above,			#
+#			|p - (exp(X)-1)| < |X| 2^(-70.6)		#
+#		for all |X| <= 0.251.					#
+#		Note that 0.251 is slightly bigger than 1/4.		#
+#		c) To fully preserve accuracy, the polynomial is	#
+#		computed as						#
+#			X + ( S*B1 +	Q ) where S = X*X and		#
+#			Q	=	X*S*(B2 + X*(B3 + ... + X*B12))	#
+#		d) To fully utilize the pipeline, Q is separated into	#
+#		two independent pieces of roughly equal complexity	#
+#			Q = [ X*S*(B2 + S*(B4 + ... + S*B12)) ] +	#
+#				[ S*S*(B3 + S*(B5 + ... + S*B11)) ]	#
+#									#
+#	Step 10. Calculate exp(X)-1 for |X| >= 70 log 2.		#
+#		10.1 If X >= 70 log2, exp(X) - 1 = exp(X) for all	#
+#		practical purposes. Therefore, go to Step 1 of setox.	#
+#		10.2 If X <= -70 log2, exp(X) - 1 = -1 for all practical	#
+#		purposes.						#
+#		ans := -1						#
+#		Restore user FPCR					#
+#		Return ans := ans + 2^(-126). Exit.			#
+#	Notes:	10.2 will always create an inexact and return -1 + tiny	#
+#		in the user rounding precision and mode.		#
+#									#
+#########################################################################
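+
+# Editor's note: the following is an illustrative double-precision C
+# sketch of the setox reduction described above; it is not part of the
+# emulation code.  Plain Taylor coefficients stand in for the minimax
+# constants A1-A5, the L1/L2 two-piece subtraction is collapsed into a
+# single step, exp2()/ldexp() stand in for the T+t table and the 2^M
+# scaling, and the AdjScale overflow/underflow staging as well as the
+# EXPM1 (setoxm1) entry are omitted.
+#
+#	#include <math.h>
+#
+#	static double exp_sketch(double x)
+#	{
+#		/* Step 2: N = round(x*64/log2), J = N mod 64, M = (N-J)/64 */
+#		double n = rint(x * 64.0 / 0.6931471805599453);
+#		int    j = (int)n & 63;
+#		int    m = ((int)n - j) / 64;
+#
+#		/* Step 3: r = x - N*log2/64, so |r| is roughly 0.57*log2/64 */
+#		double r = x - n * (0.6931471805599453 / 64.0);
+#
+#		/* Step 4: exp(r) - 1 by a short polynomial */
+#		double s = r * r;
+#		double p = r + s * (0.5 + r * (1.0/6.0 + r * (1.0/24.0 + r/120.0)));
+#
+#		/* Steps 5-6: 2^M * 2^(J/64) * (1 + p) */
+#		double t = exp2(j / 64.0);	/* stands in for the stored T+t */
+#		return ldexp(t + t * p, m);
+#	}
+#
+# The 68k code below reaches the same shape with integer compares on the
+# compact form of |X| and a final fmul by the extended SCALE value.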
+
+L2:	long		0x3FDC0000,0x82E30865,0x4361C4C6,0x00000000
+
+EEXPA3:	long		0x3FA55555,0x55554CC1
+EEXPA2:	long		0x3FC55555,0x55554A54
+
+EM1A4:	long		0x3F811111,0x11174385
+EM1A3:	long		0x3FA55555,0x55554F5A
+
+EM1A2:	long		0x3FC55555,0x55555555,0x00000000,0x00000000
+
+EM1B8:	long		0x3EC71DE3,0xA5774682
+EM1B7:	long		0x3EFA01A0,0x19D7CB68
+
+EM1B6:	long		0x3F2A01A0,0x1A019DF3
+EM1B5:	long		0x3F56C16C,0x16C170E2
+
+EM1B4:	long		0x3F811111,0x11111111
+EM1B3:	long		0x3FA55555,0x55555555
+
+EM1B2:	long		0x3FFC0000,0xAAAAAAAA,0xAAAAAAAB
+	long		0x00000000
+
+TWO140:	long		0x48B00000,0x00000000
+TWON140:
+	long		0x37300000,0x00000000
+
+EEXPTBL:
+	long		0x3FFF0000,0x80000000,0x00000000,0x00000000
+	long		0x3FFF0000,0x8164D1F3,0xBC030774,0x9F841A9B
+	long		0x3FFF0000,0x82CD8698,0xAC2BA1D8,0x9FC1D5B9
+	long		0x3FFF0000,0x843A28C3,0xACDE4048,0xA0728369
+	long		0x3FFF0000,0x85AAC367,0xCC487B14,0x1FC5C95C
+	long		0x3FFF0000,0x871F6196,0x9E8D1010,0x1EE85C9F
+	long		0x3FFF0000,0x88980E80,0x92DA8528,0x9FA20729
+	long		0x3FFF0000,0x8A14D575,0x496EFD9C,0xA07BF9AF
+	long		0x3FFF0000,0x8B95C1E3,0xEA8BD6E8,0xA0020DCF
+	long		0x3FFF0000,0x8D1ADF5B,0x7E5BA9E4,0x205A63DA
+	long		0x3FFF0000,0x8EA4398B,0x45CD53C0,0x1EB70051
+	long		0x3FFF0000,0x9031DC43,0x1466B1DC,0x1F6EB029
+	long		0x3FFF0000,0x91C3D373,0xAB11C338,0xA0781494
+	long		0x3FFF0000,0x935A2B2F,0x13E6E92C,0x9EB319B0
+	long		0x3FFF0000,0x94F4EFA8,0xFEF70960,0x2017457D
+	long		0x3FFF0000,0x96942D37,0x20185A00,0x1F11D537
+	long		0x3FFF0000,0x9837F051,0x8DB8A970,0x9FB952DD
+	long		0x3FFF0000,0x99E04593,0x20B7FA64,0x1FE43087
+	long		0x3FFF0000,0x9B8D39B9,0xD54E5538,0x1FA2A818
+	long		0x3FFF0000,0x9D3ED9A7,0x2CFFB750,0x1FDE494D
+	long		0x3FFF0000,0x9EF53260,0x91A111AC,0x20504890
+	long		0x3FFF0000,0xA0B0510F,0xB9714FC4,0xA073691C
+	long		0x3FFF0000,0xA2704303,0x0C496818,0x1F9B7A05
+	long		0x3FFF0000,0xA43515AE,0x09E680A0,0xA0797126
+	long		0x3FFF0000,0xA5FED6A9,0xB15138EC,0xA071A140
+	long		0x3FFF0000,0xA7CD93B4,0xE9653568,0x204F62DA
+	long		0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x1F283C4A
+	long		0x3FFF0000,0xAB7A39B5,0xA93ED338,0x9F9A7FDC
+	long		0x3FFF0000,0xAD583EEA,0x42A14AC8,0xA05B3FAC
+	long		0x3FFF0000,0xAF3B78AD,0x690A4374,0x1FDF2610
+	long		0x3FFF0000,0xB123F581,0xD2AC2590,0x9F705F90
+	long		0x3FFF0000,0xB311C412,0xA9112488,0x201F678A
+	long		0x3FFF0000,0xB504F333,0xF9DE6484,0x1F32FB13
+	long		0x3FFF0000,0xB6FD91E3,0x28D17790,0x20038B30
+	long		0x3FFF0000,0xB8FBAF47,0x62FB9EE8,0x200DC3CC
+	long		0x3FFF0000,0xBAFF5AB2,0x133E45FC,0x9F8B2AE6
+	long		0x3FFF0000,0xBD08A39F,0x580C36C0,0xA02BBF70
+	long		0x3FFF0000,0xBF1799B6,0x7A731084,0xA00BF518
+	long		0x3FFF0000,0xC12C4CCA,0x66709458,0xA041DD41
+	long		0x3FFF0000,0xC346CCDA,0x24976408,0x9FDF137B
+	long		0x3FFF0000,0xC5672A11,0x5506DADC,0x201F1568
+	long		0x3FFF0000,0xC78D74C8,0xABB9B15C,0x1FC13A2E
+	long		0x3FFF0000,0xC9B9BD86,0x6E2F27A4,0xA03F8F03
+	long		0x3FFF0000,0xCBEC14FE,0xF2727C5C,0x1FF4907D
+	long		0x3FFF0000,0xCE248C15,0x1F8480E4,0x9E6E53E4
+	long		0x3FFF0000,0xD06333DA,0xEF2B2594,0x1FD6D45C
+	long		0x3FFF0000,0xD2A81D91,0xF12AE45C,0xA076EDB9
+	long		0x3FFF0000,0xD4F35AAB,0xCFEDFA20,0x9FA6DE21
+	long		0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x1EE69A2F
+	long		0x3FFF0000,0xD99D15C2,0x78AFD7B4,0x207F439F
+	long		0x3FFF0000,0xDBFBB797,0xDAF23754,0x201EC207
+	long		0x3FFF0000,0xDE60F482,0x5E0E9124,0x9E8BE175
+	long		0x3FFF0000,0xE0CCDEEC,0x2A94E110,0x20032C4B
+	long		0x3FFF0000,0xE33F8972,0xBE8A5A50,0x2004DFF5
+	long		0x3FFF0000,0xE5B906E7,0x7C8348A8,0x1E72F47A
+	long		0x3FFF0000,0xE8396A50,0x3C4BDC68,0x1F722F22
+	long		0x3FFF0000,0xEAC0C6E7,0xDD243930,0xA017E945
+	long		0x3FFF0000,0xED4F301E,0xD9942B84,0x1F401A5B
+	long		0x3FFF0000,0xEFE4B99B,0xDCDAF5CC,0x9FB9A9E3
+	long		0x3FFF0000,0xF281773C,0x59FFB138,0x20744C05
+	long		0x3FFF0000,0xF5257D15,0x2486CC2C,0x1F773A19
+	long		0x3FFF0000,0xF7D0DF73,0x0AD13BB8,0x1FFE90D5
+	long		0x3FFF0000,0xFA83B2DB,0x722A033C,0xA041ED22
+	long		0x3FFF0000,0xFD3E0C0C,0xF486C174,0x1F853F3A
+
+	set		ADJFLAG,L_SCR2
+	set		SCALE,FP_SCR0
+	set		ADJSCALE,FP_SCR1
+	set		SC,FP_SCR0
+	set		ONEBYSC,FP_SCR1
+
+	global		setox
+setox:
+#--entry point for EXP(X), here X is finite, non-zero, and not NaN's
+
+#--Step 1.
+	mov.l		(%a0),%d1		# load part of input X
+	and.l		&0x7FFF0000,%d1		# biased expo. of X
+	cmp.l		%d1,&0x3FBE0000		# 2^(-65)
+	bge.b		EXPC1			# normal case
+	bra		EXPSM
+
+EXPC1:
+#--The case |X| >= 2^(-65)
+	mov.w		4(%a0),%d1		# expo. and partial sig. of |X|
+	cmp.l		%d1,&0x400CB167		# 16380 log2 trunc. 16 bits
+	blt.b		EXPMAIN			# normal case
+	bra		EEXPBIG
+
+EXPMAIN:
+#--Step 2.
+#--This is the normal branch:	2^(-65) <= |X| < 16380 log2.
+	fmov.x		(%a0),%fp0		# load input from (a0)
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	mov.l		&0,ADJFLAG(%a6)
+	fmov.l		%fp0,%d1		# N = int( X * 64/log2 )
+	lea		EEXPTBL(%pc),%a1
+	fmov.l		%d1,%fp0		# convert to floating-format
+
+	mov.l		%d1,L_SCR1(%a6)		# save N temporarily
+	and.l		&0x3F,%d1		# d1 is J = N mod 64
+	lsl.l		&4,%d1
+	add.l		%d1,%a1			# address of 2^(J/64)
+	mov.l		L_SCR1(%a6),%d1
+	asr.l		&6,%d1			# d1 is M
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(M)
+	mov.w		L2(%pc),L_SCR1(%a6)	# prefetch L2, no need in CB
+
+EXPCONT1:
+#--Step 3.
+#--fp2,fp3 saved on the stack. fp0 is N, fp1 is X,
+#--a1 points to 2^(J/64), d1 is biased expo. of 2^(M)
+	fmov.x		%fp0,%fp2
+	fmul.s		&0xBC317218,%fp0	# N * L1, L1 = lead(-log2/64)
+	fmul.x		L2(%pc),%fp2		# N * L2, L1+L2 = -log2/64
+	fadd.x		%fp1,%fp0		# X + N*L1
+	fadd.x		%fp2,%fp0		# fp0 is R, reduced arg.
+
+#--Step 4.
+#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
+#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+#--[R+R*S*(A2+S*A4)] + [S*(A1+S*(A3+S*A5))]
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# fp1 IS S = R*R
+
+	fmov.s		&0x3AB60B70,%fp2	# fp2 IS A5
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*A5
+	fmov.x		%fp1,%fp3
+	fmul.s		&0x3C088895,%fp3	# fp3 IS S*A4
+
+	fadd.d		EEXPA3(%pc),%fp2	# fp2 IS A3+S*A5
+	fadd.d		EEXPA2(%pc),%fp3	# fp3 IS A2+S*A4
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A3+S*A5)
+	mov.w		%d1,SCALE(%a6)		# SCALE is 2^(M) in extended
+	mov.l		&0x80000000,SCALE+4(%a6)
+	clr.l		SCALE+8(%a6)
+
+	fmul.x		%fp1,%fp3		# fp3 IS S*(A2+S*A4)
+
+	fadd.s		&0x3F000000,%fp2	# fp2 IS A1+S*(A3+S*A5)
+	fmul.x		%fp0,%fp3		# fp3 IS R*S*(A2+S*A4)
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A1+S*(A3+S*A5))
+	fadd.x		%fp3,%fp0		# fp0 IS R+R*S*(A2+S*A4),
+
+	fmov.x		(%a1)+,%fp1		# fp1 is lead. pt. of 2^(J/64)
+	fadd.x		%fp2,%fp0		# fp0 is EXP(R) - 1
+
+#--Step 5
+#--final reconstruction process
+#--EXP(X) = 2^M * ( 2^(J/64) + 2^(J/64)*(EXP(R)-1) )
+
+	fmul.x		%fp1,%fp0		# 2^(J/64)*(Exp(R)-1)
+	fmovm.x		(%sp)+,&0x30		# fp2 restored {%fp2/%fp3}
+	fadd.s		(%a1),%fp0		# accurate 2^(J/64)
+
+	fadd.x		%fp1,%fp0		# 2^(J/64) + 2^(J/64)*...
+	mov.l		ADJFLAG(%a6),%d1
+
+#--Step 6
+	tst.l		%d1
+	beq.b		NORMAL
+ADJUST:
+	fmul.x		ADJSCALE(%a6),%fp0
+NORMAL:
+	fmov.l		%d0,%fpcr		# restore user FPCR
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		SCALE(%a6),%fp0		# multiply 2^(M)
+	bra		t_catch
+
+EXPSM:
+#--Step 7
+	fmovm.x		(%a0),&0x80		# load X
+	fmov.l		%d0,%fpcr
+	fadd.s		&0x3F800000,%fp0	# 1+X in user mode
+	bra		t_pinx2
+
+EEXPBIG:
+#--Step 8
+	cmp.l		%d1,&0x400CB27C		# 16480 log2
+	bgt.b		EXP2BIG
+#--Steps 8.2 -- 8.6
+	fmov.x		(%a0),%fp0		# load input from (a0)
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	mov.l		&1,ADJFLAG(%a6)
+	fmov.l		%fp0,%d1		# N = int( X * 64/log2 )
+	lea		EEXPTBL(%pc),%a1
+	fmov.l		%d1,%fp0		# convert to floating-format
+	mov.l		%d1,L_SCR1(%a6)		# save N temporarily
+	and.l		&0x3F,%d1		# d1 is J = N mod 64
+	lsl.l		&4,%d1
+	add.l		%d1,%a1			# address of 2^(J/64)
+	mov.l		L_SCR1(%a6),%d1
+	asr.l		&6,%d1			# d1 is K
+	mov.l		%d1,L_SCR1(%a6)		# save K temporarily
+	asr.l		&1,%d1			# d1 is M1
+	sub.l		%d1,L_SCR1(%a6)		# L_SCR1 is M
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(M1)
+	mov.w		%d1,ADJSCALE(%a6)	# ADJSCALE := 2^(M1)
+	mov.l		&0x80000000,ADJSCALE+4(%a6)
+	clr.l		ADJSCALE+8(%a6)
+	mov.l		L_SCR1(%a6),%d1		# d1 is M
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(M)
+	bra.w		EXPCONT1		# go back to Step 3
+
+EXP2BIG:
+#--Step 9
+	tst.b		(%a0)			# is X positive or negative?
+	bmi		t_unfl2
+	bra		t_ovfl2
+
+	global		setoxd
+setoxd:
+#--entry point for EXP(X), X is denormalized
+	mov.l		(%a0),-(%sp)
+	andi.l		&0x80000000,(%sp)
+	ori.l		&0x00800000,(%sp)	# sign(X)*2^(-126)
+
+	fmov.s		&0x3F800000,%fp0
+
+	fmov.l		%d0,%fpcr
+	fadd.s		(%sp)+,%fp0
+	bra		t_pinx2
+
+	global		setoxm1
+setoxm1:
+#--entry point for EXPM1(X), here X is finite, non-zero, non-NaN
+
+#--Step 1.
+#--Step 1.1
+	mov.l		(%a0),%d1		# load part of input X
+	and.l		&0x7FFF0000,%d1		# biased expo. of X
+	cmp.l		%d1,&0x3FFD0000		# 1/4
+	bge.b		EM1CON1			# |X| >= 1/4
+	bra		EM1SM
+
+EM1CON1:
+#--Step 1.3
+#--The case |X| >= 1/4
+	mov.w		4(%a0),%d1		# expo. and partial sig. of |X|
+	cmp.l		%d1,&0x4004C215		# 70log2 rounded up to 16 bits
+	ble.b		EM1MAIN			# 1/4 <= |X| <= 70log2
+	bra		EM1BIG
+
+EM1MAIN:
+#--Step 2.
+#--This is the case:	1/4 <= |X| <= 70 log2.
+	fmov.x		(%a0),%fp0		# load input from (a0)
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	fmov.l		%fp0,%d1		# N = int( X * 64/log2 )
+	lea		EEXPTBL(%pc),%a1
+	fmov.l		%d1,%fp0		# convert to floating-format
+
+	mov.l		%d1,L_SCR1(%a6)		# save N temporarily
+	and.l		&0x3F,%d1		# d1 is J = N mod 64
+	lsl.l		&4,%d1
+	add.l		%d1,%a1			# address of 2^(J/64)
+	mov.l		L_SCR1(%a6),%d1
+	asr.l		&6,%d1			# d1 is M
+	mov.l		%d1,L_SCR1(%a6)		# save a copy of M
+
+#--Step 3.
+#--fp2,fp3 saved on the stack. fp0 is N, fp1 is X,
+#--a1 points to 2^(J/64), d1 and L_SCR1 both contain M
+	fmov.x		%fp0,%fp2
+	fmul.s		&0xBC317218,%fp0	# N * L1, L1 = lead(-log2/64)
+	fmul.x		L2(%pc),%fp2		# N * L2, L1+L2 = -log2/64
+	fadd.x		%fp1,%fp0		# X + N*L1
+	fadd.x		%fp2,%fp0		# fp0 is R, reduced arg.
+	add.w		&0x3FFF,%d1		# d1 is biased expo. of 2^M
+
+#--Step 4.
+#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*(A5 + R*A6)))))
+#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+#--[R*S*(A2+S*(A4+S*A6))] + [R+S*(A1+S*(A3+S*A5))]
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# fp1 IS S = R*R
+
+	fmov.s		&0x3950097B,%fp2	# fp2 IS A6
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*A6
+	fmov.x		%fp1,%fp3
+	fmul.s		&0x3AB60B6A,%fp3	# fp3 IS S*A5
+
+	fadd.d		EM1A4(%pc),%fp2		# fp2 IS A4+S*A6
+	fadd.d		EM1A3(%pc),%fp3		# fp3 IS A3+S*A5
+	mov.w		%d1,SC(%a6)		# SC is 2^(M) in extended
+	mov.l		&0x80000000,SC+4(%a6)
+	clr.l		SC+8(%a6)
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A4+S*A6)
+	mov.l		L_SCR1(%a6),%d1		# d1 is M
+	neg.w		%d1			# d1 is -M
+	fmul.x		%fp1,%fp3		# fp3 IS S*(A3+S*A5)
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(-M)
+	fadd.d		EM1A2(%pc),%fp2		# fp2 IS A2+S*(A4+S*A6)
+	fadd.s		&0x3F000000,%fp3	# fp3 IS A1+S*(A3+S*A5)
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A2+S*(A4+S*A6))
+	or.w		&0x8000,%d1		# signed/expo. of -2^(-M)
+	mov.w		%d1,ONEBYSC(%a6)	# OnebySc is -2^(-M)
+	mov.l		&0x80000000,ONEBYSC+4(%a6)
+	clr.l		ONEBYSC+8(%a6)
+	fmul.x		%fp3,%fp1		# fp1 IS S*(A1+S*(A3+S*A5))
+
+	fmul.x		%fp0,%fp2		# fp2 IS R*S*(A2+S*(A4+S*A6))
+	fadd.x		%fp1,%fp0		# fp0 IS R+S*(A1+S*(A3+S*A5))
+
+	fadd.x		%fp2,%fp0		# fp0 IS EXP(R)-1
+
+	fmovm.x		(%sp)+,&0x30		# fp2 restored {%fp2/%fp3}
+
+#--Step 5
+#--Compute 2^(J/64)*p
+
+	fmul.x		(%a1),%fp0		# 2^(J/64)*(Exp(R)-1)
+
+#--Step 6
+#--Step 6.1
+	mov.l		L_SCR1(%a6),%d1		# retrieve M
+	cmp.l		%d1,&63
+	ble.b		MLE63
+#--Step 6.2	M >= 64
+	fmov.s		12(%a1),%fp1		# fp1 is t
+	fadd.x		ONEBYSC(%a6),%fp1	# fp1 is t+OnebySc
+	fadd.x		%fp1,%fp0		# p+(t+OnebySc), fp1 released
+	fadd.x		(%a1),%fp0		# T+(p+(t+OnebySc))
+	bra		EM1SCALE
+MLE63:
+#--Step 6.3	M <= 63
+	cmp.l		%d1,&-3
+	bge.b		MGEN3
+MLTN3:
+#--Step 6.4	M <= -4
+	fadd.s		12(%a1),%fp0		# p+t
+	fadd.x		(%a1),%fp0		# T+(p+t)
+	fadd.x		ONEBYSC(%a6),%fp0	# OnebySc + (T+(p+t))
+	bra		EM1SCALE
+MGEN3:
+#--Step 6.5	-3 <= M <= 63
+	fmov.x		(%a1)+,%fp1		# fp1 is T
+	fadd.s		(%a1),%fp0		# fp0 is p+t
+	fadd.x		ONEBYSC(%a6),%fp1	# fp1 is T+OnebySc
+	fadd.x		%fp1,%fp0		# (T+OnebySc)+(p+t)
+
+EM1SCALE:
+#--Step 6.6
+	fmov.l		%d0,%fpcr
+	fmul.x		SC(%a6),%fp0
+	bra		t_inx2
+
+EM1SM:
+#--Step 7	|X| < 1/4.
+	cmp.l		%d1,&0x3FBE0000		# 2^(-65)
+	bge.b		EM1POLY
+
+EM1TINY:
+#--Step 8	|X| < 2^(-65)
+	cmp.l		%d1,&0x00330000		# 2^(-16312)
+	blt.b		EM12TINY
+#--Step 8.2
+	mov.l		&0x80010000,SC(%a6)	# SC is -2^(-16382)
+	mov.l		&0x80000000,SC+4(%a6)
+	clr.l		SC+8(%a6)
+	fmov.x		(%a0),%fp0
+	fmov.l		%d0,%fpcr
+	mov.b		&FADD_OP,%d1		# last inst is ADD
+	fadd.x		SC(%a6),%fp0
+	bra		t_catch
+
+EM12TINY:
+#--Step 8.3
+	fmov.x		(%a0),%fp0
+	fmul.d		TWO140(%pc),%fp0
+	mov.l		&0x80010000,SC(%a6)
+	mov.l		&0x80000000,SC+4(%a6)
+	clr.l		SC+8(%a6)
+	fadd.x		SC(%a6),%fp0
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.d		TWON140(%pc),%fp0
+	bra		t_catch
+
+EM1POLY:
+#--Step 9	exp(X)-1 by a simple polynomial
+	fmov.x		(%a0),%fp0		# fp0 is X
+	fmul.x		%fp0,%fp0		# fp0 is S := X*X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	fmov.s		&0x2F30CAA8,%fp1	# fp1 is B12
+	fmul.x		%fp0,%fp1		# fp1 is S*B12
+	fmov.s		&0x310F8290,%fp2	# fp2 is B11
+	fadd.s		&0x32D73220,%fp1	# fp1 is B10+S*B12
+
+	fmul.x		%fp0,%fp2		# fp2 is S*B11
+	fmul.x		%fp0,%fp1		# fp1 is S*(B10 + ...
+
+	fadd.s		&0x3493F281,%fp2	# fp2 is B9+S*...
+	fadd.d		EM1B8(%pc),%fp1		# fp1 is B8+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B9+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B8+...
+
+	fadd.d		EM1B7(%pc),%fp2		# fp2 is B7+S*...
+	fadd.d		EM1B6(%pc),%fp1		# fp1 is B6+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B7+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B6+...
+
+	fadd.d		EM1B5(%pc),%fp2		# fp2 is B5+S*...
+	fadd.d		EM1B4(%pc),%fp1		# fp1 is B4+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B5+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B4+...
+
+	fadd.d		EM1B3(%pc),%fp2		# fp2 is B3+S*...
+	fadd.x		EM1B2(%pc),%fp1		# fp1 is B2+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B3+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B2+...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*S*(B3+...)
+	fmul.x		(%a0),%fp1		# fp1 is X*S*(B2...
+
+	fmul.s		&0x3F000000,%fp0	# fp0 is S*B1
+	fadd.x		%fp2,%fp1		# fp1 is Q
+
+	fmovm.x		(%sp)+,&0x30		# fp2 restored {%fp2/%fp3}
+
+	fadd.x		%fp1,%fp0		# fp0 is S*B1+Q
+
+	fmov.l		%d0,%fpcr
+	fadd.x		(%a0),%fp0
+	bra		t_inx2
+
+EM1BIG:
+#--Step 10	|X| > 70 log2
+	mov.l		(%a0),%d1
+	cmp.l		%d1,&0
+	bgt.w		EXPC1
+#--Step 10.2
+	fmov.s		&0xBF800000,%fp0	# fp0 is -1
+	fmov.l		%d0,%fpcr
+	fadd.s		&0x00800000,%fp0	# -1 + 2^(-126)
+	bra		t_minx2
+
+	global		setoxm1d
+setoxm1d:
+#--entry point for EXPM1(X), here X is denormalized
+#--Step 0.
+	bra		t_extdnrm
+
+#########################################################################
+# sgetexp():  returns the exponent portion of the input argument.	#
+#	      The exponent bias is removed and the exponent value is	#
+#	      returned as an extended precision number in fp0.		#
+# sgetexpd(): handles denormalized numbers.				#
+#									#
+# sgetman():  extracts the mantissa of the input argument. The		#
+#	      mantissa is converted to an extended precision number w/	#
+#	      an exponent of $3fff and is returned in fp0. The range of #
+#	      the result is [1.0 - 2.0).				#
+# sgetmand(): handles denormalized numbers.				#
+#									#
+# INPUT *************************************************************** #
+#	a0  = pointer to extended precision input			#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = exponent(X) or mantissa(X)				#
+#									#
+#########################################################################
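+
+# Editor's note: an illustrative C sketch (not part of the emulation
+# code) of what sgetexp/sgetman compute, using C99 math calls in place
+# of the direct exponent-word manipulation above.  Only normal, finite,
+# non-zero inputs are considered.
+#
+#	#include <math.h>
+#
+#	/* unbiased exponent of x, returned as a floating-point value */
+#	static double getexp_sketch(double x)
+#	{
+#		return (double)ilogb(x);
+#	}
+#
+#	/* mantissa of x: magnitude in [1.0, 2.0), sign preserved */
+#	static double getman_sketch(double x)
+#	{
+#		return ldexp(x, -ilogb(x));
+#	}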
+
+	global		sgetexp
+sgetexp:
+	mov.w		SRC_EX(%a0),%d0		# get the exponent
+	bclr		&0xf,%d0		# clear the sign bit
+	subi.w		&0x3fff,%d0		# subtract off the bias
+	fmov.w		%d0,%fp0		# return exp in fp0
+	blt.b		sgetexpn		# it's negative
+	rts
+
+sgetexpn:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+	global		sgetexpd
+sgetexpd:
+	bsr.l		norm			# normalize
+	neg.w		%d0			# new exp = -(shft amt)
+	subi.w		&0x3fff,%d0		# subtract off the bias
+	fmov.w		%d0,%fp0		# return exp in fp0
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+	global		sgetman
+sgetman:
+	mov.w		SRC_EX(%a0),%d0		# get the exp
+	ori.w		&0x7fff,%d0		# clear old exp
+	bclr		&0xe,%d0		# make it the new exp +-3fff
+
+# here, we build the result in a tmp location so as not to disturb the input
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6) # copy to tmp loc
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6) # copy to tmp loc
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmov.x		FP_SCR0(%a6),%fp0	# put new value back in fp0
+	bmi.b		sgetmann		# it's negative
+	rts
+
+sgetmann:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#
+# For denormalized numbers, shift the mantissa until the j-bit = 1,
+# then load the exponent with +/- $3fff.
+#
+	global		sgetmand
+sgetmand:
+	bsr.l		norm			# normalize exponent
+	bra.b		sgetman
+
+#########################################################################
+# scosh():  computes the hyperbolic cosine of a normalized input	#
+# scoshd(): computes the hyperbolic cosine of a denormalized input	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = cosh(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	COSH								#
+#	1. If |X| > 16380 log2, go to 3.				#
+#									#
+#	2. (|X| <= 16380 log2) Cosh(X) is obtained by the formulae	#
+#		y = |X|, z = exp(Y), and				#
+#		cosh(X) = (1/2)*( z + 1/z ).				#
+#		Exit.							#
+#									#
+#	3. (|X| > 16380 log2). If |X| > 16480 log2, go to 5.		#
+#									#
+#	4. (16380 log2 < |X| <= 16480 log2)				#
+#		cosh(X) = exp(|X|)/2.					#
+#		However, invoking exp(|X|) may cause premature		#
+#		overflow. Thus, we calculate cosh(X) as follows:	#
+#		Y	:= |X|						#
+#		Fact	:=	2**(16380)				#
+#		Y'	:= Y - 16381 log2				#
+#		cosh(X) := Fact * exp(Y').				#
+#		Exit.							#
+#									#
+#	5. (|X| > 16480 log2) cosh(X) must overflow. Return		#
+#		Huge*Huge to generate overflow and an infinity with	#
+#		the appropriate sign. Huge is the largest finite number	#
+#		in extended format. Exit.				#
+#									#
+#########################################################################
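+
+# Editor's note: an illustrative double-precision C sketch (not part of
+# the emulation code) of the usual-case formula above.  The C library
+# exp() stands in for setox, and the 2^16380 staging used to avoid
+# premature overflow has no direct double-precision analogue here.
+#
+#	#include <math.h>
+#
+#	static double cosh_sketch(double x)
+#	{
+#		double half_z = 0.5 * exp(fabs(x));	/* (1/2)*exp(|X|) */
+#
+#		/* cosh(X) = (1/2)*(z + 1/z), split exactly as in the code:
+#		 * (1/2)*exp(|X|)  +  (1/4)/((1/2)*exp(|X|))		*/
+#		return half_z + 0.25 / half_z;
+#	}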
+
+TWO16380:
+	long		0x7FFB0000,0x80000000,0x00000000,0x00000000
+
+	global		scosh
+scosh:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x400CB167
+	bgt.b		COSHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+#--COSH(X) = (1/2) * ( EXP(X) + 1/EXP(X) )
+
+	fabs.x		%fp0			# |X|
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save |X| to stack
+	lea		(%sp),%a0		# pass ptr to |X|
+	bsr		setox			# FP0 IS EXP(|X|)
+	add.l		&0xc,%sp		# erase |X| from stack
+	fmul.s		&0x3F000000,%fp0	# (1/2)EXP(|X|)
+	mov.l		(%sp)+,%d0
+
+	fmov.s		&0x3E800000,%fp1	# (1/4)
+	fdiv.x		%fp0,%fp1		# 1/(2 EXP(|X|))
+
+	fmov.l		%d0,%fpcr
+	mov.b		&FADD_OP,%d1		# last inst is ADD
+	fadd.x		%fp1,%fp0
+	bra		t_catch
+
+COSHBIG:
+	cmp.l		%d1,&0x400CB2B3
+	bgt.b		COSHHUGE
+
+	fabs.x		%fp0
+	fsub.d		T1(%pc),%fp0		# (|X|-16381LOG2_LEAD)
+	fsub.d		T2(%pc),%fp0		# |X| - 16381 LOG2, ACCURATE
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save fp0 to stack
+	lea		(%sp),%a0		# pass ptr to fp0
+	bsr		setox
+	add.l		&0xc,%sp		# clear fp0 from stack
+	mov.l		(%sp)+,%d0
+
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		TWO16380(%pc),%fp0
+	bra		t_catch
+
+COSHHUGE:
+	bra		t_ovfl2
+
+	global		scoshd
+#--COSH(X) = 1 FOR DENORMALIZED X
+scoshd:
+	fmov.s		&0x3F800000,%fp0
+
+	fmov.l		%d0,%fpcr
+	fadd.s		&0x00800000,%fp0
+	bra		t_pinx2
+
+#########################################################################
+# ssinh():  computes the hyperbolic sine of a normalized input		#
+# ssinhd(): computes the hyperbolic sine of a denormalized input	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = sinh(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#       SINH								#
+#       1. If |X| > 16380 log2, go to 3.				#
+#									#
+#       2. (|X| <= 16380 log2) Sinh(X) is obtained by the formula	#
+#               y = |X|, sgn = sign(X), and z = expm1(Y),		#
+#               sinh(X) = sgn*(1/2)*( z + z/(1+z) ).			#
+#          Exit.							#
+#									#
+#       3. If |X| > 16480 log2, go to 5.				#
+#									#
+#       4. (16380 log2 < |X| <= 16480 log2)				#
+#               sinh(X) = sign(X) * exp(|X|)/2.				#
+#          However, invoking exp(|X|) may cause premature overflow.	#
+#          Thus, we calculate sinh(X) as follows:			#
+#             Y       := |X|						#
+#             sgn     := sign(X)					#
+#             sgnFact := sgn * 2**(16380)				#
+#             Y'      := Y - 16381 log2					#
+#             sinh(X) := sgnFact * exp(Y').				#
+#          Exit.							#
+#									#
+#       5. (|X| > 16480 log2) sinh(X) must overflow. Return		#
+#          sign(X)*Huge*Huge to generate overflow and an infinity with	#
+#          the appropriate sign. Huge is the largest finite number in	#
+#          extended format. Exit.					#
+#									#
+#########################################################################
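+
+# Editor's note: an illustrative double-precision C sketch (not part of
+# the emulation code) of the usual-case identity above, with the C
+# library expm1() standing in for setoxm1 and the large-|X| staging
+# omitted.
+#
+#	#include <math.h>
+#
+#	static double sinh_sketch(double x)
+#	{
+#		double z = expm1(fabs(x));		/* z = exp(|X|) - 1 */
+#
+#		/* sinh(X) = sign(X)*(1/2)*( z + z/(1+z) ) */
+#		return copysign(0.5 * (z + z / (1.0 + z)), x);
+#	}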
+
+	global		ssinh
+ssinh:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	mov.l		%d1,%a1			# save (compacted) operand
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x400CB167
+	bgt.b		SINHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+#--Y = |X|, Z = EXPM1(Y), SINH(X) = SIGN(X)*(1/2)*( Z + Z/(1+Z) )
+
+	fabs.x		%fp0			# Y = |X|
+
+	movm.l		&0x8040,-(%sp)		# {a1/d0}
+	fmovm.x		&0x01,-(%sp)		# save Y on stack
+	lea		(%sp),%a0		# pass ptr to Y
+	clr.l		%d0
+	bsr		setoxm1			# FP0 IS Z = EXPM1(Y)
+	add.l		&0xc,%sp		# clear Y from stack
+	fmov.l		&0,%fpcr
+	movm.l		(%sp)+,&0x0201		# {a1/d0}
+
+	fmov.x		%fp0,%fp1
+	fadd.s		&0x3F800000,%fp1	# 1+Z
+	fmov.x		%fp0,-(%sp)
+	fdiv.x		%fp1,%fp0		# Z/(1+Z)
+	mov.l		%a1,%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x3F000000,%d1
+	fadd.x		(%sp)+,%fp0
+	mov.l		%d1,-(%sp)
+
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.s		(%sp)+,%fp0		# last fp inst - possible exceptions set
+	bra		t_catch
+
+SINHBIG:
+	cmp.l		%d1,&0x400CB2B3
+	bgt		t_ovfl
+	fabs.x		%fp0
+	fsub.d		T1(%pc),%fp0		# (|X|-16381LOG2_LEAD)
+	mov.l		&0,-(%sp)
+	mov.l		&0x80000000,-(%sp)
+	mov.l		%a1,%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x7FFB0000,%d1
+	mov.l		%d1,-(%sp)		# EXTENDED FMT
+	fsub.d		T2(%pc),%fp0		# |X| - 16381 LOG2, ACCURATE
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save fp0 on stack
+	lea		(%sp),%a0		# pass ptr to fp0
+	bsr		setox
+	add.l		&0xc,%sp		# clear fp0 from stack
+
+	mov.l		(%sp)+,%d0
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		(%sp)+,%fp0		# possible exception
+	bra		t_catch
+
+	global		ssinhd
+#--SINH(X) = X FOR DENORMALIZED X
+ssinhd:
+	bra		t_extdnrm
+
+#########################################################################
+# stanh():  computes the hyperbolic tangent of a normalized input	#
+# stanhd(): computes the hyperbolic tangent of a denormalized input	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = tanh(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	TANH								#
+#	1. If |X| >= (5/2) log2 or |X| <= 2**(-40), go to 3.		#
+#									#
+#	2. (2**(-40) < |X| < (5/2) log2) Calculate tanh(X) by		#
+#		sgn := sign(X), y := 2|X|, z := expm1(Y), and		#
+#		tanh(X) = sgn*( z/(2+z) ).				#
+#		Exit.							#
+#									#
+#	3. (|X| <= 2**(-40) or |X| >= (5/2) log2). If |X| < 1,		#
+#		go to 7.						#
+#									#
+#	4. (|X| >= (5/2) log2) If |X| >= 50 log2, go to 6.		#
+#									#
+#	5. ((5/2) log2 <= |X| < 50 log2) Calculate tanh(X) by		#
+#		sgn := sign(X), y := 2|X|, z := exp(Y),			#
+#		tanh(X) = sgn - [ sgn*2/(1+z) ].			#
+#		Exit.							#
+#									#
+#	6. (|X| >= 50 log2) Tanh(X) = +-1 (round to nearest). Thus, we	#
+#		calculate Tanh(X) by					#
+#		sgn := sign(X), Tiny := 2**(-126),			#
+#		tanh(X) := sgn - sgn*Tiny.				#
+#		Exit.							#
+#									#
+#	7. (|X| < 2**(-40)). Tanh(X) = X.	Exit.			#
+#									#
+#########################################################################
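+
+# Editor's note: an illustrative double-precision C sketch (not part of
+# the emulation code) of the ranges described above.  exp()/expm1()
+# stand in for setox/setoxm1, and step 6 simply returns +-1 instead of
+# sgn - sgn*2^(-126) (so no inexact exception is provoked here).
+#
+#	#include <math.h>
+#
+#	static double tanh_sketch(double x)
+#	{
+#		const double ln2 = 0.6931471805599453;
+#		double ax = fabs(x);
+#
+#		if (ax < 0x1p-40)			/* step 7: tanh(X) = X */
+#			return x;
+#		if (ax < 2.5 * ln2) {			/* step 2 */
+#			double z = expm1(2.0 * ax);	/* z = expm1(2|X|) */
+#			return copysign(z / (2.0 + z), x);
+#		}
+#		if (ax < 50.0 * ln2) {			/* step 5 */
+#			double z = exp(2.0 * ax);
+#			return copysign(1.0 - 2.0 / (1.0 + z), x);
+#		}
+#		return copysign(1.0, x);		/* step 6 */
+#	}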
+
+	set		X,FP_SCR0
+	set		XFRAC,X+4
+
+	set		SGN,L_SCR3
+
+	set		V,FP_SCR0
+
+	global		stanh
+stanh:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	fmov.x		%fp0,X(%a6)
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	mov.l		%d1,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1, &0x3fd78000	# is |X| < 2^(-40)?
+	blt.w		TANHBORS		# yes
+	cmp.l		%d1, &0x3fffddce	# is |X| > (5/2)LOG2?
+	bgt.w		TANHBORS		# yes
+
+#--THIS IS THE USUAL CASE
+#--Y = 2|X|, Z = EXPM1(Y), TANH(X) = SIGN(X) * Z / (Z+2).
+
+	mov.l		X(%a6),%d1
+	mov.l		%d1,SGN(%a6)
+	and.l		&0x7FFF0000,%d1
+	add.l		&0x00010000,%d1		# EXPONENT OF 2|X|
+	mov.l		%d1,X(%a6)
+	and.l		&0x80000000,SGN(%a6)
+	fmov.x		X(%a6),%fp0		# FP0 IS Y = 2|X|
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x1,-(%sp)		# save Y on stack
+	lea		(%sp),%a0		# pass ptr to Y
+	bsr		setoxm1			# FP0 IS Z = EXPM1(Y)
+	add.l		&0xc,%sp		# clear Y from stack
+	mov.l		(%sp)+,%d0
+
+	fmov.x		%fp0,%fp1
+	fadd.s		&0x40000000,%fp1	# Z+2
+	mov.l		SGN(%a6),%d1
+	fmov.x		%fp1,V(%a6)
+	eor.l		%d1,V(%a6)
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	fdiv.x		V(%a6),%fp0
+	bra		t_inx2
+
+TANHBORS:
+	cmp.l		%d1,&0x3FFF8000
+	blt.w		TANHSM
+
+	cmp.l		%d1,&0x40048AA1
+	bgt.w		TANHHUGE
+
+#-- (5/2) LOG2 < |X| < 50 LOG2,
+#--TANH(X) = 1 - (2/[EXP(2X)+1]). LET Y = 2|X|, SGN = SIGN(X),
+#--TANH(X) = SGN -	SGN*2/[EXP(Y)+1].
+
+	mov.l		X(%a6),%d1
+	mov.l		%d1,SGN(%a6)
+	and.l		&0x7FFF0000,%d1
+	add.l		&0x00010000,%d1		# EXPO OF 2|X|
+	mov.l		%d1,X(%a6)		# Y = 2|X|
+	and.l		&0x80000000,SGN(%a6)
+	mov.l		SGN(%a6),%d1
+	fmov.x		X(%a6),%fp0		# Y = 2|X|
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save Y on stack
+	lea		(%sp),%a0		# pass ptr to Y
+	bsr		setox			# FP0 IS EXP(Y)
+	add.l		&0xc,%sp		# clear Y from stack
+	mov.l		(%sp)+,%d0
+	mov.l		SGN(%a6),%d1
+	fadd.s		&0x3F800000,%fp0	# EXP(Y)+1
+
+	eor.l		&0xC0000000,%d1		# -SIGN(X)*2
+	fmov.s		%d1,%fp1		# -SIGN(X)*2 IN SGL FMT
+	fdiv.x		%fp0,%fp1		# -SIGN(X)2 / [EXP(Y)+1 ]
+
+	mov.l		SGN(%a6),%d1
+	or.l		&0x3F800000,%d1		# SGN
+	fmov.s		%d1,%fp0		# SGN IN SGL FMT
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	mov.b		&FADD_OP,%d1		# last inst is ADD
+	fadd.x		%fp1,%fp0
+	bra		t_inx2
+
+TANHSM:
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0		# last inst - possible exception set
+	bra		t_catch
+
+#--RETURN SGN(X) - SGN(X)*EPS
+TANHHUGE:
+	mov.l		X(%a6),%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x3F800000,%d1
+	fmov.s		%d1,%fp0
+	and.l		&0x80000000,%d1
+	eor.l		&0x80800000,%d1		# -SIGN(X)*EPS
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	fadd.s		%d1,%fp0
+	bra		t_inx2
+
+	global		stanhd
+#--TANH(X) = X FOR DENORMALIZED X
+stanhd:
+	bra		t_extdnrm
+
+#########################################################################
+# slogn():    computes the natural logarithm of a normalized input	#
+# slognd():   computes the natural logarithm of a denormalized input	#
+# slognp1():  computes the log(1+X) of a normalized input		#
+# slognp1d(): computes the log(1+X) of a denormalized input		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = log(X) or log(1+X)					#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 2 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	LOGN:								#
+#	Step 1. If |X-1| < 1/16, approximate log(X) by an odd		#
+#		polynomial in u, where u = 2(X-1)/(X+1). Otherwise,	#
+#		move on to Step 2.					#
+#									#
+#	Step 2. X = 2**k * Y where 1 <= Y < 2. Define F to be the first	#
+#		seven significant bits of Y plus 2**(-7), i.e.		#
+#		F = 1.xxxxxx1 in base 2 where the six "x" match those	#
+#		of Y. Note that |Y-F| <= 2**(-7).			#
+#									#
+#	Step 3. Define u = (Y-F)/F. Approximate log(1+u) by a		#
+#		polynomial in u, log(1+u) = poly.			#
+#									#
+#	Step 4. Reconstruct						#
+#		log(X) = log( 2**k * Y ) = k*log(2) + log(F) + log(1+u)	#
+#		by k*log(2) + (log(F) + poly). The values of log(F) are	#
+#		calculated beforehand and stored in the program.	#
+#									#
+#	lognp1:								#
+#	Step 1: If |X| < 1/16, approximate log(1+X) by an odd		#
+#		polynomial in u where u = 2X/(2+X). Otherwise, move on	#
+#		to Step 2.						#
+#									#
+#	Step 2: Let 1+X = 2**k * Y, where 1 <= Y < 2. Define F as done	#
+#		in Step 2 of the algorithm for LOGN and compute		#
+#		log(1+X) as k*log(2) + log(F) + poly where poly		#
+#		approximates log(1+u), u = (Y-F)/F.			#
+#									#
+#	Implementation Notes:						#
+#	Note 1. There are 64 different possible values for F, thus 64	#
+#		log(F)'s need to be tabulated. Moreover, the values of	#
+#		1/F are also tabulated so that the division in (Y-F)/F	#
+#		can be performed by a multiplication.			#
+#									#
+#	Note 2. In Step 2 of lognp1, in order to preserved accuracy,	#
+#		the value Y-F has to be calculated carefully when	#
+#		1/2 <= X < 3/2.						#
+#									#
+#	Note 3. To fully exploit the pipeline, polynomials are usually	#
+#		separated into two parts evaluated independently before	#
+#		being added up.						#
+#									#
+#########################################################################
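+
+# Editor's note: an illustrative double-precision C sketch (not part of
+# the emulation code) of the LOGN reduction above.  frexp() extracts k
+# and Y, F is built from the first seven significant bits of Y as in
+# Step 2, the C library log() stands in for the tabulated log(F), and
+# the polynomial is a short series rather than the LOGA1-LOGA6 fit.
+# The |X-1| < 1/16 path and the lognp1 entry are omitted.
+#
+#	#include <math.h>
+#
+#	static double log_sketch(double x)	/* x finite, x > 0 */
+#	{
+#		int k;
+#		double y = 2.0 * frexp(x, &k);	/* x = 2^(k-1) * y, 1 <= y < 2 */
+#		k -= 1;
+#
+#		/* Step 2: F = 1.xxxxxx1, so |Y - F| <= 2^(-7) */
+#		double f = floor(y * 64.0) / 64.0 + 1.0 / 128.0;
+#
+#		/* Step 3: u = (Y-F)/F, log(1+u) ~ u - u^2/2 + u^3/3 */
+#		double u = (y - f) / f;
+#		double poly = u - u * u * (0.5 - u / 3.0);
+#
+#		/* Step 4: log(X) = k*log2 + log(F) + log(1+u) */
+#		return k * 0.6931471805599453 + log(f) + poly;
+#	}
+#
+# In the assembly the division by F is replaced by a multiplication with
+# the tabulated 1/F, which is why LOGTBL interleaves 1/F and log(F)
+# entries.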
+LOGOF2:
+	long		0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+one:
+	long		0x3F800000
+zero:
+	long		0x00000000
+infty:
+	long		0x7F800000
+negone:
+	long		0xBF800000
+
+LOGA6:
+	long		0x3FC2499A,0xB5E4040B
+LOGA5:
+	long		0xBFC555B5,0x848CB7DB
+
+LOGA4:
+	long		0x3FC99999,0x987D8730
+LOGA3:
+	long		0xBFCFFFFF,0xFF6F7E97
+
+LOGA2:
+	long		0x3FD55555,0x555555A4
+LOGA1:
+	long		0xBFE00000,0x00000008
+
+LOGB5:
+	long		0x3F175496,0xADD7DAD6
+LOGB4:
+	long		0x3F3C71C2,0xFE80C7E0
+
+LOGB3:
+	long		0x3F624924,0x928BCCFF
+LOGB2:
+	long		0x3F899999,0x999995EC
+
+LOGB1:
+	long		0x3FB55555,0x55555555
+TWO:
+	long		0x40000000,0x00000000
+
+LTHOLD:
+	long		0x3f990000,0x80000000,0x00000000,0x00000000
+
+LOGTBL:
+	long		0x3FFE0000,0xFE03F80F,0xE03F80FE,0x00000000
+	long		0x3FF70000,0xFF015358,0x833C47E2,0x00000000
+	long		0x3FFE0000,0xFA232CF2,0x52138AC0,0x00000000
+	long		0x3FF90000,0xBDC8D83E,0xAD88D549,0x00000000
+	long		0x3FFE0000,0xF6603D98,0x0F6603DA,0x00000000
+	long		0x3FFA0000,0x9CF43DCF,0xF5EAFD48,0x00000000
+	long		0x3FFE0000,0xF2B9D648,0x0F2B9D65,0x00000000
+	long		0x3FFA0000,0xDA16EB88,0xCB8DF614,0x00000000
+	long		0x3FFE0000,0xEF2EB71F,0xC4345238,0x00000000
+	long		0x3FFB0000,0x8B29B775,0x1BD70743,0x00000000
+	long		0x3FFE0000,0xEBBDB2A5,0xC1619C8C,0x00000000
+	long		0x3FFB0000,0xA8D839F8,0x30C1FB49,0x00000000
+	long		0x3FFE0000,0xE865AC7B,0x7603A197,0x00000000
+	long		0x3FFB0000,0xC61A2EB1,0x8CD907AD,0x00000000
+	long		0x3FFE0000,0xE525982A,0xF70C880E,0x00000000
+	long		0x3FFB0000,0xE2F2A47A,0xDE3A18AF,0x00000000
+	long		0x3FFE0000,0xE1FC780E,0x1FC780E2,0x00000000
+	long		0x3FFB0000,0xFF64898E,0xDF55D551,0x00000000
+	long		0x3FFE0000,0xDEE95C4C,0xA037BA57,0x00000000
+	long		0x3FFC0000,0x8DB956A9,0x7B3D0148,0x00000000
+	long		0x3FFE0000,0xDBEB61EE,0xD19C5958,0x00000000
+	long		0x3FFC0000,0x9B8FE100,0xF47BA1DE,0x00000000
+	long		0x3FFE0000,0xD901B203,0x6406C80E,0x00000000
+	long		0x3FFC0000,0xA9372F1D,0x0DA1BD17,0x00000000
+	long		0x3FFE0000,0xD62B80D6,0x2B80D62C,0x00000000
+	long		0x3FFC0000,0xB6B07F38,0xCE90E46B,0x00000000
+	long		0x3FFE0000,0xD3680D36,0x80D3680D,0x00000000
+	long		0x3FFC0000,0xC3FD0329,0x06488481,0x00000000
+	long		0x3FFE0000,0xD0B69FCB,0xD2580D0B,0x00000000
+	long		0x3FFC0000,0xD11DE0FF,0x15AB18CA,0x00000000
+	long		0x3FFE0000,0xCE168A77,0x25080CE1,0x00000000
+	long		0x3FFC0000,0xDE1433A1,0x6C66B150,0x00000000
+	long		0x3FFE0000,0xCB8727C0,0x65C393E0,0x00000000
+	long		0x3FFC0000,0xEAE10B5A,0x7DDC8ADD,0x00000000
+	long		0x3FFE0000,0xC907DA4E,0x871146AD,0x00000000
+	long		0x3FFC0000,0xF7856E5E,0xE2C9B291,0x00000000
+	long		0x3FFE0000,0xC6980C69,0x80C6980C,0x00000000
+	long		0x3FFD0000,0x82012CA5,0xA68206D7,0x00000000
+	long		0x3FFE0000,0xC4372F85,0x5D824CA6,0x00000000
+	long		0x3FFD0000,0x882C5FCD,0x7256A8C5,0x00000000
+	long		0x3FFE0000,0xC1E4BBD5,0x95F6E947,0x00000000
+	long		0x3FFD0000,0x8E44C60B,0x4CCFD7DE,0x00000000
+	long		0x3FFE0000,0xBFA02FE8,0x0BFA02FF,0x00000000
+	long		0x3FFD0000,0x944AD09E,0xF4351AF6,0x00000000
+	long		0x3FFE0000,0xBD691047,0x07661AA3,0x00000000
+	long		0x3FFD0000,0x9A3EECD4,0xC3EAA6B2,0x00000000
+	long		0x3FFE0000,0xBB3EE721,0xA54D880C,0x00000000
+	long		0x3FFD0000,0xA0218434,0x353F1DE8,0x00000000
+	long		0x3FFE0000,0xB92143FA,0x36F5E02E,0x00000000
+	long		0x3FFD0000,0xA5F2FCAB,0xBBC506DA,0x00000000
+	long		0x3FFE0000,0xB70FBB5A,0x19BE3659,0x00000000
+	long		0x3FFD0000,0xABB3B8BA,0x2AD362A5,0x00000000
+	long		0x3FFE0000,0xB509E68A,0x9B94821F,0x00000000
+	long		0x3FFD0000,0xB1641795,0xCE3CA97B,0x00000000
+	long		0x3FFE0000,0xB30F6352,0x8917C80B,0x00000000
+	long		0x3FFD0000,0xB7047551,0x5D0F1C61,0x00000000
+	long		0x3FFE0000,0xB11FD3B8,0x0B11FD3C,0x00000000
+	long		0x3FFD0000,0xBC952AFE,0xEA3D13E1,0x00000000
+	long		0x3FFE0000,0xAF3ADDC6,0x80AF3ADE,0x00000000
+	long		0x3FFD0000,0xC2168ED0,0xF458BA4A,0x00000000
+	long		0x3FFE0000,0xAD602B58,0x0AD602B6,0x00000000
+	long		0x3FFD0000,0xC788F439,0xB3163BF1,0x00000000
+	long		0x3FFE0000,0xAB8F69E2,0x8359CD11,0x00000000
+	long		0x3FFD0000,0xCCECAC08,0xBF04565D,0x00000000
+	long		0x3FFE0000,0xA9C84A47,0xA07F5638,0x00000000
+	long		0x3FFD0000,0xD2420487,0x2DD85160,0x00000000
+	long		0x3FFE0000,0xA80A80A8,0x0A80A80B,0x00000000
+	long		0x3FFD0000,0xD7894992,0x3BC3588A,0x00000000
+	long		0x3FFE0000,0xA655C439,0x2D7B73A8,0x00000000
+	long		0x3FFD0000,0xDCC2C4B4,0x9887DACC,0x00000000
+	long		0x3FFE0000,0xA4A9CF1D,0x96833751,0x00000000
+	long		0x3FFD0000,0xE1EEBD3E,0x6D6A6B9E,0x00000000
+	long		0x3FFE0000,0xA3065E3F,0xAE7CD0E0,0x00000000
+	long		0x3FFD0000,0xE70D785C,0x2F9F5BDC,0x00000000
+	long		0x3FFE0000,0xA16B312E,0xA8FC377D,0x00000000
+	long		0x3FFD0000,0xEC1F392C,0x5179F283,0x00000000
+	long		0x3FFE0000,0x9FD809FD,0x809FD80A,0x00000000
+	long		0x3FFD0000,0xF12440D3,0xE36130E6,0x00000000
+	long		0x3FFE0000,0x9E4CAD23,0xDD5F3A20,0x00000000
+	long		0x3FFD0000,0xF61CCE92,0x346600BB,0x00000000
+	long		0x3FFE0000,0x9CC8E160,0xC3FB19B9,0x00000000
+	long		0x3FFD0000,0xFB091FD3,0x8145630A,0x00000000
+	long		0x3FFE0000,0x9B4C6F9E,0xF03A3CAA,0x00000000
+	long		0x3FFD0000,0xFFE97042,0xBFA4C2AD,0x00000000
+	long		0x3FFE0000,0x99D722DA,0xBDE58F06,0x00000000
+	long		0x3FFE0000,0x825EFCED,0x49369330,0x00000000
+	long		0x3FFE0000,0x9868C809,0x868C8098,0x00000000
+	long		0x3FFE0000,0x84C37A7A,0xB9A905C9,0x00000000
+	long		0x3FFE0000,0x97012E02,0x5C04B809,0x00000000
+	long		0x3FFE0000,0x87224C2E,0x8E645FB7,0x00000000
+	long		0x3FFE0000,0x95A02568,0x095A0257,0x00000000
+	long		0x3FFE0000,0x897B8CAC,0x9F7DE298,0x00000000
+	long		0x3FFE0000,0x94458094,0x45809446,0x00000000
+	long		0x3FFE0000,0x8BCF55DE,0xC4CD05FE,0x00000000
+	long		0x3FFE0000,0x92F11384,0x0497889C,0x00000000
+	long		0x3FFE0000,0x8E1DC0FB,0x89E125E5,0x00000000
+	long		0x3FFE0000,0x91A2B3C4,0xD5E6F809,0x00000000
+	long		0x3FFE0000,0x9066E68C,0x955B6C9B,0x00000000
+	long		0x3FFE0000,0x905A3863,0x3E06C43B,0x00000000
+	long		0x3FFE0000,0x92AADE74,0xC7BE59E0,0x00000000
+	long		0x3FFE0000,0x8F1779D9,0xFDC3A219,0x00000000
+	long		0x3FFE0000,0x94E9BFF6,0x15845643,0x00000000
+	long		0x3FFE0000,0x8DDA5202,0x37694809,0x00000000
+	long		0x3FFE0000,0x9723A1B7,0x20134203,0x00000000
+	long		0x3FFE0000,0x8CA29C04,0x6514E023,0x00000000
+	long		0x3FFE0000,0x995899C8,0x90EB8990,0x00000000
+	long		0x3FFE0000,0x8B70344A,0x139BC75A,0x00000000
+	long		0x3FFE0000,0x9B88BDAA,0x3A3DAE2F,0x00000000
+	long		0x3FFE0000,0x8A42F870,0x5669DB46,0x00000000
+	long		0x3FFE0000,0x9DB4224F,0xFFE1157C,0x00000000
+	long		0x3FFE0000,0x891AC73A,0xE9819B50,0x00000000
+	long		0x3FFE0000,0x9FDADC26,0x8B7A12DA,0x00000000
+	long		0x3FFE0000,0x87F78087,0xF78087F8,0x00000000
+	long		0x3FFE0000,0xA1FCFF17,0xCE733BD4,0x00000000
+	long		0x3FFE0000,0x86D90544,0x7A34ACC6,0x00000000
+	long		0x3FFE0000,0xA41A9E8F,0x5446FB9F,0x00000000
+	long		0x3FFE0000,0x85BF3761,0x2CEE3C9B,0x00000000
+	long		0x3FFE0000,0xA633CD7E,0x6771CD8B,0x00000000
+	long		0x3FFE0000,0x84A9F9C8,0x084A9F9D,0x00000000
+	long		0x3FFE0000,0xA8489E60,0x0B435A5E,0x00000000
+	long		0x3FFE0000,0x83993052,0x3FBE3368,0x00000000
+	long		0x3FFE0000,0xAA59233C,0xCCA4BD49,0x00000000
+	long		0x3FFE0000,0x828CBFBE,0xB9A020A3,0x00000000
+	long		0x3FFE0000,0xAC656DAE,0x6BCC4985,0x00000000
+	long		0x3FFE0000,0x81848DA8,0xFAF0D277,0x00000000
+	long		0x3FFE0000,0xAE6D8EE3,0x60BB2468,0x00000000
+	long		0x3FFE0000,0x80808080,0x80808081,0x00000000
+	long		0x3FFE0000,0xB07197A2,0x3C46C654,0x00000000
+
+	set		ADJK,L_SCR1
+
+	set		X,FP_SCR0
+	set		XDCARE,X+2
+	set		XFRAC,X+4
+
+	set		F,FP_SCR1
+	set		FFRAC,F+4
+
+	set		KLOG2,FP_SCR0
+
+	set		SAVEU,FP_SCR0
+
+	global		slogn
+#--ENTRY POINT FOR LOG(X) FOR X FINITE, NON-ZERO, NOT NAN'S
+slogn:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	mov.l		&0x00000000,ADJK(%a6)
+
+LOGBGN:
+#--FPCR SAVED AND CLEARED, INPUT IS 2^(ADJK)*FP0, FP0 CONTAINS
+#--A FINITE, NON-ZERO, NORMALIZED NUMBER.
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+
+	mov.l		(%a0),X(%a6)
+	mov.l		4(%a0),X+4(%a6)
+	mov.l		8(%a0),X+8(%a6)
+
+	cmp.l		%d1,&0			# CHECK IF X IS NEGATIVE
+	blt.w		LOGNEG			# LOG OF NEGATIVE ARGUMENT IS INVALID
+# X IS POSITIVE, CHECK IF X IS NEAR 1
+	cmp.l		%d1,&0x3ffef07d		# IS X < 15/16?
+	blt.b		LOGMAIN			# YES
+	cmp.l		%d1,&0x3fff8841		# IS X > 17/16?
+	ble.w		LOGNEAR1		# NO
+
+LOGMAIN:
+#--THIS SHOULD BE THE USUAL CASE, X NOT VERY CLOSE TO 1
+
+#--X = 2^(K) * Y, 1 <= Y < 2. THUS, Y = 1.XXXXXXXX....XX IN BINARY.
+#--WE DEFINE F = 1.XXXXXX1, I.E. FIRST 7 BITS OF Y AND ATTACH A 1.
+#--THE IDEA IS THAT LOG(X) = K*LOG2 + LOG(Y)
+#--			 = K*LOG2 + LOG(F) + LOG(1 + (Y-F)/F).
+#--NOTE THAT U = (Y-F)/F IS VERY SMALL AND THUS APPROXIMATING
+#--LOG(1+U) CAN BE VERY EFFICIENT.
+#--ALSO NOTE THAT THE VALUE 1/F IS STORED IN A TABLE SO THAT NO
+#--DIVISION IS NEEDED TO CALCULATE (Y-F)/F.
+
+#--GET K, Y, F, AND ADDRESS OF 1/F.
+	asr.l		&8,%d1
+	asr.l		&8,%d1			# SHIFTED 16 BITS, BIASED EXPO. OF X
+	sub.l		&0x3FFF,%d1		# THIS IS K
+	add.l		ADJK(%a6),%d1		# ADJUST K, ORIGINAL INPUT MAY BE  DENORM.
+	lea		LOGTBL(%pc),%a0		# BASE ADDRESS OF 1/F AND LOG(F)
+	fmov.l		%d1,%fp1		# CONVERT K TO FLOATING-POINT FORMAT
+
+#--WHILE THE CONVERSION IS GOING ON, WE GET F AND ADDRESS OF 1/F
+	mov.l		&0x3FFF0000,X(%a6)	# X IS NOW Y, I.E. 2^(-K)*X
+	mov.l		XFRAC(%a6),FFRAC(%a6)
+	and.l		&0xFE000000,FFRAC(%a6)	# FIRST 7 BITS OF Y
+	or.l		&0x01000000,FFRAC(%a6)	# GET F: ATTACH A 1 AT THE EIGHTH BIT
+	mov.l		FFRAC(%a6),%d1	# READY TO GET ADDRESS OF 1/F
+	and.l		&0x7E000000,%d1
+	asr.l		&8,%d1
+	asr.l		&8,%d1
+	asr.l		&4,%d1			# SHIFTED 20, D1 IS THE DISPLACEMENT
+	add.l		%d1,%a0			# A0 IS THE ADDRESS FOR 1/F
+
+	fmov.x		X(%a6),%fp0
+	mov.l		&0x3fff0000,F(%a6)
+	clr.l		F+8(%a6)
+	fsub.x		F(%a6),%fp0		# Y-F
+	fmovm.x		&0xc,-(%sp)		# SAVE FP2-3 WHILE FP0 IS NOT READY
+#--SUMMARY: FP0 IS Y-F, A0 IS ADDRESS OF 1/F, FP1 IS K
+#--REGISTERS SAVED: FPCR, FP1, FP2
+
+LP1CONT1:
+#--A RE-ENTRY POINT FOR LOGNP1
+	fmul.x		(%a0),%fp0		# FP0 IS U = (Y-F)/F
+	fmul.x		LOGOF2(%pc),%fp1	# GET K*LOG2 WHILE FP0 IS NOT READY
+	fmov.x		%fp0,%fp2
+	fmul.x		%fp2,%fp2		# FP2 IS V=U*U
+	fmov.x		%fp1,KLOG2(%a6)		# PUT K*LOG2 IN MEMORY, FREE FP1
+
+#--LOG(1+U) IS APPROXIMATED BY
+#--U + V*(A1+U*(A2+U*(A3+U*(A4+U*(A5+U*A6))))) WHICH IS
+#--[U + V*(A1+V*(A3+V*A5))]  +  [U*V*(A2+V*(A4+V*A6))]
+
+	fmov.x		%fp2,%fp3
+	fmov.x		%fp2,%fp1
+
+	fmul.d		LOGA6(%pc),%fp1		# V*A6
+	fmul.d		LOGA5(%pc),%fp2		# V*A5
+
+	fadd.d		LOGA4(%pc),%fp1		# A4+V*A6
+	fadd.d		LOGA3(%pc),%fp2		# A3+V*A5
+
+	fmul.x		%fp3,%fp1		# V*(A4+V*A6)
+	fmul.x		%fp3,%fp2		# V*(A3+V*A5)
+
+	fadd.d		LOGA2(%pc),%fp1		# A2+V*(A4+V*A6)
+	fadd.d		LOGA1(%pc),%fp2		# A1+V*(A3+V*A5)
+
+	fmul.x		%fp3,%fp1		# V*(A2+V*(A4+V*A6))
+	add.l		&16,%a0			# ADDRESS OF LOG(F)
+	fmul.x		%fp3,%fp2		# V*(A1+V*(A3+V*A5))
+
+	fmul.x		%fp0,%fp1		# U*V*(A2+V*(A4+V*A6))
+	fadd.x		%fp2,%fp0		# U+V*(A1+V*(A3+V*A5))
+
+	fadd.x		(%a0),%fp1		# LOG(F)+U*V*(A2+V*(A4+V*A6))
+	fmovm.x		(%sp)+,&0x30		# RESTORE FP2-3
+	fadd.x		%fp1,%fp0		# FP0 IS LOG(F) + LOG(1+U)
+
+	fmov.l		%d0,%fpcr
+	fadd.x		KLOG2(%a6),%fp0		# FINAL ADD
+	bra		t_inx2
+
+
+LOGNEAR1:
+
+# if the input is exactly equal to one, then exit through ld_pzero.
+# if these 2 lines weren't here, the correct answer would be returned
+# but the INEX2 bit would be set.
+	fcmp.b		%fp0,&0x1		# is it equal to one?
+	fbeq.l		ld_pzero		# yes
+
+#--REGISTERS SAVED: FPCR, FP1. FP0 CONTAINS THE INPUT.
+	fmov.x		%fp0,%fp1
+	fsub.s		one(%pc),%fp1		# FP1 IS X-1
+	fadd.s		one(%pc),%fp0		# FP0 IS X+1
+	fadd.x		%fp1,%fp1		# FP1 IS 2(X-1)
+#--LOG(X) = LOG(1+U/2)-LOG(1-U/2) WHICH IS AN ODD POLYNOMIAL
+#--IN U, U = 2(X-1)/(X+1) = FP1/FP0
+
+LP1CONT2:
+#--THIS IS A RE-ENTRY POINT FOR LOGNP1
+	fdiv.x		%fp0,%fp1		# FP1 IS U
+	fmovm.x		&0xc,-(%sp)		# SAVE FP2-3
+#--REGISTERS SAVED ARE NOW FPCR,FP1,FP2,FP3
+#--LET V=U*U, W=V*V, CALCULATE
+#--U + U*V*(B1 + V*(B2 + V*(B3 + V*(B4 + V*B5)))) BY
+#--U + U*V*(  [B1 + W*(B3 + W*B5)]  +  [V*(B2 + W*B4)]  )
+	fmov.x		%fp1,%fp0
+	fmul.x		%fp0,%fp0		# FP0 IS V
+	fmov.x		%fp1,SAVEU(%a6)		# STORE U IN MEMORY, FREE FP1
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS W
+
+	fmov.d		LOGB5(%pc),%fp3
+	fmov.d		LOGB4(%pc),%fp2
+
+	fmul.x		%fp1,%fp3		# W*B5
+	fmul.x		%fp1,%fp2		# W*B4
+
+	fadd.d		LOGB3(%pc),%fp3		# B3+W*B5
+	fadd.d		LOGB2(%pc),%fp2		# B2+W*B4
+
+	fmul.x		%fp3,%fp1		# W*(B3+W*B5), FP3 RELEASED
+
+	fmul.x		%fp0,%fp2		# V*(B2+W*B4)
+
+	fadd.d		LOGB1(%pc),%fp1		# B1+W*(B3+W*B5)
+	fmul.x		SAVEU(%a6),%fp0		# FP0 IS U*V
+
+	fadd.x		%fp2,%fp1		# B1+W*(B3+W*B5) + V*(B2+W*B4), FP2 RELEASED
+	fmovm.x		(%sp)+,&0x30		# FP2-3 RESTORED
+
+	fmul.x		%fp1,%fp0		# U*V*( [B1+W*(B3+W*B5)] + [V*(B2+W*B4)] )
+
+	fmov.l		%d0,%fpcr
+	fadd.x		SAVEU(%a6),%fp0
+	bra		t_inx2
+
+#--REGISTERS SAVED FPCR. LOG(-VE) IS INVALID
+LOGNEG:
+	bra		t_operr
+
+	global		slognd
+slognd:
+#--ENTRY POINT FOR LOG(X) FOR DENORMALIZED INPUT
+
+	mov.l		&-100,ADJK(%a6)		# INPUT = 2^(ADJK) * FP0
+
+#----normalize the input value by left shifting k bits (k to be determined
+#----below), adjusting exponent and storing -k to  ADJK
+#----the value TWOTO100 is no longer needed.
+#----Note that this code assumes the denormalized input is NON-ZERO.
+
+	movm.l		&0x3f00,-(%sp)		# save some registers  {d2-d7}
+	mov.l		(%a0),%d3		# D3 is exponent of smallest norm. #
+	mov.l		4(%a0),%d4
+	mov.l		8(%a0),%d5		# (D4,D5) is (Hi_X,Lo_X)
+	clr.l		%d2			# D2 used for holding K
+
+	tst.l		%d4
+	bne.b		Hi_not0
+
+Hi_0:
+	mov.l		%d5,%d4
+	clr.l		%d5
+	mov.l		&32,%d2
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6
+	lsl.l		%d6,%d4
+	add.l		%d6,%d2			# (D3,D4,D5) is normalized
+
+	mov.l		%d3,X(%a6)
+	mov.l		%d4,XFRAC(%a6)
+	mov.l		%d5,XFRAC+4(%a6)
+	neg.l		%d2
+	mov.l		%d2,ADJK(%a6)
+	fmov.x		X(%a6),%fp0
+	movm.l		(%sp)+,&0xfc		# restore registers {d2-d7}
+	lea		X(%a6),%a0
+	bra.w		LOGBGN			# begin regular log(X)
+
+Hi_not0:
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6		# find first 1
+	mov.l		%d6,%d2			# get k
+	lsl.l		%d6,%d4
+	mov.l		%d5,%d7			# a copy of D5
+	lsl.l		%d6,%d5
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d4			# (D3,D4,D5) normalized
+
+	mov.l		%d3,X(%a6)
+	mov.l		%d4,XFRAC(%a6)
+	mov.l		%d5,XFRAC+4(%a6)
+	neg.l		%d2
+	mov.l		%d2,ADJK(%a6)
+	fmov.x		X(%a6),%fp0
+	movm.l		(%sp)+,&0xfc		# restore registers {d2-d7}
+	lea		X(%a6),%a0
+	bra.w		LOGBGN			# begin regular log(X)
+
+	global		slognp1
+#--ENTRY POINT FOR LOG(1+X) FOR X FINITE, NON-ZERO, NOT NAN'S
+slognp1:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	fabs.x		%fp0			# test magnitude
+	fcmp.x		%fp0,LTHOLD(%pc)	# compare with min threshold
+	fbgt.w		LP1REAL			# if greater, continue
+	fmov.l		%d0,%fpcr
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		(%a0),%fp0		# return signed argument
+	bra		t_catch
+
+LP1REAL:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	mov.l		&0x00000000,ADJK(%a6)
+	fmov.x		%fp0,%fp1		# FP1 IS INPUT Z
+	fadd.s		one(%pc),%fp0		# X := ROUND(1+Z)
+	fmov.x		%fp0,X(%a6)
+	mov.w		XFRAC(%a6),XDCARE(%a6)
+	mov.l		X(%a6),%d1
+	cmp.l		%d1,&0
+	ble.w		LP1NEG0			# LOG OF ZERO OR -VE
+	cmp.l		%d1,&0x3ffe8000		# IS X IN [1/2,3/2]?
+	blt.w		LOGMAIN
+	cmp.l		%d1,&0x3fffc000
+	bgt.w		LOGMAIN
+#--IF 1+Z > 3/2 OR 1+Z < 1/2, THEN X, WHICH IS 1+Z ROUNDED,
+#--CONTAINS AT LEAST 63 BITS OF THE INFORMATION IN Z. IN THAT CASE,
+#--SIMPLY INVOKE LOG(X) FOR LOG(1+Z).
+
+LP1NEAR1:
+#--NEXT SEE IF EXP(-1/16) < X < EXP(1/16)
+	cmp.l		%d1,&0x3ffef07d
+	blt.w		LP1CARE
+	cmp.l		%d1,&0x3fff8841
+	bgt.w		LP1CARE
+
+LP1ONE16:
+#--EXP(-1/16) < X < EXP(1/16). LOG(1+Z) = LOG(1+U/2) - LOG(1-U/2)
+#--WHERE U = 2Z/(2+Z) = 2Z/(1+X).
+	fadd.x		%fp1,%fp1		# FP1 IS 2Z
+	fadd.s		one(%pc),%fp0		# FP0 IS 1+X
+#--U = FP1/FP0
+	bra.w		LP1CONT2
+
+LP1CARE:
+#--HERE WE USE THE USUAL TABLE DRIVEN APPROACH. CARE HAS TO BE
+#--TAKEN BECAUSE 1+Z CAN HAVE 67 BITS OF INFORMATION AND WE MUST
+#--PRESERVE ALL THE INFORMATION. BECAUSE 1+Z IS IN [1/2,3/2],
+#--THERE ARE ONLY TWO CASES.
+#--CASE 1: 1+Z < 1, THEN K = -1 AND Y-F = (2-F) + 2Z
+#--CASE 2: 1+Z > 1, THEN K = 0  AND Y-F = (1-F) + Z
+#--ON RETURNING TO LP1CONT1, WE MUST HAVE K IN FP1, ADDRESS OF
+#--(1/F) IN A0, Y-F IN FP0, AND FP2 SAVED.
+
+	mov.l		XFRAC(%a6),FFRAC(%a6)
+	and.l		&0xFE000000,FFRAC(%a6)
+	or.l		&0x01000000,FFRAC(%a6)	# F OBTAINED
+	cmp.l		%d1,&0x3FFF8000		# SEE IF 1+Z > 1
+	bge.b		KISZERO
+
+KISNEG1:
+	fmov.s		TWO(%pc),%fp0
+	mov.l		&0x3fff0000,F(%a6)
+	clr.l		F+8(%a6)
+	fsub.x		F(%a6),%fp0		# 2-F
+	mov.l		FFRAC(%a6),%d1
+	and.l		&0x7E000000,%d1
+	asr.l		&8,%d1
+	asr.l		&8,%d1
+	asr.l		&4,%d1			# D1 CONTAINS DISPLACEMENT FOR 1/F
+	fadd.x		%fp1,%fp1		# GET 2Z
+	fmovm.x		&0xc,-(%sp)		# SAVE FP2  {%fp2/%fp3}
+	fadd.x		%fp1,%fp0		# FP0 IS Y-F = (2-F)+2Z
+	lea		LOGTBL(%pc),%a0		# A0 IS ADDRESS OF 1/F
+	add.l		%d1,%a0
+	fmov.s		negone(%pc),%fp1	# FP1 IS K = -1
+	bra.w		LP1CONT1
+
+KISZERO:
+	fmov.s		one(%pc),%fp0
+	mov.l		&0x3fff0000,F(%a6)
+	clr.l		F+8(%a6)
+	fsub.x		F(%a6),%fp0		# 1-F
+	mov.l		FFRAC(%a6),%d1
+	and.l		&0x7E000000,%d1
+	asr.l		&8,%d1
+	asr.l		&8,%d1
+	asr.l		&4,%d1
+	fadd.x		%fp1,%fp0		# FP0 IS Y-F
+	fmovm.x		&0xc,-(%sp)		# FP2 SAVED {%fp2/%fp3}
+	lea		LOGTBL(%pc),%a0
+	add.l		%d1,%a0			# A0 IS ADDRESS OF 1/F
+	fmov.s		zero(%pc),%fp1		# FP1 IS K = 0
+	bra.w		LP1CONT1
+
+LP1NEG0:
+#--FPCR SAVED. D1 IS X IN COMPACT FORM.
+	cmp.l		%d1,&0
+	blt.b		LP1NEG
+LP1ZERO:
+	fmov.s		negone(%pc),%fp0
+
+	fmov.l		%d0,%fpcr
+	bra		t_dz
+
+LP1NEG:
+	fmov.s		zero(%pc),%fp0
+
+	fmov.l		%d0,%fpcr
+	bra		t_operr
+
+	global		slognp1d
+#--ENTRY POINT FOR LOG(1+Z) FOR DENORMALIZED INPUT
+# Simply return the denorm
+slognp1d:
+	bra		t_extdnrm
+
+#########################################################################
+# satanh():  computes the inverse hyperbolic tangent of a norm input	#
+# satanhd(): computes the inverse hyperbolic tangent of a denorm input	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = arctanh(X)						#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in	64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	ATANH								#
+#	1. If |X| >= 1, go to 3.					#
+#									#
+#	2. (|X| < 1) Calculate atanh(X) by				#
+#		sgn := sign(X)						#
+#		y := |X|						#
+#		z := 2y/(1-y)						#
+#		atanh(X) := sgn * (1/2) * logp1(z)			#
+#		Exit.							#
+#									#
+#	3. If |X| > 1, go to 5.						#
+#									#
+#	4. (|X| = 1) Generate infinity with an appropriate sign and	#
+#		divide-by-zero by					#
+#		sgn := sign(X)						#
+#		atanh(X) := sgn / (+0).				#
+#		Exit.							#
+#									#
+#	5. (|X| > 1) Generate an invalid operation by 0 * infinity.	#
+#		Exit.							#
+#									#
+#########################################################################
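+
+# For reference, the ATANH algorithm above is roughly the following C sketch
+# (illustrative only, not assembled here; assumes the standard <math.h>
+# fabs(), copysign() and log1p()):
+#
+#	#include <math.h>
+#	double satanh_sketch(double x)
+#	{
+#		double y = fabs(x);			/* y := |X|           */
+#		if (y > 1.0)
+#			return 0.0 * INFINITY;		/* step 5: invalid    */
+#		if (y == 1.0)
+#			return copysign(1.0, x) / 0.0;	/* step 4: sgn / (+0) */
+#		return copysign(0.5 * log1p(2.0*y / (1.0 - y)), x); /* step 2 */
+#	}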
+
+	global		satanh
+satanh:
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x3FFF8000
+	bge.b		ATANHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--Y = |X|, Z = 2Y/(1-Y), ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z).
+
+	fabs.x		(%a0),%fp0		# Y = |X|
+	fmov.x		%fp0,%fp1
+	fneg.x		%fp1			# -Y
+	fadd.x		%fp0,%fp0		# 2Y
+	fadd.s		&0x3F800000,%fp1	# 1-Y
+	fdiv.x		%fp1,%fp0		# 2Y/(1-Y)
+	mov.l		(%a0),%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x3F000000,%d1		# SIGN(X)*HALF
+	mov.l		%d1,-(%sp)
+
+	mov.l		%d0,-(%sp)		# save rnd prec,mode
+	clr.l		%d0			# pass ext prec,RN
+	fmovm.x		&0x01,-(%sp)		# save Z on stack
+	lea		(%sp),%a0		# pass ptr to Z
+	bsr		slognp1			# LOG1P(Z)
+	add.l		&0xc,%sp		# clear Z from stack
+
+	mov.l		(%sp)+,%d0		# fetch old prec,mode
+	fmov.l		%d0,%fpcr		# load it
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.s		(%sp)+,%fp0
+	bra		t_catch
+
+ATANHBIG:
+	fabs.x		(%a0),%fp0		# |X|
+	fcmp.s		%fp0,&0x3F800000
+	fbgt		t_operr
+	bra		t_dz
+
+	global		satanhd
+#--ATANH(X) = X FOR DENORMALIZED X
+satanhd:
+	bra		t_extdnrm
+
+#########################################################################
+# slog10():  computes the base-10 logarithm of a normalized input	#
+# slog10d(): computes the base-10 logarithm of a denormalized input	#
+# slog2():   computes the base-2 logarithm of a normalized input	#
+# slog2d():  computes the base-2 logarithm of a denormalized input	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = log_10(X) or log_2(X)					#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 1.7 ulps in 64 significant bits,	#
+#	i.e. within 0.5003 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#       slog10d:							#
+#									#
+#       Step 0.	If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FpCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. Call slognd to obtain Y = log(X), the natural log of X.	#
+#       Notes:  Even if X is denormalized, log(X) is always normalized.	#
+#									#
+#       Step 2.  Compute log_10(X) = log(X) * (1/log(10)).		#
+#            2.1 Restore the user FPCR					#
+#            2.2 Return ans := Y * INV_L10.				#
+#									#
+#       slog10:								#
+#									#
+#       Step 0. If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FpCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. Call sLogN to obtain Y = log(X), the natural log of X.	#
+#									#
+#       Step 2.   Compute log_10(X) = log(X) * (1/log(10)).		#
+#            2.1  Restore the user FPCR					#
+#            2.2  Return ans := Y * INV_L10.				#
+#									#
+#       sLog2d:								#
+#									#
+#       Step 0. If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FpCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. Call slognd to obtain Y = log(X), the natural log of X.	#
+#       Notes:  Even if X is denormalized, log(X) is always normalized.	#
+#									#
+#       Step 2.   Compute log_2(X) = log(X) * (1/log(2)).		#
+#            2.1  Restore the user FPCR					#
+#            2.2  Return ans := Y * INV_L2.				#
+#									#
+#       sLog2:								#
+#									#
+#       Step 0. If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FpCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. If X is not an integer power of two, i.e., X != 2^k,	#
+#               go to Step 3.						#
+#									#
+#       Step 2.   Return k.						#
+#            2.1  Get integer k, X = 2^k.				#
+#            2.2  Restore the user FPCR.				#
+#            2.3  Return ans := convert-to-double-extended(k).		#
+#									#
+#       Step 3. Call sLogN to obtain Y = log(X), the natural log of X.	#
+#									#
+#       Step 4.   Compute log_2(X) = log(X) * (1/log(2)).		#
+#            4.1  Restore the user FPCR					#
+#            4.2  Return ans := Y * INV_L2.				#
+#									#
+#########################################################################
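+
+# For reference, the slog10/slog2 steps above are roughly the following C
+# sketch (illustrative only, not assembled here; assumes <math.h> with the
+# usual M_LN2/M_LN10 constants and frexp()):
+#
+#	#include <math.h>
+#	double slog10_sketch(double x)
+#	{
+#		return log(x) * (1.0 / M_LN10);	/* Y * INV_L10              */
+#	}
+#	double slog2_sketch(double x)
+#	{
+#		int e;
+#		double m = frexp(x, &e);	/* x = m * 2^e, m in [.5,1) */
+#		if (x > 0.0 && m == 0.5)	/* x is exactly 2^(e-1)     */
+#			return (double)(e - 1);	/* Step 2: return k         */
+#		return log(x) * (1.0 / M_LN2);	/* Steps 3-4: Y * INV_L2    */
+#	}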
+
+INV_L10:
+	long		0x3FFD0000,0xDE5BD8A9,0x37287195,0x00000000
+
+INV_L2:
+	long		0x3FFF0000,0xB8AA3B29,0x5C17F0BC,0x00000000
+
+	global		slog10
+#--entry point for Log10(X), X is normalized
+slog10:
+	fmov.b		&0x1,%fp0
+	fcmp.x		%fp0,(%a0)		# if operand == 1,
+	fbeq.l		ld_pzero		# return an EXACT zero
+
+	mov.l		(%a0),%d1
+	blt.w		invalid
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slogn			# log(X), X normal.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L10(%pc),%fp0
+	bra		t_inx2
+
+	global		slog10d
+#--entry point for Log10(X), X is denormalized
+slog10d:
+	mov.l		(%a0),%d1
+	blt.w		invalid
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slognd			# log(X), X denorm.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L10(%pc),%fp0
+	bra		t_minx2
+
+	global		slog2
+#--entry point for Log2(X), X is normalized
+slog2:
+	mov.l		(%a0),%d1
+	blt.w		invalid
+
+	mov.l		8(%a0),%d1
+	bne.b		continue		# X is not 2^k
+
+	mov.l		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	bne.b		continue
+
+#--X = 2^k.
+	mov.w		(%a0),%d1
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x3FFF,%d1
+	beq.l		ld_pzero
+	fmov.l		%d0,%fpcr
+	fmov.l		%d1,%fp0
+	bra		t_inx2
+
+continue:
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slogn			# log(X), X normal.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L2(%pc),%fp0
+	bra		t_inx2
+
+invalid:
+	bra		t_operr
+
+	global		slog2d
+#--entry point for Log2(X), X is denormalized
+slog2d:
+	mov.l		(%a0),%d1
+	blt.w		invalid
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slognd			# log(X), X denorm.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L2(%pc),%fp0
+	bra		t_minx2
+
+#########################################################################
+# stwotox():  computes 2**X for a normalized input			#
+# stwotoxd(): computes 2**X for a denormalized input			#
+# stentox():  computes 10**X for a normalized input			#
+# stentoxd(): computes 10**X for a denormalized input			#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = 2**X or 10**X						#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 2 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	twotox								#
+#	1. If |X| > 16480, go to ExpBig.				#
+#									#
+#	2. If |X| < 2**(-70), go to ExpSm.				#
+#									#
+#	3. Decompose X as X = N/64 + r where |r| <= 1/128. Furthermore	#
+#		decompose N as						#
+#		 N = 64(M + M') + j,  j = 0,1,2,...,63.			#
+#									#
+#	4. Overwrite r := r * log2. Then				#
+#		2**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).		#
+#		Go to expr to compute that expression.			#
+#									#
+#	tentox								#
+#	1. If |X| > 16480*log_10(2) (base 10 log of 2), go to ExpBig.	#
+#									#
+#	2. If |X| < 2**(-70), go to ExpSm.				#
+#									#
+#	3. Set y := X*log_2(10)*64 (base 2 log of 10). Set		#
+#		N := round-to-int(y). Decompose N as			#
+#		 N = 64(M + M') + j,  j = 0,1,2,...,63.			#
+#									#
+#	4. Define r as							#
+#		r := ((X - N*L1)-N*L2) * L10				#
+#		where L1, L2 are the leading and trailing parts of	#
+#		log_10(2)/64 and L10 is the natural log of 10. Then	#
+#		10**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).		#
+#		Go to expr to compute that expression.			#
+#									#
+#	expr								#
+#	1. Fetch 2**(j/64) from table as Fact1 and Fact2.		#
+#									#
+#	2. Overwrite Fact1 and Fact2 by					#
+#		Fact1 := 2**(M) * Fact1					#
+#		Fact2 := 2**(M) * Fact2					#
+#		Thus Fact1 + Fact2 = 2**(M) * 2**(j/64).		#
+#									#
+#	3. Calculate P where 1 + P approximates exp(r):			#
+#		P = r + r*r*(A1+r*(A2+...+r*A5)).			#
+#									#
+#	4. Let AdjFact := 2**(M'). Return				#
+#		AdjFact * ( Fact1 + ((Fact1*P) + Fact2) ).		#
+#		Exit.							#
+#									#
+#	ExpBig								#
+#	1. Generate overflow by Huge * Huge if X > 0; otherwise,	#
+#	        generate underflow by Tiny * Tiny.			#
+#									#
+#	ExpSm								#
+#	1. Return 1 + X.						#
+#									#
+#########################################################################
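+
+# For reference, the twotox decomposition above is roughly the following C
+# sketch (illustrative only, not assembled here; assumes <math.h> with the
+# usual M_LN2 constant). tentox differs only in the constants used to form
+# N and r:
+#
+#	#include <math.h>
+#	double stwotox_sketch(double x)
+#	{
+#		double n = nearbyint(64.0 * x);	/* N := round-to-int(64*X)  */
+#		double r = (x - n / 64.0) * M_LN2; /* r := (X - N/64)*log(2) */
+#		int j = (int)(n - 64.0 * floor(n / 64.0)); /* j = N mod 64  */
+#		int m = (int)((n - (double)j) / 64.0); /* N = 64*M + j      */
+#		/* 2**X = 2**M * 2**(j/64) * exp(r); TEXPTBL holds 2**(j/64) */
+#		return ldexp(exp2(j / 64.0) * exp(r), m);
+#	}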
+
+L2TEN64:
+	long		0x406A934F,0x0979A371	# 64LOG10/LOG2
+L10TWO1:
+	long		0x3F734413,0x509F8000	# LOG2/64LOG10
+
+L10TWO2:
+	long		0xBFCD0000,0xC0219DC1,0xDA994FD2,0x00000000
+
+LOG10:	long		0x40000000,0x935D8DDD,0xAAA8AC17,0x00000000
+
+LOG2:	long		0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+EXPA5:	long		0x3F56C16D,0x6F7BD0B2
+EXPA4:	long		0x3F811112,0x302C712C
+EXPA3:	long		0x3FA55555,0x55554CC1
+EXPA2:	long		0x3FC55555,0x55554A54
+EXPA1:	long		0x3FE00000,0x00000000,0x00000000,0x00000000
+
+TEXPTBL:
+	long		0x3FFF0000,0x80000000,0x00000000,0x3F738000
+	long		0x3FFF0000,0x8164D1F3,0xBC030773,0x3FBEF7CA
+	long		0x3FFF0000,0x82CD8698,0xAC2BA1D7,0x3FBDF8A9
+	long		0x3FFF0000,0x843A28C3,0xACDE4046,0x3FBCD7C9
+	long		0x3FFF0000,0x85AAC367,0xCC487B15,0xBFBDE8DA
+	long		0x3FFF0000,0x871F6196,0x9E8D1010,0x3FBDE85C
+	long		0x3FFF0000,0x88980E80,0x92DA8527,0x3FBEBBF1
+	long		0x3FFF0000,0x8A14D575,0x496EFD9A,0x3FBB80CA
+	long		0x3FFF0000,0x8B95C1E3,0xEA8BD6E7,0xBFBA8373
+	long		0x3FFF0000,0x8D1ADF5B,0x7E5BA9E6,0xBFBE9670
+	long		0x3FFF0000,0x8EA4398B,0x45CD53C0,0x3FBDB700
+	long		0x3FFF0000,0x9031DC43,0x1466B1DC,0x3FBEEEB0
+	long		0x3FFF0000,0x91C3D373,0xAB11C336,0x3FBBFD6D
+	long		0x3FFF0000,0x935A2B2F,0x13E6E92C,0xBFBDB319
+	long		0x3FFF0000,0x94F4EFA8,0xFEF70961,0x3FBDBA2B
+	long		0x3FFF0000,0x96942D37,0x20185A00,0x3FBE91D5
+	long		0x3FFF0000,0x9837F051,0x8DB8A96F,0x3FBE8D5A
+	long		0x3FFF0000,0x99E04593,0x20B7FA65,0xBFBCDE7B
+	long		0x3FFF0000,0x9B8D39B9,0xD54E5539,0xBFBEBAAF
+	long		0x3FFF0000,0x9D3ED9A7,0x2CFFB751,0xBFBD86DA
+	long		0x3FFF0000,0x9EF53260,0x91A111AE,0xBFBEBEDD
+	long		0x3FFF0000,0xA0B0510F,0xB9714FC2,0x3FBCC96E
+	long		0x3FFF0000,0xA2704303,0x0C496819,0xBFBEC90B
+	long		0x3FFF0000,0xA43515AE,0x09E6809E,0x3FBBD1DB
+	long		0x3FFF0000,0xA5FED6A9,0xB15138EA,0x3FBCE5EB
+	long		0x3FFF0000,0xA7CD93B4,0xE965356A,0xBFBEC274
+	long		0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x3FBEA83C
+	long		0x3FFF0000,0xAB7A39B5,0xA93ED337,0x3FBECB00
+	long		0x3FFF0000,0xAD583EEA,0x42A14AC6,0x3FBE9301
+	long		0x3FFF0000,0xAF3B78AD,0x690A4375,0xBFBD8367
+	long		0x3FFF0000,0xB123F581,0xD2AC2590,0xBFBEF05F
+	long		0x3FFF0000,0xB311C412,0xA9112489,0x3FBDFB3C
+	long		0x3FFF0000,0xB504F333,0xF9DE6484,0x3FBEB2FB
+	long		0x3FFF0000,0xB6FD91E3,0x28D17791,0x3FBAE2CB
+	long		0x3FFF0000,0xB8FBAF47,0x62FB9EE9,0x3FBCDC3C
+	long		0x3FFF0000,0xBAFF5AB2,0x133E45FB,0x3FBEE9AA
+	long		0x3FFF0000,0xBD08A39F,0x580C36BF,0xBFBEAEFD
+	long		0x3FFF0000,0xBF1799B6,0x7A731083,0xBFBCBF51
+	long		0x3FFF0000,0xC12C4CCA,0x66709456,0x3FBEF88A
+	long		0x3FFF0000,0xC346CCDA,0x24976407,0x3FBD83B2
+	long		0x3FFF0000,0xC5672A11,0x5506DADD,0x3FBDF8AB
+	long		0x3FFF0000,0xC78D74C8,0xABB9B15D,0xBFBDFB17
+	long		0x3FFF0000,0xC9B9BD86,0x6E2F27A3,0xBFBEFE3C
+	long		0x3FFF0000,0xCBEC14FE,0xF2727C5D,0xBFBBB6F8
+	long		0x3FFF0000,0xCE248C15,0x1F8480E4,0xBFBCEE53
+	long		0x3FFF0000,0xD06333DA,0xEF2B2595,0xBFBDA4AE
+	long		0x3FFF0000,0xD2A81D91,0xF12AE45A,0x3FBC9124
+	long		0x3FFF0000,0xD4F35AAB,0xCFEDFA1F,0x3FBEB243
+	long		0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x3FBDE69A
+	long		0x3FFF0000,0xD99D15C2,0x78AFD7B6,0xBFB8BC61
+	long		0x3FFF0000,0xDBFBB797,0xDAF23755,0x3FBDF610
+	long		0x3FFF0000,0xDE60F482,0x5E0E9124,0xBFBD8BE1
+	long		0x3FFF0000,0xE0CCDEEC,0x2A94E111,0x3FBACB12
+	long		0x3FFF0000,0xE33F8972,0xBE8A5A51,0x3FBB9BFE
+	long		0x3FFF0000,0xE5B906E7,0x7C8348A8,0x3FBCF2F4
+	long		0x3FFF0000,0xE8396A50,0x3C4BDC68,0x3FBEF22F
+	long		0x3FFF0000,0xEAC0C6E7,0xDD24392F,0xBFBDBF4A
+	long		0x3FFF0000,0xED4F301E,0xD9942B84,0x3FBEC01A
+	long		0x3FFF0000,0xEFE4B99B,0xDCDAF5CB,0x3FBE8CAC
+	long		0x3FFF0000,0xF281773C,0x59FFB13A,0xBFBCBB3F
+	long		0x3FFF0000,0xF5257D15,0x2486CC2C,0x3FBEF73A
+	long		0x3FFF0000,0xF7D0DF73,0x0AD13BB9,0xBFB8B795
+	long		0x3FFF0000,0xFA83B2DB,0x722A033A,0x3FBEF84B
+	long		0x3FFF0000,0xFD3E0C0C,0xF486C175,0xBFBEF581
+
+	set		INT,L_SCR1
+
+	set		X,FP_SCR0
+	set		XDCARE,X+2
+	set		XFRAC,X+4
+
+	set		ADJFACT,FP_SCR0
+
+	set		FACT1,FP_SCR0
+	set		FACT1HI,FACT1+4
+	set		FACT1LOW,FACT1+8
+
+	set		FACT2,FP_SCR1
+	set		FACT2HI,FACT2+4
+	set		FACT2LOW,FACT2+8
+
+	global		stwotox
+#--ENTRY POINT FOR 2**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+stwotox:
+	fmovm.x		(%a0),&0x80		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	fmov.x		%fp0,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FB98000		# |X| >= 2**(-70)?
+	bge.b		TWOOK1
+	bra.w		EXPBORS
+
+TWOOK1:
+	cmp.l		%d1,&0x400D80C0		# |X| > 16480?
+	ble.b		TWOMAIN
+	bra.w		EXPBORS
+
+TWOMAIN:
+#--USUAL CASE, 2^(-70) <= |X| <= 16480
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42800000,%fp1	# 64 * X
+	fmov.l		%fp1,INT(%a6)		# N = ROUND-TO-INT(64 X)
+	mov.l		%d2,-(%sp)
+	lea		TEXPTBL(%pc),%a1	# LOAD ADDRESS OF TABLE OF 2^(J/64)
+	fmov.l		INT(%a6),%fp1		# N --> FLOATING FMT
+	mov.l		INT(%a6),%d1
+	mov.l		%d1,%d2
+	and.l		&0x3F,%d1		# D1 IS J
+	asl.l		&4,%d1			# DISPLACEMENT FOR 2^(J/64)
+	add.l		%d1,%a1			# ADDRESS FOR 2^(J/64)
+	asr.l		&6,%d2			# d2 IS L, N = 64L + J
+	mov.l		%d2,%d1
+	asr.l		&1,%d1			# D1 IS M
+	sub.l		%d1,%d2			# d2 IS M', N = 64(M+M') + J
+	add.l		&0x3FFF,%d2
+
+#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+#--D1 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+#--ADJFACT = 2^(M').
+#--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
+
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmul.s		&0x3C800000,%fp1	# (1/64)*N
+	mov.l		(%a1)+,FACT1(%a6)
+	mov.l		(%a1)+,FACT1HI(%a6)
+	mov.l		(%a1)+,FACT1LOW(%a6)
+	mov.w		(%a1)+,FACT2(%a6)
+
+	fsub.x		%fp1,%fp0		# X - (1/64)*INT(64 X)
+
+	mov.w		(%a1)+,FACT2HI(%a6)
+	clr.w		FACT2HI+2(%a6)
+	clr.l		FACT2LOW(%a6)
+	add.w		%d1,FACT1(%a6)
+	fmul.x		LOG2(%pc),%fp0		# FP0 IS R
+	add.w		%d1,FACT2(%a6)
+
+	bra.w		expr
+
+EXPBORS:
+#--FPCR, D0 SAVED
+	cmp.l		%d1,&0x3FFF8000
+	bgt.b		TEXPBIG
+
+#--|X| IS SMALL, RETURN 1 + X
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	fadd.s		&0x3F800000,%fp0	# RETURN 1 + X
+	bra		t_pinx2
+
+TEXPBIG:
+#--|X| IS LARGE, GENERATE OVERFLOW IF X > 0; ELSE GENERATE UNDERFLOW
+#--REGISTERS SAVED SO FAR ARE FPCR AND D0
+	mov.l		X(%a6),%d1
+	cmp.l		%d1,&0
+	blt.b		EXPNEG
+
+	bra		t_ovfl2			# t_ovfl expects positive value
+
+EXPNEG:
+	bra		t_unfl2			# t_unfl expects positive value
+
+	global		stwotoxd
+stwotoxd:
+#--ENTRY POINT FOR 2**(X) FOR DENORMALIZED ARGUMENT
+
+	fmov.l		%d0,%fpcr		# set user's rounding mode/precision
+	fmov.s		&0x3F800000,%fp0	# RETURN 1 + X
+	mov.l		(%a0),%d1
+	or.l		&0x00800001,%d1
+	fadd.s		%d1,%fp0
+	bra		t_pinx2
+
+	global		stentox
+#--ENTRY POINT FOR 10**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+stentox:
+	fmovm.x		(%a0),&0x80		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	fmov.x		%fp0,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FB98000		# |X| >= 2**(-70)?
+	bge.b		TENOK1
+	bra.w		EXPBORS
+
+TENOK1:
+	cmp.l		%d1,&0x400B9B07		# |X| <= 16480*log2/log10 ?
+	ble.b		TENMAIN
+	bra.w		EXPBORS
+
+TENMAIN:
+#--USUAL CASE, 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10
+
+	fmov.x		%fp0,%fp1
+	fmul.d		L2TEN64(%pc),%fp1	# X*64*LOG10/LOG2
+	fmov.l		%fp1,INT(%a6)		# N=INT(X*64*LOG10/LOG2)
+	mov.l		%d2,-(%sp)
+	lea		TEXPTBL(%pc),%a1	# LOAD ADDRESS OF TABLE OF 2^(J/64)
+	fmov.l		INT(%a6),%fp1		# N --> FLOATING FMT
+	mov.l		INT(%a6),%d1
+	mov.l		%d1,%d2
+	and.l		&0x3F,%d1		# D1 IS J
+	asl.l		&4,%d1			# DISPLACEMENT FOR 2^(J/64)
+	add.l		%d1,%a1			# ADDRESS FOR 2^(J/64)
+	asr.l		&6,%d2			# d2 IS L, N = 64L + J
+	mov.l		%d2,%d1
+	asr.l		&1,%d1			# D1 IS M
+	sub.l		%d1,%d2			# d2 IS M', N = 64(M+M') + J
+	add.l		&0x3FFF,%d2
+
+#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+#--D1 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+#--ADJFACT = 2^(M').
+#--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmov.x		%fp1,%fp2
+
+	fmul.d		L10TWO1(%pc),%fp1	# N*(LOG2/64LOG10)_LEAD
+	mov.l		(%a1)+,FACT1(%a6)
+
+	fmul.x		L10TWO2(%pc),%fp2	# N*(LOG2/64LOG10)_TRAIL
+
+	mov.l		(%a1)+,FACT1HI(%a6)
+	mov.l		(%a1)+,FACT1LOW(%a6)
+	fsub.x		%fp1,%fp0		# X - N L_LEAD
+	mov.w		(%a1)+,FACT2(%a6)
+
+	fsub.x		%fp2,%fp0		# X - N L_TRAIL
+
+	mov.w		(%a1)+,FACT2HI(%a6)
+	clr.w		FACT2HI+2(%a6)
+	clr.l		FACT2LOW(%a6)
+
+	fmul.x		LOG10(%pc),%fp0		# FP0 IS R
+	add.w		%d1,FACT1(%a6)
+	add.w		%d1,FACT2(%a6)
+
+expr:
+#--FPCR, FP2, FP3 ARE SAVED IN ORDER AS SHOWN.
+#--ADJFACT CONTAINS 2**(M'), FACT1 + FACT2 = 2**(M) * 2**(J/64).
+#--FP0 IS R. THE FOLLOWING CODE COMPUTES
+#--	2**(M'+M) * 2**(J/64) * EXP(R)
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS S = R*R
+
+	fmov.d		EXPA5(%pc),%fp2		# FP2 IS A5
+	fmov.d		EXPA4(%pc),%fp3		# FP3 IS A4
+
+	fmul.x		%fp1,%fp2		# FP2 IS S*A5
+	fmul.x		%fp1,%fp3		# FP3 IS S*A4
+
+	fadd.d		EXPA3(%pc),%fp2		# FP2 IS A3+S*A5
+	fadd.d		EXPA2(%pc),%fp3		# FP3 IS A2+S*A4
+
+	fmul.x		%fp1,%fp2		# FP2 IS S*(A3+S*A5)
+	fmul.x		%fp1,%fp3		# FP3 IS S*(A2+S*A4)
+
+	fadd.d		EXPA1(%pc),%fp2		# FP2 IS A1+S*(A3+S*A5)
+	fmul.x		%fp0,%fp3		# FP3 IS R*S*(A2+S*A4)
+
+	fmul.x		%fp1,%fp2		# FP2 IS S*(A1+S*(A3+S*A5))
+	fadd.x		%fp3,%fp0		# FP0 IS R+R*S*(A2+S*A4)
+	fadd.x		%fp2,%fp0		# FP0 IS EXP(R) - 1
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+#--FINAL RECONSTRUCTION PROCESS
+#--EXP(X) = 2^M*2^(J/64) + 2^M*2^(J/64)*(EXP(R)-1)  -  (1 OR 0)
+
+	fmul.x		FACT1(%a6),%fp0
+	fadd.x		FACT2(%a6),%fp0
+	fadd.x		FACT1(%a6),%fp0
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	mov.w		%d2,ADJFACT(%a6)	# INSERT EXPONENT
+	mov.l		(%sp)+,%d2
+	mov.l		&0x80000000,ADJFACT+4(%a6)
+	clr.l		ADJFACT+8(%a6)
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		ADJFACT(%a6),%fp0	# FINAL ADJUSTMENT
+	bra		t_catch
+
+	global		stentoxd
+stentoxd:
+#--ENTRY POINT FOR 10**(X) FOR DENORMALIZED ARGUMENT
+
+	fmov.l		%d0,%fpcr		# set user's rounding mode/precision
+	fmov.s		&0x3F800000,%fp0	# RETURN 1 + X
+	mov.l		(%a0),%d1
+	or.l		&0x00800001,%d1
+	fadd.s		%d1,%fp0
+	bra		t_pinx2
+
+#########################################################################
+# smovcr(): returns the ROM constant at the offset specified in d1	#
+#	    rounded to the mode and precision specified in d0.		#
+#									#
+# INPUT	***************************************************************	#
+#	d0 = rnd prec,mode						#
+#	d1 = ROM offset							#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = the ROM constant rounded to the user's rounding mode,prec	#
+#									#
+#########################################################################
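+
+# For reference, the offset dispatch below amounts to roughly the following
+# C sketch (illustrative only, not assembled here; pi_tbl/small_tbl/big_tbl
+# are hypothetical stand-ins for the PIRN/SMALRN/BIGRN tables further down):
+#
+#	extern const long double pi_tbl[1], small_tbl[4], big_tbl[16];
+#	const long double *movcr_lookup(unsigned off)
+#	{
+#		if (off == 0x00)
+#			return &pi_tbl[0];	/* $00: pi                  */
+#		if (off >= 0x0b && off <= 0x0e)	/* $0b-$0e: log10(2), e,    */
+#			return &small_tbl[off - 0x0b]; /* log2(e), log10(e) */
+#		if (off >= 0x30 && off <= 0x3f)	/* $30-$3f: ln(2), ln(10),  */
+#			return &big_tbl[off - 0x30]; /* powers of 10        */
+#		return 0;			/* anything else: +0.0      */
+#	}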
+
+	global		smovcr
+smovcr:
+	mov.l		%d1,-(%sp)		# save rom offset for a sec
+
+	lsr.b		&0x4,%d0		# shift ctrl bits to lo
+	mov.l		%d0,%d1			# make a copy
+	andi.w		&0x3,%d1		# extract rnd mode
+	andi.w		&0xc,%d0		# extract rnd prec
+	swap		%d0			# put rnd prec in hi
+	mov.w		%d1,%d0			# put rnd mode in lo
+
+	mov.l		(%sp)+,%d1		# get rom offset
+
+#
+# check range of offset
+#
+	tst.b		%d1			# if zero, offset is to pi
+	beq.b		pi_tbl			# it is pi
+	cmpi.b		%d1,&0x0a		# check range $01 - $0a
+	ble.b		z_val			# if in this range, return zero
+	cmpi.b		%d1,&0x0e		# check range $0b - $0e
+	ble.b		sm_tbl			# valid constants in this range
+	cmpi.b		%d1,&0x2f		# check range $10 - $2f
+	ble.b		z_val			# if in this range, return zero
+	cmpi.b		%d1,&0x3f		# check range $30 - $3f
+	ble.b		bg_tbl			# valid constants in this range
+
+z_val:
+	bra.l		ld_pzero		# return a zero
+
+#
+# the answer is PI rounded to the proper precision.
+#
+# fetch a pointer to the answer table relating to the proper rounding
+# precision.
+#
+pi_tbl:
+	tst.b		%d0			# is rmode RN?
+	bne.b		pi_not_rn		# no
+pi_rn:
+	lea.l		PIRN(%pc),%a0		# yes; load PI RN table addr
+	bra.w		set_finx
+pi_not_rn:
+	cmpi.b		%d0,&rp_mode		# is rmode RP?
+	beq.b		pi_rp			# yes
+pi_rzrm:
+	lea.l		PIRZRM(%pc),%a0		# no; load PI RZ,RM table addr
+	bra.b		set_finx
+pi_rp:
+	lea.l		PIRP(%pc),%a0		# load PI RP table addr
+	bra.b		set_finx
+
+#
+# the answer is one of:
+#	$0B	log10(2)	(inexact)
+#	$0C	e		(inexact)
+#	$0D	log2(e)		(inexact)
+#	$0E	log10(e)	(exact)
+#
+# fetch a pointer to the answer table relating to the proper rounding
+# precision.
+#
+sm_tbl:
+	subi.b		&0xb,%d1		# make offset in 0-4 range
+	tst.b		%d0			# is rmode RN?
+	bne.b		sm_not_rn		# no
+sm_rn:
+	lea.l		SMALRN(%pc),%a0		# yes; load RN table addr
+sm_tbl_cont:
+	cmpi.b		%d1,&0x2		# is result log10(e)?
+	ble.b		set_finx		# no; answer is inexact
+	bra.b		no_finx			# yes; answer is exact
+sm_not_rn:
+	cmpi.b		%d0,&rp_mode		# is rmode RP?
+	beq.b		sm_rp			# yes
+sm_rzrm:
+	lea.l		SMALRZRM(%pc),%a0	# no; load RZ,RM table addr
+	bra.b		sm_tbl_cont
+sm_rp:
+	lea.l		SMALRP(%pc),%a0		# load RP table addr
+	bra.b		sm_tbl_cont
+
+#
+# the answer is one of:
+#	$30	ln(2)		(inexact)
+#	$31	ln(10)		(inexact)
+#	$32	10^0		(exact)
+#	$33	10^1		(exact)
+#	$34	10^2		(exact)
+#	$35	10^4		(exact)
+#	$36	10^8		(exact)
+#	$37	10^16		(exact)
+#	$38	10^32		(inexact)
+#	$39	10^64		(inexact)
+#	$3A	10^128		(inexact)
+#	$3B	10^256		(inexact)
+#	$3C	10^512		(inexact)
+#	$3D	10^1024		(inexact)
+#	$3E	10^2048		(inexact)
+#	$3F	10^4096		(inexact)
+#
+# fetch a pointer to the answer table relating to the proper rounding
+# precision.
+#
+bg_tbl:
+	subi.b		&0x30,%d1		# make offset in 0-f range
+	tst.b		%d0			# is rmode RN?
+	bne.b		bg_not_rn		# no
+bg_rn:
+	lea.l		BIGRN(%pc),%a0		# yes; load RN table addr
+bg_tbl_cont:
+	cmpi.b		%d1,&0x1		# is offset <= $31?
+	ble.b		set_finx		# yes; answer is inexact
+	cmpi.b		%d1,&0x7		# is $32 <= offset <= $37?
+	ble.b		no_finx			# yes; answer is exact
+	bra.b		set_finx		# no; answer is inexact
+bg_not_rn:
+	cmpi.b		%d0,&rp_mode		# is rmode RP?
+	beq.b		bg_rp			# yes
+bg_rzrm:
+	lea.l		BIGRZRM(%pc),%a0	# no; load RZ,RM table addr
+	bra.b		bg_tbl_cont
+bg_rp:
+	lea.l		BIGRP(%pc),%a0		# load RP table addr
+	bra.b		bg_tbl_cont
+
+# answer is inexact, so set INEX2 and AINEX in the user's FPSR.
+set_finx:
+	ori.l		&inx2a_mask,USER_FPSR(%a6) # set INEX2/AINEX
+no_finx:
+	mulu.w		&0xc,%d1		# offset points into tables
+	swap		%d0			# put rnd prec in lo word
+	tst.b		%d0			# is precision extended?
+
+	bne.b		not_ext			# if xprec, do not call round
+
+# Precision is extended
+	fmovm.x		(%a0,%d1.w),&0x80	# return result in fp0
+	rts
+
+# Precision is single or double
+not_ext:
+	swap		%d0			# rnd prec in upper word
+
+# call round() to round the answer to the proper precision.
+# exponents out of range for single or double DO NOT cause underflow
+# or overflow.
+	mov.w		0x0(%a0,%d1.w),FP_SCR1_EX(%a6) # load first word
+	mov.l		0x4(%a0,%d1.w),FP_SCR1_HI(%a6) # load second word
+	mov.l		0x8(%a0,%d1.w),FP_SCR1_LO(%a6) # load third word
+	mov.l		%d0,%d1
+	clr.l		%d0			# clear g,r,s
+	lea		FP_SCR1(%a6),%a0	# pass ptr to answer
+	clr.w		LOCAL_SGN(%a0)		# sign always positive
+	bsr.l		_round			# round the mantissa
+
+	fmovm.x		(%a0),&0x80		# return rounded result in fp0
+	rts
+
+	align		0x4
+
+PIRN:	long		0x40000000,0xc90fdaa2,0x2168c235	# pi
+PIRZRM:	long		0x40000000,0xc90fdaa2,0x2168c234	# pi
+PIRP:	long		0x40000000,0xc90fdaa2,0x2168c235	# pi
+
+SMALRN:	long		0x3ffd0000,0x9a209a84,0xfbcff798	# log10(2)
+	long		0x40000000,0xadf85458,0xa2bb4a9a	# e
+	long		0x3fff0000,0xb8aa3b29,0x5c17f0bc	# log2(e)
+	long		0x3ffd0000,0xde5bd8a9,0x37287195	# log10(e)
+	long		0x00000000,0x00000000,0x00000000	# 0.0
+
+SMALRZRM:
+	long		0x3ffd0000,0x9a209a84,0xfbcff798	# log10(2)
+	long		0x40000000,0xadf85458,0xa2bb4a9a	# e
+	long		0x3fff0000,0xb8aa3b29,0x5c17f0bb	# log2(e)
+	long		0x3ffd0000,0xde5bd8a9,0x37287195	# log10(e)
+	long		0x00000000,0x00000000,0x00000000	# 0.0
+
+SMALRP:	long		0x3ffd0000,0x9a209a84,0xfbcff799	# log10(2)
+	long		0x40000000,0xadf85458,0xa2bb4a9b	# e
+	long		0x3fff0000,0xb8aa3b29,0x5c17f0bc	# log2(e)
+	long		0x3ffd0000,0xde5bd8a9,0x37287195	# log10(e)
+	long		0x00000000,0x00000000,0x00000000	# 0.0
+
+BIGRN:	long		0x3ffe0000,0xb17217f7,0xd1cf79ac	# ln(2)
+	long		0x40000000,0x935d8ddd,0xaaa8ac17	# ln(10)
+
+	long		0x3fff0000,0x80000000,0x00000000	# 10 ^ 0
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
+
+BIGRZRM:
+	long		0x3ffe0000,0xb17217f7,0xd1cf79ab	# ln(2)
+	long		0x40000000,0x935d8ddd,0xaaa8ac16	# ln(10)
+
+	long		0x3fff0000,0x80000000,0x00000000	# 10 ^ 0
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59D	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CDF	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8D	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C6	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE4	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979A	# 10 ^ 4096
+
+BIGRP:
+	long		0x3ffe0000,0xb17217f7,0xd1cf79ac	# ln(2)
+	long		0x40000000,0x935d8ddd,0xaaa8ac17	# ln(10)
+
+	long		0x3fff0000,0x80000000,0x00000000	# 10 ^ 0
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D6	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C18	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
+
+#########################################################################
+# sscale(): computes the destination operand scaled by the source	#
+#	    operand. If the absolute value of the source operand is	#
+#	    >= 2^14, an overflow or underflow is returned.		#
+#									#
+# INPUT *************************************************************** #
+#	a0  = pointer to double-extended source operand X		#
+#	a1  = pointer to double-extended destination operand Y		#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 =  scale(X,Y)						#
+#									#
+#########################################################################
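+
+# For reference, the operation below is roughly the following C sketch
+# (illustrative only, not assembled here; assumes <math.h> trunc()/ldexp()):
+#
+#	#include <math.h>
+#	double sscale_sketch(double x, double y)
+#	{
+#		double n = trunc(x);		/* fintrz: int part of src  */
+#		if (n >  16384.0) n =  16384.0;	/* |src| >= 2^14: saturate  */
+#		if (n < -16384.0) n = -16384.0;	/* so ldexp over/underflows */
+#		return ldexp(y, (int)n);	/* y * 2^n                  */
+#	}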
+
+set	SIGN,		L_SCR1
+
+	global		sscale
+sscale:
+	mov.l		%d0,-(%sp)		# store off ctrl bits for now
+
+	mov.w		DST_EX(%a1),%d1		# get dst exponent
+	smi.b		SIGN(%a6)		# use SIGN to hold dst sign
+	andi.l		&0x00007fff,%d1		# strip sign from dst exp
+
+	mov.w		SRC_EX(%a0),%d0		# check src bounds
+	andi.w		&0x7fff,%d0		# clr src sign bit
+	cmpi.w		%d0,&0x3fff		# is |src| < 1?
+	blt.w		src_small		# yes
+	cmpi.w		%d0,&0x400c		# no; is src too big?
+	bgt.w		src_out			# yes
+
+#
+# Source is within 2^14 range.
+#
+src_ok:
+	fintrz.x	SRC(%a0),%fp0		# calc int of src
+	fmov.l		%fp0,%d0		# int src to d0
+# don't want any accrued bits from the fintrz showing up later since
+# we may need to read the fpsr for the last fp op in t_catch2().
+	fmov.l		&0x0,%fpsr
+
+	tst.b		DST_HI(%a1)		# is dst denormalized?
+	bmi.b		sok_norm
+
+# the dst is a DENORM. normalize the DENORM and add the adjustment to
+# the src value. then, jump to the norm part of the routine.
+sok_dnrm:
+	mov.l		%d0,-(%sp)		# save src for now
+
+	mov.w		DST_EX(%a1),FP_SCR0_EX(%a6) # make a copy
+	mov.l		DST_HI(%a1),FP_SCR0_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR0_LO(%a6)
+
+	lea		FP_SCR0(%a6),%a0	# pass ptr to DENORM
+	bsr.l		norm			# normalize the DENORM
+	neg.l		%d0
+	add.l		(%sp)+,%d0		# add adjustment to src
+
+	fmovm.x		FP_SCR0(%a6),&0x80	# load normalized DENORM
+
+	cmpi.w		%d0,&-0x3fff		# is the shift amt really low?
+	bge.b		sok_norm2		# thank goodness no
+
+# the multiply factor that we're trying to create should be a denorm
+# for the multiply to work. therefore, we're going to actually do a
+# multiply with a denorm which will cause an unimplemented data type
+# exception to be put into the machine which will be caught and corrected
+# later. we don't do this with the DENORMs above because this method
+# is slower. but, don't fret, I don't see it being used much either.
+	fmov.l		(%sp)+,%fpcr		# restore user fpcr
+	mov.l		&0x80000000,%d1		# load normalized mantissa
+	subi.l		&-0x3fff,%d0		# how many should we shift?
+	neg.l		%d0			# make it positive
+	cmpi.b		%d0,&0x20		# is it > 32?
+	bge.b		sok_dnrm_32		# yes
+	lsr.l		%d0,%d1			# no; bit stays in upper lw
+	clr.l		-(%sp)			# insert zero low mantissa
+	mov.l		%d1,-(%sp)		# insert new high mantissa
+	clr.l		-(%sp)			# make zero exponent
+	bra.b		sok_norm_cont
+sok_dnrm_32:
+	subi.b		&0x20,%d0		# get shift count
+	lsr.l		%d0,%d1			# make low mantissa longword
+	mov.l		%d1,-(%sp)		# insert new low mantissa
+	clr.l		-(%sp)			# insert zero high mantissa
+	clr.l		-(%sp)			# make zero exponent
+	bra.b		sok_norm_cont
+
+# the src will force the dst to a DENORM value or worse. so, let's
+# create an fp multiply that will create the result.
+sok_norm:
+	fmovm.x		DST(%a1),&0x80		# load fp0 with normalized src
+sok_norm2:
+	fmov.l		(%sp)+,%fpcr		# restore user fpcr
+
+	addi.w		&0x3fff,%d0		# turn src amt into exp value
+	swap		%d0			# put exponent in high word
+	clr.l		-(%sp)			# insert zero low mantissa
+	mov.l		&0x80000000,-(%sp)	# insert new high mantissa
+	mov.l		%d0,-(%sp)		# insert new sign/exponent word
+
+sok_norm_cont:
+	fmov.l		%fpcr,%d0		# d0 needs fpcr for t_catch2
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		(%sp)+,%fp0		# do the multiply
+	bra		t_catch2		# catch any exceptions
+
+#
+# Source is outside of 2^14 range.  Test the sign and branch
+# to the appropriate exception handler.
+#
+src_out:
+	mov.l		(%sp)+,%d0		# restore ctrl bits
+	exg		%a0,%a1			# swap src,dst ptrs
+	tst.b		SRC_EX(%a1)		# is src negative?
+	bmi		t_unfl			# yes; underflow
+	bra		t_ovfl_sc		# no; overflow
+
+#
+# The source input is below 1, so we check for denormalized numbers
+# and set unfl.
+#
+src_small:
+	tst.b		DST_HI(%a1)		# is dst denormalized?
+	bpl.b		ssmall_done		# yes
+
+	mov.l		(%sp)+,%d0
+	fmov.l		%d0,%fpcr		# no; load control bits
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		DST(%a1),%fp0		# simply return dest
+	bra		t_catch2
+ssmall_done:
+	mov.l		(%sp)+,%d0		# load control bits into d0
+	mov.l		%a1,%a0			# pass ptr to dst
+	bra		t_resdnrm
+
+#########################################################################
+# smod(): computes the fp MOD of the input values X,Y.			#
+# srem(): computes the fp (IEEE) REM of the input values X,Y.		#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input X			#
+#	a1 = pointer to extended precision input Y			#
+#	d0 = round precision,mode					#
+#									#
+#	The input operands X and Y can be either normalized or		#
+#	denormalized.							#
+#									#
+# OUTPUT ************************************************************** #
+#      fp0 = FREM(X,Y) or FMOD(X,Y)					#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#       Step 1.  Save and strip signs of X and Y: signX := sign(X),	#
+#                signY := sign(Y), X := |X|, Y := |Y|,			#
+#                signQ := signX EOR signY. Record whether MOD or REM	#
+#                is requested.						#
+#									#
+#       Step 2.  Set L := expo(X)-expo(Y), k := 0, Q := 0.		#
+#                If (L < 0) then					#
+#                   R := X, go to Step 4.				#
+#                else							#
+#                   R := 2^(-L)X, j := L.				#
+#                endif							#
+#									#
+#       Step 3.  Perform MOD(X,Y)					#
+#            3.1 If R = Y, go to Step 9.				#
+#            3.2 If R > Y, then { R := R - Y, Q := Q + 1}		#
+#            3.3 If j = 0, go to Step 4.				#
+#            3.4 k := k + 1, j := j - 1, Q := 2Q, R := 2R. Go to	#
+#                Step 3.1.						#
+#									#
+#       Step 4.  At this point, R = X - QY = MOD(X,Y). Set		#
+#                Last_Subtract := false (used in Step 7 below). If	#
+#                MOD is requested, go to Step 6.			#
+#									#
+#       Step 5.  R = MOD(X,Y), but REM(X,Y) is requested.		#
+#            5.1 If R < Y/2, then R = MOD(X,Y) = REM(X,Y). Go to	#
+#                Step 6.						#
+#            5.2 If R > Y/2, then { set Last_Subtract := true,		#
+#                Q := Q + 1, Y := signY*Y }. Go to Step 6.		#
+#            5.3 This is the tricky case of R = Y/2. If Q is odd,	#
+#                then { Q := Q + 1, signX := -signX }.			#
+#									#
+#       Step 6.  R := signX*R.						#
+#									#
+#       Step 7.  If Last_Subtract = true, R := R - Y.			#
+#									#
+#       Step 8.  Return signQ, last 7 bits of Q, and R as required.	#
+#									#
+#       Step 9.  At this point, R = 2^(-j)*X - Q Y = Y. Thus,		#
+#                X = 2^(j)*(Q+1)Y. set Q := 2^(j)*(Q+1),		#
+#                R := 0. Return signQ, last 7 bits of Q, and R.		#
+#									#
+#########################################################################
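+
+# For reference, MOD truncates the quotient (Steps 2-4) while REM rounds it
+# to nearest with the Step 5 tie handling; a minimal C sketch using libm
+# (illustrative only, not assembled here):
+#
+#	#include <math.h>
+#	double smod_sketch(double x, double y)
+#	{
+#		return fmod(x, y);		/* FMOD(X,Y)                */
+#	}
+#	double srem_sketch(double x, double y, int *q)
+#	{
+#		/* *q gets the sign and low quotient bits, analogous to    */
+#		/* the 7-bit FPSR quotient byte written in Step 8           */
+#		return remquo(x, y, q);		/* IEEE REM(X,Y)            */
+#	}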
+
+	set		Mod_Flag,L_SCR3
+	set		Sc_Flag,L_SCR3+1
+
+	set		SignY,L_SCR2
+	set		SignX,L_SCR2+2
+	set		SignQ,L_SCR3+2
+
+	set		Y,FP_SCR0
+	set		Y_Hi,Y+4
+	set		Y_Lo,Y+8
+
+	set		R,FP_SCR1
+	set		R_Hi,R+4
+	set		R_Lo,R+8
+
+Scale:
+	long		0x00010000,0x80000000,0x00000000,0x00000000
+
+	global		smod
+smod:
+	clr.b		FPSR_QBYTE(%a6)
+	mov.l		%d0,-(%sp)		# save ctrl bits
+	clr.b		Mod_Flag(%a6)
+	bra.b		Mod_Rem
+
+	global		srem
+srem:
+	clr.b		FPSR_QBYTE(%a6)
+	mov.l		%d0,-(%sp)		# save ctrl bits
+	mov.b		&0x1,Mod_Flag(%a6)
+
+Mod_Rem:
+#..Save sign of X and Y
+	movm.l		&0x3f00,-(%sp)		# save data registers
+	mov.w		SRC_EX(%a0),%d3
+	mov.w		%d3,SignY(%a6)
+	and.l		&0x00007FFF,%d3		# Y := |Y|
+
+#
+	mov.l		SRC_HI(%a0),%d4
+	mov.l		SRC_LO(%a0),%d5		# (D3,D4,D5) is |Y|
+
+	tst.l		%d3
+	bne.b		Y_Normal
+
+	mov.l		&0x00003FFE,%d3		# $3FFD + 1
+	tst.l		%d4
+	bne.b		HiY_not0
+
+HiY_0:
+	mov.l		%d5,%d4
+	clr.l		%d5
+	sub.l		&32,%d3
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6
+	lsl.l		%d6,%d4
+	sub.l		%d6,%d3			# (D3,D4,D5) is normalized
+#	                                        ...with bias $7FFD
+	bra.b		Chk_X
+
+HiY_not0:
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6
+	sub.l		%d6,%d3
+	lsl.l		%d6,%d4
+	mov.l		%d5,%d7			# a copy of D5
+	lsl.l		%d6,%d5
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d4			# (D3,D4,D5) normalized
+#                                       ...with bias $7FFD
+	bra.b		Chk_X
+
+Y_Normal:
+	add.l		&0x00003FFE,%d3		# (D3,D4,D5) normalized
+#                                       ...with bias $7FFD
+
+Chk_X:
+	mov.w		DST_EX(%a1),%d0
+	mov.w		%d0,SignX(%a6)
+	mov.w		SignY(%a6),%d1
+	eor.l		%d0,%d1
+	and.l		&0x00008000,%d1
+	mov.w		%d1,SignQ(%a6)		# sign(Q) obtained
+	and.l		&0x00007FFF,%d0
+	mov.l		DST_HI(%a1),%d1
+	mov.l		DST_LO(%a1),%d2		# (D0,D1,D2) is |X|
+	tst.l		%d0
+	bne.b		X_Normal
+	mov.l		&0x00003FFE,%d0
+	tst.l		%d1
+	bne.b		HiX_not0
+
+HiX_0:
+	mov.l		%d2,%d1
+	clr.l		%d2
+	sub.l		&32,%d0
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	lsl.l		%d6,%d1
+	sub.l		%d6,%d0			# (D0,D1,D2) is normalized
+#                                       ...with bias $7FFD
+	bra.b		Init
+
+HiX_not0:
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	sub.l		%d6,%d0
+	lsl.l		%d6,%d1
+	mov.l		%d2,%d7			# a copy of D2
+	lsl.l		%d6,%d2
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d1			# (D0,D1,D2) normalized
+#                                       ...with bias $7FFD
+	bra.b		Init
+
+X_Normal:
+	add.l		&0x00003FFE,%d0		# (D0,D1,D2) normalized
+#                                       ...with bias $7FFD
+
+Init:
+#
+	mov.l		%d3,L_SCR1(%a6)		# save biased exp(Y)
+	mov.l		%d0,-(%sp)		# save biased exp(X)
+	sub.l		%d3,%d0			# L := expo(X)-expo(Y)
+
+	clr.l		%d6			# D6 := carry <- 0
+	clr.l		%d3			# D3 is Q
+	mov.l		&0,%a1			# A1 is k; j+k=L, Q=0
+
+#..(Carry,D1,D2) is R
+	tst.l		%d0
+	bge.b		Mod_Loop_pre
+
+#..expo(X) < expo(Y). Thus X = mod(X,Y)
+#
+	mov.l		(%sp)+,%d0		# restore d0
+	bra.w		Get_Mod
+
+Mod_Loop_pre:
+	addq.l		&0x4,%sp		# erase exp(X)
+#..At this point  R = 2^(-L)X; Q = 0; k = 0; and  k+j = L
+Mod_Loop:
+	tst.l		%d6			# test carry bit
+	bgt.b		R_GT_Y
+
+#..At this point carry = 0, R = (D1,D2), Y = (D4,D5)
+	cmp.l		%d1,%d4			# compare hi(R) and hi(Y)
+	bne.b		R_NE_Y
+	cmp.l		%d2,%d5			# compare lo(R) and lo(Y)
+	bne.b		R_NE_Y
+
+#..At this point, R = Y
+	bra.w		Rem_is_0
+
+R_NE_Y:
+#..use the borrow of the previous compare
+	bcs.b		R_LT_Y			# borrow is set iff R < Y
+
+R_GT_Y:
+#..If Carry is set, then Y < (Carry,D1,D2) < 2Y. Otherwise, Carry = 0
+#..and Y < (D1,D2) < 2Y. Either way, perform R - Y
+	sub.l		%d5,%d2			# lo(R) - lo(Y)
+	subx.l		%d4,%d1			# hi(R) - hi(Y)
+	clr.l		%d6			# clear carry
+	addq.l		&1,%d3			# Q := Q + 1
+
+R_LT_Y:
+#..At this point, Carry=0, R < Y. R = 2^(k-L)X - QY; k+j = L; j >= 0.
+	tst.l		%d0			# see if j = 0.
+	beq.b		PostLoop
+
+	add.l		%d3,%d3			# Q := 2Q
+	add.l		%d2,%d2			# lo(R) = 2lo(R)
+	roxl.l		&1,%d1			# hi(R) = 2hi(R) + carry
+	scs		%d6			# set Carry if 2(R) overflows
+	addq.l		&1,%a1			# k := k+1
+	subq.l		&1,%d0			# j := j - 1
+#..At this point, R=(Carry,D1,D2) = 2^(k-L)X - QY, j+k=L, j >= 0, R < 2Y.
+
+	bra.b		Mod_Loop
+
+PostLoop:
+#..k = L, j = 0, Carry = 0, R = (D1,D2) = X - QY, R < Y.
+
+#..normalize R.
+	mov.l		L_SCR1(%a6),%d0		# new biased expo of R
+	tst.l		%d1
+	bne.b		HiR_not0
+
+HiR_0:
+	mov.l		%d2,%d1
+	clr.l		%d2
+	sub.l		&32,%d0
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	lsl.l		%d6,%d1
+	sub.l		%d6,%d0			# (D0,D1,D2) is normalized
+#                                       ...with bias $7FFD
+	bra.b		Get_Mod
+
+HiR_not0:
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	bmi.b		Get_Mod			# already normalized
+	sub.l		%d6,%d0
+	lsl.l		%d6,%d1
+	mov.l		%d2,%d7			# a copy of D2
+	lsl.l		%d6,%d2
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d1			# (D0,D1,D2) normalized
+
+#
+Get_Mod:
+	cmp.l		%d0,&0x000041FE
+	bge.b		No_Scale
+Do_Scale:
+	mov.w		%d0,R(%a6)
+	mov.l		%d1,R_Hi(%a6)
+	mov.l		%d2,R_Lo(%a6)
+	mov.l		L_SCR1(%a6),%d6
+	mov.w		%d6,Y(%a6)
+	mov.l		%d4,Y_Hi(%a6)
+	mov.l		%d5,Y_Lo(%a6)
+	fmov.x		R(%a6),%fp0		# no exception
+	mov.b		&1,Sc_Flag(%a6)
+	bra.b		ModOrRem
+No_Scale:
+	mov.l		%d1,R_Hi(%a6)
+	mov.l		%d2,R_Lo(%a6)
+	sub.l		&0x3FFE,%d0
+	mov.w		%d0,R(%a6)
+	mov.l		L_SCR1(%a6),%d6
+	sub.l		&0x3FFE,%d6
+	mov.l		%d6,L_SCR1(%a6)
+	fmov.x		R(%a6),%fp0
+	mov.w		%d6,Y(%a6)
+	mov.l		%d4,Y_Hi(%a6)
+	mov.l		%d5,Y_Lo(%a6)
+	clr.b		Sc_Flag(%a6)
+
+#
+ModOrRem:
+	tst.b		Mod_Flag(%a6)
+	beq.b		Fix_Sign
+
+	mov.l		L_SCR1(%a6),%d6		# new biased expo(Y)
+	subq.l		&1,%d6			# biased expo(Y/2)
+	cmp.l		%d0,%d6
+	blt.b		Fix_Sign
+	bgt.b		Last_Sub
+
+	cmp.l		%d1,%d4
+	bne.b		Not_EQ
+	cmp.l		%d2,%d5
+	bne.b		Not_EQ
+	bra.w		Tie_Case
+
+Not_EQ:
+	bcs.b		Fix_Sign
+
+Last_Sub:
+#
+	fsub.x		Y(%a6),%fp0		# no exceptions
+	addq.l		&1,%d3			# Q := Q + 1
+
+#
+Fix_Sign:
+#..Get sign of X
+	mov.w		SignX(%a6),%d6
+	bge.b		Get_Q
+	fneg.x		%fp0
+
+#..Get Q
+#
+Get_Q:
+	clr.l		%d6
+	mov.w		SignQ(%a6),%d6		# D6 is sign(Q)
+	mov.l		&8,%d7
+	lsr.l		%d7,%d6
+	and.l		&0x0000007F,%d3		# 7 bits of Q
+	or.l		%d6,%d3			# sign and bits of Q
+#	swap		%d3
+#	fmov.l		%fpsr,%d6
+#	and.l		&0xFF00FFFF,%d6
+#	or.l		%d3,%d6
+#	fmov.l		%d6,%fpsr		# put Q in fpsr
+	mov.b		%d3,FPSR_QBYTE(%a6)	# put Q in fpsr
+
+#
+Restore:
+	movm.l		(%sp)+,&0xfc		#  {%d2-%d7}
+	mov.l		(%sp)+,%d0
+	fmov.l		%d0,%fpcr
+	tst.b		Sc_Flag(%a6)
+	beq.b		Finish
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		Scale(%pc),%fp0		# may cause underflow
+	bra		t_catch2
+# the '040 package did this apparently to see if the dst operand for the
+# preceding fmul was a denorm. but, it better not have been since the
+# algorithm just got done playing with fp0 and expected no exceptions
+# as a result. trust me...
+#	bra		t_avoid_unsupp		# check for denorm as a
+#						;result of the scaling
+
+Finish:
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		%fp0,%fp0		# capture exceptions & round
+	bra		t_catch2
+
+Rem_is_0:
+#..R = 2^(-j)X - Q Y = Y, thus R = 0 and quotient = 2^j (Q+1)
+	addq.l		&1,%d3
+	cmp.l		%d0,&8			# D0 is j
+	bge.b		Q_Big
+
+	lsl.l		%d0,%d3
+	bra.b		Set_R_0
+
+Q_Big:
+	clr.l		%d3
+
+Set_R_0:
+	fmov.s		&0x00000000,%fp0
+	clr.b		Sc_Flag(%a6)
+	bra.w		Fix_Sign
+
+Tie_Case:
+#..Check parity of Q
+	mov.l		%d3,%d6
+	and.l		&0x00000001,%d6
+	tst.l		%d6
+	beq.w		Fix_Sign		# Q is even
+
+#..Q is odd, Q := Q + 1, signX := -signX
+	addq.l		&1,%d3
+	mov.w		SignX(%a6),%d6
+	eor.l		&0x00008000,%d6
+	mov.w		%d6,SignX(%a6)
+	bra.w		Fix_Sign
+
+qnan:	long		0x7fff0000, 0xffffffff, 0xffffffff
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_dz(): Handle DZ exception during transcendental emulation.	#
+#	        Sets N bit according to sign of source operand.		#
+#	t_dz2(): Handle DZ exception during transcendental emulation.	#
+#		 Sets N bit always.					#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to source operand					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	- Store properly signed INF into fp0.				#
+#	- Set FPSR exception status dz bit, ccode inf bit, and		#
+#	  accrued dz bit.						#
+#									#
+#########################################################################
+
+	global		t_dz
+t_dz:
+	tst.b		SRC_EX(%a0)		# is src negative?
+	bmi.b		t_dz2			# yes
+
+dz_pinf:
+	fmov.s		&0x7f800000,%fp0	# return +INF in fp0
+	ori.l		&dzinf_mask,USER_FPSR(%a6) # set I/DZ/ADZ
+	rts
+
+	global		t_dz2
+t_dz2:
+	fmov.s		&0xff800000,%fp0	# return -INF in fp0
+	ori.l		&dzinf_mask+neg_mask,USER_FPSR(%a6) # set N/I/DZ/ADZ
+	rts
+
+#################################################################
+# OPERR exception:						#
+#	- set FPSR exception status operr bit, condition code	#
+#	  nan bit; Store default NAN into fp0			#
+#################################################################
+	global		t_operr
+t_operr:
+	ori.l		&opnan_mask,USER_FPSR(%a6) # set NaN/OPERR/AIOP
+	fmovm.x		qnan(%pc),&0x80		# return default NAN in fp0
+	rts
+
+#################################################################
+# Extended DENORM:						#
+#	- For all functions that have a denormalized input and	#
+#	  that f(x)=x, this is the entry point.			#
+#	- we only return the EXOP here if either underflow or	#
+#	  inexact is enabled.					#
+#################################################################
+
+# Entry point for scale w/ extended denorm. The function does
+# NOT set INEX2/AUNFL/AINEX.
+	global		t_resdnrm
+t_resdnrm:
+	ori.l		&unfl_mask,USER_FPSR(%a6) # set UNFL
+	bra.b		xdnrm_con
+
+	global		t_extdnrm
+t_extdnrm:
+	ori.l		&unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
+
+xdnrm_con:
+	mov.l		%a0,%a1			# make copy of src ptr
+	mov.l		%d0,%d1			# make copy of rnd prec,mode
+	andi.b		&0xc0,%d1		# extended precision?
+	bne.b		xdnrm_sd		# no
+
+# result precision is extended.
+	tst.b		LOCAL_EX(%a0)		# is denorm negative?
+	bpl.b		xdnrm_exit		# no
+
+	bset		&neg_bit,FPSR_CC(%a6)	# yes; set 'N' ccode bit
+	bra.b		xdnrm_exit
+
+# result precision is single or double
+xdnrm_sd:
+	mov.l		%a1,-(%sp)
+	tst.b		LOCAL_EX(%a0)		# is denorm pos or neg?
+	smi.b		%d1			# set d1 accordingly
+	bsr.l		unf_sub
+	mov.l		(%sp)+,%a1
+xdnrm_exit:
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+
+	mov.b		FPCR_ENABLE(%a6),%d0
+	andi.b		&0x0a,%d0		# is UNFL or INEX enabled?
+	bne.b		xdnrm_ena		# yes
+	rts
+
+################
+# unfl enabled #
+################
+# we have a DENORM that needs to be converted into an EXOP.
+# so, normalize the mantissa, add 0x6000 to the new exponent,
+# and return the result in fp1.
+xdnrm_ena:
+	mov.w		LOCAL_EX(%a1),FP_SCR0_EX(%a6)
+	mov.l		LOCAL_HI(%a1),FP_SCR0_HI(%a6)
+	mov.l		LOCAL_LO(%a1),FP_SCR0_LO(%a6)
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize mantissa
+	addi.l		&0x6000,%d0		# add extra bias
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# keep old sign
+	or.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#################################################################
+# UNFL exception:						#
+#	- This routine is for cases where even an EXOP isn't	#
+#	  large enough to hold the range of this result.	#
+#	  In such a case, the EXOP equals zero.			#
+#	- Return the default result to the proper precision	#
+#	  with the sign of this result being the same as that	#
+#	  of the src operand.					#
+#	- t_unfl2() is provided to force the result sign to	#
+#	  positive which is the desired result for fetox().	#
+#################################################################
+	global		t_unfl
+t_unfl:
+	ori.l		&unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
+
+	tst.b		(%a0)			# is result pos or neg?
+	smi.b		%d1			# set d1 accordingly
+	bsr.l		unf_sub			# calc default unfl result
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+
+	fmov.s		&0x00000000,%fp1	# return EXOP in fp1
+	rts
+
+# t_unfl2 ALWAYS tells unf_sub to create a positive result
+	global		t_unfl2
+t_unfl2:
+	ori.l		&unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
+
+	sf.b		%d1			# set d1 to represent positive
+	bsr.l		unf_sub			# calc default unfl result
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+
+	fmov.s		&0x00000000,%fp1	# return EXOP in fp1
+	rts
+
+#################################################################
+# OVFL exception:						#
+#	- This routine is for cases where even an EXOP isn't	#
+#	  large enough to hold the range of this result.	#
+#	- Return the default result to the proper precision	#
+#	  with the sign of this result being the same as that	#
+#	  of the src operand.					#
+#	- t_ovfl2() is provided to force the result sign to	#
+#	  positive which is the desired result for fcosh().	#
+#	- t_ovfl_sc() is provided for scale() which only sets	#
+#	  the inexact bits if the number is inexact for the	#
+#	  precision indicated.					#
+#################################################################
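+
+# For reference, the "inexact for the indicated precision" test used by
+# t_ovfl_sc() below amounts to roughly this C sketch (illustrative only;
+# hi/lo are the two mantissa longwords of the extended-precision dst):
+#
+#	int inexact_for_prec(unsigned int hi, unsigned int lo, int is_dbl)
+#	{
+#		if (is_dbl)
+#			return (lo & 0x7ff) != 0; /* dbl keeps 53 of 64 bits */
+#		return lo != 0 || (hi & 0xff) != 0; /* sgl keeps 24 of 64    */
+#	}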
+
+	global		t_ovfl_sc
+t_ovfl_sc:
+	ori.l		&ovfl_inx_mask,USER_FPSR(%a6) # set OVFL/AOVFL/AINEX
+
+	mov.b		%d0,%d1			# fetch rnd mode/prec
+	andi.b		&0xc0,%d1		# extract rnd prec
+	beq.b		ovfl_work		# prec is extended
+
+	tst.b		LOCAL_HI(%a0)		# is dst a DENORM?
+	bmi.b		ovfl_sc_norm		# no
+
+# dst op is a DENORM. we have to normalize the mantissa to see if the
+# result would be inexact for the given precision. make a copy of the
+# dst so we don't screw up the version passed to us.
+	mov.w		LOCAL_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		LOCAL_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		LOCAL_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a0	# pass ptr to FP_SCR0
+	movm.l		&0xc080,-(%sp)		# save d0-d1/a0
+	bsr.l		norm			# normalize mantissa
+	movm.l		(%sp)+,&0x0103		# restore d0-d1/a0
+
+ovfl_sc_norm:
+	cmpi.b		%d1,&0x40		# is prec dbl?
+	bne.b		ovfl_sc_dbl		# no; sgl
+ovfl_sc_sgl:
+	tst.l		LOCAL_LO(%a0)		# is lo lw of sgl set?
+	bne.b		ovfl_sc_inx		# yes
+	tst.b		3+LOCAL_HI(%a0)		# is lo byte of hi lw set?
+	bne.b		ovfl_sc_inx		# yes
+	bra.b		ovfl_work		# don't set INEX2
+ovfl_sc_dbl:
+	mov.l		LOCAL_LO(%a0),%d1	# are any of lo 11 bits of
+	andi.l		&0x7ff,%d1		# dbl mantissa set?
+	beq.b		ovfl_work		# no; don't set INEX2
+ovfl_sc_inx:
+	ori.l		&inex2_mask,USER_FPSR(%a6) # set INEX2
+	bra.b		ovfl_work		# continue
+
+	global		t_ovfl
+t_ovfl:
+	ori.l		&ovfinx_mask,USER_FPSR(%a6) # set OVFL/INEX2/AOVFL/AINEX
+
+ovfl_work:
+	tst.b		LOCAL_EX(%a0)		# what is the sign?
+	smi.b		%d1			# set d1 accordingly
+	bsr.l		ovf_res			# calc default ovfl result
+	mov.b		%d0,FPSR_CC(%a6)	# insert new ccodes
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+
+	fmov.s		&0x00000000,%fp1	# return EXOP in fp1
+	rts
+
+# t_ovfl2 ALWAYS tells ovf_res to create a positive result
+	global		t_ovfl2
+t_ovfl2:
+	ori.l		&ovfinx_mask,USER_FPSR(%a6) # set OVFL/INEX2/AOVFL/AINEX
+
+	sf.b		%d1			# clear sign flag for positive
+	bsr.l		ovf_res			# calc default ovfl result
+	mov.b		%d0,FPSR_CC(%a6)	# insert new ccodes
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+
+	fmov.s		&0x00000000,%fp1	# return EXOP in fp1
+	rts
+
+#################################################################
+# t_catch():							#
+#	- the last operation of a transcendental emulation	#
+#	  routine may have caused an underflow or overflow.	#
+#	  we find out if this occurred by doing an fsave and	#
+#	  checking the exception bit. if one did occur, then we	#
+#	  jump to fgen_except() which creates the default	#
+#	  result and EXOP for us.				#
+#################################################################
+	global		t_catch
+t_catch:
+
+	fsave		-(%sp)
+	tst.b		0x2(%sp)
+	bmi.b		catch
+	add.l		&0xc,%sp
+
+#################################################################
+# INEX2 exception:						#
+#	- The inex2 and ainex bits are set.			#
+#################################################################
+	global		t_inx2
+t_inx2:
+	fblt.w		t_minx2
+	fbeq.w		inx2_zero
+
+	global		t_pinx2
+t_pinx2:
+	ori.w		&inx2a_mask,2+USER_FPSR(%a6) # set INEX2/AINEX
+	rts
+
+	global		t_minx2
+t_minx2:
+	ori.l		&inx2a_mask+neg_mask,USER_FPSR(%a6) # set N/INEX2/AINEX
+	rts
+
+inx2_zero:
+	mov.b		&z_bmask,FPSR_CC(%a6)
+	ori.w		&inx2a_mask,2+USER_FPSR(%a6) # set INEX2/AINEX
+	rts
+
+# an underflow or overflow exception occurred.
+# we must set INEX/AINEX since the fmul/fdiv/fmov emulation may not!
+catch:
+	ori.w		&inx2a_mask,FPSR_EXCEPT(%a6)
+catch2:
+	bsr.l		fgen_except
+	add.l		&0xc,%sp
+	rts
+
+	global		t_catch2
+t_catch2:
+
+	fsave		-(%sp)
+
+	tst.b		0x2(%sp)
+	bmi.b		catch2
+	add.l		&0xc,%sp
+
+	fmov.l		%fpsr,%d0
+	or.l		%d0,USER_FPSR(%a6)
+
+	rts
+
+#########################################################################
+
+#########################################################################
+# unf_sub(): underflow default result calculation for transcendentals	#
+#									#
+# INPUT:								#
+#	d0   : rnd mode,precision					#
+#	d1.b : sign bit of result ('11111111 = (-) ; '00000000 = (+))	#
+# OUTPUT:								#
+#	a0   : points to result (in instruction memory)			#
+#########################################################################
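+
+# For reference, the table index built below packs {sign, rnd prec, rnd mode}
+# into five bits; roughly the following C sketch (illustrative only):
+#
+#	int unf_index(int neg, int prec, int mode)
+#	{
+#		/* bit 4 = sign, bits 3-2 = precision, bits 1-0 = mode;     */
+#		/* selects a ccode byte and a 16-byte default result below  */
+#		return (neg ? 0x10 : 0x00) | (prec << 2) | mode;
+#	}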
+unf_sub:
+	ori.l		&unfinx_mask,USER_FPSR(%a6)
+
+	andi.w		&0x10,%d1		# keep sign bit in 4th spot
+
+	lsr.b		&0x4,%d0		# shift rnd prec,mode to lo bits
+	andi.b		&0xf,%d0		# strip hi rnd mode bit
+	or.b		%d1,%d0			# concat {sgn,mode,prec}
+
+	mov.l		%d0,%d1			# make a copy
+	lsl.b		&0x1,%d1		# multiply index by 2 (16-byte entries)
+
+	mov.b		(tbl_unf_cc.b,%pc,%d0.w*1),FPSR_CC(%a6) # insert ccode bits
+	lea		(tbl_unf_result.b,%pc,%d1.w*8),%a0 # grab result ptr
+	rts
+
+tbl_unf_cc:
+	byte		0x4, 0x4, 0x4, 0x0
+	byte		0x4, 0x4, 0x4, 0x0
+	byte		0x4, 0x4, 0x4, 0x0
+	byte		0x0, 0x0, 0x0, 0x0
+	byte		0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
+	byte		0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
+	byte		0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
+
+tbl_unf_result:
+	long		0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+	long		0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+	long		0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+	long		0x00000000, 0x00000000, 0x00000001, 0x0 # MIN; ext
+
+	long		0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+	long		0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+	long		0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+	long		0x3f810000, 0x00000100, 0x00000000, 0x0 # MIN; sgl
+
+	long		0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+	long		0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+	long		0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+	long		0x3c010000, 0x00000000, 0x00000800, 0x0 # MIN; dbl
+
+	long		0x0,0x0,0x0,0x0
+	long		0x0,0x0,0x0,0x0
+	long		0x0,0x0,0x0,0x0
+	long		0x0,0x0,0x0,0x0
+
+	long		0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+	long		0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+	long		0x80000000, 0x00000000, 0x00000001, 0x0 # MIN; ext
+	long		0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+
+	long		0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+	long		0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+	long		0xbf810000, 0x00000100, 0x00000000, 0x0 # MIN; sgl
+	long		0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+
+	long		0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+	long		0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+	long		0xbc010000, 0x00000000, 0x00000800, 0x0 # MIN; dbl
+	long		0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+
+############################################################
+
+#########################################################################
+# src_zero(): Return signed zero according to sign of src operand.	#
+#########################################################################
+	global		src_zero
+src_zero:
+	tst.b		SRC_EX(%a0)		# get sign of src operand
+	bmi.b		ld_mzero		# if neg, load neg zero
+
+#
+# ld_pzero(): return a positive zero.
+#
+	global		ld_pzero
+ld_pzero:
+	fmov.s		&0x00000000,%fp0	# load +0
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+
+# ld_mzero(): return a negative zero.
+	global		ld_mzero
+ld_mzero:
+	fmov.s		&0x80000000,%fp0	# load -0
+	mov.b		&neg_bmask+z_bmask,FPSR_CC(%a6) # set 'N','Z' ccode bits
+	rts
+
+#########################################################################
+# dst_zero(): Return signed zero according to sign of dst operand.	#
+#########################################################################
+	global		dst_zero
+dst_zero:
+	tst.b		DST_EX(%a1)		# get sign of dst operand
+	bmi.b		ld_mzero		# if neg, load neg zero
+	bra.b		ld_pzero		# load positive zero
+
+#########################################################################
+# src_inf(): Return signed inf according to sign of src operand.	#
+#########################################################################
+	global		src_inf
+src_inf:
+	tst.b		SRC_EX(%a0)		# get sign of src operand
+	bmi.b		ld_minf			# if negative branch
+
+#
+# ld_pinf(): return a positive infinity.
+#
+	global		ld_pinf
+ld_pinf:
+	fmov.s		&0x7f800000,%fp0	# load +INF
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'INF' ccode bit
+	rts
+
+#
+# ld_minf():return a negative infinity.
+#
+	global		ld_minf
+ld_minf:
+	fmov.s		&0xff800000,%fp0	# load -INF
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+	rts
+
+#########################################################################
+# dst_inf(): Return signed inf according to sign of dst operand.	#
+#########################################################################
+	global		dst_inf
+dst_inf:
+	tst.b		DST_EX(%a1)		# get sign of dst operand
+	bmi.b		ld_minf			# if negative branch
+	bra.b		ld_pinf
+
+	global		szr_inf
+#################################################################
+# szr_inf(): Return +ZERO for a negative src operand or		#
+#	            +INF for a positive src operand.		#
+#	     Routine used for fetox, ftwotox, and ftentox.	#
+#################################################################
+szr_inf:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_pzero
+	bra.b		ld_pinf
+
+#########################################################################
+# sopr_inf(): Return +INF for a positive src operand or			#
+#	      jump to operand error routine for a negative src operand.	#
+#	      Routine used for flogn, flognp1, flog10, and flog2.	#
+#########################################################################
+	global		sopr_inf
+sopr_inf:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.w		t_operr
+	bra.b		ld_pinf
+
+#################################################################
+# setoxm1i(): Return minus one for a negative src operand or	#
+#	      positive infinity for a positive src operand.	#
+#	      Routine used for fetoxm1.				#
+#################################################################
+	global		setoxm1i
+setoxm1i:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_mone
+	bra.b		ld_pinf
+
+#########################################################################
+# src_one(): Return signed one according to sign of src operand.	#
+#########################################################################
+	global		src_one
+src_one:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_mone
+
+#
+# ld_pone(): return positive one.
+#
+	global		ld_pone
+ld_pone:
+	fmov.s		&0x3f800000,%fp0	# load +1
+	clr.b		FPSR_CC(%a6)
+	rts
+
+#
+# ld_mone(): return negative one.
+#
+	global		ld_mone
+ld_mone:
+	fmov.s		&0xbf800000,%fp0	# load -1
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+ppiby2:	long		0x3fff0000, 0xc90fdaa2, 0x2168c235
+mpiby2:	long		0xbfff0000, 0xc90fdaa2, 0x2168c235
+
+#################################################################
+# spi_2(): Return signed PI/2 according to sign of src operand.	#
+#################################################################
+	global		spi_2
+spi_2:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_mpi2
+
+#
+# ld_ppi2(): return positive PI/2.
+#
+	global		ld_ppi2
+ld_ppi2:
+	fmov.l		%d0,%fpcr
+	fmov.x		ppiby2(%pc),%fp0	# load +pi/2
+	bra.w		t_pinx2			# set INEX2
+
+#
+# ld_mpi2(): return negative PI/2.
+#
+	global		ld_mpi2
+ld_mpi2:
+	fmov.l		%d0,%fpcr
+	fmov.x		mpiby2(%pc),%fp0	# load -pi/2
+	bra.w		t_minx2			# set INEX2
+
+####################################################
+# The following routines give support for fsincos. #
+####################################################
+
+#
+# ssincosz(): When the src operand is ZERO, store a one in the
+#	      cosine register and return a ZERO in fp0 w/ the same sign
+#	      as the src operand.
+#
+	global		ssincosz
+ssincosz:
+	fmov.s		&0x3f800000,%fp1
+	tst.b		SRC_EX(%a0)		# test sign
+	bpl.b		sincoszp
+	fmov.s		&0x80000000,%fp0	# return sin result in fp0
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)
+	bra.b		sto_cos			# store cosine result
+sincoszp:
+	fmov.s		&0x00000000,%fp0	# return sin result in fp0
+	mov.b		&z_bmask,FPSR_CC(%a6)
+	bra.b		sto_cos			# store cosine result
+
+#
+# ssincosi(): When the src operand is INF, store a QNAN in the cosine
+#	      register and jump to the operand error routine for negative
+#	      src operands.
+#
+	global		ssincosi
+ssincosi:
+	fmov.x		qnan(%pc),%fp1		# load NAN
+	bsr.l		sto_cos			# store cosine result
+	bra.w		t_operr
+
+#
+# ssincosqnan(): When the src operand is a QNAN, store the QNAN in the cosine
+#		 register and branch to the src QNAN routine.
+#
+	global		ssincosqnan
+ssincosqnan:
+	fmov.x		LOCAL_EX(%a0),%fp1
+	bsr.l		sto_cos
+	bra.w		src_qnan
+
+#
+# ssincossnan(): When the src operand is an SNAN, store the SNAN w/ the SNAN bit set
+#		 in the cosine register and branch to the src SNAN routine.
+#
+	global		ssincossnan
+ssincossnan:
+	fmov.x		LOCAL_EX(%a0),%fp1
+	bsr.l		sto_cos
+	bra.w		src_snan
+
+########################################################################
+
+#########################################################################
+# sto_cos(): store fp1 to the fpreg designated by the CMDREG dst field.	#
+#	     fp1 holds the result of the cosine portion of ssincos().	#
+#	     the value in fp1 will not take any exceptions when moved.	#
+# INPUT:								#
+#	fp1 : fp value to store						#
+# MODIFIED:								#
+#	d0								#
+#########################################################################
+	global		sto_cos
+sto_cos:
+	mov.b		1+EXC_CMDREG(%a6),%d0
+	andi.w		&0x7,%d0
+	mov.w		(tbl_sto_cos.b,%pc,%d0.w*2),%d0
+	jmp		(tbl_sto_cos.b,%pc,%d0.w*1)
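+# (roughly: goto *((char *)tbl_sto_cos + tbl_sto_cos[dst_reg]), where
+#  dst_reg is the low three bits of the CMDREG field fetched above)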
+
+tbl_sto_cos:
+	short		sto_cos_0 - tbl_sto_cos
+	short		sto_cos_1 - tbl_sto_cos
+	short		sto_cos_2 - tbl_sto_cos
+	short		sto_cos_3 - tbl_sto_cos
+	short		sto_cos_4 - tbl_sto_cos
+	short		sto_cos_5 - tbl_sto_cos
+	short		sto_cos_6 - tbl_sto_cos
+	short		sto_cos_7 - tbl_sto_cos
+
+sto_cos_0:
+	fmovm.x		&0x40,EXC_FP0(%a6)
+	rts
+sto_cos_1:
+	fmovm.x		&0x40,EXC_FP1(%a6)
+	rts
+sto_cos_2:
+	fmov.x		%fp1,%fp2
+	rts
+sto_cos_3:
+	fmov.x		%fp1,%fp3
+	rts
+sto_cos_4:
+	fmov.x		%fp1,%fp4
+	rts
+sto_cos_5:
+	fmov.x		%fp1,%fp5
+	rts
+sto_cos_6:
+	fmov.x		%fp1,%fp6
+	rts
+sto_cos_7:
+	fmov.x		%fp1,%fp7
+	rts
+
+##################################################################
+	global		smod_sdnrm
+	global		smod_snorm
+smod_sdnrm:
+smod_snorm:
+	mov.b		DTAG(%a6),%d1
+	beq.l		smod
+	cmpi.b		%d1,&ZERO
+	beq.w		smod_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		smod
+	cmpi.b		%d1,&SNAN
+	beq.l		dst_snan
+	bra.l		dst_qnan
+
+	global		smod_szero
+smod_szero:
+	mov.b		DTAG(%a6),%d1
+	beq.l		t_operr
+	cmpi.b		%d1,&ZERO
+	beq.l		t_operr
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		t_operr
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+	global		smod_sinf
+smod_sinf:
+	mov.b		DTAG(%a6),%d1
+	beq.l		smod_fpn
+	cmpi.b		%d1,&ZERO
+	beq.l		smod_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		smod_fpn
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+smod_zro:
+srem_zro:
+	mov.b		SRC_EX(%a0),%d1		# get src sign
+	mov.b		DST_EX(%a1),%d0		# get dst sign
+	eor.b		%d0,%d1			# get qbyte sign
+	andi.b		&0x80,%d1
+	mov.b		%d1,FPSR_QBYTE(%a6)
+	tst.b		%d0
+	bpl.w		ld_pzero
+	bra.w		ld_mzero
+
+smod_fpn:
+srem_fpn:
+	clr.b		FPSR_QBYTE(%a6)
+	mov.l		%d0,-(%sp)
+	mov.b		SRC_EX(%a0),%d1		# get src sign
+	mov.b		DST_EX(%a1),%d0		# get dst sign
+	eor.b		%d0,%d1			# get qbyte sign
+	andi.b		&0x80,%d1
+	mov.b		%d1,FPSR_QBYTE(%a6)
+	cmpi.b		DTAG(%a6),&DENORM
+	bne.b		smod_nrm
+	lea		DST(%a1),%a0
+	mov.l		(%sp)+,%d0
+	bra		t_resdnrm
+smod_nrm:
+	fmov.l		(%sp)+,%fpcr
+	fmov.x		DST(%a1),%fp0
+	tst.b		DST_EX(%a1)
+	bmi.b		smod_nrm_neg
+	rts
+
+smod_nrm_neg:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode
+	rts
+
+#########################################################################
+	global		srem_snorm
+	global		srem_sdnrm
+srem_sdnrm:
+srem_snorm:
+	mov.b		DTAG(%a6),%d1
+	beq.l		srem
+	cmpi.b		%d1,&ZERO
+	beq.w		srem_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		srem
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+	global		srem_szero
+srem_szero:
+	mov.b		DTAG(%a6),%d1
+	beq.l		t_operr
+	cmpi.b		%d1,&ZERO
+	beq.l		t_operr
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		t_operr
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+	global		srem_sinf
+srem_sinf:
+	mov.b		DTAG(%a6),%d1
+	beq.w		srem_fpn
+	cmpi.b		%d1,&ZERO
+	beq.w		srem_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		srem_fpn
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+#########################################################################
+	global		sscale_snorm
+	global		sscale_sdnrm
+sscale_snorm:
+sscale_sdnrm:
+	mov.b		DTAG(%a6),%d1
+	beq.l		sscale
+	cmpi.b		%d1,&ZERO
+	beq.l		dst_zero
+	cmpi.b		%d1,&INF
+	beq.l		dst_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		sscale
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+	global		sscale_szero
+sscale_szero:
+	mov.b		DTAG(%a6),%d1
+	beq.l		sscale
+	cmpi.b		%d1,&ZERO
+	beq.l		dst_zero
+	cmpi.b		%d1,&INF
+	beq.l		dst_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		sscale
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+	global		sscale_sinf
+sscale_sinf:
+	mov.b		DTAG(%a6),%d1
+	beq.l		t_operr
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	cmpi.b		%d1,&SNAN
+	beq.l		dst_snan
+	bra.l		t_operr
+
+########################################################################
+
+#
+# sop_sqnan(): The src op for frem/fmod/fscale was a QNAN.
+#
+	global		sop_sqnan
+sop_sqnan:
+	mov.b		DTAG(%a6),%d1
+	cmpi.b		%d1,&QNAN
+	beq.b		dst_qnan
+	cmpi.b		%d1,&SNAN
+	beq.b		dst_snan
+	bra.b		src_qnan
+
+#
+# sop_ssnan(): The src op for frem/fmod/fscale was an SNAN.
+#
+	global		sop_ssnan
+sop_ssnan:
+	mov.b		DTAG(%a6),%d1
+	cmpi.b		%d1,&QNAN
+	beq.b		dst_qnan_src_snan
+	cmpi.b		%d1,&SNAN
+	beq.b		dst_snan
+	bra.b		src_snan
+
+dst_qnan_src_snan:
+	ori.l		&snaniop_mask,USER_FPSR(%a6) # set NAN/SNAN/AIOP
+	bra.b		dst_qnan
+
+#
+# dst_snan(): Return the dst SNAN w/ the SNAN bit set.
+#
+	global		dst_snan
+dst_snan:
+	fmov.x		DST(%a1),%fp0		# the fmove sets the SNAN bit
+	fmov.l		%fpsr,%d0		# catch resulting status
+	or.l		%d0,USER_FPSR(%a6)	# store status
+	rts
+
+#
+# dst_qnan(): Return the dst QNAN.
+#
+	global		dst_qnan
+dst_qnan:
+	fmov.x		DST(%a1),%fp0		# return the non-signalling nan
+	tst.b		DST_EX(%a1)		# set ccodes according to QNAN sign
+	bmi.b		dst_qnan_m
+dst_qnan_p:
+	mov.b		&nan_bmask,FPSR_CC(%a6)
+	rts
+dst_qnan_m:
+	mov.b		&neg_bmask+nan_bmask,FPSR_CC(%a6)
+	rts
+
+#
+# src_snan(): Return the src SNAN w/ the SNAN bit set.
+#
+	global		src_snan
+src_snan:
+	fmov.x		SRC(%a0),%fp0		# the fmove sets the SNAN bit
+	fmov.l		%fpsr,%d0		# catch resulting status
+	or.l		%d0,USER_FPSR(%a6)	# store status
+	rts
+
+#
+# src_qnan(): Return the src QNAN.
+#
+	global		src_qnan
+src_qnan:
+	fmov.x		SRC(%a0),%fp0		# return the non-signalling nan
+	tst.b		SRC_EX(%a0)		# set ccodes according to QNAN sign
+	bmi.b		dst_qnan_m
+src_qnan_p:
+	mov.b		&nan_bmask,FPSR_CC(%a6)
+	rts
+src_qnan_m:
+	mov.b		&neg_bmask+nan_bmask,FPSR_CC(%a6)
+	rts
+
+#
+# fkern2.s:
+#	These entry points are used by the exception handler
+# routines where an instruction is selected by an index into
+# a large jump table corresponding to a given instruction which
+# has been decoded. Flow continues here where we now decode
+# further according to the source operand type.
+#
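+# Each entry point below follows the same pattern, roughly:
+#
+#	switch (STAG) {
+#	case NORM:   branch to the emulation routine (e.g. ssinh);
+#	case ZERO:
+#	case INF:    return the special-case result directly;
+#	case DENORM: branch to the denorm variant (e.g. ssinhd);
+#	case QNAN:   return the source QNAN (src_qnan);
+#	default:     treat the source as an SNAN (src_snan);
+#	}
+#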
+
+	global		fsinh
+fsinh:
+	mov.b		STAG(%a6),%d1
+	beq.l		ssinh
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		src_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		ssinhd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		flognp1
+flognp1:
+	mov.b		STAG(%a6),%d1
+	beq.l		slognp1
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		sopr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		slognp1d
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fetoxm1
+fetoxm1:
+	mov.b		STAG(%a6),%d1
+	beq.l		setoxm1
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		setoxm1i
+	cmpi.b		%d1,&DENORM
+	beq.l		setoxm1d
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		ftanh
+ftanh:
+	mov.b		STAG(%a6),%d1
+	beq.l		stanh
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		src_one
+	cmpi.b		%d1,&DENORM
+	beq.l		stanhd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fatan
+fatan:
+	mov.b		STAG(%a6),%d1
+	beq.l		satan
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		spi_2
+	cmpi.b		%d1,&DENORM
+	beq.l		satand
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fasin
+fasin:
+	mov.b		STAG(%a6),%d1
+	beq.l		sasin
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		sasind
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fatanh
+fatanh:
+	mov.b		STAG(%a6),%d1
+	beq.l		satanh
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		satanhd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fsine
+fsine:
+	mov.b		STAG(%a6),%d1
+	beq.l		ssin
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		ssind
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		ftan
+ftan:
+	mov.b		STAG(%a6),%d1
+	beq.l		stan
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		stand
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fetox
+fetox:
+	mov.b		STAG(%a6),%d1
+	beq.l		setox
+	cmpi.b		%d1,&ZERO
+	beq.l		ld_pone
+	cmpi.b		%d1,&INF
+	beq.l		szr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		setoxd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		ftwotox
+ftwotox:
+	mov.b		STAG(%a6),%d1
+	beq.l		stwotox
+	cmpi.b		%d1,&ZERO
+	beq.l		ld_pone
+	cmpi.b		%d1,&INF
+	beq.l		szr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		stwotoxd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		ftentox
+ftentox:
+	mov.b		STAG(%a6),%d1
+	beq.l		stentox
+	cmpi.b		%d1,&ZERO
+	beq.l		ld_pone
+	cmpi.b		%d1,&INF
+	beq.l		szr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		stentoxd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		flogn
+flogn:
+	mov.b		STAG(%a6),%d1
+	beq.l		slogn
+	cmpi.b		%d1,&ZERO
+	beq.l		t_dz2
+	cmpi.b		%d1,&INF
+	beq.l		sopr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		slognd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		flog10
+flog10:
+	mov.b		STAG(%a6),%d1
+	beq.l		slog10
+	cmpi.b		%d1,&ZERO
+	beq.l		t_dz2
+	cmpi.b		%d1,&INF
+	beq.l		sopr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		slog10d
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		flog2
+flog2:
+	mov.b		STAG(%a6),%d1
+	beq.l		slog2
+	cmpi.b		%d1,&ZERO
+	beq.l		t_dz2
+	cmpi.b		%d1,&INF
+	beq.l		sopr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		slog2d
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fcosh
+fcosh:
+	mov.b		STAG(%a6),%d1
+	beq.l		scosh
+	cmpi.b		%d1,&ZERO
+	beq.l		ld_pone
+	cmpi.b		%d1,&INF
+	beq.l		ld_pinf
+	cmpi.b		%d1,&DENORM
+	beq.l		scoshd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		facos
+facos:
+	mov.b		STAG(%a6),%d1
+	beq.l		sacos
+	cmpi.b		%d1,&ZERO
+	beq.l		ld_ppi2
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		sacosd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fcos
+fcos:
+	mov.b		STAG(%a6),%d1
+	beq.l		scos
+	cmpi.b		%d1,&ZERO
+	beq.l		ld_pone
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		scosd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fgetexp
+fgetexp:
+	mov.b		STAG(%a6),%d1
+	beq.l		sgetexp
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		sgetexpd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fgetman
+fgetman:
+	mov.b		STAG(%a6),%d1
+	beq.l		sgetman
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		sgetmand
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fsincos
+fsincos:
+	mov.b		STAG(%a6),%d1
+	beq.l		ssincos
+	cmpi.b		%d1,&ZERO
+	beq.l		ssincosz
+	cmpi.b		%d1,&INF
+	beq.l		ssincosi
+	cmpi.b		%d1,&DENORM
+	beq.l		ssincosd
+	cmpi.b		%d1,&QNAN
+	beq.l		ssincosqnan
+	bra.l		ssincossnan
+
+	global		fmod
+fmod:
+	mov.b		STAG(%a6),%d1
+	beq.l		smod_snorm
+	cmpi.b		%d1,&ZERO
+	beq.l		smod_szero
+	cmpi.b		%d1,&INF
+	beq.l		smod_sinf
+	cmpi.b		%d1,&DENORM
+	beq.l		smod_sdnrm
+	cmpi.b		%d1,&QNAN
+	beq.l		sop_sqnan
+	bra.l		sop_ssnan
+
+	global		frem
+frem:
+	mov.b		STAG(%a6),%d1
+	beq.l		srem_snorm
+	cmpi.b		%d1,&ZERO
+	beq.l		srem_szero
+	cmpi.b		%d1,&INF
+	beq.l		srem_sinf
+	cmpi.b		%d1,&DENORM
+	beq.l		srem_sdnrm
+	cmpi.b		%d1,&QNAN
+	beq.l		sop_sqnan
+	bra.l		sop_ssnan
+
+	global		fscale
+fscale:
+	mov.b		STAG(%a6),%d1
+	beq.l		sscale_snorm
+	cmpi.b		%d1,&ZERO
+	beq.l		sscale_szero
+	cmpi.b		%d1,&INF
+	beq.l		sscale_sinf
+	cmpi.b		%d1,&DENORM
+	beq.l		sscale_sdnrm
+	cmpi.b		%d1,&QNAN
+	beq.l		sop_sqnan
+	bra.l		sop_ssnan
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fgen_except(): catch an exception during transcendental		#
+#		       emulation					#
+#									#
+# XREF ****************************************************************	#
+#	fmul() - emulate a multiply instruction				#
+#	fadd() - emulate an add instruction				#
+#	fin() - emulate an fmove instruction				#
+#									#
+# INPUT ***************************************************************	#
+#	fp0 = destination operand					#
+#	d0  = type of instruction that took exception			#
+#	fsave frame = source operand					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	An exception occurred on the last instruction of the		#
+# transcendental emulation. hopefully, this won't be happening much	#
+# because it will be VERY slow.						#
+#	The only exceptions capable of passing through here are		#
+# Overflow, Underflow, and Unsupported Data Type.			#
+#									#
+#########################################################################
+
+	global		fgen_except
+fgen_except:
+	cmpi.b		0x3(%sp),&0x7		# is exception UNSUPP?
+	beq.b		fge_unsupp		# yes
+
+	mov.b		&NORM,STAG(%a6)
+
+fge_cont:
+	mov.b		&NORM,DTAG(%a6)
+
+# ok, I have a problem with putting the dst op at FP_DST. the emulation
+# routines aren't supposed to alter the operands but we've just squashed
+# FP_DST here...
+
+# 8/17/93 - this turns out to be more of a "cleanliness" issue
+# than a potential bug. to begin with, only the dyadic functions
+# frem, fmod, and fscale would get the dst trashed here. But, for
+# the 060SP, the FP_DST is never used again anyway.
+	fmovm.x		&0x80,FP_DST(%a6)	# dst op is in fp0
+
+	lea		0x4(%sp),%a0		# pass: ptr to src op
+	lea		FP_DST(%a6),%a1		# pass: ptr to dst op
+
+	cmpi.b		%d1,&FMOV_OP
+	beq.b		fge_fin			# it was an "fmov"
+	cmpi.b		%d1,&FADD_OP
+	beq.b		fge_fadd		# it was an "fadd"
+fge_fmul:
+	bsr.l		fmul
+	rts
+fge_fadd:
+	bsr.l		fadd
+	rts
+fge_fin:
+	bsr.l		fin
+	rts
+
+fge_unsupp:
+	mov.b		&DENORM,STAG(%a6)
+	bra.b		fge_cont
+
+#
+# This table holds the offsets of the emulation routines for each individual
+# math operation relative to the address of this table. Included are
+# routines like fadd/fmul/fabs as well as the transcendentals.
+# The location within the table is determined by the extension bits of the
+# operation longword.
+#
+
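+# Each entry is stored as a self-relative offset ("routine - tbl_unsupp"),
+# so the dispatcher fetches tbl_unsupp[index] and adds it back to the
+# table's address to reach the routine; unused slots hold zero
+# ("tbl_unsupp - tbl_unsupp") and so resolve to the table itself.
+# For example, index 0x02 holds (fsinh - tbl_unsupp), which resolves to
+# the fsinh entry point above.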
+	swbeg		&109
+tbl_unsupp:
+	long		fin		- tbl_unsupp	# 00: fmove
+	long		fint		- tbl_unsupp	# 01: fint
+	long		fsinh		- tbl_unsupp	# 02: fsinh
+	long		fintrz		- tbl_unsupp	# 03: fintrz
+	long		fsqrt		- tbl_unsupp	# 04: fsqrt
+	long		tbl_unsupp	- tbl_unsupp
+	long		flognp1		- tbl_unsupp	# 06: flognp1
+	long		tbl_unsupp	- tbl_unsupp
+	long		fetoxm1		- tbl_unsupp	# 08: fetoxm1
+	long		ftanh		- tbl_unsupp	# 09: ftanh
+	long		fatan		- tbl_unsupp	# 0a: fatan
+	long		tbl_unsupp	- tbl_unsupp
+	long		fasin		- tbl_unsupp	# 0c: fasin
+	long		fatanh		- tbl_unsupp	# 0d: fatanh
+	long		fsine		- tbl_unsupp	# 0e: fsin
+	long		ftan		- tbl_unsupp	# 0f: ftan
+	long		fetox		- tbl_unsupp	# 10: fetox
+	long		ftwotox		- tbl_unsupp	# 11: ftwotox
+	long		ftentox		- tbl_unsupp	# 12: ftentox
+	long		tbl_unsupp	- tbl_unsupp
+	long		flogn		- tbl_unsupp	# 14: flogn
+	long		flog10		- tbl_unsupp	# 15: flog10
+	long		flog2		- tbl_unsupp	# 16: flog2
+	long		tbl_unsupp	- tbl_unsupp
+	long		fabs		- tbl_unsupp	# 18: fabs
+	long		fcosh		- tbl_unsupp	# 19: fcosh
+	long		fneg		- tbl_unsupp	# 1a: fneg
+	long		tbl_unsupp	- tbl_unsupp
+	long		facos		- tbl_unsupp	# 1c: facos
+	long		fcos		- tbl_unsupp	# 1d: fcos
+	long		fgetexp		- tbl_unsupp	# 1e: fgetexp
+	long		fgetman		- tbl_unsupp	# 1f: fgetman
+	long		fdiv		- tbl_unsupp	# 20: fdiv
+	long		fmod		- tbl_unsupp	# 21: fmod
+	long		fadd		- tbl_unsupp	# 22: fadd
+	long		fmul		- tbl_unsupp	# 23: fmul
+	long		fsgldiv		- tbl_unsupp	# 24: fsgldiv
+	long		frem		- tbl_unsupp	# 25: frem
+	long		fscale		- tbl_unsupp	# 26: fscale
+	long		fsglmul		- tbl_unsupp	# 27: fsglmul
+	long		fsub		- tbl_unsupp	# 28: fsub
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsincos		- tbl_unsupp	# 30: fsincos
+	long		fsincos		- tbl_unsupp	# 31: fsincos
+	long		fsincos		- tbl_unsupp	# 32: fsincos
+	long		fsincos		- tbl_unsupp	# 33: fsincos
+	long		fsincos		- tbl_unsupp	# 34: fsincos
+	long		fsincos		- tbl_unsupp	# 35: fsincos
+	long		fsincos		- tbl_unsupp	# 36: fsincos
+	long		fsincos		- tbl_unsupp	# 37: fsincos
+	long		fcmp		- tbl_unsupp	# 38: fcmp
+	long		tbl_unsupp	- tbl_unsupp
+	long		ftst		- tbl_unsupp	# 3a: ftst
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsin		- tbl_unsupp	# 40: fsmove
+	long		fssqrt		- tbl_unsupp	# 41: fssqrt
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdin		- tbl_unsupp	# 44: fdmove
+	long		fdsqrt		- tbl_unsupp	# 45: fdsqrt
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsabs		- tbl_unsupp	# 58: fsabs
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsneg		- tbl_unsupp	# 5a: fsneg
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdabs		- tbl_unsupp	# 5c: fdabs
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdneg		- tbl_unsupp	# 5e: fdneg
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsdiv		- tbl_unsupp	# 60: fsdiv
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsadd		- tbl_unsupp	# 62: fsadd
+	long		fsmul		- tbl_unsupp	# 63: fsmul
+	long		fddiv		- tbl_unsupp	# 64: fddiv
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdadd		- tbl_unsupp	# 66: fdadd
+	long		fdmul		- tbl_unsupp	# 67: fdmul
+	long		fssub		- tbl_unsupp	# 68: fssub
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdsub		- tbl_unsupp	# 6c: fdsub
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fmul(): emulates the fmul instruction				#
+#	fsmul(): emulates the fsmul instruction				#
+#	fdmul(): emulates the fdmul instruction				#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res() - return default underflow result			#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a multiply	#
+# instruction won't cause an exception. Use the regular fmul to		#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
+
+	align		0x10
+tbl_fmul_ovfl:
+	long		0x3fff - 0x7ffe		# ext_max
+	long		0x3fff - 0x407e		# sgl_max
+	long		0x3fff - 0x43fe		# dbl_max
+tbl_fmul_unfl:
+	long		0x3fff + 0x0001		# ext_unfl
+	long		0x3fff - 0x3f80		# sgl_unfl
+	long		0x3fff - 0x3c00		# dbl_unfl
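+
+# The overflow entries above are "0x3fff minus the largest biased exponent"
+# representable in each rounding precision (0x7ffe ext, 0x407e sgl,
+# 0x43fe dbl); the underflow entries play the same role for the smallest
+# usable exponents. Both operands get scaled so their exponents become
+# 0x3fff, making the saved scale factor roughly
+# (0x3fff - e_src) + (0x3fff - e_dst); comparing it against these entries
+# (below the ovfl entry, above the unfl entry) tells whether the true
+# result exponent would lie outside the chosen precision.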
+
+	global		fsmul
+fsmul:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fmul
+
+	global		fdmul
+fdmul:
+	andi.b		&0x30,%d0
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fmul
+fmul:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+	bne.w		fmul_not_norm		# optimize on non-norm input
+
+fmul_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# scale src exponent
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# scale dst exponent
+
+	add.l		%d0,(%sp)		# SCALE_FACTOR = scale1 + scale2
+
+	mov.w		2+L_SCR3(%a6),%d1	# fetch precision
+	lsr.b		&0x6,%d1		# shift to lo bits
+	mov.l		(%sp)+,%d0		# load S.F.
+	cmp.l		%d0,(tbl_fmul_ovfl.w,%pc,%d1.w*4) # would result ovfl?
+	beq.w		fmul_may_ovfl		# result may rnd to overflow
+	blt.w		fmul_ovfl		# result will overflow
+
+	cmp.l		%d0,(tbl_fmul_unfl.w,%pc,%d1.w*4) # would result unfl?
+	beq.w		fmul_may_unfl		# result may rnd to no unfl
+	bgt.w		fmul_unfl		# result will underflow
+
+#
+# NORMAL:
+# - the result of the multiply operation will neither overflow nor underflow.
+# - do the multiply to the proper precision and rounding mode.
+# - scale the result exponent using the scale factor. if both operands were
+# normalized then we really don't need to go through this scaling. but for now,
+# this will do.
+#
+fmul_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fmul_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# OVERFLOW:
+# - the result of the multiply operation is an overflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+# - if overflow or inexact is enabled, we need a multiply result rounded to
+# extended precision. if the original operation was extended, then we have this
+# result. if the original operation was single or double, we have to do another
+# multiply using extended precision and the correct rounding mode. the result
+# of this operation then has its exponent scaled by -0x6000 to create the
+# exceptional operand.
+#
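+# (i.e. the EXOP is the extended-precision result with its exponent biased
+#  down: new_exp = (true_exp - 0x6000) & 0x7fff with the old sign OR'ed
+#  back in, as done in fmul_ovfl_ena_cont below)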
+fmul_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+# save setting this until now because this is where fmul_may_ovfl may jump in
+fmul_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fmul_ovfl_ena		# yes
+
+# calculate the default result
+fmul_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass rnd prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled; Create EXOP:
+# - if precision is extended, then we have the EXOP. simply bias the exponent
+# with an extra -0x6000. if the precision is single or double, we need to
+# calculate a result rounded to extended precision.
+#
+fmul_ovfl_ena:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# test the rnd prec
+	bne.b		fmul_ovfl_ena_sd	# it's sgl or dbl
+
+fmul_ovfl_ena_cont:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1		# clear sign bit
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fmul_ovfl_dis
+
+fmul_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# keep rnd mode only
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	bra.b		fmul_ovfl_ena_cont
+
+#
+# may OVERFLOW:
+# - the result of the multiply operation MAY overflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+#
+fmul_may_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fmul_ovfl_tst		# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fmul_normal_exit
+
+#
+# UNDERFLOW:
+# - the result of the multiply operation is an underflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+# - if overflow or inexact is enabled, we need a multiply result rounded to
+# extended precision. if the original operation was extended, then we have this
+# result. if the original operation was single or double, we have to do another
+# multiply using extended precision and the correct rounding mode. the result
+# of this operation then has its exponent scaled by -0x6000 to create the
+# exceptional operand.
+#
+fmul_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+# for fun, let's use only extended precision, round to zero. then, let
+# the unf_res() routine figure out all the rest.
+# that way we will get the correct answer.
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fmul_unfl_ena		# yes
+
+fmul_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# unf_res may have set 'Z'
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fmul_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fmul_unfl_ena_sd	# no, sgl or dbl
+
+# if the rnd mode is anything but RZ, then we have to re-do the above
+# multiplication because we used RZ for all.
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fmul_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp1	# execute multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fmul_unfl_dis
+
+fmul_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# use only rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fmul_unfl_ena_cont
+
+# MAY UNDERFLOW:
+# -use the correct rounding mode and precision. this code favors operations
+# that do not underflow.
+fmul_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| > 2.b?
+	fbgt.w		fmul_normal_exit	# no; no underflow occurred
+	fblt.w		fmul_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 2. but,
+# we don't know if the result was an underflow that rounded up to a 2 or
+# a normalized number that rounded down to a 2. so, redo the entire operation
+# using RZ as the rounding mode to see what the pre-rounded result is.
+# this case should be relatively rare.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst operand
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert RZ
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp1	# execute multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x2		# is |result| < 2.b?
+	fbge.w		fmul_normal_exit	# no; no underflow occurred
+	bra.w		fmul_unfl		# yes, underflow occurred
+
+################################################################################
+
+#
+# Multiply: inputs are not both normalized; what are they?
+#
+fmul_not_norm:
+	mov.w		(tbl_fmul_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fmul_op.b,%pc,%d1.w)
+
+	swbeg		&48
+tbl_fmul_op:
+	short		fmul_norm	- tbl_fmul_op # NORM x NORM
+	short		fmul_zero	- tbl_fmul_op # NORM x ZERO
+	short		fmul_inf_src	- tbl_fmul_op # NORM x INF
+	short		fmul_res_qnan	- tbl_fmul_op # NORM x QNAN
+	short		fmul_norm	- tbl_fmul_op # NORM x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # NORM x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_zero	- tbl_fmul_op # ZERO x NORM
+	short		fmul_zero	- tbl_fmul_op # ZERO x ZERO
+	short		fmul_res_operr	- tbl_fmul_op # ZERO x INF
+	short		fmul_res_qnan	- tbl_fmul_op # ZERO x QNAN
+	short		fmul_zero	- tbl_fmul_op # ZERO x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # ZERO x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_inf_dst	- tbl_fmul_op # INF x NORM
+	short		fmul_res_operr	- tbl_fmul_op # INF x ZERO
+	short		fmul_inf_dst	- tbl_fmul_op # INF x INF
+	short		fmul_res_qnan	- tbl_fmul_op # INF x QNAN
+	short		fmul_inf_dst	- tbl_fmul_op # INF x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # INF x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x NORM
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x ZERO
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x INF
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x QNAN
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # QNAN x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_norm	- tbl_fmul_op # NORM x NORM
+	short		fmul_zero	- tbl_fmul_op # NORM x ZERO
+	short		fmul_inf_src	- tbl_fmul_op # NORM x INF
+	short		fmul_res_qnan	- tbl_fmul_op # NORM x QNAN
+	short		fmul_norm	- tbl_fmul_op # NORM x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # NORM x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x NORM
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x ZERO
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x INF
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x QNAN
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+fmul_res_operr:
+	bra.l		res_operr
+fmul_res_snan:
+	bra.l		res_snan
+fmul_res_qnan:
+	bra.l		res_qnan
+
+#
+# Multiply: (Zero x Zero) || (Zero x norm) || (Zero x denorm)
+#
+	global		fmul_zero		# global for fsglmul
+fmul_zero:
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fmul_zero_p		# result ZERO is pos.
+fmul_zero_n:
+	fmov.s		&0x80000000,%fp0	# load -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
+	rts
+fmul_zero_p:
+	fmov.s		&0x00000000,%fp0	# load +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# Multiply: (inf x inf) || (inf x norm) || (inf x denorm)
+#
+# Note: The j-bit for an infinity is a don't-care. However, to be
+# strictly compatible w/ the 68881/882, we make sure to return an
+# INF w/ the j-bit set if the input INF j-bit was set. Destination
+# INFs take priority.
+#
+	global		fmul_inf_dst		# global for fsglmul
+fmul_inf_dst:
+	fmovm.x		DST(%a1),&0x80		# return INF result in fp0
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fmul_inf_dst_p		# result INF is pos.
+fmul_inf_dst_n:
+	fabs.x		%fp0			# clear result sign
+	fneg.x		%fp0			# set result sign
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
+	rts
+fmul_inf_dst_p:
+	fabs.x		%fp0			# clear result sign
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set INF
+	rts
+
+	global		fmul_inf_src		# global for fsglmul
+fmul_inf_src:
+	fmovm.x		SRC(%a0),&0x80		# return INF result in fp0
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fmul_inf_dst_p		# result INF is pos.
+	bra.b		fmul_inf_dst_n
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fin(): emulates the fmove instruction				#
+#	fsin(): emulates the fsmove instruction				#
+#	fdin(): emulates the fdmove instruction				#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize mantissa for EXOP on denorm			#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan_1op() - return QNAN result				#
+#	res_snan_1op() - return SNAN result				#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round prec/mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Norms can be emulated w/ a regular fmove instruction. For	#
+# sgl/dbl, must scale exponent and perform an "fmove". Check to see	#
+# if the result would have overflowed/underflowed. If so, use unf_res()	#
+# or ovf_res() to return the default result. Also return EXOP if	#
+# exception is enabled. If no exception, return the default result.	#
+#	Unnorms don't pass through here.				#
+#									#
+#########################################################################
+
+	global		fsin
+fsin:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fin
+
+	global		fdin
+fdin:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl precision
+
+	global		fin
+fin:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	mov.b		STAG(%a6),%d1		# fetch src optype tag
+	bne.w		fin_not_norm		# optimize on non-norm input
+
+#
+# FP MOVE IN: NORMs and DENORMs ONLY!
+#
+fin_norm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.w		fin_not_ext		# no, so go handle dbl or sgl
+
+#
+# precision selected is extended. so...we cannot get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+	tst.b		SRC_EX(%a0)		# is the operand negative?
+	bpl.b		fin_norm_done		# no
+	bset		&neg_bit,FPSR_CC(%a6)	# yes, so set 'N' ccode bit
+fin_norm_done:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance(no inexactness!)
+#
+fin_denorm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.w		fin_not_ext		# no, so go handle dbl or sgl
+
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+	tst.b		SRC_EX(%a0)		# is the operand negative?
+	bpl.b		fin_denorm_done		# no
+	bset		&neg_bit,FPSR_CC(%a6)	# yes, so set 'N' ccode bit
+fin_denorm_done:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	btst		&unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+	bne.b		fin_denorm_unfl_ena	# yes
+	rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
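+# (i.e. EXOP exponent = (0x6000 - shift count) & 0x7fff with the original
+#  sign, where the shift count is what norm() returns in %d0)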
+fin_denorm_unfl_ena:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	bsr.l		norm			# normalize result
+	neg.w		%d0			# new exponent = -(shft val)
+	addi.w		&0x6000,%d0		# add new bias to exponent
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch old sign,exp
+	andi.w		&0x8000,%d1		# keep old sign
+	andi.w		&0x7fff,%d0		# clear sign position
+	or.w		%d1,%d0			# concat new exp,old sign
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#
+# operand is to be rounded to single or double precision
+#
+fin_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.b		fin_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fin_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f80	# will move in underflow?
+	bge.w		fin_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407e	# will move in overflow?
+	beq.w		fin_sd_may_ovfl		# maybe; go check
+	blt.w		fin_sd_ovfl		# yes; go handle overflow
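+
+# (the scale factor from scale_to_zero_src() is roughly 0x3fff minus the
+#  operand's biased exponent, so 0x3fff-0x3f80 = 0x7f corresponds to
+#  exponents of 2^-127 and below, under the smallest sgl normal (2^-126),
+#  while 0x3fff-0x407e = -0x7f corresponds to the largest sgl exponent,
+#  2^+127)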
+
+#
+# operand will NOT overflow or underflow when moved into the fp reg file
+#
+fin_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# perform move
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fin_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exponent
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fin_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c00	# will move in underflow?
+	bge.w		fin_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43fe	# will move in overflow?
+	beq.w		fin_sd_may_ovfl		# maybe; go check
+	blt.w		fin_sd_ovfl		# yes; go handle overflow
+	bra.w		fin_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fin_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	tst.b		FP_SCR0_EX(%a6)		# is operand negative?
+	bpl.b		fin_sd_unfl_tst
+	bset		&neg_bit,FPSR_CC(%a6)	# set 'N' ccode bit
+
+# if underflow or inexact is enabled, then go calculate the EXOP first.
+fin_sd_unfl_tst:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fin_sd_unfl_ena		# yes
+
+fin_sd_unfl_dis:
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# unf_res may have set 'Z'
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow or inexact is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fin_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# subtract scale factor
+	andi.w		&0x8000,%d2		# extract old sign
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR1_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fin_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fin_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# perform move
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fin_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fin_sd_ovfl_ena		# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fin_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fin_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	sub.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fin_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fin_sd_may_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# perform the move
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fin_sd_ovfl_tst		# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fin_sd_normal_exit
+
+##########################################################################
+
+#
+# operand is not a NORM: check its optype and branch accordingly
+#
+fin_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fin_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNANs
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNANs
+	beq.l		res_qnan_1op
+
+#
+# do the fmove in; at this point, only possible ops are ZERO and INF.
+# use fmov to determine ccodes.
+# prec:mode should be zero at this point but it won't affect answer anyway.
+#
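+# (the FPSR condition codes live in the top byte, bits 31-24, so the
+#  rol.l &0x8 below rotates them into the low byte before they are copied
+#  into FPSR_CC)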
+	fmov.x		SRC(%a0),%fp0		# do fmove in
+	fmov.l		%fpsr,%d0		# no exceptions possible
+	rol.l		&0x8,%d0		# put ccodes in lo byte
+	mov.b		%d0,FPSR_CC(%a6)	# insert correct ccodes
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fdiv(): emulates the fdiv instruction				#
+#	fsdiv(): emulates the fsdiv instruction				#
+#	fddiv(): emulates the fddiv instruction				#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res() - return default underflow result			#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a divide	#
+# instruction won't cause an exception. Use the regular fdiv to		#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
+
+	align		0x10
+tbl_fdiv_unfl:
+	long		0x3fff - 0x0000		# ext_unfl
+	long		0x3fff - 0x3f81		# sgl_unfl
+	long		0x3fff - 0x3c01		# dbl_unfl
+
+tbl_fdiv_ovfl:
+	long		0x3fff - 0x7ffe		# ext overflow exponent
+	long		0x3fff - 0x407e		# sgl overflow exponent
+	long		0x3fff - 0x43fe		# dbl overflow exponent
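+
+# As with the fmul tables above, these hold "0x3fff minus the extreme
+# biased exponent" for each rounding precision. For a divide the combined
+# scale factor works out to roughly (e_src - e_dst), so a value at or
+# below the overflow entry means the quotient's true exponent may exceed
+# the maximum, and a value at or above the underflow entry means it may
+# fall below the minimum.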
+
+	global		fsdiv
+fsdiv:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fdiv
+
+	global		fddiv
+fddiv:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fdiv
+fdiv:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fdiv_not_norm		# optimize on non-norm input
+
+#
+# DIVIDE: NORMs and DENORMs ONLY!
+#
+fdiv_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# scale src exponent
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# scale dst exponent
+
+	neg.l		(%sp)			# SCALE FACTOR = scale2 - scale1
+	add.l		%d0,(%sp)
+
+	mov.w		2+L_SCR3(%a6),%d1	# fetch precision
+	lsr.b		&0x6,%d1		# shift to lo bits
+	mov.l		(%sp)+,%d0		# load S.F.
+	cmp.l		%d0,(tbl_fdiv_ovfl.b,%pc,%d1.w*4) # will result overflow?
+	ble.w		fdiv_may_ovfl		# result may overflow
+
+	cmp.l		%d0,(tbl_fdiv_unfl.w,%pc,%d1.w*4) # will result underflow?
+	beq.w		fdiv_may_unfl		# maybe
+	bgt.w		fdiv_unfl		# yes; go handle underflow
+
+fdiv_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# perform divide
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fdiv_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store result on stack
+	mov.l		%d2,-(%sp)		# store d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+tbl_fdiv_ovfl2:
+	long		0x7fff
+	long		0x407f
+	long		0x43ff
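+
+# note: 0x7fff, 0x407f and 0x43ff are one above the largest finite biased
+# exponents for ext/sgl/dbl, i.e. the first exponent values that overflow
+# each rounding precision.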
+
+fdiv_no_ovfl:
+	mov.l		(%sp)+,%d0		# restore scale factor
+	bra.b		fdiv_normal_exit
+
+fdiv_may_ovfl:
+	mov.l		%d0,-(%sp)		# save scale factor
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d0
+	fmov.l		&0x0,%fpcr
+
+	or.l		%d0,USER_FPSR(%a6)	# save INEX,N
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+	mov.w		(%sp),%d0		# fetch new exponent
+	add.l		&0xc,%sp		# clear result from stack
+	andi.l		&0x7fff,%d0		# strip sign
+	sub.l		(%sp),%d0		# add scale factor
+	cmp.l		%d0,(tbl_fdiv_ovfl2.b,%pc,%d1.w*4)
+	blt.b		fdiv_no_ovfl
+	mov.l		(%sp)+,%d0
+
+fdiv_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fdiv_ovfl_ena		# yes
+
+fdiv_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+fdiv_ovfl_ena:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fdiv_ovfl_ena_sd	# no, do sgl or dbl
+
+fdiv_ovfl_ena_cont:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1		# clear sign bit
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fdiv_ovfl_dis
+
+fdiv_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# keep rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	bra.b		fdiv_ovfl_ena_cont
+
+fdiv_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fdiv_unfl_ena		# yes
+
+fdiv_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fdiv_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fdiv_unfl_ena_sd	# no, sgl or dbl
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fdiv_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp1	# execute divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exp
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fdiv_unfl_dis
+
+fdiv_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# use only rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fdiv_unfl_ena_cont
+
+#
+# the divide operation MAY underflow:
+#
+fdiv_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x1		# is |result| > 1.b?
+	fbgt.w		fdiv_normal_exit	# no; no underflow occurred
+	fblt.w		fdiv_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 1. but,
+# we don't know if the result was an underflow that rounded up to a 1
+# or a normalized number that rounded down to a 1. so, redo the entire
+# operation using RZ as the rounding mode to see what the pre-rounded
+# result is. this case should be relatively rare.
+#
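+# rounding toward zero never increases the magnitude, so if the RZ result
+# is still >= 1 the pre-rounded result cannot have underflowed.
+#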
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert RZ
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp1	# execute divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x1		# is |result| < 1.b?
+	fbge.w		fdiv_normal_exit	# no; no underflow occurred
+	bra.w		fdiv_unfl		# yes; underflow occurred
+
+############################################################################
+
+#
+# Divide: inputs are not both normalized; what are they?
+#
+fdiv_not_norm:
+	mov.w		(tbl_fdiv_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fdiv_op.b,%pc,%d1.w*1)
+
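+# the table below is indexed by (DTAG<<3)|STAG, so each group of eight
+# entries corresponds to one destination tag and each entry within a group
+# to one source tag; the comments read as "DST / SRC".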
+	swbeg		&48
+tbl_fdiv_op:
+	short		fdiv_norm	- tbl_fdiv_op # NORM / NORM
+	short		fdiv_inf_load	- tbl_fdiv_op # NORM / ZERO
+	short		fdiv_zero_load	- tbl_fdiv_op # NORM / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # NORM / QNAN
+	short		fdiv_norm	- tbl_fdiv_op # NORM / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # NORM / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_zero_load	- tbl_fdiv_op # ZERO / NORM
+	short		fdiv_res_operr	- tbl_fdiv_op # ZERO / ZERO
+	short		fdiv_zero_load	- tbl_fdiv_op # ZERO / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # ZERO / QNAN
+	short		fdiv_zero_load	- tbl_fdiv_op # ZERO / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # ZERO / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_inf_dst	- tbl_fdiv_op # INF / NORM
+	short		fdiv_inf_dst	- tbl_fdiv_op # INF / ZERO
+	short		fdiv_res_operr	- tbl_fdiv_op # INF / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # INF / QNAN
+	short		fdiv_inf_dst	- tbl_fdiv_op # INF / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # INF / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / NORM
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / ZERO
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / QNAN
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # QNAN / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_norm	- tbl_fdiv_op # DENORM / NORM
+	short		fdiv_inf_load	- tbl_fdiv_op # DENORM / ZERO
+	short		fdiv_zero_load	- tbl_fdiv_op # DENORM / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # DENORM / QNAN
+	short		fdiv_norm	- tbl_fdiv_op # DENORM / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # DENORM / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / NORM
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / ZERO
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / INF
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / QNAN
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+fdiv_res_qnan:
+	bra.l		res_qnan
+fdiv_res_snan:
+	bra.l		res_snan
+fdiv_res_operr:
+	bra.l		res_operr
+
+	global		fdiv_zero_load		# global for fsgldiv
+fdiv_zero_load:
+	mov.b		SRC_EX(%a0),%d0		# result sign is exclusive
+	mov.b		DST_EX(%a1),%d1		# or of input signs.
+	eor.b		%d0,%d1
+	bpl.b		fdiv_zero_load_p	# result is positive
+	fmov.s		&0x80000000,%fp0	# load a -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set Z/N
+	rts
+fdiv_zero_load_p:
+	fmov.s		&0x00000000,%fp0	# load a +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# The destination was In Range and the source was a ZERO. The result,
+# therefore, is an INF w/ the proper sign.
+# So, determine the sign and return a new INF (w/ the j-bit cleared).
+#
+	global		fdiv_inf_load		# global for fsgldiv
+fdiv_inf_load:
+	ori.w		&dz_mask+adz_mask,2+USER_FPSR(%a6) # no; set DZ/ADZ
+	mov.b		SRC_EX(%a0),%d0		# load both signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fdiv_inf_load_p		# result is positive
+	fmov.s		&0xff800000,%fp0	# make result -INF
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
+	rts
+fdiv_inf_load_p:
+	fmov.s		&0x7f800000,%fp0	# make result +INF
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set INF
+	rts
+
+#
+# The destination was an INF w/ an In Range or ZERO source, the result is
+# an INF w/ the proper sign.
+# The 68881/882 returns the destination INF w/ the new sign (if the j-bit of
+# the dst INF is set, then the j-bit of the result INF is also set).
+#
+	global		fdiv_inf_dst		# global for fsgldiv
+fdiv_inf_dst:
+	mov.b		DST_EX(%a1),%d0		# load both signs
+	mov.b		SRC_EX(%a0),%d1
+	eor.b		%d0,%d1
+	bpl.b		fdiv_inf_dst_p		# result is positive
+
+	fmovm.x		DST(%a1),&0x80		# return result in fp0
+	fabs.x		%fp0			# clear sign bit
+	fneg.x		%fp0			# set sign bit
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fdiv_inf_dst_p:
+	fmovm.x		DST(%a1),&0x80		# return result in fp0
+	fabs.x		%fp0			# return positive INF
+	mov.b		&inf_bmask,FPSR_CC(%a6) # set INF
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fneg(): emulates the fneg instruction				#
+#	fsneg(): emulates the fsneg instruction				#
+#	fdneg(): emulates the fdneg instruction				#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize a denorm to provide EXOP			#
+#	scale_to_zero_src() - scale sgl/dbl source exponent		#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan_1op() - return QNAN result				#
+#	res_snan_1op() - return SNAN result				#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, zeroes, and infinities as special cases. Separate	#
+# norms/denorms into ext/sgl/dbl precisions. Extended precision can be	#
+# emulated by simply setting sign bit. Sgl/dbl operands must be scaled	#
+# and an actual fneg performed to see if overflow/underflow would have	#
+# occurred. If so, return default underflow/overflow result. Else,	#
+# scale the result exponent and return result. FPSR gets set based on	#
+# the result value.							#
+#									#
+#########################################################################
+
+	global		fsneg
+fsneg:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fneg
+
+	global		fdneg
+fdneg:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fneg
+fneg:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+	mov.b		STAG(%a6),%d1
+	bne.w		fneg_not_norm		# optimize on non-norm input
+
+#
+# NEGATE SIGN : norms and denorms ONLY!
+#
+fneg_norm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.w		fneg_not_ext		# no; go handle sgl or dbl
+
+#
+# precision selected is extended. so...we can not get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	eori.w		&0x8000,%d0		# negate sign
+	bpl.b		fneg_norm_load		# sign is positive
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+fneg_norm_load:
+	mov.w		%d0,FP_SCR0_EX(%a6)
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance (no inexactness!)
+#
+fneg_denorm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fneg_not_ext		# no; go handle sgl or dbl
+
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	eori.w		&0x8000,%d0		# negate sign
+	bpl.b		fneg_denorm_done	# no
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# yes, set 'N' ccode bit
+fneg_denorm_done:
+	mov.w		%d0,FP_SCR0_EX(%a6)
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+
+	btst		&unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+	bne.b		fneg_ext_unfl_ena	# yes
+	rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
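+# (0x6000 is the 68881/882 exponent bias adjustment defined for exceptional
+# operands: added on underflow, subtracted on overflow.)
+#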
+fneg_ext_unfl_ena:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	bsr.l		norm			# normalize result
+	neg.w		%d0			# new exponent = -(shft val)
+	addi.w		&0x6000,%d0		# add new bias to exponent
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch old sign,exp
+	andi.w		&0x8000,%d1		# keep old sign
+	andi.w		&0x7fff,%d0		# clear sign position
+	or.w		%d1,%d0			# concat old sign, new exponent
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#
+# operand is either single or double
+#
+fneg_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.b		fneg_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fneg_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f80	# will move in underflow?
+	bge.w		fneg_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407e	# will move in overflow?
+	beq.w		fneg_sd_may_ovfl	# maybe; go check
+	blt.w		fneg_sd_ovfl		# yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fneg_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fneg.x		FP_SCR0(%a6),%fp0	# perform negation
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fneg_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load sgn,exp
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fneg_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c00	# will move in underflow?
+	bge.b		fneg_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43fe	# will move in overflow?
+	beq.w		fneg_sd_may_ovfl	# maybe; go check
+	blt.w		fneg_sd_ovfl		# yes; go handle overflow
+	bra.w		fneg_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fneg_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	eori.b		&0x80,FP_SCR0_EX(%a6)	# negate sign
+	bpl.b		fneg_sd_unfl_tst
+	bset		&neg_bit,FPSR_CC(%a6)	# set 'N' ccode bit
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+fneg_sd_unfl_tst:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fneg_sd_unfl_ena	# yes
+
+fneg_sd_unfl_dis:
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# unf_res may have set 'Z'
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fneg_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat new sign,new exp
+	mov.w		%d1,FP_SCR1_EX(%a6)	# insert new exp
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fneg_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fneg_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fneg.x		FP_SCR0(%a6),%fp0	# perform negation
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fneg_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fneg_sd_ovfl_ena	# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fneg_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended (and don't alter the FPSR).
+#
+fneg_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fneg_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fneg_sd_may_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fneg.x		FP_SCR0(%a6),%fp0	# perform negation
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fneg_sd_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fneg_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fneg_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fneg_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNAN
+	beq.l		res_qnan_1op
+
+#
+# do the fneg; at this point, only possible ops are ZERO and INF.
+# use fneg to determine ccodes.
+# prec:mode should be zero at this point but it won't affect answer anyway.
+#
+	fneg.x		SRC_EX(%a0),%fp0	# do fneg
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0		# put ccodes in lo byte
+	mov.b		%d0,FPSR_CC(%a6)	# insert correct ccodes
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	ftst(): emulates the ftst instruction				#
+#									#
+# XREF ****************************************************************	#
+#	res{s,q}nan_1op() - set NAN result for monadic instruction	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	none								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Check the source operand tag (STAG) and set the FPSR according	#
+# to the operand type and sign.						#
+#									#
+#########################################################################
+
+	global		ftst
+ftst:
+	mov.b		STAG(%a6),%d1
+	bne.b		ftst_not_norm		# optimize on non-norm input
+
+#
+# Norm:
+#
+ftst_norm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_norm_m		# yes
+	rts
+ftst_norm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#
+# input is not normalized; what is it?
+#
+ftst_not_norm:
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		ftst_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		ftst_inf
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNAN
+	beq.l		res_qnan_1op
+
+#
+# Denorm:
+#
+ftst_denorm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_denorm_m		# yes
+	rts
+ftst_denorm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#
+# Infinity:
+#
+ftst_inf:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_inf_m		# yes
+ftst_inf_p:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+ftst_inf_m:
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'I','N' ccode bits
+	rts
+
+#
+# Zero:
+#
+ftst_zero:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_zero_m		# yes
+ftst_zero_p:
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+ftst_zero_m:
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set 'Z','N' ccode bits
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fint(): emulates the fint instruction				#
+#									#
+# XREF ****************************************************************	#
+#	res_{s,q}nan_1op() - set NAN result for monadic operation	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round precision/mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Separate according to operand type. Unnorms don't pass through	#
+# here. For norms, load the rounding mode/prec, execute a "fint", then	#
+# store the resulting FPSR bits.					#
+#	For denorms, force the j-bit to a one and do the same as for	#
+# norms. Denorms are so low that the answer will either be a zero or a	#
+# one.									#
+#	For zeroes/infs/NANs, return the same while setting the FPSR	#
+# as appropriate.							#
+#									#
+#########################################################################
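+# note: fint rounds to integer using the rounding mode supplied in d0 (loaded
+# into the FPCR below), whereas fintrz further down always truncates toward
+# zero regardless of the mode bits.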
+
+	global		fint
+fint:
+	mov.b		STAG(%a6),%d1
+	bne.b		fint_not_norm		# optimize on non-norm input
+
+#
+# Norm:
+#
+fint_norm:
+	andi.b		&0x30,%d0		# set prec = ext
+
+	fmov.l		%d0,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fint.x		SRC(%a0),%fp0		# execute fint
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d0		# save FPSR
+	or.l		%d0,USER_FPSR(%a6)	# set exception bits
+
+	rts
+
+#
+# input is not normalized; what is it?
+#
+fint_not_norm:
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		fint_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fint_inf
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.b		fint_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	bra.l		res_qnan_1op		# weed out QNAN
+
+#
+# Denorm:
+#
+# for DENORMs, the result will be either (+/-)ZERO or (+/-)1.
+# also, the INEX2 and AINEX exception bits will be set.
+# so, we could either set these manually or force the DENORM
+# to a very small NORM and ship it to the NORM routine.
+# I do the latter.
+#
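+# (writing 0x80 into the top mantissa byte sets the j-bit, so the operand
+# becomes a tiny NORM with a zero biased exponent; its remaining mantissa
+# bits no longer matter since the rounded result can only be (+/-)ZERO or
+# (+/-)1.)
+#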
+fint_denorm:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
+	mov.b		&0x80,FP_SCR0_HI(%a6)	# force DENORM ==> small NORM
+	lea		FP_SCR0(%a6),%a0
+	bra.b		fint_norm
+
+#
+# Zero:
+#
+fint_zero:
+	tst.b		SRC_EX(%a0)		# is ZERO negative?
+	bmi.b		fint_zero_m		# yes
+fint_zero_p:
+	fmov.s		&0x00000000,%fp0	# return +ZERO in fp0
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fint_zero_m:
+	fmov.s		&0x80000000,%fp0	# return -ZERO in fp0
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+	rts
+
+#
+# Infinity:
+#
+fint_inf:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	tst.b		SRC_EX(%a0)		# is INF negative?
+	bmi.b		fint_inf_m		# yes
+fint_inf_p:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+fint_inf_m:
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fintrz(): emulates the fintrz instruction			#
+#									#
+# XREF ****************************************************************	#
+#	res_{s,q}nan_1op() - set NAN result for monadic operation	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round precision/mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Separate according to operand type. Unnorms don't pass through	#
+# here. For norms, load the rounding mode/prec, execute a "fintrz",	#
+# then store the resulting FPSR bits.					#
+#	For denorms, force the j-bit to a one and do the same as for	#
+# norms. Denorms are so low that the answer will either be a zero or a	#
+# one.									#
+#	For zeroes/infs/NANs, return the same while setting the FPSR	#
+# as appropriate.							#
+#									#
+#########################################################################
+
+	global		fintrz
+fintrz:
+	mov.b		STAG(%a6),%d1
+	bne.b		fintrz_not_norm		# optimize on non-norm input
+
+#
+# Norm:
+#
+fintrz_norm:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fintrz.x	SRC(%a0),%fp0		# execute fintrz
+
+	fmov.l		%fpsr,%d0		# save FPSR
+	or.l		%d0,USER_FPSR(%a6)	# set exception bits
+
+	rts
+
+#
+# input is not normalized; what is it?
+#
+fintrz_not_norm:
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		fintrz_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fintrz_inf
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.b		fintrz_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	bra.l		res_qnan_1op		# weed out QNAN
+
+#
+# Denorm:
+#
+# for DENORMs, the result will be (+/-)ZERO.
+# also, the INEX2 and AINEX exception bits will be set.
+# so, we could either set these manually or force the DENORM
+# to a very small NORM and ship it to the NORM routine.
+# I do the latter.
+#
+fintrz_denorm:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
+	mov.b		&0x80,FP_SCR0_HI(%a6)	# force DENORM ==> small NORM
+	lea		FP_SCR0(%a6),%a0
+	bra.b		fintrz_norm
+
+#
+# Zero:
+#
+fintrz_zero:
+	tst.b		SRC_EX(%a0)		# is ZERO negative?
+	bmi.b		fintrz_zero_m		# yes
+fintrz_zero_p:
+	fmov.s		&0x00000000,%fp0	# return +ZERO in fp0
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fintrz_zero_m:
+	fmov.s		&0x80000000,%fp0	# return -ZERO in fp0
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+	rts
+
+#
+# Infinity:
+#
+fintrz_inf:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	tst.b		SRC_EX(%a0)		# is INF negative?
+	bmi.b		fintrz_inf_m		# yes
+fintrz_inf_p:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+fintrz_inf_m:
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fabs():  emulates the fabs instruction				#
+#	fsabs(): emulates the fsabs instruction				#
+#	fdabs(): emulates the fdabs instruction				#
+#									#
+# XREF **************************************************************** #
+#	norm() - normalize denorm mantissa to provide EXOP		#
+#	scale_to_zero_src() - make exponent. = 0; get scale factor	#
+#	unf_res() - calculate underflow result				#
+#	ovf_res() - calculate overflow result				#
+#	res_{s,q}nan_1op() - set NAN result for monadic operation	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision source operand		#
+#	d0 = rnd precision/mode						#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Simply clear sign for extended precision norm. Ext prec denorm	#
+# gets an EXOP created for it since it's an underflow.			#
+#	Double and single precision can overflow and underflow. First,	#
+# scale the operand such that the exponent is zero. Perform an "fabs"	#
+# using the correct rnd mode/prec. Check to see if the original		#
+# exponent would take an exception. If so, use unf_res() or ovf_res()	#
+# to calculate the default result. Also, create the EXOP for the	#
+# exceptional case. If no exception should occur, insert the correct	#
+# result exponent and return.						#
+#	Unnorms don't pass through here.				#
+#									#
+#########################################################################
+
+	global		fsabs
+fsabs:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fabs
+
+	global		fdabs
+fdabs:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl precision
+
+	global		fabs
+fabs:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+	mov.b		STAG(%a6),%d1
+	bne.w		fabs_not_norm		# optimize on non-norm input
+
+#
+# ABSOLUTE VALUE: norms and denorms ONLY!
+#
+fabs_norm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fabs_not_ext		# no; go handle sgl or dbl
+
+#
+# precision selected is extended. so...we can not get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d1
+	bclr		&15,%d1			# force absolute value
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert exponent
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance (no inexactness!)
+#
+fabs_denorm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fabs_not_ext		# no
+
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	bclr		&15,%d0			# clear sign
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert exponent
+
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+
+	btst		&unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+	bne.b		fabs_ext_unfl_ena
+	rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
+fabs_ext_unfl_ena:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	bsr.l		norm			# normalize result
+	neg.w		%d0			# new exponent = -(shft val)
+	addi.w		&0x6000,%d0		# add new bias to exponent
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch old sign,exp
+	andi.w		&0x8000,%d1		# keep old sign
+	andi.w		&0x7fff,%d0		# clear sign position
+	or.w		%d1,%d0			# concat old sign, new exponent
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#
+# operand is either single or double
+#
+fabs_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.b		fabs_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fabs_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f80	# will move in underflow?
+	bge.w		fabs_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407e	# will move in overflow?
+	beq.w		fabs_sd_may_ovfl	# maybe; go check
+	blt.w		fabs_sd_ovfl		# yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fabs_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fabs_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load sgn,exp
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fabs_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c00	# will move in underflow?
+	bge.b		fabs_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43fe	# will move in overflow?
+	beq.w		fabs_sd_may_ovfl	# maybe; go check
+	blt.w		fabs_sd_ovfl		# yes; go handle overflow
+	bra.w		fabs_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fabs_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	bclr		&0x7,FP_SCR0_EX(%a6)	# force absolute value
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fabs_sd_unfl_ena	# yes
+
+fabs_sd_unfl_dis:
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set possible 'Z' ccode
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fabs_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat new sign,new exp
+	mov.w		%d1,FP_SCR1_EX(%a6)	# insert new exp
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fabs_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fabs_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fabs_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fabs_sd_ovfl_ena	# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fabs_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended (and don't alter the FPSR).
+#
+fabs_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fabs_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fabs_sd_may_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fabs_sd_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fabs_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fabs_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fabs_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNAN
+	beq.l		res_qnan_1op
+
+	fabs.x		SRC(%a0),%fp0		# force absolute value
+
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fabs_inf
+fabs_zero:
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fabs_inf:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fcmp(): fp compare op routine					#
+#									#
+# XREF ****************************************************************	#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = round prec/mode						#
+#									#
+# OUTPUT ************************************************************** #
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs and denorms as special cases. For everything else,	#
+# just use the actual fcmp instruction to produce the correct condition	#
+# codes.								#
+#									#
+#########################################################################
+
+	global		fcmp
+fcmp:
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1
+	bne.b		fcmp_not_norm		# optimize on non-norm input
+
+#
+# COMPARE FP OPs : NORMs, ZEROs, INFs, and "corrected" DENORMs
+#
+fcmp_norm:
+	fmovm.x		DST(%a1),&0x80		# load dst op
+
+	fcmp.x		%fp0,SRC(%a0)		# do compare
+
+	fmov.l		%fpsr,%d0		# save FPSR
+	rol.l		&0x8,%d0		# extract ccode bits
+	mov.b		%d0,FPSR_CC(%a6)	# set ccode bits (no exc bits are set)
+
+	rts
+
+#
+# fcmp: inputs are not both normalized; what are they?
+#
+fcmp_not_norm:
+	mov.w		(tbl_fcmp_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fcmp_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fcmp_op:
+	short		fcmp_norm	- tbl_fcmp_op # NORM - NORM
+	short		fcmp_norm	- tbl_fcmp_op # NORM - ZERO
+	short		fcmp_norm	- tbl_fcmp_op # NORM - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # NORM - QNAN
+	short		fcmp_nrm_dnrm	- tbl_fcmp_op # NORM - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # NORM - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_norm	- tbl_fcmp_op # ZERO - NORM
+	short		fcmp_norm	- tbl_fcmp_op # ZERO - ZERO
+	short		fcmp_norm	- tbl_fcmp_op # ZERO - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # ZERO - QNAN
+	short		fcmp_dnrm_s	- tbl_fcmp_op # ZERO - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # ZERO - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_norm	- tbl_fcmp_op # INF - NORM
+	short		fcmp_norm	- tbl_fcmp_op # INF - ZERO
+	short		fcmp_norm	- tbl_fcmp_op # INF - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # INF - QNAN
+	short		fcmp_dnrm_s	- tbl_fcmp_op # INF - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # INF - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - NORM
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - ZERO
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - QNAN
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # QNAN - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_dnrm_nrm	- tbl_fcmp_op # DENORM - NORM
+	short		fcmp_dnrm_d	- tbl_fcmp_op # DENORM - ZERO
+	short		fcmp_dnrm_d	- tbl_fcmp_op # DENORM - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # DENORM - QNAN
+	short		fcmp_dnrm_sd	- tbl_fcmp_op # DENORM - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # DENORM - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - NORM
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - ZERO
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - INF
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - QNAN
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+# unlike all other functions for QNAN and SNAN, fcmp does NOT set the
+# 'N' bit for a negative QNAN or SNAN input so we must squelch it here.
+fcmp_res_qnan:
+	bsr.l		res_qnan
+	andi.b		&0xf7,FPSR_CC(%a6)
+	rts
+fcmp_res_snan:
+	bsr.l		res_snan
+	andi.b		&0xf7,FPSR_CC(%a6)
+	rts
+
+#
+# DENORMs are a little more difficult.
+# If you have 2 DENORMs, then you can just force the j-bit to a one
+# and use the fcmp_norm routine.
+# If you have a DENORM and an INF or ZERO, just force the DENORM's j-bit to a one
+# and use the fcmp_norm routine.
+# If you have a DENORM and a NORM with opposite signs, then use fcmp_norm, also.
+# But with a DENORM and a NORM of the same sign, the neg bit is set if the
+# (1) signs are (+) and the DENORM is the dst or
+# (2) signs are (-) and the DENORM is the src
+#
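+# (e.g. both signs (+) with the DENORM as dst: dst - src < 0, so 'N' is set.)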
+
+fcmp_dnrm_s:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),%d0
+	bset		&31,%d0			# DENORM src; make into small norm
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a0
+	bra.w		fcmp_norm
+
+fcmp_dnrm_d:
+	mov.l		DST_EX(%a1),FP_SCR0_EX(%a6)
+	mov.l		DST_HI(%a1),%d0
+	bset		&31,%d0			# DENORM dst; make into small norm
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a1
+	bra.w		fcmp_norm
+
+fcmp_dnrm_sd:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		DST_HI(%a1),%d0
+	bset		&31,%d0			# DENORM dst; make into small norm
+	mov.l		%d0,FP_SCR1_HI(%a6)
+	mov.l		SRC_HI(%a0),%d0
+	bset		&31,%d0			# DENORM src; make into small norm
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR1(%a6),%a1
+	lea		FP_SCR0(%a6),%a0
+	bra.w		fcmp_norm
+
+fcmp_nrm_dnrm:
+	mov.b		SRC_EX(%a0),%d0		# determine if like signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bmi.w		fcmp_dnrm_s
+
+# signs are the same, so must determine the answer ourselves.
+	tst.b		%d0			# is src op negative?
+	bmi.b		fcmp_nrm_dnrm_m		# yes
+	rts
+fcmp_nrm_dnrm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+fcmp_dnrm_nrm:
+	mov.b		SRC_EX(%a0),%d0		# determine if like signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bmi.w		fcmp_dnrm_d
+
+# signs are the same, so must determine the answer ourselves.
+	tst.b		%d0			# is src op negative?
+	bpl.b		fcmp_dnrm_nrm_m		# no
+	rts
+fcmp_dnrm_nrm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsglmul(): emulates the fsglmul instruction			#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res4() - return default underflow result for sglop		#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a multiply	#
+# instruction won't cause an exception. Use the regular fsglmul to	#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
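+# note: fsglmul returns a result that is only single-precision accurate
+# regardless of the FPCR rounding precision, which is why unf_res4() (the
+# "default underflow result for sglop") is used rather than unf_res().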
+
+	global		fsglmul
+fsglmul:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1
+
+	bne.w		fsglmul_not_norm	# optimize on non-norm input
+
+fsglmul_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# scale exponent
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# scale dst exponent
+
+	add.l		(%sp)+,%d0		# SCALE_FACTOR = scale1 + scale2
+
+	cmpi.l		%d0,&0x3fff-0x7ffe	# would result ovfl?
+	beq.w		fsglmul_may_ovfl	# result may rnd to overflow
+	blt.w		fsglmul_ovfl		# result will overflow
+
+	cmpi.l		%d0,&0x3fff+0x0001	# would result unfl?
+	beq.w		fsglmul_may_unfl	# result may rnd to no unfl
+	bgt.w		fsglmul_unfl		# result will underflow
+
+fsglmul_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsglmul_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+fsglmul_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsglmul_ovfl_tst:
+
+# save setting this until now because this is where fsglmul_may_ovfl may jump in
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsglmul_ovfl_ena	# yes
+
+fsglmul_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	andi.b		&0x30,%d0		# force prec = ext
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+fsglmul_ovfl_ena:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fsglmul_ovfl_dis
+
+fsglmul_may_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fsglmul_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fsglmul_normal_exit
+
+fsglmul_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsglmul_unfl_ena	# yes
+
+fsglmul_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res4		# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' bit may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fsglmul_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp1	# execute sgl multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fsglmul_unfl_dis
+
+fsglmul_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| > 2.b?
+	fbgt.w		fsglmul_normal_exit	# no; no underflow occurred
+	fblt.w		fsglmul_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 2. but,
+# we don't know if the result was an underflow that rounded up to a 2 or
+# a normalized number that rounded down to a 2. so, redo the entire operation
+# using RZ as the rounding mode to see what the pre-rounded result is.
+# this case should be relatively rare.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert RZ
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp1	# execute sgl multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x2		# is |result| < 2.b?
+	fbge.w		fsglmul_normal_exit	# no; no underflow occurred
+	bra.w		fsglmul_unfl		# yes, underflow occurred
+
+##############################################################################
+
+#
+# Single Precision Multiply: inputs are not both normalized; what are they?
+#
+fsglmul_not_norm:
+	mov.w		(tbl_fsglmul_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fsglmul_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fsglmul_op:
+	short		fsglmul_norm		- tbl_fsglmul_op # NORM x NORM
+	short		fsglmul_zero		- tbl_fsglmul_op # NORM x ZERO
+	short		fsglmul_inf_src		- tbl_fsglmul_op # NORM x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # NORM x QNAN
+	short		fsglmul_norm		- tbl_fsglmul_op # NORM x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # NORM x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_zero		- tbl_fsglmul_op # ZERO x NORM
+	short		fsglmul_zero		- tbl_fsglmul_op # ZERO x ZERO
+	short		fsglmul_res_operr	- tbl_fsglmul_op # ZERO x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # ZERO x QNAN
+	short		fsglmul_zero		- tbl_fsglmul_op # ZERO x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # ZERO x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_inf_dst		- tbl_fsglmul_op # INF x NORM
+	short		fsglmul_res_operr	- tbl_fsglmul_op # INF x ZERO
+	short		fsglmul_inf_dst		- tbl_fsglmul_op # INF x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # INF x QNAN
+	short		fsglmul_inf_dst		- tbl_fsglmul_op # INF x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # INF x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x NORM
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x ZERO
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x QNAN
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # QNAN x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_norm		- tbl_fsglmul_op # DENORM x NORM
+	short		fsglmul_zero		- tbl_fsglmul_op # DENORM x ZERO
+	short		fsglmul_inf_src		- tbl_fsglmul_op # DENORM x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # DENORM x QNAN
+	short		fsglmul_norm		- tbl_fsglmul_op # DENORM x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # DENORM x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x NORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x ZERO
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x INF
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x QNAN
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+fsglmul_res_operr:
+	bra.l		res_operr
+fsglmul_res_snan:
+	bra.l		res_snan
+fsglmul_res_qnan:
+	bra.l		res_qnan
+fsglmul_zero:
+	bra.l		fmul_zero
+fsglmul_inf_src:
+	bra.l		fmul_inf_src
+fsglmul_inf_dst:
+	bra.l		fmul_inf_dst
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsgldiv(): emulates the fsgldiv instruction			#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res4() - return default underflow result for sglop		#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0  rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a divide	#
+# instruction won't cause an exception. Use the regular fsgldiv to	#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
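+#
+# a worked restatement of the scaling strategy below (using the same 0x3fff
+# extended-precision bias as the rest of this file): each operand is scaled
+# so its biased exponent becomes 0x3fff, which makes the scale factor
+#
+#	S.F. = (0x3fff - dst exp) - (0x3fff - src exp) = src exp - dst exp
+#
+# the scaled quotient then has a biased exponent of 0x3ffe or 0x3fff, and
+# the true exponent is recovered as (scaled exponent - S.F.). the compares
+# on d0 below only ask whether that recovered exponent could reach the
+# overflow (0x7fff) or underflow (0x0000) boundaries before the real divide
+# is ever allowed to trap.
+#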
+
+	global		fsgldiv
+fsgldiv:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fsgldiv_not_norm	# optimize on non-norm input
+
+#
+# DIVIDE: NORMs and DENORMs ONLY!
+#
+fsgldiv_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# calculate scale factor 1
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# calculate scale factor 2
+
+	neg.l		(%sp)			# S.F. = scale2 - scale1
+	add.l		%d0,(%sp)
+
+	mov.w		2+L_SCR3(%a6),%d1	# fetch precision,mode
+	lsr.b		&0x6,%d1
+	mov.l		(%sp)+,%d0
+	cmpi.l		%d0,&0x3fff-0x7ffe	# will result overflow?
+	ble.w		fsgldiv_may_ovfl
+
+	cmpi.l		%d0,&0x3fff-0x0000	# will result underflow?
+	beq.w		fsgldiv_may_unfl	# maybe
+	bgt.w		fsgldiv_unfl		# yes; go handle underflow
+
+fsgldiv_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# perform sgl divide
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsgldiv_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store result on stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+fsgldiv_may_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d1
+	fmov.l		&0x0,%fpcr
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX,N
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+	mov.w		(%sp),%d1		# fetch new exponent
+	add.l		&0xc,%sp		# clear result
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	cmp.l		%d1,&0x7fff		# did divide overflow?
+	blt.b		fsgldiv_normal_exit
+
+fsgldiv_ovfl_tst:
+	or.w		&ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsgldiv_ovfl_ena	# yes
+
+fsgldiv_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	andi.b		&0x30,%d0		# kill precision
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+fsgldiv_ovfl_ena:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract new bias
+	andi.w		&0x7fff,%d1		# clear ms bit
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fsgldiv_ovfl_dis
+
+fsgldiv_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# execute sgl divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsgldiv_unfl_ena	# yes
+
+fsgldiv_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res4		# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' bit may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fsgldiv_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp1	# execute sgl divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1		# clear top bit
+	or.w		%d2,%d1			# concat old sign, new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fsgldiv_unfl_dis
+
+#
+# the divide operation MAY underflow:
+#
+fsgldiv_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# execute sgl divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x1		# is |result| > 1.b?
+	fbgt.w		fsgldiv_normal_exit	# no; no underflow occurred
+	fblt.w		fsgldiv_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 1. but,
+# we don't know if the result was an underflow that rounded up to a 1
+# or a normalized number that rounded down to a 1. so, redo the entire
+# operation using RZ as the rounding mode to see what the pre-rounded
+# result is. this case should be relatively rare.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into %fp1
+
+	clr.l		%d1			# clear scratch register
+	ori.b		&rz_mode*0x10,%d1	# force RZ rnd mode
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp1	# execute sgl divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x1		# is |result| < 1.b?
+	fbge.w		fsgldiv_normal_exit	# no; no underflow occurred
+	bra.w		fsgldiv_unfl		# yes; underflow occurred
+
+############################################################################
+
+#
+# Divide: inputs are not both normalized; what are they?
+#
+fsgldiv_not_norm:
+	mov.w		(tbl_fsgldiv_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fsgldiv_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fsgldiv_op:
+	short		fsgldiv_norm		- tbl_fsgldiv_op # NORM / NORM
+	short		fsgldiv_inf_load	- tbl_fsgldiv_op # NORM / ZERO
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # NORM / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # NORM / QNAN
+	short		fsgldiv_norm		- tbl_fsgldiv_op # NORM / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # NORM / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # ZERO / NORM
+	short		fsgldiv_res_operr	- tbl_fsgldiv_op # ZERO / ZERO
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # ZERO / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # ZERO / QNAN
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # ZERO / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # ZERO / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_inf_dst		- tbl_fsgldiv_op # INF / NORM
+	short		fsgldiv_inf_dst		- tbl_fsgldiv_op # INF / ZERO
+	short		fsgldiv_res_operr	- tbl_fsgldiv_op # INF / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # INF / QNAN
+	short		fsgldiv_inf_dst		- tbl_fsgldiv_op # INF / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # INF / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / NORM
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / ZERO
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / QNAN
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # QNAN / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_norm		- tbl_fsgldiv_op # DENORM / NORM
+	short		fsgldiv_inf_load	- tbl_fsgldiv_op # DENORM / ZERO
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # DENORM / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # DENORM / QNAN
+	short		fsgldiv_norm		- tbl_fsgldiv_op # DENORM / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # DENORM / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / NORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / ZERO
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / INF
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / QNAN
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+fsgldiv_res_qnan:
+	bra.l		res_qnan
+fsgldiv_res_snan:
+	bra.l		res_snan
+fsgldiv_res_operr:
+	bra.l		res_operr
+fsgldiv_inf_load:
+	bra.l		fdiv_inf_load
+fsgldiv_zero_load:
+	bra.l		fdiv_zero_load
+fsgldiv_inf_dst:
+	bra.l		fdiv_inf_dst
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fadd(): emulates the fadd instruction				#
+#	fsadd(): emulates the fsadd instruction				#
+#	fdadd(): emulates the fdadd instruction				#
+#									#
+# XREF ****************************************************************	#
+#	addsub_scaler2() - scale the operands so they won't take exc	#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan() - set QNAN result					#
+#	res_snan() - set SNAN result					#
+#	res_operr() - set OPERR result					#
+#	scale_to_zero_src() - set src operand exponent equal to zero	#
+#	scale_to_zero_dst() - set dst operand exponent equal to zero	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Do addition after scaling exponents such that exception won't	#
+# occur. Then, check result exponent to see if exception would have	#
+# occurred. If so, return default result and maybe EXOP. Else, insert	#
+# the correct result exponent and return. Set FPSR bits as appropriate.	#
+#									#
+#########################################################################
+
+	global		fsadd
+fsadd:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fadd
+
+	global		fdadd
+fdadd:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fadd
+fadd:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fadd_not_norm		# optimize on non-norm input
+
+#
+# ADD: norms and denorms
+#
+fadd_norm:
+	bsr.l		addsub_scaler2		# scale exponents
+
+fadd_zero_entry:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fadd.x		FP_SCR0(%a6),%fp0	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch INEX2,N,Z
+
+	or.l		%d1,USER_FPSR(%a6)	# save exc and ccode bits
+
+	fbeq.w		fadd_zero_exit		# if result is zero, end now
+
+	mov.l		%d2,-(%sp)		# save d2
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+
+	mov.w		2+L_SCR3(%a6),%d1
+	lsr.b		&0x6,%d1
+
+	mov.w		(%sp),%d2		# fetch new sign, exp
+	andi.l		&0x7fff,%d2		# strip sign
+	sub.l		%d0,%d2			# add scale factor
+
+	cmp.l		%d2,(tbl_fadd_ovfl.b,%pc,%d1.w*4) # is it an overflow?
+	bge.b		fadd_ovfl		# yes
+
+	cmp.l		%d2,(tbl_fadd_unfl.b,%pc,%d1.w*4) # is it an underflow?
+	blt.w		fadd_unfl		# yes
+	beq.w		fadd_may_unfl		# maybe; go find out
+
+fadd_normal:
+	mov.w		(%sp),%d1
+	andi.w		&0x8000,%d1		# keep sign
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x80		# return result in fp0
+
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fadd_zero_exit:
+#	fmov.s		&0x00000000,%fp0	# return zero in fp0
+	rts
+
+tbl_fadd_ovfl:
+	long		0x7fff			# ext ovfl
+	long		0x407f			# sgl ovfl
+	long		0x43ff			# dbl ovfl
+
+tbl_fadd_unfl:
+	long	        0x0000			# ext unfl
+	long		0x3f81			# sgl unfl
+	long		0x3c01			# dbl unfl
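+
+# the bounds above are the extended-format biased exponents at which the
+# result leaves the selected rounding precision: for sgl, 0x3fff+0x80=0x407f
+# (2^+128 overflows) and 0x3fff-0x7e=0x3f81 (the smallest sgl normal,
+# 2^-126); for dbl, 0x3fff+0x400=0x43ff and 0x3fff-0x3fe=0x3c01 (2^-1022).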
+
+fadd_ovfl:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fadd_ovfl_ena		# yes
+
+	add.l		&0xc,%sp
+fadd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fadd_ovfl_ena:
+	mov.b		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fadd_ovfl_ena_sd	# no; prec = sgl or dbl
+
+fadd_ovfl_ena_cont:
+	mov.w		(%sp),%d1
+	andi.w		&0x8000,%d1		# keep sign
+	subi.l		&0x6000,%d2		# subtract new bias
+	andi.w		&0x7fff,%d2
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x40		# return EXOP in fp1
+	bra.b		fadd_ovfl_dis
+
+fadd_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# keep rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fadd.x		FP_SCR0(%a6),%fp0	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	add.l		&0xc,%sp
+	fmovm.x		&0x01,-(%sp)
+	bra.b		fadd_ovfl_ena_cont
+
+fadd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	add.l		&0xc,%sp
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fadd.x		FP_SCR0(%a6),%fp0	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save status
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fadd_unfl_ena		# yes
+
+fadd_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' bit may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fadd_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fadd_unfl_ena_sd	# no; sgl or dbl
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fadd_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fadd.x		FP_SCR0(%a6),%fp1	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1		# clear top bit
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fadd_unfl_dis
+
+fadd_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# use only rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fadd_unfl_ena_cont
+
+#
+# result is equal to the smallest normalized number in the selected precision
+# if the precision is extended, this result could not have come from an
+# underflow that rounded up.
+#
+fadd_may_unfl:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	beq.w		fadd_normal		# yes; no underflow occurred
+
+	mov.l		0x4(%sp),%d1		# extract hi(man)
+	cmpi.l		%d1,&0x80000000		# is hi(man) = 0x80000000?
+	bne.w		fadd_normal		# no; no underflow occurred
+
+	tst.l		0x8(%sp)		# is lo(man) = 0x0?
+	bne.w		fadd_normal		# no; no underflow occurred
+
+	btst		&inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.w		fadd_normal		# no; no underflow occurred
+
+#
+# ok, so now the result has an exponent equal to the smallest normalized
+# exponent for the selected precision. also, the mantissa is equal to
+# 0x8000000000000000 and this mantissa is the result of rounding non-zero
+# g,r,s.
+# now, we must determine whether the pre-rounded result was an underflow
+# rounded "up" or a normalized number rounded "down".
+# so, we do this by re-executing the add using RZ as the rounding mode and
+# seeing if the new result is smaller or equal to the current result.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fadd.x		FP_SCR0(%a6),%fp1	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# compare absolute values
+	fabs.x		%fp1
+	fcmp.x		%fp0,%fp1		# is first result > second?
+
+	fbgt.w		fadd_unfl		# yes; it's an underflow
+	bra.w		fadd_normal		# no; it's not an underflow
+
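+# put differently: RZ can only shrink a magnitude, so if the first result is
+# strictly larger in magnitude than the RZ result, rounding must have bumped
+# the sum up onto the smallest normal and the pre-rounded value underflowed.
+#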
+##########################################################################
+
+#
+# Add: inputs are not both normalized; what are they?
+#
+fadd_not_norm:
+	mov.w		(tbl_fadd_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fadd_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fadd_op:
+	short		fadd_norm	- tbl_fadd_op # NORM + NORM
+	short		fadd_zero_src	- tbl_fadd_op # NORM + ZERO
+	short		fadd_inf_src	- tbl_fadd_op # NORM + INF
+	short		fadd_res_qnan	- tbl_fadd_op # NORM + QNAN
+	short		fadd_norm	- tbl_fadd_op # NORM + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # NORM + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_zero_dst	- tbl_fadd_op # ZERO + NORM
+	short		fadd_zero_2	- tbl_fadd_op # ZERO + ZERO
+	short		fadd_inf_src	- tbl_fadd_op # ZERO + INF
+	short		fadd_res_qnan	- tbl_fadd_op # ZERO + QNAN
+	short		fadd_zero_dst	- tbl_fadd_op # ZERO + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # ZERO + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_inf_dst	- tbl_fadd_op # INF + NORM
+	short		fadd_inf_dst	- tbl_fadd_op # INF + ZERO
+	short		fadd_inf_2	- tbl_fadd_op # INF + INF
+	short		fadd_res_qnan	- tbl_fadd_op # INF + QNAN
+	short		fadd_inf_dst	- tbl_fadd_op # INF + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # INF + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + NORM
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + ZERO
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + INF
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + QNAN
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # QNAN + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_norm	- tbl_fadd_op # DENORM + NORM
+	short		fadd_zero_src	- tbl_fadd_op # DENORM + ZERO
+	short		fadd_inf_src	- tbl_fadd_op # DENORM + INF
+	short		fadd_res_qnan	- tbl_fadd_op # DENORM + QNAN
+	short		fadd_norm	- tbl_fadd_op # DENORM + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # DENORM + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + NORM
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + ZERO
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + INF
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + QNAN
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+fadd_res_qnan:
+	bra.l		res_qnan
+fadd_res_snan:
+	bra.l		res_snan
+
+#
+# both operands are ZEROes
+#
+fadd_zero_2:
+	mov.b		SRC_EX(%a0),%d0		# are the signs opposite
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bmi.w		fadd_zero_2_chk_rm	# weed out (-ZERO)+(+ZERO)
+
+# the signs are the same. so determine whether they are positive or negative
+# and return the appropriately signed zero.
+	tst.b		%d0			# are ZEROes positive or negative?
+	bmi.b		fadd_zero_rm		# negative
+	fmov.s		&0x00000000,%fp0	# return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# the ZEROes have opposite signs:
+# - therefore, we return +ZERO if the rounding modes are RN,RZ, or RP.
+# - -ZERO is returned in the case of RM.
+#
+fadd_zero_2_chk_rm:
+	mov.b		3+L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# extract rnd mode
+	cmpi.b		%d1,&rm_mode*0x10	# is rnd mode == RM?
+	beq.b		fadd_zero_rm		# yes
+	fmov.s		&0x00000000,%fp0	# return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+fadd_zero_rm:
+	fmov.s		&0x80000000,%fp0	# return -ZERO
+	mov.b		&neg_bmask+z_bmask,FPSR_CC(%a6) # set NEG/Z
+	rts
+
+#
+# one operand is a ZERO and the other is a DENORM or NORM. scale
+# the DENORM or NORM and jump to the regular fadd routine.
+#
+fadd_zero_dst:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# scale the operand
+	clr.w		FP_SCR1_EX(%a6)
+	clr.l		FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+	bra.w		fadd_zero_entry		# go execute fadd
+
+fadd_zero_src:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	bsr.l		scale_to_zero_dst	# scale the operand
+	clr.w		FP_SCR0_EX(%a6)
+	clr.l		FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+	bra.w		fadd_zero_entry		# go execute fadd
+
+#
+# both operands are INFs. an OPERR will result if the INFs have
+# different signs. else, an INF of the same sign is returned
+#
+fadd_inf_2:
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d1,%d0
+	bmi.l		res_operr		# weed out (-INF)+(+INF)
+
+# ok, so it's not an OPERR. but, we do have to remember to return the
+# src INF since that's where the 881/882 gets the j-bit from...
+
+#
+# operands are INF and one of {ZERO, INF, DENORM, NORM}
+#
+fadd_inf_src:
+	fmovm.x		SRC(%a0),&0x80		# return src INF
+	tst.b		SRC_EX(%a0)		# is INF positive?
+	bpl.b		fadd_inf_done		# yes; we're done
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+#
+# operands are INF and one of {ZERO, INF, DENORM, NORM}
+#
+fadd_inf_dst:
+	fmovm.x		DST(%a1),&0x80		# return dst INF
+	tst.b		DST_EX(%a1)		# is INF positive?
+	bpl.b		fadd_inf_done		# yes; we're done
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fadd_inf_done:
+	mov.b		&inf_bmask,FPSR_CC(%a6) # set INF
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsub(): emulates the fsub instruction				#
+#	fssub(): emulates the fssub instruction				#
+#	fdsub(): emulates the fdsub instruction				#
+#									#
+# XREF ****************************************************************	#
+#	addsub_scaler2() - scale the operands so they won't take exc	#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan() - set QNAN result					#
+#	res_snan() - set SNAN result					#
+#	res_operr() - set OPERR result					#
+#	scale_to_zero_src() - set src operand exponent equal to zero	#
+#	scale_to_zero_dst() - set dst operand exponent equal to zero	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Do subtraction after scaling exponents such that exception won't#
+# occur. Then, check result exponent to see if exception would have	#
+# occurred. If so, return default result and maybe EXOP. Else, insert	#
+# the correct result exponent and return. Set FPSR bits as appropriate.	#
+#									#
+#########################################################################
+
+	global		fssub
+fssub:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fsub
+
+	global		fdsub
+fdsub:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fsub
+fsub:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fsub_not_norm		# optimize on non-norm input
+
+#
+# SUB: norms and denorms
+#
+fsub_norm:
+	bsr.l		addsub_scaler2		# scale exponents
+
+fsub_zero_entry:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsub.x		FP_SCR0(%a6),%fp0	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch INEX2, N, Z
+
+	or.l		%d1,USER_FPSR(%a6)	# save exc and ccode bits
+
+	fbeq.w		fsub_zero_exit		# if result zero, end now
+
+	mov.l		%d2,-(%sp)		# save d2
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+
+	mov.w		2+L_SCR3(%a6),%d1
+	lsr.b		&0x6,%d1
+
+	mov.w		(%sp),%d2		# fetch new exponent
+	andi.l		&0x7fff,%d2		# strip sign
+	sub.l		%d0,%d2			# add scale factor
+
+	cmp.l		%d2,(tbl_fsub_ovfl.b,%pc,%d1.w*4) # is it an overflow?
+	bge.b		fsub_ovfl		# yes
+
+	cmp.l		%d2,(tbl_fsub_unfl.b,%pc,%d1.w*4) # is it an underflow?
+	blt.w		fsub_unfl		# yes
+	beq.w		fsub_may_unfl		# maybe; go find out
+
+fsub_normal:
+	mov.w		(%sp),%d1
+	andi.w		&0x8000,%d1		# keep sign
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x80		# return result in fp0
+
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fsub_zero_exit:
+#	fmov.s		&0x00000000,%fp0	# return zero in fp0
+	rts
+
+tbl_fsub_ovfl:
+	long		0x7fff			# ext ovfl
+	long		0x407f			# sgl ovfl
+	long		0x43ff			# dbl ovfl
+
+tbl_fsub_unfl:
+	long	        0x0000			# ext unfl
+	long		0x3f81			# sgl unfl
+	long		0x3c01			# dbl unfl
+
+fsub_ovfl:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsub_ovfl_ena		# yes
+
+	add.l		&0xc,%sp
+fsub_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fsub_ovfl_ena:
+	mov.b		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fsub_ovfl_ena_sd	# no
+
+fsub_ovfl_ena_cont:
+	mov.w		(%sp),%d1		# fetch {sgn,exp}
+	andi.w		&0x8000,%d1		# keep sign
+	subi.l		&0x6000,%d2		# subtract new bias
+	andi.w		&0x7fff,%d2		# clear top bit
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x40		# return EXOP in fp1
+	bra.b		fsub_ovfl_dis
+
+fsub_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# clear rnd prec
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fsub.x		FP_SCR0(%a6),%fp0	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	add.l		&0xc,%sp
+	fmovm.x		&0x01,-(%sp)
+	bra.b		fsub_ovfl_ena_cont
+
+fsub_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	add.l		&0xc,%sp
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsub.x		FP_SCR0(%a6),%fp0	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save status
+
+	or.l		%d1,USER_FPSR(%a6)
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsub_unfl_ena		# yes
+
+fsub_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fsub_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fsub_unfl_ena_sd	# no
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fsub_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsub.x		FP_SCR0(%a6),%fp1	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# store result to stack
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1		# clear top bit
+	or.w		%d2,%d1			# concat sgn,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fsub_unfl_dis
+
+fsub_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# clear rnd prec
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fsub_unfl_ena_cont
+
+#
+# result is equal to the smallest normalized number in the selected precision
+# if the precision is extended, this result could not have come from an
+# underflow that rounded up.
+#
+fsub_may_unfl:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# fetch rnd prec
+	beq.w		fsub_normal		# yes; no underflow occurred
+
+	mov.l		0x4(%sp),%d1
+	cmpi.l		%d1,&0x80000000		# is hi(man) = 0x80000000?
+	bne.w		fsub_normal		# no; no underflow occurred
+
+	tst.l		0x8(%sp)		# is lo(man) = 0x0?
+	bne.w		fsub_normal		# no; no underflow occurred
+
+	btst		&inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.w		fsub_normal		# no; no underflow occurred
+
+#
+# ok, so now the result has an exponent equal to the smallest normalized
+# exponent for the selected precision. also, the mantissa is equal to
+# 0x8000000000000000 and this mantissa is the result of rounding non-zero
+# g,r,s.
+# now, we must determine whether the pre-rounded result was an underflow
+# rounded "up" or a normalized number rounded "down".
+# so, we do this by re-executing the subtract using RZ as the rounding mode
+# seeing if the new result is smaller or equal to the current result.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsub.x		FP_SCR0(%a6),%fp1	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# compare absolute values
+	fabs.x		%fp1
+	fcmp.x		%fp0,%fp1		# is first result > second?
+
+	fbgt.w		fsub_unfl		# yes; it's an underflow
+	bra.w		fsub_normal		# no; it's not an underflow
+
+##########################################################################
+
+#
+# Sub: inputs are not both normalized; what are they?
+#
+fsub_not_norm:
+	mov.w		(tbl_fsub_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fsub_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fsub_op:
+	short		fsub_norm	- tbl_fsub_op # NORM - NORM
+	short		fsub_zero_src	- tbl_fsub_op # NORM - ZERO
+	short		fsub_inf_src	- tbl_fsub_op # NORM - INF
+	short		fsub_res_qnan	- tbl_fsub_op # NORM - QNAN
+	short		fsub_norm	- tbl_fsub_op # NORM - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # NORM - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_zero_dst	- tbl_fsub_op # ZERO - NORM
+	short		fsub_zero_2	- tbl_fsub_op # ZERO - ZERO
+	short		fsub_inf_src	- tbl_fsub_op # ZERO - INF
+	short		fsub_res_qnan	- tbl_fsub_op # ZERO - QNAN
+	short		fsub_zero_dst	- tbl_fsub_op # ZERO - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # ZERO - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_inf_dst	- tbl_fsub_op # INF - NORM
+	short		fsub_inf_dst	- tbl_fsub_op # INF - ZERO
+	short		fsub_inf_2	- tbl_fsub_op # INF - INF
+	short		fsub_res_qnan	- tbl_fsub_op # INF - QNAN
+	short		fsub_inf_dst	- tbl_fsub_op # INF - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # INF - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - NORM
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - ZERO
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - INF
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - QNAN
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # QNAN - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_norm	- tbl_fsub_op # DENORM - NORM
+	short		fsub_zero_src	- tbl_fsub_op # DENORM - ZERO
+	short		fsub_inf_src	- tbl_fsub_op # DENORM - INF
+	short		fsub_res_qnan	- tbl_fsub_op # DENORM - QNAN
+	short		fsub_norm	- tbl_fsub_op # DENORM - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # DENORM - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - NORM
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - ZERO
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - INF
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - QNAN
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+fsub_res_qnan:
+	bra.l		res_qnan
+fsub_res_snan:
+	bra.l		res_snan
+
+#
+# both operands are ZEROes
+#
+fsub_zero_2:
+	mov.b		SRC_EX(%a0),%d0
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d1,%d0
+	bpl.b		fsub_zero_2_chk_rm
+
+# the signs are opposite, so, return a ZERO w/ the sign of the dst ZERO
+	tst.b		%d1			# is dst negative?
+	bmi.b		fsub_zero_2_rm		# yes
+	fmov.s		&0x00000000,%fp0	# no; return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# the ZEROes have the same signs:
+# - therefore, we return +ZERO if the rounding mode is RN,RZ, or RP
+# - -ZERO is returned in the case of RM.
+#
+fsub_zero_2_chk_rm:
+	mov.b		3+L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# extract rnd mode
+	cmpi.b		%d1,&rm_mode*0x10	# is rnd mode = RM?
+	beq.b		fsub_zero_2_rm		# yes
+	fmov.s		&0x00000000,%fp0	# no; return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+fsub_zero_2_rm:
+	fmov.s		&0x80000000,%fp0	# return -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set Z/NEG
+	rts
+
+#
+# one operand is a ZERO and the other is a DENORM or a NORM.
+# scale the DENORM or NORM and jump to the regular fsub routine.
+#
+fsub_zero_dst:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# scale the operand
+	clr.w		FP_SCR1_EX(%a6)
+	clr.l		FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+	bra.w		fsub_zero_entry		# go execute fsub
+
+fsub_zero_src:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	bsr.l		scale_to_zero_dst	# scale the operand
+	clr.w		FP_SCR0_EX(%a6)
+	clr.l		FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+	bra.w		fsub_zero_entry		# go execute fsub
+
+#
+# both operands are INFs. an OPERR will result if the INFs have the
+# same signs. else, an INF w/ the sign of the dst INF is returned.
+#
+fsub_inf_2:
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d1,%d0
+	bpl.l		res_operr		# weed out (+INF)-(+INF), (-INF)-(-INF)
+
+# ok, so it's not an OPERR. but we do have to remember to return
+# the src INF since that's where the 881/882 gets the j-bit.
+
+fsub_inf_src:
+	fmovm.x		SRC(%a0),&0x80		# return src INF
+	fneg.x		%fp0			# invert sign
+	fbge.w		fsub_inf_done		# sign is now positive
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fsub_inf_dst:
+	fmovm.x		DST(%a1),&0x80		# return dst INF
+	tst.b		DST_EX(%a1)		# is INF negative?
+	bpl.b		fsub_inf_done		# no
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fsub_inf_done:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set INF
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsqrt(): emulates the fsqrt instruction				#
+#	fssqrt(): emulates the fssqrt instruction			#
+#	fdsqrt(): emulates the fdsqrt instruction			#
+#									#
+# XREF ****************************************************************	#
+#	scale_sqrt() - scale the source operand				#
+#	unf_res() - return default underflow result			#
+#	ovf_res() - return default overflow result			#
+#	res_qnan_1op() - return QNAN result				#
+#	res_snan_1op() - return SNAN result				#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0  rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a sqrt		#
+# instruction won't cause an exception. Use the regular fsqrt to	#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
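+#
+# note: a square root rounded to extended precision cannot leave the
+# extended exponent range (its exponent is roughly half of the operand's),
+# so only the sgl/dbl cases below need the overflow/underflow checks and
+# fsqrt_norm can issue the fsqrt directly.
+#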
+
+	global		fssqrt
+fssqrt:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fsqrt
+
+	global		fdsqrt
+fdsqrt:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl precision
+
+	global		fsqrt
+fsqrt:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+	clr.w		%d1
+	mov.b		STAG(%a6),%d1
+	bne.w		fsqrt_not_norm		# optimize on non-norm input
+
+#
+# SQUARE ROOT: norms and denorms ONLY!
+#
+fsqrt_norm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.l		res_operr		# yes
+
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fsqrt_not_ext		# no; go handle sgl or dbl
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsqrt.x		(%a0),%fp0		# execute square root
+
+	fmov.l		%fpsr,%d1
+	or.l		%d1,USER_FPSR(%a6)	# set N,INEX
+
+	rts
+
+fsqrt_denorm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.l		res_operr		# yes
+
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fsqrt_not_ext		# no; go handle sgl or dbl
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_sqrt		# calculate scale factor
+
+	bra.w		fsqrt_sd_normal
+
+#
+# operand is either single or double
+#
+fsqrt_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.w		fsqrt_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fsqrt_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_sqrt		# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f81	# will move in underflow?
+	beq.w		fsqrt_sd_may_unfl
+	bgt.w		fsqrt_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407f	# will move in overflow?
+	beq.w		fsqrt_sd_may_ovfl	# maybe; go check
+	blt.w		fsqrt_sd_ovfl		# yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fsqrt_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# perform square root
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsqrt_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load sgn,exp
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fsqrt_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_sqrt		# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c01	# will move in underflow?
+	beq.w		fsqrt_sd_may_unfl
+	bgt.b		fsqrt_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43ff	# will move in overflow?
+	beq.w		fsqrt_sd_may_ovfl	# maybe; go check
+	blt.w		fsqrt_sd_ovfl		# yes; go handle overflow
+	bra.w		fsqrt_sd_normal		# no; go handle normalized op
+
+# we're on the line here and the distinguishing characteristic is whether
+# the exponent is 0x3fff or 0x3ffe. if it's 0x3fff, then it's a safe number;
+# otherwise fall through to underflow.
+fsqrt_sd_may_unfl:
+	btst		&0x0,1+FP_SCR0_EX(%a6)	# is exponent 0x3fff?
+	bne.w		fsqrt_sd_normal		# yes, so no underflow
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fsqrt_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# execute square root
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsqrt_sd_unfl_ena	# yes
+
+fsqrt_sd_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set possible 'Z' ccode
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fsqrt_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat new sign,new exp
+	mov.w		%d1,FP_SCR1_EX(%a6)	# insert new exp
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fsqrt_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fsqrt_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# perform square root
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsqrt_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsqrt_sd_ovfl_ena	# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fsqrt_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fsqrt_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fsqrt_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fsqrt_sd_may_ovfl:
+	btst		&0x0,1+FP_SCR0_EX(%a6)	# is exponent 0x3fff?
+	bne.w		fsqrt_sd_ovfl		# yes, so overflow
+
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# perform square root
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fmov.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x1		# is |result| >= 1.b?
+	fbge.w		fsqrt_sd_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fsqrt_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fsqrt_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fsqrt_denorm
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		fsqrt_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fsqrt_inf
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	bra.l		res_qnan_1op
+
+#
+#	fsqrt(+0) = +0
+#	fsqrt(-0) = -0
+#	fsqrt(+INF) = +INF
+#	fsqrt(-INF) = OPERR
+#
+fsqrt_zero:
+	tst.b		SRC_EX(%a0)		# is ZERO positive or negative?
+	bmi.b		fsqrt_zero_m		# negative
+fsqrt_zero_p:
+	fmov.s		&0x00000000,%fp0	# return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fsqrt_zero_m:
+	fmov.s		&0x80000000,%fp0	# return -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set 'Z','N' ccode bits
+	rts
+
+fsqrt_inf:
+	tst.b		SRC_EX(%a0)		# is INF positive or negative?
+	bmi.l		res_operr		# negative
+fsqrt_inf_p:
+	fmovm.x		SRC(%a0),&0x80		# return +INF in fp0
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+
+##########################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	addsub_scaler2(): scale inputs to fadd/fsub such that no	#
+#			  OVFL/UNFL exceptions will result		#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize mantissa after adjusting exponent		#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SRC(a6) = fp op1(src)					#
+#	FP_DST(a6) = fp op2(dst)					#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SRC(a6) = fp op1 scaled(src)					#
+#	FP_DST(a6) = fp op2 scaled(dst)					#
+#	d0         = scale amount					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If the DST exponent is > the SRC exponent, set the DST exponent	#
+# equal to 0x3fff and scale the SRC exponent by the value that the	#
+# DST exponent was scaled by. If the SRC exponent is greater or equal,	#
+# do the opposite. Return this scale factor in d0.			#
+#	If the two exponents differ by > the number of mantissa bits	#
+# plus two, then set the smallest exponent to a very small value as a	#
+# quick shortcut.							#
+#									#
+#########################################################################
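+#
+# a minimal C-style sketch of the decision below (names invented for the
+# illustration; the real code works on the FP_SCR0/FP_SCR1 images and only
+# the dst exp > src exp half is shown; the other half mirrors it):
+#
+#	scale = 0x3fff - dst_exp;		/* pin dst exp to 0x3fff    */
+#	if (dst_exp - src_exp >= mantissalen + 2)
+#		src_exp = 1;			/* src only feeds g,r,s     */
+#	else
+#		src_exp += scale;		/* shift src by same amount */
+#	return scale;				/* callers subtract this
+#						   from the result exponent */
+#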
+
+	global		addsub_scaler2
+addsub_scaler2:
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	mov.w		DST_EX(%a1),%d1
+	mov.w		%d0,FP_SCR0_EX(%a6)
+	mov.w		%d1,FP_SCR1_EX(%a6)
+
+	andi.w		&0x7fff,%d0
+	andi.w		&0x7fff,%d1
+	mov.w		%d0,L_SCR1(%a6)		# store src exponent
+	mov.w		%d1,2+L_SCR1(%a6)	# store dst exponent
+
+	cmp.w		%d0, %d1		# is src exp >= dst exp?
+	bge.l		src_exp_ge2
+
+# dst exp is >  src exp; scale dst to exp = 0x3fff
+dst_exp_gt2:
+	bsr.l		scale_to_zero_dst
+	mov.l		%d0,-(%sp)		# save scale factor
+
+	cmpi.b		STAG(%a6),&DENORM	# is dst denormalized?
+	bne.b		cmpexp12
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the denorm; result is new exp
+	neg.w		%d0			# new exp = -(shft val)
+	mov.w		%d0,L_SCR1(%a6)		# insert new exp
+
+cmpexp12:
+	mov.w		2+L_SCR1(%a6),%d0
+	subi.w		&mantissalen+2,%d0	# subtract mantissalen+2 from larger exp
+
+	cmp.w		%d0,L_SCR1(%a6)		# is difference >= len(mantissa)+2?
+	bge.b		quick_scale12
+
+	mov.w		L_SCR1(%a6),%d0
+	add.w		0x2(%sp),%d0		# scale src exponent by scale factor
+	mov.w		FP_SCR0_EX(%a6),%d1
+	and.w		&0x8000,%d1
+	or.w		%d1,%d0			# concat {sgn,new exp}
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new dst exponent
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+quick_scale12:
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# zero src exponent
+	bset		&0x0,1+FP_SCR0_EX(%a6)	# set exp = 1
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+# src exp is >= dst exp; scale src to exp = 0x3fff
+src_exp_ge2:
+	bsr.l		scale_to_zero_src
+	mov.l		%d0,-(%sp)		# save scale factor
+
+	cmpi.b		DTAG(%a6),&DENORM	# is dst denormalized?
+	bne.b		cmpexp22
+	lea		FP_SCR1(%a6),%a0
+	bsr.l		norm			# normalize the denorm; result is new exp
+	neg.w		%d0			# new exp = -(shft val)
+	mov.w		%d0,2+L_SCR1(%a6)	# insert new exp
+
+cmpexp22:
+	mov.w		L_SCR1(%a6),%d0
+	subi.w		&mantissalen+2,%d0	# subtract mantissalen+2 from larger exp
+
+	cmp.w		%d0,2+L_SCR1(%a6)	# is difference >= len(mantissa)+2?
+	bge.b		quick_scale22
+
+	mov.w		2+L_SCR1(%a6),%d0
+	add.w		0x2(%sp),%d0		# scale dst exponent by scale factor
+	mov.w		FP_SCR1_EX(%a6),%d1
+	andi.w		&0x8000,%d1
+	or.w		%d1,%d0			# concat {sgn,new exp}
+	mov.w		%d0,FP_SCR1_EX(%a6)	# insert new dst exponent
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+quick_scale22:
+	andi.w		&0x8000,FP_SCR1_EX(%a6)	# zero dst exponent
+	bset		&0x0,1+FP_SCR1_EX(%a6)	# set exp = 1
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+##########################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	scale_to_zero_src(): scale the exponent of extended precision	#
+#			     value at FP_SCR0(a6).			#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize the mantissa if the operand was a DENORM	#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SCR0(a6) = extended precision operand to be scaled		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR0(a6) = scaled extended precision operand			#
+#	d0	    = scale value					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Set the exponent of the input operand to 0x3fff. Save the value	#
+# of the difference between the original and new exponent. Then,	#
+# normalize the operand if it was a DENORM. Add this normalization	#
+# value to the previous value. Return the result.			#
+#									#
+#########################################################################
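+#
+# in short: for a NORM the returned scale is just 0x3fff - exp; for a DENORM
+# the mantissa is normalized first and the shift count takes the place of
+# the (negated) exponent, so the scale becomes 0x3fff + shift.
+#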
+
+	global		scale_to_zero_src
+scale_to_zero_src:
+	mov.w		FP_SCR0_EX(%a6),%d1	# extract operand's {sgn,exp}
+	mov.w		%d1,%d0			# make a copy
+
+	andi.l		&0x7fff,%d1		# extract operand's exponent
+
+	andi.w		&0x8000,%d0		# extract operand's sgn
+	or.w		&0x3fff,%d0		# insert new operand's exponent(=0)
+
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert biased exponent
+
+	cmpi.b		STAG(%a6),&DENORM	# is operand normalized?
+	beq.b		stzs_denorm		# normalize the DENORM
+
+stzs_norm:
+	mov.l		&0x3fff,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+
+	rts
+
+stzs_denorm:
+	lea		FP_SCR0(%a6),%a0	# pass ptr to src op
+	bsr.l		norm			# normalize denorm
+	neg.l		%d0			# new exponent = -(shft val)
+	mov.l		%d0,%d1			# prepare for op_norm call
+	bra.b		stzs_norm		# finish scaling
+
+###
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	scale_sqrt(): scale the input operand exponent so a subsequent	#
+#		      fsqrt operation won't take an exception.		#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize the mantissa if the operand was a DENORM	#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SCR0(a6) = extended precision operand to be scaled		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR0(a6) = scaled extended precision operand			#
+#	d0	    = scale value					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If the input operand is a DENORM, normalize it.			#
+#	If the exponent of the input operand is even, set the exponent	#
+# to 0x3ffe and return a scale factor of "(exp-0x3ffe)/2". If the	#
+# exponent of the input operand is odd, set the exponent to 0x3fff and	#
+# return a scale factor of "(exp-0x3fff)/2".				#
+#									#
+#########################################################################
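+#
+# worked out: the new exponent (0x3fff or 0x3ffe) is chosen to match the
+# parity of the old one, so the difference is even and the "divide scale
+# factor by 2" below is exact. the sqrt of the scaled value then lands on
+# exponent 0x3fff or 0x3ffe, and subtracting the scale factor afterwards
+# leaves an unbiased exponent of floor(e/2), which is exactly the exponent
+# of the true square root.
+#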
+
+	global		scale_sqrt
+scale_sqrt:
+	cmpi.b		STAG(%a6),&DENORM	# is operand normalized?
+	beq.b		ss_denorm		# normalize the DENORM
+
+	mov.w		FP_SCR0_EX(%a6),%d1	# extract operand's {sgn,exp}
+	andi.l		&0x7fff,%d1		# extract operand's exponent
+
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# extract operand's sgn
+
+	btst		&0x0,%d1		# is exp even or odd?
+	beq.b		ss_norm_even
+
+	ori.w		&0x3fff,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	mov.l		&0x3fff,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+ss_norm_even:
+	ori.w		&0x3ffe,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	mov.l		&0x3ffe,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+ss_denorm:
+	lea		FP_SCR0(%a6),%a0	# pass ptr to src op
+	bsr.l		norm			# normalize denorm
+
+	btst		&0x0,%d0		# is exp even or odd?
+	beq.b		ss_denorm_even
+
+	ori.w		&0x3fff,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	add.l		&0x3fff,%d0
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+ss_denorm_even:
+	ori.w		&0x3ffe,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	add.l		&0x3ffe,%d0
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+###
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	scale_to_zero_dst(): scale the exponent of extended precision	#
+#			     value at FP_SCR1(a6).			#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize the mantissa if the operand was a DENORM	#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SCR1(a6) = extended precision operand to be scaled		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR1(a6) = scaled extended precision operand			#
+#	d0	    = scale value					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Set the exponent of the input operand to 0x3fff. Save the value	#
+# of the difference between the original and new exponent. Then,	#
+# normalize the operand if it was a DENORM. Add this normalization	#
+# value to the previous value. Return the result.			#
+#									#
+#########################################################################
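+# (Same scaling as scale_to_zero_src above, but operating on the destination
+#  operand FP_SCR1 and its DTAG.)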
+
+	global		scale_to_zero_dst
+scale_to_zero_dst:
+	mov.w		FP_SCR1_EX(%a6),%d1	# extract operand's {sgn,exp}
+	mov.w		%d1,%d0			# make a copy
+
+	andi.l		&0x7fff,%d1		# extract operand's exponent
+
+	andi.w		&0x8000,%d0		# extract operand's sgn
+	or.w		&0x3fff,%d0		# insert new operand's exponent(=0)
+
+	mov.w		%d0,FP_SCR1_EX(%a6)	# insert biased exponent
+
+	cmpi.b		DTAG(%a6),&DENORM	# is operand normalized?
+	beq.b		stzd_denorm		# normalize the DENORM
+
+stzd_norm:
+	mov.l		&0x3fff,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+	rts
+
+stzd_denorm:
+	lea		FP_SCR1(%a6),%a0	# pass ptr to dst op
+	bsr.l		norm			# normalize denorm
+	neg.l		%d0			# new exponent = -(shft val)
+	mov.l		%d0,%d1			# prepare for op_norm call
+	bra.b		stzd_norm		# finish scaling
+
+##########################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	res_qnan(): return default result w/ QNAN operand for dyadic	#
+#	res_snan(): return default result w/ SNAN operand for dyadic	#
+#	res_qnan_1op(): return dflt result w/ QNAN operand for monadic	#
+#	res_snan_1op(): return dflt result w/ SNAN operand for monadic	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SRC(a6) = pointer to extended precision src operand		#
+#	FP_DST(a6) = pointer to extended precision dst operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If either operand (but not both operands) of an operation is a	#
+# nonsignalling NAN, then that NAN is returned as the result. If both	#
+# operands are nonsignalling NANs, then the destination operand		#
+# nonsignalling NAN is returned as the result.				#
+#	If either operand to an operation is a signalling NAN (SNAN),	#
+# then, the SNAN bit is set in the FPSR EXC byte. If the SNAN trap	#
+# enable bit is set in the FPCR, then the trap is taken and the		#
+# destination is not modified. If the SNAN trap enable bit is not set,	#
+# then the SNAN is converted to a nonsignalling NAN (by setting the	#
+# SNAN bit in the operand to one), and the operation continues as	#
+# described in the preceding paragraph, for nonsignalling NANs.		#
+#	Make sure the appropriate FPSR bits are set before exiting.	#
+#									#
+#########################################################################
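+# A rough sketch of the selection order (illustrative only):
+#
+#	if (dst is SNAN)       { quiet(dst); result = dst; }
+#	else if (dst is QNAN)  { result = dst; }  /* a src SNAN still flagged */
+#	else if (src is QNAN)  { result = src; }
+#	else  /* src is SNAN */{ quiet(src); result = src; }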
+
+	global		res_qnan
+	global		res_snan
+res_qnan:
+res_snan:
+	cmp.b		DTAG(%a6), &SNAN	# is the dst an SNAN?
+	beq.b		dst_snan2
+	cmp.b		DTAG(%a6), &QNAN	# is the dst a  QNAN?
+	beq.b		dst_qnan2
+src_nan:
+	cmp.b		STAG(%a6), &QNAN
+	beq.b		src_qnan2
+	global		res_snan_1op
+res_snan_1op:
+src_snan2:
+	bset		&0x6, FP_SRC_HI(%a6)	# set SNAN bit
+	or.l		&nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
+	lea		FP_SRC(%a6), %a0
+	bra.b		nan_comp
+	global		res_qnan_1op
+res_qnan_1op:
+src_qnan2:
+	or.l		&nan_mask, USER_FPSR(%a6)
+	lea		FP_SRC(%a6), %a0
+	bra.b		nan_comp
+dst_snan2:
+	or.l		&nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
+	bset		&0x6, FP_DST_HI(%a6)	# set SNAN bit
+	lea		FP_DST(%a6), %a0
+	bra.b		nan_comp
+dst_qnan2:
+	lea		FP_DST(%a6), %a0
+	cmp.b		STAG(%a6), &SNAN
+	bne		nan_done
+	or.l		&aiop_mask+snan_mask, USER_FPSR(%a6)
+nan_done:
+	or.l		&nan_mask, USER_FPSR(%a6)
+nan_comp:
+	btst		&0x7, FTEMP_EX(%a0)	# is NAN neg?
+	beq.b		nan_not_neg
+	or.l		&neg_mask, USER_FPSR(%a6)
+nan_not_neg:
+	fmovm.x		(%a0), &0x80
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	res_operr(): return default result during operand error		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default operand error result				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	A nonsignalling NAN is returned as the default result when	#
+# an operand error occurs for the following cases:			#
+#									#
+#	Multiply: (Infinity x Zero)					#
+#	Divide  : (Zero / Zero) || (Infinity / Infinity)		#
+#									#
+#########################################################################
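+# The default result loaded below (nan_return) is the all-ones
+# nonsignalling NAN: sign/exp word 0x7fff, mantissa 0xffffffff_ffffffff.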
+
+	global		res_operr
+res_operr:
+	or.l		&nan_mask+operr_mask+aiop_mask, USER_FPSR(%a6)
+	fmovm.x		nan_return(%pc), &0x80
+	rts
+
+nan_return:
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+
+#########################################################################
+# fdbcc(): routine to emulate the fdbcc instruction			#
+#									#
+# XDEF **************************************************************** #
+#	_fdbcc()							#
+#									#
+# XREF **************************************************************** #
+#	fetch_dreg() - fetch Dn value					#
+#	store_dreg_l() - store updated Dn value				#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = displacement						#
+#									#
+# OUTPUT ************************************************************** #
+#	none								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This routine checks which conditional predicate is specified by	#
+# the stacked fdbcc instruction opcode and then branches to a routine	#
+# for that predicate. The corresponding fbcc instruction is then used	#
+# to see whether the condition (specified by the stacked FPSR) is true	#
+# or false.								#
+#	If a BSUN exception should be indicated, the BSUN and AIOP	#
+# bits are set in the stacked FPSR. If the BSUN exception is enabled,	#
+# the fbsun_flg is set in the SPCOND_FLG location on the stack. If an	#
+# enabled BSUN should not be flagged and the predicate is true, then	#
+# Dn is fetched and decremented by one. If Dn is not equal to -1, add	#
+# the displacement value to the stacked PC so that when an "rte" is	#
+# finally executed, the branch occurs.					#
+#									#
+#########################################################################
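+# A rough sketch of the net effect (illustrative only; 'disp' is the
+# displacement passed in d0):
+#
+#	if (cond(FPSR) is false) {
+#		if (--Dn != -1)
+#			PC = FPIAR + 4 + disp;	/* take the branch	*/
+#	}					/* true: fall through	*/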
+	global		_fdbcc
+_fdbcc:
+	mov.l		%d0,L_SCR1(%a6)		# save displacement
+
+	mov.w		EXC_CMDREG(%a6),%d0	# fetch predicate
+
+	clr.l		%d1			# clear scratch reg
+	mov.b		FPSR_CC(%a6),%d1	# fetch fp ccodes
+	ror.l		&0x8,%d1		# rotate to top byte
+	fmov.l		%d1,%fpsr		# insert into FPSR
+
+	mov.w		(tbl_fdbcc.b,%pc,%d0.w*2),%d1 # load table
+	jmp		(tbl_fdbcc.b,%pc,%d1.w) # jump to fdbcc routine
+
+tbl_fdbcc:
+	short		fdbcc_f		-	tbl_fdbcc	# 00
+	short		fdbcc_eq	-	tbl_fdbcc	# 01
+	short		fdbcc_ogt	-	tbl_fdbcc	# 02
+	short		fdbcc_oge	-	tbl_fdbcc	# 03
+	short		fdbcc_olt	-	tbl_fdbcc	# 04
+	short		fdbcc_ole	-	tbl_fdbcc	# 05
+	short		fdbcc_ogl	-	tbl_fdbcc	# 06
+	short		fdbcc_or	-	tbl_fdbcc	# 07
+	short		fdbcc_un	-	tbl_fdbcc	# 08
+	short		fdbcc_ueq	-	tbl_fdbcc	# 09
+	short		fdbcc_ugt	-	tbl_fdbcc	# 10
+	short		fdbcc_uge	-	tbl_fdbcc	# 11
+	short		fdbcc_ult	-	tbl_fdbcc	# 12
+	short		fdbcc_ule	-	tbl_fdbcc	# 13
+	short		fdbcc_neq	-	tbl_fdbcc	# 14
+	short		fdbcc_t		-	tbl_fdbcc	# 15
+	short		fdbcc_sf	-	tbl_fdbcc	# 16
+	short		fdbcc_seq	-	tbl_fdbcc	# 17
+	short		fdbcc_gt	-	tbl_fdbcc	# 18
+	short		fdbcc_ge	-	tbl_fdbcc	# 19
+	short		fdbcc_lt	-	tbl_fdbcc	# 20
+	short		fdbcc_le	-	tbl_fdbcc	# 21
+	short		fdbcc_gl	-	tbl_fdbcc	# 22
+	short		fdbcc_gle	-	tbl_fdbcc	# 23
+	short		fdbcc_ngle	-	tbl_fdbcc	# 24
+	short		fdbcc_ngl	-	tbl_fdbcc	# 25
+	short		fdbcc_nle	-	tbl_fdbcc	# 26
+	short		fdbcc_nlt	-	tbl_fdbcc	# 27
+	short		fdbcc_nge	-	tbl_fdbcc	# 28
+	short		fdbcc_ngt	-	tbl_fdbcc	# 29
+	short		fdbcc_sneq	-	tbl_fdbcc	# 30
+	short		fdbcc_st	-	tbl_fdbcc	# 31
+
+#########################################################################
+#									#
+# IEEE Nonaware tests							#
+#									#
+# For the IEEE nonaware tests, only the false branch changes the	#
+# counter. However, the true branch may set bsun so we check to see	#
+# if the NAN bit is set, in which case BSUN and AIOP will be set.	#
+#									#
+# The cases EQ and NE are shared by the Aware and Nonaware groups	#
+# and are incapable of setting the BSUN exception bit.			#
+#									#
+# Typically, only one of the two possible branch directions could	#
+# have the NAN bit set.							#
+# (This is assuming the mutual exclusiveness of FPSR cc bit groupings	#
+#  is preserved.)							#
+#									#
+#########################################################################
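+# Each nonaware predicate below follows the same shape (sketch only):
+# take the fbcc branch for the "true" direction; on whichever direction
+# can see a NAN, set BSUN+AIOP and, if BSUN is enabled, bail out through
+# fdbcc_bsun. A false result then goes to fdbcc_false to handle the
+# counter; a true result simply returns.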
+
+#
+# equal:
+#
+#	Z
+#
+fdbcc_eq:
+	fbeq.w		fdbcc_eq_yes		# equal?
+fdbcc_eq_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_eq_yes:
+	rts
+
+#
+# not equal:
+#	_
+#	Z
+#
+fdbcc_neq:
+	fbneq.w		fdbcc_neq_yes		# not equal?
+fdbcc_neq_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_neq_yes:
+	rts
+
+#
+# greater than:
+#	_______
+#	NANvZvN
+#
+fdbcc_gt:
+	fbgt.w		fdbcc_gt_yes		# greater than?
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fdbcc_false		# no;go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_gt_yes:
+	rts					# do nothing
+
+#
+# not greater than:
+#
+#	NANvZvN
+#
+fdbcc_ngt:
+	fbngt.w		fdbcc_ngt_yes		# not greater than?
+fdbcc_ngt_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ngt_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		fdbcc_ngt_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_ngt_done:
+	rts					# no; do nothing
+
+#
+# greater than or equal:
+#	   _____
+#	Zv(NANvN)
+#
+fdbcc_ge:
+	fbge.w		fdbcc_ge_yes		# greater than or equal?
+fdbcc_ge_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fdbcc_false		# no;go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ge_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		fdbcc_ge_yes_done	# no;go do nothing
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_ge_yes_done:
+	rts					# do nothing
+
+#
+# not (greater than or equal):
+#	       _
+#	NANv(N^Z)
+#
+fdbcc_nge:
+	fbnge.w		fdbcc_nge_yes		# not (greater than or equal)?
+fdbcc_nge_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_nge_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		fdbcc_nge_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_nge_done:
+	rts					# no; do nothing
+
+#
+# less than:
+#	   _____
+#	N^(NANvZ)
+#
+fdbcc_lt:
+	fblt.w		fdbcc_lt_yes		# less than?
+fdbcc_lt_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fdbcc_false		# no; go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_lt_yes:
+	rts					# do nothing
+
+#
+# not less than:
+#	       _
+#	NANv(ZvN)
+#
+fdbcc_nlt:
+	fbnlt.w		fdbcc_nlt_yes		# not less than?
+fdbcc_nlt_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_nlt_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		fdbcc_nlt_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_nlt_done:
+	rts					# no; do nothing
+
+#
+# less than or equal:
+#	     ___
+#	Zv(N^NAN)
+#
+fdbcc_le:
+	fble.w		fdbcc_le_yes		# less than or equal?
+fdbcc_le_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fdbcc_false		# no; go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_le_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		fdbcc_le_yes_done	# no; go do nothing
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_le_yes_done:
+	rts					# do nothing
+
+#
+# not (less than or equal):
+#	     ___
+#	NANv(NvZ)
+#
+fdbcc_nle:
+	fbnle.w		fdbcc_nle_yes		# not (less than or equal)?
+fdbcc_nle_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_nle_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fdbcc_nle_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_nle_done:
+	rts					# no; do nothing
+
+#
+# greater or less than:
+#	_____
+#	NANvZ
+#
+fdbcc_gl:
+	fbgl.w		fdbcc_gl_yes		# greater or less than?
+fdbcc_gl_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fdbcc_false		# no; handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_gl_yes:
+	rts					# do nothing
+
+#
+# not (greater or less than):
+#
+#	NANvZ
+#
+fdbcc_ngl:
+	fbngl.w		fdbcc_ngl_yes		# not (greater or less than)?
+fdbcc_ngl_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ngl_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		fdbcc_ngl_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_ngl_done:
+	rts					# no; do nothing
+
+#
+# greater, less, or equal:
+#	___
+#	NAN
+#
+fdbcc_gle:
+	fbgle.w		fdbcc_gle_yes		# greater, less, or equal?
+fdbcc_gle_no:
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_gle_yes:
+	rts					# do nothing
+
+#
+# not (greater, less, or equal):
+#
+#	NAN
+#
+fdbcc_ngle:
+	fbngle.w	fdbcc_ngle_yes		# not (greater, less, or equal)?
+fdbcc_ngle_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ngle_yes:
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	rts					# no; do nothing
+
+#########################################################################
+#									#
+# Miscellaneous tests							#
+#									#
+# For the IEEE miscellaneous tests, all but fdbf and fdbt can set bsun. #
+#									#
+#########################################################################
+
+#
+# false:
+#
+#	False
+#
+fdbcc_f:					# no bsun possible
+	bra.w		fdbcc_false		# go handle counter
+
+#
+# true:
+#
+#	True
+#
+fdbcc_t:					# no bsun possible
+	rts					# do nothing
+
+#
+# signalling false:
+#
+#	False
+#
+fdbcc_sf:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fdbcc_false		# no;go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# go handle counter
+
+#
+# signalling true:
+#
+#	True
+#
+fdbcc_st:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.b		fdbcc_st_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_st_done:
+	rts
+
+#
+# signalling equal:
+#
+#	Z
+#
+fdbcc_seq:
+	fbseq.w		fdbcc_seq_yes		# signalling equal?
+fdbcc_seq_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fdbcc_false		# no;go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# go handle counter
+fdbcc_seq_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.b		fdbcc_seq_yes_done	# no;go do nothing
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_seq_yes_done:
+	rts					# yes; do nothing
+
+#
+# signalling not equal:
+#	_
+#	Z
+#
+fdbcc_sneq:
+	fbsneq.w	fdbcc_sneq_yes		# signalling not equal?
+fdbcc_sneq_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fdbcc_false		# no;go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# go handle counter
+fdbcc_sneq_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fdbcc_sneq_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_sneq_done:
+	rts
+
+#########################################################################
+#									#
+# IEEE Aware tests							#
+#									#
+# For the IEEE aware tests, action is only taken if the result is false.#
+# Therefore, the opposite branch type is used to jump to the decrement	#
+# routine.								#
+# The BSUN exception will not be set for any of these tests.		#
+#									#
+#########################################################################
+
+#
+# ordered greater than:
+#	_______
+#	NANvZvN
+#
+fdbcc_ogt:
+	fbogt.w		fdbcc_ogt_yes		# ordered greater than?
+fdbcc_ogt_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ogt_yes:
+	rts					# yes; do nothing
+
+#
+# unordered or less or equal:
+#	_______
+#	NANvZvN
+#
+fdbcc_ule:
+	fbule.w		fdbcc_ule_yes		# unordered or less or equal?
+fdbcc_ule_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ule_yes:
+	rts					# yes; do nothing
+
+#
+# ordered greater than or equal:
+#	   _____
+#	Zv(NANvN)
+#
+fdbcc_oge:
+	fboge.w		fdbcc_oge_yes		# ordered greater than or equal?
+fdbcc_oge_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_oge_yes:
+	rts					# yes; do nothing
+
+#
+# unordered or less than:
+#	       _
+#	NANv(N^Z)
+#
+fdbcc_ult:
+	fbult.w		fdbcc_ult_yes		# unordered or less than?
+fdbcc_ult_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ult_yes:
+	rts					# yes; do nothing
+
+#
+# ordered less than:
+#	   _____
+#	N^(NANvZ)
+#
+fdbcc_olt:
+	fbolt.w		fdbcc_olt_yes		# ordered less than?
+fdbcc_olt_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_olt_yes:
+	rts					# yes; do nothing
+
+#
+# unordered or greater or equal:
+#
+#	NANvZvN
+#
+fdbcc_uge:
+	fbuge.w		fdbcc_uge_yes		# unordered or greater or equal?
+fdbcc_uge_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_uge_yes:
+	rts					# yes; do nothing
+
+#
+# ordered less than or equal:
+#	     ___
+#	Zv(N^NAN)
+#
+fdbcc_ole:
+	fbole.w		fdbcc_ole_yes		# ordered less than or equal?
+fdbcc_ole_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ole_yes:
+	rts					# yes; do nothing
+
+#
+# unordered or greater than:
+#	     ___
+#	NANv(NvZ)
+#
+fdbcc_ugt:
+	fbugt.w		fdbcc_ugt_yes		# unordered or greater than?
+fdbcc_ugt_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ugt_yes:
+	rts					# yes; do nothing
+
+#
+# ordered greater or less than:
+#	_____
+#	NANvZ
+#
+fdbcc_ogl:
+	fbogl.w		fdbcc_ogl_yes		# ordered greater or less than?
+fdbcc_ogl_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ogl_yes:
+	rts					# yes; do nothing
+
+#
+# unordered or equal:
+#
+#	NANvZ
+#
+fdbcc_ueq:
+	fbueq.w		fdbcc_ueq_yes		# unordered or equal?
+fdbcc_ueq_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ueq_yes:
+	rts					# yes; do nothing
+
+#
+# ordered:
+#	___
+#	NAN
+#
+fdbcc_or:
+	fbor.w		fdbcc_or_yes		# ordered?
+fdbcc_or_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_or_yes:
+	rts					# yes; do nothing
+
+#
+# unordered:
+#
+#	NAN
+#
+fdbcc_un:
+	fbun.w		fdbcc_un_yes		# unordered?
+fdbcc_un_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_un_yes:
+	rts					# yes; do nothing
+
+#######################################################################
+
+#
+# the bsun exception bit was not set.
+#
+# (1) subtract 1 from the count register
+# (2) if (cr == -1) then
+#	pc = pc of next instruction
+#     else
+#	pc += sign_ext(16-bit displacement)
+#
+fdbcc_false:
+	mov.b		1+EXC_OPWORD(%a6), %d1	# fetch lo opword
+	andi.w		&0x7, %d1		# extract count register
+
+	bsr.l		fetch_dreg		# fetch count value
+# make sure that d0 isn't corrupted between calls...
+
+	subq.w		&0x1, %d0		# Dn - 1 -> Dn
+
+	bsr.l		store_dreg_l		# store new count value
+
+	cmpi.w		%d0, &-0x1		# is (Dn == -1)?
+	bne.b		fdbcc_false_cont	# no;
+	rts
+
+fdbcc_false_cont:
+	mov.l		L_SCR1(%a6),%d0		# fetch displacement
+	add.l		USER_FPIAR(%a6),%d0	# add instruction PC
+	addq.l		&0x4,%d0		# add instruction length
+	mov.l		%d0,EXC_PC(%a6)		# set new PC
+	rts
+
+# the emulation routine set bsun and BSUN was enabled. have to
+# fix stack and jump to the bsun handler.
+# let the caller of this routine shift the stack frame up to
+# eliminate the effective address field.
+fdbcc_bsun:
+	mov.b		&fbsun_flg,SPCOND_FLG(%a6)
+	rts
+
+#########################################################################
+# ftrapcc(): routine to emulate the ftrapcc instruction			#
+#									#
+# XDEF ****************************************************************	#
+#	_ftrapcc()							#
+#									#
+# XREF ****************************************************************	#
+#	none								#
+#									#
+# INPUT *************************************************************** #
+#	none								#
+#									#
+# OUTPUT ************************************************************** #
+#	none								#
+#									#
+# ALGORITHM *********************************************************** #
+#	This routine checks which conditional predicate is specified by	#
+# the stacked ftrapcc instruction opcode and then branches to a routine	#
+# for that predicate. The corresponding fbcc instruction is then used	#
+# to see whether the condition (specified by the stacked FPSR) is true	#
+# or false.								#
+#	If a BSUN exception should be indicated, the BSUN and AIOP	#
+# bits are set in the stacked FPSR. If the BSUN exception is enabled,	#
+# the fbsun_flg is set in the SPCOND_FLG location on the stack. If an	#
+# enabled BSUN should not be flagged and the predicate is true, then	#
+# the ftrapcc_flg is set in the SPCOND_FLG location. These special	#
+# flags indicate to the calling routine to emulate the exceptional	#
+# condition.								#
+#									#
+#########################################################################
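+# A rough sketch of the outcome (illustrative only):
+#
+#	if (cond(FPSR) is true)
+#		SPCOND_FLG = ftrapcc_flg;	/* caller emulates the trap */
+#	/* nonaware predicates also raise BSUN+AIOP when NAN is set; if
+#	   BSUN is enabled, fbsun_flg is posted instead */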
+
+	global		_ftrapcc
+_ftrapcc:
+	mov.w		EXC_CMDREG(%a6),%d0	# fetch predicate
+
+	clr.l		%d1			# clear scratch reg
+	mov.b		FPSR_CC(%a6),%d1	# fetch fp ccodes
+	ror.l		&0x8,%d1		# rotate to top byte
+	fmov.l		%d1,%fpsr		# insert into FPSR
+
+	mov.w		(tbl_ftrapcc.b,%pc,%d0.w*2), %d1 # load table
+	jmp		(tbl_ftrapcc.b,%pc,%d1.w) # jump to ftrapcc routine
+
+tbl_ftrapcc:
+	short		ftrapcc_f	-	tbl_ftrapcc	# 00
+	short		ftrapcc_eq	-	tbl_ftrapcc	# 01
+	short		ftrapcc_ogt	-	tbl_ftrapcc	# 02
+	short		ftrapcc_oge	-	tbl_ftrapcc	# 03
+	short		ftrapcc_olt	-	tbl_ftrapcc	# 04
+	short		ftrapcc_ole	-	tbl_ftrapcc	# 05
+	short		ftrapcc_ogl	-	tbl_ftrapcc	# 06
+	short		ftrapcc_or	-	tbl_ftrapcc	# 07
+	short		ftrapcc_un	-	tbl_ftrapcc	# 08
+	short		ftrapcc_ueq	-	tbl_ftrapcc	# 09
+	short		ftrapcc_ugt	-	tbl_ftrapcc	# 10
+	short		ftrapcc_uge	-	tbl_ftrapcc	# 11
+	short		ftrapcc_ult	-	tbl_ftrapcc	# 12
+	short		ftrapcc_ule	-	tbl_ftrapcc	# 13
+	short		ftrapcc_neq	-	tbl_ftrapcc	# 14
+	short		ftrapcc_t	-	tbl_ftrapcc	# 15
+	short		ftrapcc_sf	-	tbl_ftrapcc	# 16
+	short		ftrapcc_seq	-	tbl_ftrapcc	# 17
+	short		ftrapcc_gt	-	tbl_ftrapcc	# 18
+	short		ftrapcc_ge	-	tbl_ftrapcc	# 19
+	short		ftrapcc_lt	-	tbl_ftrapcc	# 20
+	short		ftrapcc_le	-	tbl_ftrapcc	# 21
+	short		ftrapcc_gl	-	tbl_ftrapcc	# 22
+	short		ftrapcc_gle	-	tbl_ftrapcc	# 23
+	short		ftrapcc_ngle	-	tbl_ftrapcc	# 24
+	short		ftrapcc_ngl	-	tbl_ftrapcc	# 25
+	short		ftrapcc_nle	-	tbl_ftrapcc	# 26
+	short		ftrapcc_nlt	-	tbl_ftrapcc	# 27
+	short		ftrapcc_nge	-	tbl_ftrapcc	# 28
+	short		ftrapcc_ngt	-	tbl_ftrapcc	# 29
+	short		ftrapcc_sneq	-	tbl_ftrapcc	# 30
+	short		ftrapcc_st	-	tbl_ftrapcc	# 31
+
+#########################################################################
+#									#
+# IEEE Nonaware tests							#
+#									#
+# For the IEEE nonaware tests, we set the result based on the		#
+# floating point condition codes. In addition, we check to see		#
+# if the NAN bit is set, in which case BSUN and AIOP will be set.	#
+#									#
+# The cases EQ and NE are shared by the Aware and Nonaware groups	#
+# and are incapable of setting the BSUN exception bit.			#
+#									#
+# Typically, only one of the two possible branch directions could	#
+# have the NAN bit set.							#
+#									#
+#########################################################################
+
+#
+# equal:
+#
+#	Z
+#
+ftrapcc_eq:
+	fbeq.w		ftrapcc_trap		# equal?
+ftrapcc_eq_no:
+	rts					# do nothing
+
+#
+# not equal:
+#	_
+#	Z
+#
+ftrapcc_neq:
+	fbneq.w		ftrapcc_trap		# not equal?
+ftrapcc_neq_no:
+	rts					# do nothing
+
+#
+# greater than:
+#	_______
+#	NANvZvN
+#
+ftrapcc_gt:
+	fbgt.w		ftrapcc_trap		# greater than?
+ftrapcc_gt_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		ftrapcc_gt_done		# no
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_gt_done:
+	rts					# no; do nothing
+
+#
+# not greater than:
+#
+#	NANvZvN
+#
+ftrapcc_ngt:
+	fbngt.w		ftrapcc_ngt_yes		# not greater than?
+ftrapcc_ngt_no:
+	rts					# do nothing
+ftrapcc_ngt_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# greater than or equal:
+#	   _____
+#	Zv(NANvN)
+#
+ftrapcc_ge:
+	fbge.w		ftrapcc_ge_yes		# greater than or equal?
+ftrapcc_ge_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		ftrapcc_ge_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_ge_done:
+	rts					# no; do nothing
+ftrapcc_ge_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# not (greater than or equal):
+#	       _
+#	NANv(N^Z)
+#
+ftrapcc_nge:
+	fbnge.w		ftrapcc_nge_yes		# not (greater than or equal)?
+ftrapcc_nge_no:
+	rts					# do nothing
+ftrapcc_nge_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# less than:
+#	   _____
+#	N^(NANvZ)
+#
+ftrapcc_lt:
+	fblt.w		ftrapcc_trap		# less than?
+ftrapcc_lt_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		ftrapcc_lt_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_lt_done:
+	rts					# no; do nothing
+
+#
+# not less than:
+#	       _
+#	NANv(ZvN)
+#
+ftrapcc_nlt:
+	fbnlt.w		ftrapcc_nlt_yes		# not less than?
+ftrapcc_nlt_no:
+	rts					# do nothing
+ftrapcc_nlt_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# less than or equal:
+#	     ___
+#	Zv(N^NAN)
+#
+ftrapcc_le:
+	fble.w		ftrapcc_le_yes		# less than or equal?
+ftrapcc_le_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		ftrapcc_le_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_le_done:
+	rts					# no; do nothing
+ftrapcc_le_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# not (less than or equal):
+#	     ___
+#	NANv(NvZ)
+#
+ftrapcc_nle:
+	fbnle.w		ftrapcc_nle_yes		# not (less than or equal)?
+ftrapcc_nle_no:
+	rts					# do nothing
+ftrapcc_nle_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# greater or less than:
+#	_____
+#	NANvZ
+#
+ftrapcc_gl:
+	fbgl.w		ftrapcc_trap		# greater or less than?
+ftrapcc_gl_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		ftrapcc_gl_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_gl_done:
+	rts					# no; do nothing
+
+#
+# not (greater or less than):
+#
+#	NANvZ
+#
+ftrapcc_ngl:
+	fbngl.w		ftrapcc_ngl_yes		# not (greater or less than)?
+ftrapcc_ngl_no:
+	rts					# do nothing
+ftrapcc_ngl_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# greater, less, or equal:
+#	___
+#	NAN
+#
+ftrapcc_gle:
+	fbgle.w		ftrapcc_trap		# greater, less, or equal?
+ftrapcc_gle_no:
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	rts					# no; do nothing
+
+#
+# not (greater, less, or equal):
+#
+#	NAN
+#
+ftrapcc_ngle:
+	fbngle.w	ftrapcc_ngle_yes	# not (greater, less, or equal)?
+ftrapcc_ngle_no:
+	rts					# do nothing
+ftrapcc_ngle_yes:
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#########################################################################
+#									#
+# Miscellaneous tests							#
+#									#
+# For the miscellaneous tests, all but ftrapcc_f and ftrapcc_t can set	#
+# the BSUN exception.							#
+#									#
+#########################################################################
+
+#
+# false:
+#
+#	False
+#
+ftrapcc_f:
+	rts					# do nothing
+
+#
+# true:
+#
+#	True
+#
+ftrapcc_t:
+	bra.w		ftrapcc_trap		# go take trap
+
+#
+# signalling false:
+#
+#	False
+#
+ftrapcc_sf:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.b		ftrapcc_sf_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_sf_done:
+	rts					# no; do nothing
+
+#
+# signalling true:
+#
+#	True
+#
+ftrapcc_st:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# signalling equal:
+#
+#	Z
+#
+ftrapcc_seq:
+	fbseq.w		ftrapcc_seq_yes		# signalling equal?
+ftrapcc_seq_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		ftrapcc_seq_done	# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_seq_done:
+	rts					# no; do nothing
+ftrapcc_seq_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# signalling not equal:
+#	_
+#	Z
+#
+ftrapcc_sneq:
+	fbsneq.w	ftrapcc_sneq_yes	# signalling not equal?
+ftrapcc_sneq_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		ftrapcc_sneq_no_done	# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_sneq_no_done:
+	rts					# do nothing
+ftrapcc_sneq_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#########################################################################
+#									#
+# IEEE Aware tests							#
+#									#
+# For the IEEE aware tests, we only have to set the result based on the	#
+# floating point condition codes. The BSUN exception will not be	#
+# set for any of these tests.						#
+#									#
+#########################################################################
+
+#
+# ordered greater than:
+#	_______
+#	NANvZvN
+#
+ftrapcc_ogt:
+	fbogt.w		ftrapcc_trap		# ordered greater than?
+ftrapcc_ogt_no:
+	rts					# do nothing
+
+#
+# unordered or less or equal:
+#	_______
+#	NANvZvN
+#
+ftrapcc_ule:
+	fbule.w		ftrapcc_trap		# unordered or less or equal?
+ftrapcc_ule_no:
+	rts					# do nothing
+
+#
+# ordered greater than or equal:
+#	   _____
+#	Zv(NANvN)
+#
+ftrapcc_oge:
+	fboge.w		ftrapcc_trap		# ordered greater than or equal?
+ftrapcc_oge_no:
+	rts					# do nothing
+
+#
+# unordered or less than:
+#	       _
+#	NANv(N^Z)
+#
+ftrapcc_ult:
+	fbult.w		ftrapcc_trap		# unordered or less than?
+ftrapcc_ult_no:
+	rts					# do nothing
+
+#
+# ordered less than:
+#	   _____
+#	N^(NANvZ)
+#
+ftrapcc_olt:
+	fbolt.w		ftrapcc_trap		# ordered less than?
+ftrapcc_olt_no:
+	rts					# do nothing
+
+#
+# unordered or greater or equal:
+#
+#	NANvZvN
+#
+ftrapcc_uge:
+	fbuge.w		ftrapcc_trap		# unordered or greater or equal?
+ftrapcc_uge_no:
+	rts					# do nothing
+
+#
+# ordered less than or equal:
+#	     ___
+#	Zv(N^NAN)
+#
+ftrapcc_ole:
+	fbole.w		ftrapcc_trap		# ordered less than or equal?
+ftrapcc_ole_no:
+	rts					# do nothing
+
+#
+# unordered or greater than:
+#	     ___
+#	NANv(NvZ)
+#
+ftrapcc_ugt:
+	fbugt.w		ftrapcc_trap		# unordered or greater than?
+ftrapcc_ugt_no:
+	rts					# do nothing
+
+#
+# ordered greater or less than:
+#	_____
+#	NANvZ
+#
+ftrapcc_ogl:
+	fbogl.w		ftrapcc_trap		# ordered greater or less than?
+ftrapcc_ogl_no:
+	rts					# do nothing
+
+#
+# unordered or equal:
+#
+#	NANvZ
+#
+ftrapcc_ueq:
+	fbueq.w		ftrapcc_trap		# unordered or equal?
+ftrapcc_ueq_no:
+	rts					# do nothing
+
+#
+# ordered:
+#	___
+#	NAN
+#
+ftrapcc_or:
+	fbor.w		ftrapcc_trap		# ordered?
+ftrapcc_or_no:
+	rts					# do nothing
+
+#
+# unordered:
+#
+#	NAN
+#
+ftrapcc_un:
+	fbun.w		ftrapcc_trap		# unordered?
+ftrapcc_un_no:
+	rts					# do nothing
+
+#######################################################################
+
+# the bsun exception bit was not set.
+# we will need to jump to the ftrapcc vector. the stack frame
+# is the same size as that of the fp unimp instruction. the
+# only difference is that the <ea> field should hold the PC
+# of the ftrapcc instruction and the vector offset field
+# should denote the ftrapcc trap.
+ftrapcc_trap:
+	mov.b		&ftrapcc_flg,SPCOND_FLG(%a6)
+	rts
+
+# the emulation routine set bsun and BSUN was enabled. have to
+# fix stack and jump to the bsun handler.
+# let the caller of this routine shift the stack frame up to
+# eliminate the effective address field.
+ftrapcc_bsun:
+	mov.b		&fbsun_flg,SPCOND_FLG(%a6)
+	rts
+
+#########################################################################
+# fscc(): routine to emulate the fscc instruction			#
+#									#
+# XDEF **************************************************************** #
+#	_fscc()								#
+#									#
+# XREF **************************************************************** #
+#	store_dreg_b() - store result to data register file		#
+#	dec_areg() - decrement an areg for -(an) mode			#
+#	inc_areg() - increment an areg for (an)+ mode			#
+#	_dmem_write_byte() - store result to memory			#
+#									#
+# INPUT ***************************************************************	#
+#	none								#
+#									#
+# OUTPUT ************************************************************** #
+#	none								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This routine checks which conditional predicate is specified by	#
+# the stacked fscc instruction opcode and then branches to a routine	#
+# for that predicate. The corresponding fbcc instruction is then used	#
+# to see whether the condition (specified by the stacked FPSR) is true	#
+# or false.								#
+#	If a BSUN exception should be indicated, the BSUN and AIOP	#
+# bits are set in the stacked FPSR. If the BSUN exception is enabled,	#
+# the fbsun_flg is set in the SPCOND_FLG location on the stack. If an	#
+# enabled BSUN should not be flagged and the predicate is true, then	#
+# the result is stored to the data register file or memory		#
+#									#
+#########################################################################
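+# A rough sketch of the outcome (illustrative only):
+#
+#	byte = cond(FPSR) ? 0xff : 0x00;
+#	if (<ea> is Dn)	store_dreg_b(byte);
+#	else		write byte to <ea>, updating An for (An)+ / -(An)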
+
+	global		_fscc
+_fscc:
+	mov.w		EXC_CMDREG(%a6),%d0	# fetch predicate
+
+	clr.l		%d1			# clear scratch reg
+	mov.b		FPSR_CC(%a6),%d1	# fetch fp ccodes
+	ror.l		&0x8,%d1		# rotate to top byte
+	fmov.l		%d1,%fpsr		# insert into FPSR
+
+	mov.w		(tbl_fscc.b,%pc,%d0.w*2),%d1 # load table
+	jmp		(tbl_fscc.b,%pc,%d1.w)	# jump to fscc routine
+
+tbl_fscc:
+	short		fscc_f		-	tbl_fscc	# 00
+	short		fscc_eq		-	tbl_fscc	# 01
+	short		fscc_ogt	-	tbl_fscc	# 02
+	short		fscc_oge	-	tbl_fscc	# 03
+	short		fscc_olt	-	tbl_fscc	# 04
+	short		fscc_ole	-	tbl_fscc	# 05
+	short		fscc_ogl	-	tbl_fscc	# 06
+	short		fscc_or		-	tbl_fscc	# 07
+	short		fscc_un		-	tbl_fscc	# 08
+	short		fscc_ueq	-	tbl_fscc	# 09
+	short		fscc_ugt	-	tbl_fscc	# 10
+	short		fscc_uge	-	tbl_fscc	# 11
+	short		fscc_ult	-	tbl_fscc	# 12
+	short		fscc_ule	-	tbl_fscc	# 13
+	short		fscc_neq	-	tbl_fscc	# 14
+	short		fscc_t		-	tbl_fscc	# 15
+	short		fscc_sf		-	tbl_fscc	# 16
+	short		fscc_seq	-	tbl_fscc	# 17
+	short		fscc_gt		-	tbl_fscc	# 18
+	short		fscc_ge		-	tbl_fscc	# 19
+	short		fscc_lt		-	tbl_fscc	# 20
+	short		fscc_le		-	tbl_fscc	# 21
+	short		fscc_gl		-	tbl_fscc	# 22
+	short		fscc_gle	-	tbl_fscc	# 23
+	short		fscc_ngle	-	tbl_fscc	# 24
+	short		fscc_ngl	-	tbl_fscc	# 25
+	short		fscc_nle	-	tbl_fscc	# 26
+	short		fscc_nlt	-	tbl_fscc	# 27
+	short		fscc_nge	-	tbl_fscc	# 28
+	short		fscc_ngt	-	tbl_fscc	# 29
+	short		fscc_sneq	-	tbl_fscc	# 30
+	short		fscc_st		-	tbl_fscc	# 31
+
+#########################################################################
+#									#
+# IEEE Nonaware tests							#
+#									#
+# For the IEEE nonaware tests, we set the result based on the		#
+# floating point condition codes. In addition, we check to see		#
+# if the NAN bit is set, in which case BSUN and AIOP will be set.	#
+#									#
+# The cases EQ and NE are shared by the Aware and Nonaware groups	#
+# and are incapable of setting the BSUN exception bit.			#
+#									#
+# Typically, only one of the two possible branch directions could	#
+# have the NAN bit set.							#
+#									#
+#########################################################################
+
+#
+# equal:
+#
+#	Z
+#
+fscc_eq:
+	fbeq.w		fscc_eq_yes		# equal?
+fscc_eq_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_eq_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# not equal:
+#	_
+#	Z
+#
+fscc_neq:
+	fbneq.w		fscc_neq_yes		# not equal?
+fscc_neq_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_neq_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# greater than:
+#	_______
+#	NANvZvN
+#
+fscc_gt:
+	fbgt.w		fscc_gt_yes		# greater than?
+fscc_gt_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_gt_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# not greater than:
+#
+#	NANvZvN
+#
+fscc_ngt:
+	fbngt.w		fscc_ngt_yes		# not greater than?
+fscc_ngt_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ngt_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# greater than or equal:
+#	   _____
+#	Zv(NANvN)
+#
+fscc_ge:
+	fbge.w		fscc_ge_yes		# greater than or equal?
+fscc_ge_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_ge_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# not (greater than or equal):
+#	       _
+#	NANv(N^Z)
+#
+fscc_nge:
+	fbnge.w		fscc_nge_yes		# not (greater than or equal)?
+fscc_nge_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_nge_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# less than:
+#	   _____
+#	N^(NANvZ)
+#
+fscc_lt:
+	fblt.w		fscc_lt_yes		# less than?
+fscc_lt_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_lt_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# not less than:
+#	       _
+#	NANv(ZvN)
+#
+fscc_nlt:
+	fbnlt.w		fscc_nlt_yes		# not less than?
+fscc_nlt_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_nlt_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# less than or equal:
+#	     ___
+#	Zv(N^NAN)
+#
+fscc_le:
+	fble.w		fscc_le_yes		# less than or equal?
+fscc_le_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_le_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# not (less than or equal):
+#	     ___
+#	NANv(NvZ)
+#
+fscc_nle:
+	fbnle.w		fscc_nle_yes		# not (less than or equal)?
+fscc_nle_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_nle_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# greater or less than:
+#	_____
+#	NANvZ
+#
+fscc_gl:
+	fbgl.w		fscc_gl_yes		# greater or less than?
+fscc_gl_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_gl_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# not (greater or less than):
+#
+#	NANvZ
+#
+fscc_ngl:
+	fbngl.w		fscc_ngl_yes		# not (greater or less than)?
+fscc_ngl_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ngl_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# greater, less, or equal:
+#	___
+#	NAN
+#
+fscc_gle:
+	fbgle.w		fscc_gle_yes		# greater, less, or equal?
+fscc_gle_no:
+	clr.b		%d0			# set false
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_gle_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# not (greater, less, or equal):
+#
+#	NAN
+#
+fscc_ngle:
+	fbngle.w		fscc_ngle_yes	# not (greater, less, or equal)?
+fscc_ngle_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ngle_yes:
+	st		%d0			# set true
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#########################################################################
+#									#
+# Miscellaneous tests							#
+#									#
+# For the miscellaneous tests, all but fscc_f and fscc_t can set the	#
+# BSUN exception.							#
+#									#
+#########################################################################
+
+#
+# false:
+#
+#	False
+#
+fscc_f:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+
+#
+# true:
+#
+#	True
+#
+fscc_t:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# signalling false:
+#
+#	False
+#
+fscc_sf:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# signalling true:
+#
+#	True
+#
+fscc_st:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# signalling equal:
+#
+#	Z
+#
+fscc_seq:
+	fbseq.w		fscc_seq_yes		# signalling equal?
+fscc_seq_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_seq_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# signalling not equal:
+#	_
+#	Z
+#
+fscc_sneq:
+	fbsneq.w	fscc_sneq_yes		# signalling not equal?
+fscc_sneq_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_sneq_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#########################################################################
+#									#
+# IEEE Aware tests							#
+#									#
+# For the IEEE aware tests, we only have to set the result based on the	#
+# floating point condition codes. The BSUN exception will not be	#
+# set for any of these tests.						#
+#									#
+#########################################################################
+
+#
+# ordered greater than:
+#	_______
+#	NANvZvN
+#
+fscc_ogt:
+	fbogt.w		fscc_ogt_yes		# ordered greater than?
+fscc_ogt_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ogt_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# unordered or less or equal:
+#	_______
+#	NANvZvN
+#
+fscc_ule:
+	fbule.w		fscc_ule_yes		# unordered or less or equal?
+fscc_ule_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ule_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# ordered greater than or equal:
+#	   _____
+#	Zv(NANvN)
+#
+fscc_oge:
+	fboge.w		fscc_oge_yes		# ordered greater than or equal?
+fscc_oge_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_oge_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# unordered or less than:
+#	       _
+#	NANv(N^Z)
+#
+fscc_ult:
+	fbult.w		fscc_ult_yes		# unordered or less than?
+fscc_ult_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ult_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# ordered less than:
+#	   _____
+#	N^(NANvZ)
+#
+fscc_olt:
+	fbolt.w		fscc_olt_yes		# ordered less than?
+fscc_olt_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_olt_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# unordered or greater or equal:
+#
+#	NANvZvN
+#
+fscc_uge:
+	fbuge.w		fscc_uge_yes		# unordered or greater or equal?
+fscc_uge_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_uge_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# ordered less than or equal:
+#	     ___
+#	Zv(N^NAN)
+#
+fscc_ole:
+	fbole.w		fscc_ole_yes		# ordered less than or equal?
+fscc_ole_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ole_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# unordered or greater than:
+#	     ___
+#	NANv(NvZ)
+#
+fscc_ugt:
+	fbugt.w		fscc_ugt_yes		# unordered or greater than?
+fscc_ugt_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ugt_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# ordered greater or less than:
+#	_____
+#	NANvZ
+#
+fscc_ogl:
+	fbogl.w		fscc_ogl_yes		# ordered greater or less than?
+fscc_ogl_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ogl_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# unordered or equal:
+#
+#	NANvZ
+#
+fscc_ueq:
+	fbueq.w		fscc_ueq_yes		# unordered or equal?
+fscc_ueq_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ueq_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# ordered:
+#	___
+#	NAN
+#
+fscc_or:
+	fbor.w		fscc_or_yes		# ordered?
+fscc_or_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_or_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# unordered:
+#
+#	NAN
+#
+fscc_un:
+	fbun.w		fscc_un_yes		# unordered?
+fscc_un_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_un_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#######################################################################
+
+#
+# the bsun exception bit was set. now, check to see if BSUN
+# is enabled. if so, don't store the result; instead, correct the stack
+# frame for a bsun exception.
+#
+fscc_chk_bsun:
+	btst		&bsun_bit,FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		fscc_bsun
+
+#
+# the bsun exception bit was not set.
+# the result has been selected.
+# now, check to see if the result is to be stored in the data register
+# file or in memory.
+#
+fscc_done:
+	mov.l		%d0,%a0			# save result for a moment
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# fetch lo opword
+	mov.l		%d1,%d0			# make a copy
+	andi.b		&0x38,%d1		# extract src mode
+
+	bne.b		fscc_mem_op		# it's a memory operation
+
+	mov.l		%d0,%d1
+	andi.w		&0x7,%d1		# pass index in d1
+	mov.l		%a0,%d0			# pass result in d0
+	bsr.l		store_dreg_b		# save result in regfile
+	rts
+
+#
+# the stacked <ea> is correct with the exception of:
+#	-> Dn : <ea> is garbage
+#
+# if the addressing mode is post-increment or pre-decrement,
+# then the address registers have not been updated.
+#
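+# Sketch of the dispatch on the stacked mode bits (illustrative only):
+#	mode 0x18 -> (An)+ : write the byte, then inc_areg(An,1) on success
+#	mode 0x20 -> -(An) : write the byte, then dec_areg(An,1) on success
+#	otherwise          : write the byte to the stacked <ea>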
+fscc_mem_op:
+	cmpi.b		%d1,&0x18		# is <ea> (An)+ ?
+	beq.b		fscc_mem_inc		# yes
+	cmpi.b		%d1,&0x20		# is <ea> -(An) ?
+	beq.b		fscc_mem_dec		# yes
+
+	mov.l		%a0,%d0			# pass result in d0
+	mov.l		EXC_EA(%a6),%a0		# fetch <ea>
+	bsr.l		_dmem_write_byte	# write result byte
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fscc_err		# yes
+
+	rts
+
+# addressing mode is post-increment. write the result byte. if the write
+# fails then don't update the address register. if write passes then
+# call inc_areg() to update the address register.
+fscc_mem_inc:
+	mov.l		%a0,%d0			# pass result in d0
+	mov.l		EXC_EA(%a6),%a0		# fetch <ea>
+	bsr.l		_dmem_write_byte	# write result byte
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fscc_err		# yes
+
+	mov.b		0x1+EXC_OPWORD(%a6),%d1	# fetch opword
+	andi.w		&0x7,%d1		# pass index in d1
+	movq.l		&0x1,%d0		# pass amt to inc by
+	bsr.l		inc_areg		# increment address register
+
+	rts
+
+# addressing mode is pre-decrement. write the result byte. if the write
+# fails then don't update the address register. if the write passes then
+# call dec_areg() to update the address register.
+fscc_mem_dec:
+	mov.l		%a0,%d0			# pass result in d0
+	mov.l		EXC_EA(%a6),%a0		# fetch <ea>
+	bsr.l		_dmem_write_byte	# write result byte
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fscc_err		# yes
+
+	mov.b		0x1+EXC_OPWORD(%a6),%d1	# fetch opword
+	andi.w		&0x7,%d1		# pass index in d1
+	movq.l		&0x1,%d0		# pass amt to dec by
+	bsr.l		dec_areg		# decrement address register
+
+	rts
+
+# the emulation routine set bsun and BSUN was enabled. have to
+# fix stack and jump to the bsun handler.
+# let the caller of this routine shift the stack frame up to
+# eliminate the effective address field.
+fscc_bsun:
+	mov.b		&fbsun_flg,SPCOND_FLG(%a6)
+	rts
+
+# the byte write to memory has failed. pass the failing effective address
+# and a FSLW to funimp_dacc().
+fscc_err:
+	mov.w		&0x00a1,EXC_VOFF(%a6)
+	bra.l		facc_finish
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fmovm_dynamic(): emulate "fmovm" dynamic instruction		#
+#									#
+# XREF ****************************************************************	#
+#	fetch_dreg() - fetch data register				#
+#	{i,d,}mem_read() - fetch data from memory			#
+#	_mem_write() - write data to memory				#
+#	iea_iacc() - instruction memory access error occurred		#
+#	iea_dacc() - data memory access error occurred			#
+#	restore() - restore An index regs if access error occurred	#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If instr is "fmovm Dn,-(A7)" from supervisor mode,		#
+#		d0 = size of dump					#
+#		d1 = Dn							#
+#	Else if instruction access error,				#
+#		d0 = FSLW						#
+#	Else if data access error,					#
+#		d0 = FSLW						#
+#		a0 = address of fault					#
+#	Else								#
+#		none.							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The effective address must be calculated since this is entered	#
+# from an "Unimplemented Effective Address" exception handler. So, we	#
+# have our own fcalc_ea() routine here. If an access error is flagged	#
+# by a _{i,d,}mem_read() call, we must exit through the special		#
+# handler.								#
+#	The data register is determined and its value loaded to get the	#
+# string of FP registers affected. This value is used as an index into	#
+# a lookup table such that we can determine the number of bytes		#
+# involved.								#
+#	If the instruction is "fmovm.x <ea>,Dn", a _mem_read() is used	#
+# to read in all FP values. Again, _mem_read() may fail and require a	#
+# special exit.								#
+#	If the instruction is "fmovm.x DN,<ea>", a _mem_write() is used	#
+# to write all FP values. _mem_write() may also fail.			#
+#	If the instruction is "fmovm.x DN,-(a7)" from supervisor mode,	#
+# then we return the size of the dump and the string to the caller	#
+# so that the move can occur outside of this routine. This special	#
+# case is required so that moves to the system stack are handled	#
+# correctly.								#
+#									#
+# DYNAMIC:								#
+#	fmovm.x	dn, <ea>						#
+#	fmovm.x	<ea>, dn						#
+#									#
+#	      <WORD 1>		      <WORD2>				#
+#	1111 0010 00 |<ea>|	11@& 1000 0$$$ 0000			#
+#									#
+#	& = (0): predecrement addressing mode				#
+#	    (1): postincrement or control addressing mode		#
+#	@ = (0): move listed regs from memory to the FPU		#
+#	    (1): move listed regs from the FPU to memory		#
+#	$$$    : index of data register holding reg select mask		#
+#									#
+# NOTES:								#
+#	If the data register holds a zero, then the			#
+#	instruction is a nop.						#
+#									#
+#########################################################################
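+
+# as a rough C sketch of the WORD2 decoding described above (field
+# positions taken from the bit diagram; purely illustrative):
+#
+#	unsigned dir  = (w2 >> 13) & 1;	/* @ : 1 = FPU -> memory        */
+#	unsigned mode = (w2 >> 12) & 1;	/* & : 1 = postinc/control mode */
+#	unsigned dreg = (w2 >>  4) & 7;	/* $$$ : Dn holding reg mask    */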
+
+	global		fmovm_dynamic
+fmovm_dynamic:
+
+# extract the data register in which the bit string resides...
+	mov.b		1+EXC_EXTWORD(%a6),%d1	# fetch extword
+	andi.w		&0x70,%d1		# extract reg bits
+	lsr.b		&0x4,%d1		# shift into lo bits
+
+# fetch the bit string into d0...
+	bsr.l		fetch_dreg		# fetch reg string
+
+	andi.l		&0x000000ff,%d0		# keep only lo byte
+
+	mov.l		%d0,-(%sp)		# save strg
+	mov.b		(tbl_fmovm_size.w,%pc,%d0),%d0
+	mov.l		%d0,-(%sp)		# save size
+	bsr.l		fmovm_calc_ea		# calculate <ea>
+	mov.l		(%sp)+,%d0		# restore size
+	mov.l		(%sp)+,%d1		# restore strg
+
+# if the bit string is a zero, then the operation is a no-op
+# but, make sure that we've calculated ea and advanced the opword pointer
+	beq.w		fmovm_data_done
+
+# separate move ins from move outs...
+	btst		&0x5,EXC_EXTWORD(%a6)	# is it a move in or out?
+	beq.w		fmovm_data_in		# no; it's a move in
+
+#############
+# MOVE OUT: #
+#############
+fmovm_data_out:
+	btst		&0x4,EXC_EXTWORD(%a6)	# control or predecrement?
+	bne.w		fmovm_out_ctrl		# control
+
+############################
+fmovm_out_predec:
+# for predecrement mode, the bit string is reversed relative to both
+# control operations and postincrement mode (bit7 = FP7 ... bit0 = FP0).
+# here, we convert it to match the others...
+	mov.b		(tbl_fmovm_convert.w,%pc,%d1.w*1),%d1
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode?
+	beq.b		fmovm_out_ctrl		# user
+
+fmovm_out_predec_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
+	bne.b		fmovm_out_ctrl
+
+# the operation was unfortunately an "fmovm.x dn,-(sp)"
+# called from supervisor mode.
+# we're also passing "size" and "strg" back to the calling routine.
+	rts
+
+############################
+fmovm_out_ctrl:
+	mov.l		%a0,%a1			# move <ea> to a1
+
+	sub.l		%d0,%sp			# subtract size of dump
+	lea		(%sp),%a0
+
+	tst.b		%d1			# should FP0 be moved?
+	bpl.b		fmovm_out_ctrl_fp1	# no
+
+	mov.l		0x0+EXC_FP0(%a6),(%a0)+	# yes
+	mov.l		0x4+EXC_FP0(%a6),(%a0)+
+	mov.l		0x8+EXC_FP0(%a6),(%a0)+
+
+fmovm_out_ctrl_fp1:
+	lsl.b		&0x1,%d1		# should FP1 be moved?
+	bpl.b		fmovm_out_ctrl_fp2	# no
+
+	mov.l		0x0+EXC_FP1(%a6),(%a0)+	# yes
+	mov.l		0x4+EXC_FP1(%a6),(%a0)+
+	mov.l		0x8+EXC_FP1(%a6),(%a0)+
+
+fmovm_out_ctrl_fp2:
+	lsl.b		&0x1,%d1		# should FP2 be moved?
+	bpl.b		fmovm_out_ctrl_fp3	# no
+
+	fmovm.x		&0x20,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp3:
+	lsl.b		&0x1,%d1		# should FP3 be moved?
+	bpl.b		fmovm_out_ctrl_fp4	# no
+
+	fmovm.x		&0x10,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp4:
+	lsl.b		&0x1,%d1		# should FP4 be moved?
+	bpl.b		fmovm_out_ctrl_fp5	# no
+
+	fmovm.x		&0x08,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp5:
+	lsl.b		&0x1,%d1		# should FP5 be moved?
+	bpl.b		fmovm_out_ctrl_fp6	# no
+
+	fmovm.x		&0x04,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp6:
+	lsl.b		&0x1,%d1		# should FP6 be moved?
+	bpl.b		fmovm_out_ctrl_fp7	# no
+
+	fmovm.x		&0x02,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp7:
+	lsl.b		&0x1,%d1		# should FP7 be moved?
+	bpl.b		fmovm_out_ctrl_done	# no
+
+	fmovm.x		&0x01,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_done:
+	mov.l		%a1,L_SCR1(%a6)
+
+	lea		(%sp),%a0		# pass: supervisor src
+	mov.l		%d0,-(%sp)		# save size
+	bsr.l		_dmem_write		# copy data to user mem
+
+	mov.l		(%sp)+,%d0
+	add.l		%d0,%sp			# clear fpreg data from stack
+
+	tst.l		%d1			# did dstore err?
+	bne.w		fmovm_out_err		# yes
+
+	rts
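+
+# the unrolled copy above, reduced to a C sketch (in postincrement/control
+# order mask bit 7 selects fp0 ... bit 0 selects fp7, 12 bytes apiece;
+# illustrative only, not part of the emulation code):
+#
+#	#include <string.h>
+#	unsigned char *dump_fpregs(unsigned char mask,
+#				   unsigned char fpreg[8][12],
+#				   unsigned char *dst)
+#	{
+#		int i;
+#		for (i = 0; i < 8; i++)
+#			if (mask & (0x80 >> i)) {
+#				memcpy(dst, fpreg[i], 12);
+#				dst += 12;
+#			}
+#		return dst;
+#	}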
+
+############
+# MOVE IN: #
+############
+fmovm_data_in:
+	mov.l		%a0,L_SCR1(%a6)
+
+	sub.l		%d0,%sp			# make room for fpregs
+	lea		(%sp),%a1
+
+	mov.l		%d1,-(%sp)		# save bit string for later
+	mov.l		%d0,-(%sp)		# save # of bytes
+
+	bsr.l		_dmem_read		# copy data from user mem
+
+	mov.l		(%sp)+,%d0		# retrieve # of bytes
+
+	tst.l		%d1			# did dfetch fail?
+	bne.w		fmovm_in_err		# yes
+
+	mov.l		(%sp)+,%d1		# load bit string
+
+	lea		(%sp),%a0		# addr of stack
+
+	tst.b		%d1			# should FP0 be moved?
+	bpl.b		fmovm_data_in_fp1	# no
+
+	mov.l		(%a0)+,0x0+EXC_FP0(%a6)	# yes
+	mov.l		(%a0)+,0x4+EXC_FP0(%a6)
+	mov.l		(%a0)+,0x8+EXC_FP0(%a6)
+
+fmovm_data_in_fp1:
+	lsl.b		&0x1,%d1		# should FP1 be moved?
+	bpl.b		fmovm_data_in_fp2	# no
+
+	mov.l		(%a0)+,0x0+EXC_FP1(%a6)	# yes
+	mov.l		(%a0)+,0x4+EXC_FP1(%a6)
+	mov.l		(%a0)+,0x8+EXC_FP1(%a6)
+
+fmovm_data_in_fp2:
+	lsl.b		&0x1,%d1		# should FP2 be moved?
+	bpl.b		fmovm_data_in_fp3	# no
+
+	fmovm.x		(%a0)+,&0x20		# yes
+
+fmovm_data_in_fp3:
+	lsl.b		&0x1,%d1		# should FP3 be moved?
+	bpl.b		fmovm_data_in_fp4	# no
+
+	fmovm.x		(%a0)+,&0x10		# yes
+
+fmovm_data_in_fp4:
+	lsl.b		&0x1,%d1		# should FP4 be moved?
+	bpl.b		fmovm_data_in_fp5	# no
+
+	fmovm.x		(%a0)+,&0x08		# yes
+
+fmovm_data_in_fp5:
+	lsl.b		&0x1,%d1		# should FP5 be moved?
+	bpl.b		fmovm_data_in_fp6	# no
+
+	fmovm.x		(%a0)+,&0x04		# yes
+
+fmovm_data_in_fp6:
+	lsl.b		&0x1,%d1		# should FP6 be moved?
+	bpl.b		fmovm_data_in_fp7	# no
+
+	fmovm.x		(%a0)+,&0x02		# yes
+
+fmovm_data_in_fp7:
+	lsl.b		&0x1,%d1		# should FP7 be moved?
+	bpl.b		fmovm_data_in_done	# no
+
+	fmovm.x		(%a0)+,&0x01		# yes
+
+fmovm_data_in_done:
+	add.l		%d0,%sp			# remove fpregs from stack
+	rts
+
+#####################################
+
+fmovm_data_done:
+	rts
+
+##############################################################################
+
+#
+# table indexed by the operation's bit string that gives the number
+# of bytes that will be moved.
+#
+# number of bytes = (# of 1's in bit string) * 12(bytes/fpreg)
+#
+tbl_fmovm_size:
+	byte	0x00,0x0c,0x0c,0x18,0x0c,0x18,0x18,0x24
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x3c,0x48,0x48,0x54,0x48,0x54,0x54,0x60
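+
+# each entry above can be derived as follows (a C sketch of the rule
+# stated in the comment; not used by the code):
+#
+#	unsigned char fmovm_size(unsigned char mask)
+#	{
+#		unsigned n = 0;
+#		int i;
+#		for (i = 0; i < 8; i++)
+#			n += (mask >> i) & 1;	/* count selected registers */
+#		return n * 12;			/* 12 bytes per fp register */
+#	}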
+
+#
+# table to convert a pre-decrement bit string into a post-increment
+# or control bit string.
+# ex:	0x00	==>	0x00
+#	0x01	==>	0x80
+#	0x02	==>	0x40
+#		.
+#		.
+#	0xfd	==>	0xbf
+#	0xfe	==>	0x7f
+#	0xff	==>	0xff
+#
+tbl_fmovm_convert:
+	byte	0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0
+	byte	0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0
+	byte	0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8
+	byte	0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8
+	byte	0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4
+	byte	0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4
+	byte	0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec
+	byte	0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc
+	byte	0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2
+	byte	0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2
+	byte	0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea
+	byte	0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa
+	byte	0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6
+	byte	0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6
+	byte	0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee
+	byte	0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe
+	byte	0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1
+	byte	0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1
+	byte	0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9
+	byte	0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9
+	byte	0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5
+	byte	0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5
+	byte	0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed
+	byte	0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd
+	byte	0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3
+	byte	0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3
+	byte	0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb
+	byte	0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb
+	byte	0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7
+	byte	0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7
+	byte	0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef
+	byte	0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
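+
+# each entry above is simply the bit-reversal of its index, e.g. as the
+# following C sketch would compute it (illustrative only):
+#
+#	unsigned char fmovm_convert(unsigned char m)
+#	{
+#		unsigned char r = 0;
+#		int i;
+#		for (i = 0; i < 8; i++)
+#			if (m & (1 << i))
+#				r |= 0x80 >> i;
+#		return r;
+#	}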
+
+	global		fmovm_calc_ea
+###############################################
+# fmovm_calc_ea: calculate effective address  #
+###############################################
+fmovm_calc_ea:
+	mov.l		%d0,%a0			# move # bytes to a0
+
+# currently, MODE and REG are taken from the EXC_OPWORD. this could be
+# easily changed if they were inputs passed in registers.
+	mov.w		EXC_OPWORD(%a6),%d0	# fetch opcode word
+	mov.w		%d0,%d1			# make a copy
+
+	andi.w		&0x3f,%d0		# extract mode and reg fields
+	andi.l		&0x7,%d1		# extract reg  field
+
+# jump to the corresponding function for each {MODE,REG} pair.
+	mov.w		(tbl_fea_mode.b,%pc,%d0.w*2),%d0 # fetch jmp distance
+	jmp		(tbl_fea_mode.b,%pc,%d0.w*1) # jmp to correct ea mode
+
+	swbeg		&64
+tbl_fea_mode:
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+
+	short		faddr_ind_a0	-	tbl_fea_mode
+	short		faddr_ind_a1	-	tbl_fea_mode
+	short		faddr_ind_a2	-	tbl_fea_mode
+	short		faddr_ind_a3	-	tbl_fea_mode
+	short		faddr_ind_a4	-	tbl_fea_mode
+	short		faddr_ind_a5	-	tbl_fea_mode
+	short		faddr_ind_a6	-	tbl_fea_mode
+	short		faddr_ind_a7	-	tbl_fea_mode
+
+	short		faddr_ind_p_a0	-	tbl_fea_mode
+	short		faddr_ind_p_a1	-	tbl_fea_mode
+	short		faddr_ind_p_a2	-	tbl_fea_mode
+	short		faddr_ind_p_a3	-	tbl_fea_mode
+	short		faddr_ind_p_a4	-	tbl_fea_mode
+	short		faddr_ind_p_a5	-	tbl_fea_mode
+	short		faddr_ind_p_a6	-	tbl_fea_mode
+	short		faddr_ind_p_a7	-	tbl_fea_mode
+
+	short		faddr_ind_m_a0	-	tbl_fea_mode
+	short		faddr_ind_m_a1	-	tbl_fea_mode
+	short		faddr_ind_m_a2	-	tbl_fea_mode
+	short		faddr_ind_m_a3	-	tbl_fea_mode
+	short		faddr_ind_m_a4	-	tbl_fea_mode
+	short		faddr_ind_m_a5	-	tbl_fea_mode
+	short		faddr_ind_m_a6	-	tbl_fea_mode
+	short		faddr_ind_m_a7	-	tbl_fea_mode
+
+	short		faddr_ind_disp_a0	-	tbl_fea_mode
+	short		faddr_ind_disp_a1	-	tbl_fea_mode
+	short		faddr_ind_disp_a2	-	tbl_fea_mode
+	short		faddr_ind_disp_a3	-	tbl_fea_mode
+	short		faddr_ind_disp_a4	-	tbl_fea_mode
+	short		faddr_ind_disp_a5	-	tbl_fea_mode
+	short		faddr_ind_disp_a6	-	tbl_fea_mode
+	short		faddr_ind_disp_a7	-	tbl_fea_mode
+
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+
+	short		fabs_short	-	tbl_fea_mode
+	short		fabs_long	-	tbl_fea_mode
+	short		fpc_ind		-	tbl_fea_mode
+	short		fpc_ind_ext	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
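+
+# note that the "swbeg/short" tables in this module hold 16-bit
+# self-relative offsets (handler - table base), so the dispatch above is
+# just base-plus-offset arithmetic; as a C sketch (illustrative only):
+#
+#	unsigned long fea_target(unsigned long tbl_base, const short *tbl,
+#				 unsigned mode_reg)	/* opword & 0x3f */
+#	{
+#		return tbl_base + tbl[mode_reg];
+#	}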
+
+###################################
+# Address register indirect: (An) #
+###################################
+faddr_ind_a0:
+	mov.l		EXC_DREGS+0x8(%a6),%a0	# Get current a0
+	rts
+
+faddr_ind_a1:
+	mov.l		EXC_DREGS+0xc(%a6),%a0	# Get current a1
+	rts
+
+faddr_ind_a2:
+	mov.l		%a2,%a0			# Get current a2
+	rts
+
+faddr_ind_a3:
+	mov.l		%a3,%a0			# Get current a3
+	rts
+
+faddr_ind_a4:
+	mov.l		%a4,%a0			# Get current a4
+	rts
+
+faddr_ind_a5:
+	mov.l		%a5,%a0			# Get current a5
+	rts
+
+faddr_ind_a6:
+	mov.l		(%a6),%a0		# Get current a6
+	rts
+
+faddr_ind_a7:
+	mov.l		EXC_A7(%a6),%a0		# Get current a7
+	rts
+
+#####################################################
+# Address register indirect w/ postincrement: (An)+ #
+#####################################################
+faddr_ind_p_a0:
+	mov.l		EXC_DREGS+0x8(%a6),%d0	# Get current a0
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,EXC_DREGS+0x8(%a6)	# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a1:
+	mov.l		EXC_DREGS+0xc(%a6),%d0	# Get current a1
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,EXC_DREGS+0xc(%a6)	# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a2:
+	mov.l		%a2,%d0			# Get current a2
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a2			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a3:
+	mov.l		%a3,%d0			# Get current a3
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a3			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a4:
+	mov.l		%a4,%d0			# Get current a4
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a4			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a5:
+	mov.l		%a5,%d0			# Get current a5
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a5			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a6:
+	mov.l		(%a6),%d0		# Get current a6
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,(%a6)		# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a7:
+	mov.b		&mia7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+	mov.l		EXC_A7(%a6),%d0		# Get current a7
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,EXC_A7(%a6)		# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+####################################################
+# Address register indirect w/ predecrement: -(An) #
+####################################################
+faddr_ind_m_a0:
+	mov.l		EXC_DREGS+0x8(%a6),%d0	# Get current a0
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_DREGS+0x8(%a6)	# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a1:
+	mov.l		EXC_DREGS+0xc(%a6),%d0	# Get current a1
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_DREGS+0xc(%a6)	# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a2:
+	mov.l		%a2,%d0			# Get current a2
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a2			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a3:
+	mov.l		%a3,%d0			# Get current a3
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a3			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a4:
+	mov.l		%a4,%d0			# Get current a4
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a4			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a5:
+	mov.l		%a5,%d0			# Get current a5
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a5			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a6:
+	mov.l		(%a6),%d0		# Get current a6
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,(%a6)		# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a7:
+	mov.b		&mda7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+	mov.l		EXC_A7(%a6),%d0		# Get current a7
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A7(%a6)		# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+########################################################
+# Address register indirect w/ displacement: (d16, An) #
+########################################################
+faddr_ind_disp_a0:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_DREGS+0x8(%a6),%a0	# a0 + d16
+	rts
+
+faddr_ind_disp_a1:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_DREGS+0xc(%a6),%a0	# a1 + d16
+	rts
+
+faddr_ind_disp_a2:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a2,%a0			# a2 + d16
+	rts
+
+faddr_ind_disp_a3:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a3,%a0			# a3 + d16
+	rts
+
+faddr_ind_disp_a4:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a4,%a0			# a4 + d16
+	rts
+
+faddr_ind_disp_a5:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a5,%a0			# a5 + d16
+	rts
+
+faddr_ind_disp_a6:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		(%a6),%a0		# a6 + d16
+	rts
+
+faddr_ind_disp_a7:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_A7(%a6),%a0		# a7 + d16
+	rts
+
+########################################################################
+# Address register indirect w/ index(8-bit displacement): (d8, An, Xn) #
+#    "       "         "    w/   "  (base displacement): (bd, An, Xn)  #
+# Memory indirect postindexed: ([bd, An], Xn, od)		       #
+# Memory indirect preindexed: ([bd, An, Xn], od)		       #
+########################################################################
+faddr_ind_ext:
+	addq.l		&0x8,%d1
+	bsr.l		fetch_dreg		# fetch base areg
+	mov.l		%d0,-(%sp)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch extword in d0
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		(%sp)+,%a0
+
+	btst		&0x8,%d0
+	bne.w		fcalc_mem_ind
+
+	mov.l		%d0,L_SCR1(%a6)		# hold opword
+
+	mov.l		%d0,%d1
+	rol.w		&0x4,%d1
+	andi.w		&0xf,%d1		# extract index regno
+
+# count on fetch_dreg() not to alter a0...
+	bsr.l		fetch_dreg		# fetch index
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		L_SCR1(%a6),%d2		# fetch opword
+
+	btst		&0xb,%d2		# is it word or long?
+	bne.b		faii8_long
+	ext.l		%d0			# sign extend word index
+faii8_long:
+	mov.l		%d2,%d1
+	rol.w		&0x7,%d1
+	andi.l		&0x3,%d1		# extract scale value
+
+	lsl.l		%d1,%d0			# shift index by scale
+
+	extb.l		%d2			# sign extend displacement
+	add.l		%d2,%d0			# index + disp
+	add.l		%d0,%a0			# An + (index + disp)
+
+	mov.l		(%sp)+,%d2		# restore old d2
+	rts
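+
+# the brief-format extension word handled above, as a C sketch (regs[16]
+# standing for D0-D7/A0-A7 is an assumed layout, for illustration only):
+#
+#	long brief_ext_ea(long an, unsigned short ext, const long regs[16])
+#	{
+#		long idx = regs[(ext >> 12) & 0xf];  /* D/A + index reg no.  */
+#		if (!(ext & 0x0800))		     /* W/L clear: word index */
+#			idx = (short)idx;	     /* sign extend           */
+#		idx <<= (ext >> 9) & 3;		     /* scale *1,*2,*4,*8     */
+#		return an + (signed char)(ext & 0xff) + idx; /* An+d8+index  */
+#	}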
+
+###########################
+# Absolute short: (XXX).W #
+###########################
+fabs_short:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch short address
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# return <ea> in a0
+	rts
+
+##########################
+# Absolute long: (XXX).L #
+##########################
+fabs_long:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch long address
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,%a0			# return <ea> in a0
+	rts
+
+#######################################################
+# Program counter indirect w/ displacement: (d16, PC) #
+#######################################################
+fpc_ind:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch word displacement
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_EXTWPTR(%a6),%a0	# pc + d16
+
+# _imem_read_word() increased the extwptr by 2. need to adjust here.
+	subq.l		&0x2,%a0		# adjust <ea>
+	rts
+
+##########################################################
+# PC indirect w/ index(8-bit displacement): (d8, PC, An) #
+# "     "     w/   "  (base displacement): (bd, PC, An)  #
+# PC memory indirect postindexed: ([bd, PC], Xn, od)     #
+# PC memory indirect preindexed: ([bd, PC, Xn], od)      #
+##########################################################
+fpc_ind_ext:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch ext word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# put base in a0
+	subq.l		&0x2,%a0		# adjust base
+
+	btst		&0x8,%d0		# is disp only 8 bits?
+	bne.w		fcalc_mem_ind		# calc memory indirect
+
+	mov.l		%d0,L_SCR1(%a6)		# store opword
+
+	mov.l		%d0,%d1			# make extword copy
+	rol.w		&0x4,%d1		# rotate reg num into place
+	andi.w		&0xf,%d1		# extract register number
+
+# count on fetch_dreg() not to alter a0...
+	bsr.l		fetch_dreg		# fetch index
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		L_SCR1(%a6),%d2		# fetch opword
+
+	btst		&0xb,%d2		# is index word or long?
+	bne.b		fpii8_long		# long
+	ext.l		%d0			# sign extend word index
+fpii8_long:
+	mov.l		%d2,%d1
+	rol.w		&0x7,%d1		# rotate scale value into place
+	andi.l		&0x3,%d1		# extract scale value
+
+	lsl.l		%d1,%d0			# shift index by scale
+
+	extb.l		%d2			# sign extend displacement
+	add.l		%d2,%d0			# disp + index
+	add.l		%d0,%a0			# An + (index + disp)
+
+	mov.l		(%sp)+,%d2		# restore temp register
+	rts
+
+# d2 = index
+# d3 = base
+# d4 = od
+# d5 = extword
+fcalc_mem_ind:
+	btst		&0x6,%d0		# is the index suppressed?
+	beq.b		fcalc_index
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+
+	mov.l		%d0,%d5			# put extword in d5
+	mov.l		%a0,%d3			# put base in d3
+
+	clr.l		%d2			# yes, so index = 0
+	bra.b		fbase_supp_ck
+
+# index:
+fcalc_index:
+	mov.l		%d0,L_SCR1(%a6)		# save d0 (opword)
+	bfextu		%d0{&16:&4},%d1		# fetch dreg index
+	bsr.l		fetch_dreg
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+	mov.l		%d0,%d2			# put index in d2
+	mov.l		L_SCR1(%a6),%d5
+	mov.l		%a0,%d3
+
+	btst		&0xb,%d5		# is index word or long?
+	bne.b		fno_ext
+	ext.l		%d2
+
+fno_ext:
+	bfextu		%d5{&21:&2},%d0
+	lsl.l		%d0,%d2
+
+# base address (passed as parameter in d3):
+# we clear the value here if it should actually be suppressed.
+fbase_supp_ck:
+	btst		&0x7,%d5		# is the bd suppressed?
+	beq.b		fno_base_sup
+	clr.l		%d3
+
+# base displacement:
+fno_base_sup:
+	bfextu		%d5{&26:&2},%d0		# get bd size
+#	beq.l		fmovm_error		# if (size == 0) it's reserved
+
+	cmpi.b		%d0,&0x2
+	blt.b		fno_bd
+	beq.b		fget_word_bd
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	bra.b		fchk_ind
+
+fget_word_bd:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	ext.l		%d0			# sign extend bd
+
+fchk_ind:
+	add.l		%d0,%d3			# base += bd
+
+# outer displacement:
+fno_bd:
+	bfextu		%d5{&30:&2},%d0		# is od suppressed?
+	beq.w		faii_bd
+
+	cmpi.b		%d0,&0x2
+	blt.b		fnull_od
+	beq.b		fword_od
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	bra.b		fadd_them
+
+fword_od:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	ext.l		%d0			# sign extend od
+	bra.b		fadd_them
+
+fnull_od:
+	clr.l		%d0
+
+fadd_them:
+	mov.l		%d0,%d4
+
+	btst		&0x2,%d5		# pre or post indexing?
+	beq.b		fpre_indexed
+
+	mov.l		%d3,%a0
+	bsr.l		_dmem_read_long
+
+	tst.l		%d1			# did dfetch fail?
+	bne.w		fcea_err		# yes
+
+	add.l		%d2,%d0			# <ea> += index
+	add.l		%d4,%d0			# <ea> += od
+	bra.b		fdone_ea
+
+fpre_indexed:
+	add.l		%d2,%d3			# preindexing
+	mov.l		%d3,%a0
+	bsr.l		_dmem_read_long
+
+	tst.l		%d1			# did dfetch fail?
+	bne.w		fcea_err		# yes
+
+	add.l		%d4,%d0			# ea += od
+	bra.b		fdone_ea
+
+faii_bd:
+	add.l		%d2,%d3			# ea = (base + bd) + index
+	mov.l		%d3,%d0
+fdone_ea:
+	mov.l		%d0,%a0
+
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	rts
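+
+# the full-format path above, reduced to a C sketch (read_long stands in
+# for _dmem_read_long; bd/od are assumed already fetched and sign
+# extended, the index already scaled; illustrative only):
+#
+#	long mem_ind_ea(unsigned short ext, long base, long bd, long index,
+#			long od, long (*read_long)(long addr))
+#	{
+#		if (ext & 0x0080) base  = 0;	/* BS: base suppressed      */
+#		if (ext & 0x0040) index = 0;	/* IS: index suppressed     */
+#		if ((ext & 0x0003) == 0)	/* no memory indirection    */
+#			return base + bd + index;
+#		if (ext & 0x0004)		/* post-indexed             */
+#			return read_long(base + bd) + index + od;
+#		return read_long(base + bd + index) + od; /* pre-indexed    */
+#	}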
+
+#########################################################
+fcea_err:
+	mov.l		%d3,%a0
+
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	mov.w		&0x0101,%d0
+	bra.l		iea_dacc
+
+fcea_iacc:
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	bra.l		iea_iacc
+
+fmovm_out_err:
+	bsr.l		restore
+	mov.w		&0x00e1,%d0
+	bra.b		fmovm_err
+
+fmovm_in_err:
+	bsr.l		restore
+	mov.w		&0x0161,%d0
+
+fmovm_err:
+	mov.l		L_SCR1(%a6),%a0
+	bra.l		iea_dacc
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fmovm_ctrl(): emulate fmovm.l of control registers instr	#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read longword from memory			#
+#	iea_iacc() - _imem_read_long() failed; error recovery		#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If _imem_read_long() doesn't fail:				#
+#		USER_FPCR(a6)  = new FPCR value				#
+#		USER_FPSR(a6)  = new FPSR value				#
+#		USER_FPIAR(a6) = new FPIAR value			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Decode the instruction type by looking at the extension word	#
+# in order to see how many control registers to fetch from memory.	#
+# Fetch them using _imem_read_long(). If this fetch fails, exit through	#
+# the special access error exit handler iea_iacc().			#
+#									#
+# Instruction word decoding:						#
+#									#
+#	fmovem.l #<data>, {FPIAR&|FPCR&|FPSR}				#
+#									#
+#		WORD1			WORD2				#
+#	1111 0010 00 111100	100$ $$00 0000 0000			#
+#									#
+#	$$$ (100): FPCR							#
+#	    (010): FPSR							#
+#	    (001): FPIAR						#
+#	    (000): FPIAR						#
+#									#
+#########################################################################
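+
+# the register-select decoding described above, as a C sketch (next_long
+# stands in for _imem_read_long; the $$$ = 000 FPIAR case from the table
+# is omitted; illustrative only):
+#
+#	void fmovm_ctrl_sketch(unsigned char w2_hi, unsigned long *fpcr,
+#			       unsigned long *fpsr, unsigned long *fpiar,
+#			       unsigned long (*next_long)(void))
+#	{
+#		unsigned sel = (w2_hi >> 2) & 7; /* 4=FPCR, 2=FPSR, 1=FPIAR */
+#		if (sel & 4) *fpcr  = next_long(); /* fetched in this order */
+#		if (sel & 2) *fpsr  = next_long();
+#		if (sel & 1) *fpiar = next_long();
+#	}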
+
+	global		fmovm_ctrl
+fmovm_ctrl:
+	mov.b		EXC_EXTWORD(%a6),%d0	# fetch reg select bits
+	cmpi.b		%d0,&0x9c		# fpcr & fpsr & fpiar ?
+	beq.w		fctrl_in_7		# yes
+	cmpi.b		%d0,&0x98		# fpcr & fpsr ?
+	beq.w		fctrl_in_6		# yes
+	cmpi.b		%d0,&0x94		# fpcr & fpiar ?
+	beq.b		fctrl_in_5		# yes
+
+# fmovem.l #<data>, fpsr/fpiar
+fctrl_in_3:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPSR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to stack
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPIAR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to stack
+	rts
+
+# fmovem.l #<data>, fpcr/fpiar
+fctrl_in_5:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPCR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to stack
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPIAR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to stack
+	rts
+
+# fmovem.l #<data>, fpcr/fpsr
+fctrl_in_6:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPCR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to mem
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPSR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to mem
+	rts
+
+# fmovem.l #<data>, fpcr/fpsr/fpiar
+fctrl_in_7:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPCR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to mem
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPSR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to mem
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPIAR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to mem
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_dcalc_ea(): calc correct <ea> from <ea> stacked on exception	#
+#									#
+# XREF ****************************************************************	#
+#	inc_areg() - increment an address register			#
+#	dec_areg() - decrement an address register			#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = number of bytes to adjust <ea> by				#
+#									#
+# OUTPUT **************************************************************	#
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+# "Dummy" CALCulate Effective Address:					#
+#	The stacked <ea> for FP unimplemented instructions and opclass	#
+#	two packed instructions is correct with the exception of...	#
+#									#
+#	1) -(An)   : The register is not updated regardless of size.	#
+#		     Also, for extended precision and packed, the	#
+#		     stacked <ea> value is 8 bytes too big		#
+#	2) (An)+   : The register is not updated.			#
+#	3) #<data> : The upper longword of the immediate operand is	#
+#		     stacked. b,w,l, and s sizes are completely		#
+#		     stacked; d, x, and p are not.			#
+#									#
+#########################################################################
+
+	global		_dcalc_ea
+_dcalc_ea:
+	mov.l		%d0, %a0		# move # bytes to %a0
+
+	mov.b		1+EXC_OPWORD(%a6), %d0	# fetch opcode word
+	mov.l		%d0, %d1		# make a copy
+
+	andi.w		&0x38, %d0		# extract mode field
+	andi.l		&0x7, %d1		# extract reg  field
+
+	cmpi.b		%d0,&0x18		# is mode (An)+ ?
+	beq.b		dcea_pi			# yes
+
+	cmpi.b		%d0,&0x20		# is mode -(An) ?
+	beq.b		dcea_pd			# yes
+
+	or.w		%d1,%d0			# concat mode,reg
+	cmpi.b		%d0,&0x3c		# is mode #<data>?
+
+	beq.b		dcea_imm		# yes
+
+	mov.l		EXC_EA(%a6),%a0		# return <ea>
+	rts
+
+# need to set immediate data flag here since we'll need to do
+# an imem_read to fetch this later.
+dcea_imm:
+	mov.b		&immed_flg,SPCOND_FLG(%a6)
+	lea		([USER_FPIAR,%a6],0x4),%a0 # return <ea>
+	rts
+
+# here, the <ea> is stacked correctly. however, we must update the
+# address register...
+dcea_pi:
+	mov.l		%a0,%d0			# pass amt to inc by
+	bsr.l		inc_areg		# inc addr register
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	rts
+
+# the <ea> is stacked correctly for all but extended and packed, for
+# which the <ea>s are 8 bytes too large.
+# it would make no sense to have a pre-decrement to a7 in supervisor
+# mode so we don't even worry about this tricky case here : )
+dcea_pd:
+	mov.l		%a0,%d0			# pass amt to dec by
+	bsr.l		dec_areg		# dec addr register
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+
+	cmpi.b		%d0,&0xc		# is opsize ext or packed?
+	beq.b		dcea_pd2		# yes
+	rts
+dcea_pd2:
+	sub.l		&0x8,%a0		# correct <ea>
+	mov.l		%a0,EXC_EA(%a6)		# put correct <ea> on stack
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_calc_ea_fout(): calculate correct stacked <ea> for extended	#
+#			 and packed data opclass 3 operations.		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = return correct effective address				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	For opclass 3 extended and packed data operations, the <ea>	#
+# stacked for the exception is incorrect for -(an) and (an)+ addressing	#
+# modes. Also, while we're at it, the address register itself must get	#
+# updated.								#
+#	So, for -(an), we must subtract 8 off of the stacked <ea> value	#
+# and return that value as the correct <ea> and store that value in An.	#
+# For (an)+, the stacked <ea> is correct but we must adjust An by +12.	#
+#									#
+#########################################################################
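+
+# the two fixups described above, as a C sketch (mode is the 3-bit
+# addressing mode field; extended/packed operands are 12 bytes;
+# illustrative only):
+#
+#	long calc_ea_fout_sketch(unsigned mode, long stacked_ea, long *an)
+#	{
+#		if (mode == 4) {	/* -(An): stacked <ea> is 8 too big */
+#			*an = stacked_ea - 8;
+#			return *an;	/* fixed An is also the real <ea>   */
+#		}
+#		if (mode == 3)		/* (An)+: <ea> ok, An never bumped  */
+#			*an += 12;
+#		return stacked_ea;
+#	}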
+
+# This calc_ea is currently used to retrieve the correct <ea>
+# for fmove outs of type extended and packed.
+	global		_calc_ea_fout
+_calc_ea_fout:
+	mov.b		1+EXC_OPWORD(%a6),%d0	# fetch opcode word
+	mov.l		%d0,%d1			# make a copy
+
+	andi.w		&0x38,%d0		# extract mode field
+	andi.l		&0x7,%d1		# extract reg  field
+
+	cmpi.b		%d0,&0x18		# is mode (An)+ ?
+	beq.b		ceaf_pi			# yes
+
+	cmpi.b		%d0,&0x20		# is mode -(An) ?
+	beq.w		ceaf_pd			# yes
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	rts
+
+# (An)+ : extended and packed fmove out
+#	: stacked <ea> is correct
+#	: "An" not updated
+ceaf_pi:
+	mov.w		(tbl_ceaf_pi.b,%pc,%d1.w*2),%d1
+	mov.l		EXC_EA(%a6),%a0
+	jmp		(tbl_ceaf_pi.b,%pc,%d1.w*1)
+
+	swbeg		&0x8
+tbl_ceaf_pi:
+	short		ceaf_pi0 - tbl_ceaf_pi
+	short		ceaf_pi1 - tbl_ceaf_pi
+	short		ceaf_pi2 - tbl_ceaf_pi
+	short		ceaf_pi3 - tbl_ceaf_pi
+	short		ceaf_pi4 - tbl_ceaf_pi
+	short		ceaf_pi5 - tbl_ceaf_pi
+	short		ceaf_pi6 - tbl_ceaf_pi
+	short		ceaf_pi7 - tbl_ceaf_pi
+
+ceaf_pi0:
+	addi.l		&0xc,EXC_DREGS+0x8(%a6)
+	rts
+ceaf_pi1:
+	addi.l		&0xc,EXC_DREGS+0xc(%a6)
+	rts
+ceaf_pi2:
+	add.l		&0xc,%a2
+	rts
+ceaf_pi3:
+	add.l		&0xc,%a3
+	rts
+ceaf_pi4:
+	add.l		&0xc,%a4
+	rts
+ceaf_pi5:
+	add.l		&0xc,%a5
+	rts
+ceaf_pi6:
+	addi.l		&0xc,EXC_A6(%a6)
+	rts
+ceaf_pi7:
+	mov.b		&mia7_flg,SPCOND_FLG(%a6)
+	addi.l		&0xc,EXC_A7(%a6)
+	rts
+
+# -(An) : extended and packed fmove out
+#	: stacked <ea> = actual <ea> + 8
+#	: "An" not updated
+ceaf_pd:
+	mov.w		(tbl_ceaf_pd.b,%pc,%d1.w*2),%d1
+	mov.l		EXC_EA(%a6),%a0
+	sub.l		&0x8,%a0
+	sub.l		&0x8,EXC_EA(%a6)
+	jmp		(tbl_ceaf_pd.b,%pc,%d1.w*1)
+
+	swbeg		&0x8
+tbl_ceaf_pd:
+	short		ceaf_pd0 - tbl_ceaf_pd
+	short		ceaf_pd1 - tbl_ceaf_pd
+	short		ceaf_pd2 - tbl_ceaf_pd
+	short		ceaf_pd3 - tbl_ceaf_pd
+	short		ceaf_pd4 - tbl_ceaf_pd
+	short		ceaf_pd5 - tbl_ceaf_pd
+	short		ceaf_pd6 - tbl_ceaf_pd
+	short		ceaf_pd7 - tbl_ceaf_pd
+
+ceaf_pd0:
+	mov.l		%a0,EXC_DREGS+0x8(%a6)
+	rts
+ceaf_pd1:
+	mov.l		%a0,EXC_DREGS+0xc(%a6)
+	rts
+ceaf_pd2:
+	mov.l		%a0,%a2
+	rts
+ceaf_pd3:
+	mov.l		%a0,%a3
+	rts
+ceaf_pd4:
+	mov.l		%a0,%a4
+	rts
+ceaf_pd5:
+	mov.l		%a0,%a5
+	rts
+ceaf_pd6:
+	mov.l		%a0,EXC_A6(%a6)
+	rts
+ceaf_pd7:
+	mov.l		%a0,EXC_A7(%a6)
+	mov.b		&mda7_flg,SPCOND_FLG(%a6)
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_load_fop(): load operand for unimplemented FP exception	#
+#									#
+# XREF ****************************************************************	#
+#	set_tag_x() - determine ext prec optype tag			#
+#	set_tag_s() - determine sgl prec optype tag			#
+#	set_tag_d() - determine dbl prec optype tag			#
+#	unnorm_fix() - convert normalized number to denorm or zero	#
+#	norm() - normalize a denormalized number			#
+#	get_packed() - fetch a packed operand from memory		#
+#	_dcalc_ea() - calculate <ea>, fixing An in process		#
+#									#
+#	_imem_read_{word,long}() - read from instruction memory		#
+#	_dmem_read() - read from data memory				#
+#	_dmem_read_{byte,word,long}() - read from data memory		#
+#									#
+#	facc_in_{b,w,l,d,x}() - mem read failed; special exit point	#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If memory access doesn't fail:					#
+#		FP_SRC(a6) = source operand in extended precision	#
+#		FP_DST(a6) = destination operand in extended precision	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This is called from the Unimplemented FP exception handler in	#
+# order to load the source and maybe destination operand into		#
+# FP_SRC(a6) and FP_DST(a6). If the instruction was opclass zero, load	#
+# the source and destination from the FP register file. Set the optype	#
+# tags for both if dyadic, one for monadic. If a number is an UNNORM,	#
+# convert it to a DENORM or a ZERO.					#
+#	If the instruction is opclass two (memory->reg), then fetch	#
+# the destination from the register file and the source operand from	#
+# memory. Tag and fix both as above w/ opclass zero instructions.	#
+#	If the source operand is byte,word,long, or single, it may be	#
+# in the data register file. If it's actually out in memory, use one of	#
+# the mem_read() routines to fetch it. If the mem_read() access returns	#
+# a failing value, exit through the special facc_in() routine which	#
+# will create an access error exception frame from the current exception #
+# frame.								#
+#	Immediate data and regular data accesses are separated because	#
+# if an immediate data access fails, the resulting fault status		#
+# longword stacked for the access error exception must have the		#
+# instruction bit set.							#
+#									#
+#########################################################################
+
+	global		_load_fop
+_load_fop:
+
+#  15     13 12 10  9 7  6       0
+# /        \ /   \ /  \ /         \
+# ---------------------------------
+# | opclass | RX  | RY | EXTENSION |  (2nd word of general FP instruction)
+# ---------------------------------
+#
+
+#	bfextu		EXC_CMDREG(%a6){&0:&3}, %d0 # extract opclass
+#	cmpi.b		%d0, &0x2		# which class is it? ('000,'010,'011)
+#	beq.w		op010			# handle <ea> -> fpn
+#	bgt.w		op011			# handle fpn -> <ea>
+
+# we're not using op011 for now...
+	btst		&0x6,EXC_CMDREG(%a6)
+	bne.b		op010
+
+############################
+# OPCLASS '000: reg -> reg #
+############################
+op000:
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch extension word lo
+	btst		&0x5,%d0		# testing extension bits
+	beq.b		op000_src		# (bit 5 == 0) => monadic
+	btst		&0x4,%d0		# (bit 5 == 1)
+	beq.b		op000_dst		# (bit 4 == 0) => dyadic
+	and.w		&0x007f,%d0		# extract extension bits {6:0}
+	cmpi.w		%d0,&0x0038		# is it an fcmp (dyadic) ?
+	bne.b		op000_src		# no; it's not an fcmp
+
+op000_dst:
+	bfextu		EXC_CMDREG(%a6){&6:&3}, %d0 # extract dst field
+	bsr.l		load_fpn2		# fetch dst fpreg into FP_DST
+
+	bsr.l		set_tag_x		# get dst optype tag
+
+	cmpi.b		%d0, &UNNORM		# is dst fpreg an UNNORM?
+	beq.b		op000_dst_unnorm	# yes
+op000_dst_cont:
+	mov.b		%d0, DTAG(%a6)		# store the dst optype tag
+
+op000_src:
+	bfextu		EXC_CMDREG(%a6){&3:&3}, %d0 # extract src field
+	bsr.l		load_fpn1		# fetch src fpreg into FP_SRC
+
+	bsr.l		set_tag_x		# get src optype tag
+
+	cmpi.b		%d0, &UNNORM		# is src fpreg an UNNORM?
+	beq.b		op000_src_unnorm	# yes
+op000_src_cont:
+	mov.b		%d0, STAG(%a6)		# store the src optype tag
+	rts
+
+op000_dst_unnorm:
+	bsr.l		unnorm_fix		# fix the dst UNNORM
+	bra.b		op000_dst_cont
+op000_src_unnorm:
+	bsr.l		unnorm_fix		# fix the src UNNORM
+	bra.b		op000_src_cont
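+
+# the monadic/dyadic test used above (and repeated for opclass '010
+# below), as a C sketch over the low extension byte; illustrative only:
+#
+#	int is_dyadic(unsigned ext)
+#	{
+#		if (!(ext & 0x20)) return 0;	/* bit 5 clear -> monadic   */
+#		if (!(ext & 0x10)) return 1;	/* bit 4 clear -> dyadic    */
+#		return (ext & 0x7f) == 0x38;	/* else only fcmp is dyadic */
+#	}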
+
+#############################
+# OPCLASS '010: <ea> -> reg #
+#############################
+op010:
+	mov.w		EXC_CMDREG(%a6),%d0	# fetch extension word
+	btst		&0x5,%d0		# testing extension bits
+	beq.b		op010_src		# (bit 5 == 0) => monadic
+	btst		&0x4,%d0		# (bit 5 == 1)
+	beq.b		op010_dst		# (bit 4 == 0) => dyadic
+	and.w		&0x007f,%d0		# extract extension bits {6:0}
+	cmpi.w		%d0,&0x0038		# is it an fcmp (dyadic) ?
+	bne.b		op010_src		# no; it's not an fcmp
+
+op010_dst:
+	bfextu		EXC_CMDREG(%a6){&6:&3}, %d0 # extract dst field
+	bsr.l		load_fpn2		# fetch dst fpreg ptr
+
+	bsr.l		set_tag_x		# get dst type tag
+
+	cmpi.b		%d0, &UNNORM		# is dst fpreg an UNNORM?
+	beq.b		op010_dst_unnorm	# yes
+op010_dst_cont:
+	mov.b		%d0, DTAG(%a6)		# store the dst optype tag
+
+op010_src:
+	bfextu		EXC_CMDREG(%a6){&3:&3}, %d0 # extract src type field
+
+	bfextu		EXC_OPWORD(%a6){&10:&3}, %d1 # extract <ea> mode field
+	bne.w		fetch_from_mem		# src op is in memory
+
+op010_dreg:
+	clr.b		STAG(%a6)		# either NORM or ZERO
+	bfextu		EXC_OPWORD(%a6){&13:&3}, %d1 # extract src reg field
+
+	mov.w		(tbl_op010_dreg.b,%pc,%d0.w*2), %d0 # jmp based on optype
+	jmp		(tbl_op010_dreg.b,%pc,%d0.w*1) # fetch src from dreg
+
+op010_dst_unnorm:
+	bsr.l		unnorm_fix		# fix the dst UNNORM
+	bra.b		op010_dst_cont
+
+	swbeg		&0x8
+tbl_op010_dreg:
+	short		opd_long	- tbl_op010_dreg
+	short		opd_sgl		- tbl_op010_dreg
+	short		tbl_op010_dreg	- tbl_op010_dreg
+	short		tbl_op010_dreg	- tbl_op010_dreg
+	short		opd_word	- tbl_op010_dreg
+	short		tbl_op010_dreg	- tbl_op010_dreg
+	short		opd_byte	- tbl_op010_dreg
+	short		tbl_op010_dreg	- tbl_op010_dreg
+
+#
+# LONG: can be either NORM or ZERO...
+#
+opd_long:
+	bsr.l		fetch_dreg		# fetch long in d0
+	fmov.l		%d0, %fp0		# load a long
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+	fbeq.w		opd_long_zero		# long is a ZERO
+	rts
+opd_long_zero:
+	mov.b		&ZERO, STAG(%a6)	# set ZERO optype flag
+	rts
+
+#
+# WORD: can be either NORM or ZERO...
+#
+opd_word:
+	bsr.l		fetch_dreg		# fetch word in d0
+	fmov.w		%d0, %fp0		# load a word
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+	fbeq.w		opd_word_zero		# WORD is a ZERO
+	rts
+opd_word_zero:
+	mov.b		&ZERO, STAG(%a6)	# set ZERO optype flag
+	rts
+
+#
+# BYTE: can be either NORM or ZERO...
+#
+opd_byte:
+	bsr.l		fetch_dreg		# fetch byte in d0
+	fmov.b		%d0, %fp0		# load a byte
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+	fbeq.w		opd_byte_zero		# byte is a ZERO
+	rts
+opd_byte_zero:
+	mov.b		&ZERO, STAG(%a6)	# set ZERO optype flag
+	rts
+
+#
+# SGL: can be either NORM, DENORM, ZERO, INF, QNAN or SNAN but not UNNORM
+#
+# separate SNANs and DENORMs so they can be loaded w/ special care.
+# all others can simply be moved "in" using fmove.
+#
+opd_sgl:
+	bsr.l		fetch_dreg		# fetch sgl in d0
+	mov.l		%d0,L_SCR1(%a6)
+
+	lea		L_SCR1(%a6), %a0	# pass: ptr to the sgl
+	bsr.l		set_tag_s		# determine sgl type
+	mov.b		%d0, STAG(%a6)		# save the src tag
+
+	cmpi.b		%d0, &SNAN		# is it an SNAN?
+	beq.w		get_sgl_snan		# yes
+
+	cmpi.b		%d0, &DENORM		# is it a DENORM?
+	beq.w		get_sgl_denorm		# yes
+
+	fmov.s		(%a0), %fp0		# no, so can load it regular
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+	rts
+
+##############################################################################
+
+#########################################################################
+# fetch_from_mem():							#
+# - src is out in memory. must:						#
+#	(1) calc ea - must be done AFTER you know the src type since	#
+#		      if the ea is -() or ()+, need to know # of bytes.	#
+#	(2) read it in from either user or supervisor space		#
+#	(3) if (b || w || l) then simply read in			#
+#	    if (s || d || x) then check for SNAN,UNNORM,DENORM		#
+#	    if (packed) then punt for now				#
+# INPUT:								#
+#	%d0 : src type field						#
+#########################################################################
+fetch_from_mem:
+	clr.b		STAG(%a6)		# either NORM or ZERO
+
+	mov.w		(tbl_fp_type.b,%pc,%d0.w*2), %d0 # index by src type field
+	jmp		(tbl_fp_type.b,%pc,%d0.w*1)
+
+	swbeg		&0x8
+tbl_fp_type:
+	short		load_long	- tbl_fp_type
+	short		load_sgl	- tbl_fp_type
+	short		load_ext	- tbl_fp_type
+	short		load_packed	- tbl_fp_type
+	short		load_word	- tbl_fp_type
+	short		load_dbl	- tbl_fp_type
+	short		load_byte	- tbl_fp_type
+	short		tbl_fp_type	- tbl_fp_type
+
+#########################################
+# load a LONG into %fp0:		#
+#	-number can't fault		#
+#	(1) calc ea			#
+#	(2) read 4 bytes into L_SCR1	#
+#	(3) fmov.l into %fp0		#
+#########################################
+load_long:
+	movq.l		&0x4, %d0		# pass: 4 (bytes)
+	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg
+	beq.b		load_long_immed
+
+	bsr.l		_dmem_read_long		# fetch src operand from memory
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_l		# yes
+
+load_long_cont:
+	fmov.l		%d0, %fp0		# read into %fp0;convert to xprec
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+
+	fbeq.w		load_long_zero		# src op is a ZERO
+	rts
+load_long_zero:
+	mov.b		&ZERO, STAG(%a6)	# set optype tag to ZERO
+	rts
+
+load_long_immed:
+	bsr.l		_imem_read_long		# fetch src operand immed data
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		funimp_iacc		# yes
+	bra.b		load_long_cont
+
+#########################################
+# load a WORD into %fp0:		#
+#	-number can't fault		#
+#	(1) calc ea			#
+#	(2) read 2 bytes into L_SCR1	#
+#	(3) fmov.w into %fp0		#
+#########################################
+load_word:
+	movq.l		&0x2, %d0		# pass: 2 (bytes)
+	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg
+	beq.b		load_word_immed
+
+	bsr.l		_dmem_read_word		# fetch src operand from memory
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_w		# yes
+
+load_word_cont:
+	fmov.w		%d0, %fp0		# read into %fp0;convert to xprec
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+
+	fbeq.w		load_word_zero		# src op is a ZERO
+	rts
+load_word_zero:
+	mov.b		&ZERO, STAG(%a6)	# set optype tag to ZERO
+	rts
+
+load_word_immed:
+	bsr.l		_imem_read_word		# fetch src operand immed data
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		funimp_iacc		# yes
+	bra.b		load_word_cont
+
+#########################################
+# load a BYTE into %fp0:		#
+#	-number can't fault		#
+#	(1) calc ea			#
+#	(2) read 1 byte into L_SCR1	#
+#	(3) fmov.b into %fp0		#
+#########################################
+load_byte:
+	movq.l		&0x1, %d0		# pass: 1 (byte)
+	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg
+	beq.b		load_byte_immed
+
+	bsr.l		_dmem_read_byte		# fetch src operand from memory
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_b		# yes
+
+load_byte_cont:
+	fmov.b		%d0, %fp0		# read into %fp0;convert to xprec
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+
+	fbeq.w		load_byte_zero		# src op is a ZERO
+	rts
+load_byte_zero:
+	mov.b		&ZERO, STAG(%a6)	# set optype tag to ZERO
+	rts
+
+load_byte_immed:
+	bsr.l		_imem_read_word		# fetch src operand immed data
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		funimp_iacc		# yes
+	bra.b		load_byte_cont
+
+#########################################
+# load a SGL into %fp0:			#
+#	-number can't fault		#
+#	(1) calc ea			#
+#	(2) read 4 bytes into L_SCR1	#
+#	(3) fmov.s into %fp0		#
+#########################################
+load_sgl:
+	movq.l		&0x4, %d0		# pass: 4 (bytes)
+	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg
+	beq.b		load_sgl_immed
+
+	bsr.l		_dmem_read_long		# fetch src operand from memory
+	mov.l		%d0, L_SCR1(%a6)	# store src op on stack
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_l		# yes
+
+load_sgl_cont:
+	lea		L_SCR1(%a6), %a0	# pass: ptr to sgl src op
+	bsr.l		set_tag_s		# determine src type tag
+	mov.b		%d0, STAG(%a6)		# save src optype tag on stack
+
+	cmpi.b		%d0, &DENORM		# is it a sgl DENORM?
+	beq.w		get_sgl_denorm		# yes
+
+	cmpi.b		%d0, &SNAN		# is it a sgl SNAN?
+	beq.w		get_sgl_snan		# yes
+
+	fmov.s		L_SCR1(%a6), %fp0	# read into %fp0;convert to xprec
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+	rts
+
+load_sgl_immed:
+	bsr.l		_imem_read_long		# fetch src operand immed data
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		funimp_iacc		# yes
+	bra.b		load_sgl_cont
+
+# must convert sgl denorm format to an Xprec denorm fmt suitable for
+# normalization...
+# %a0 : points to sgl denorm
+get_sgl_denorm:
+	clr.w		FP_SRC_EX(%a6)
+	bfextu		(%a0){&9:&23}, %d0	# fetch sgl hi(_mantissa)
+	lsl.l		&0x8, %d0
+	mov.l		%d0, FP_SRC_HI(%a6)	# set ext hi(_mantissa)
+	clr.l		FP_SRC_LO(%a6)		# set ext lo(_mantissa)
+
+	clr.w		FP_SRC_EX(%a6)
+	btst		&0x7, (%a0)		# is sgn bit set?
+	beq.b		sgl_dnrm_norm
+	bset		&0x7, FP_SRC_EX(%a6)	# set sgn of xprec value
+
+sgl_dnrm_norm:
+	lea		FP_SRC(%a6), %a0
+	bsr.l		norm			# normalize number
+	mov.w		&0x3f81, %d1		# xprec exp = 0x3f81
+	sub.w		%d0, %d1		# exp = 0x3f81 - shft amt.
+	or.w		%d1, FP_SRC_EX(%a6)	# {sgn,exp}
+
+	mov.b		&NORM, STAG(%a6)	# fix src type tag
+	rts
+
+# convert sgl to ext SNAN
+# %a0 : points to sgl SNAN
+get_sgl_snan:
+	mov.w		&0x7fff, FP_SRC_EX(%a6) # set exp of SNAN
+	bfextu		(%a0){&9:&23}, %d0
+	lsl.l		&0x8, %d0		# extract and insert hi(man)
+	mov.l		%d0, FP_SRC_HI(%a6)
+	clr.l		FP_SRC_LO(%a6)
+
+	btst		&0x7, (%a0)		# see if sign of SNAN is set
+	beq.b		no_sgl_snan_sgn
+	bset		&0x7, FP_SRC_EX(%a6)
+no_sgl_snan_sgn:
+	rts
+
+#########################################
+# load a DBL into %fp0:			#
+#	-number can't fault		#
+#	(1) calc ea			#
+#	(2) read 8 bytes into L_SCR(1,2)#
+#	(3) fmov.d into %fp0		#
+#########################################
+load_dbl:
+	movq.l		&0x8, %d0		# pass: 8 (bytes)
+	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg
+	beq.b		load_dbl_immed
+
+	lea		L_SCR1(%a6), %a1	# pass: ptr to input dbl tmp space
+	movq.l		&0x8, %d0		# pass: # bytes to read
+	bsr.l		_dmem_read		# fetch src operand from memory
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_d		# yes
+
+load_dbl_cont:
+	lea		L_SCR1(%a6), %a0	# pass: ptr to input dbl
+	bsr.l		set_tag_d		# determine src type tag
+	mov.b		%d0, STAG(%a6)		# set src optype tag
+
+	cmpi.b		%d0, &DENORM		# is it a dbl DENORM?
+	beq.w		get_dbl_denorm		# yes
+
+	cmpi.b		%d0, &SNAN		# is it a dbl SNAN?
+	beq.w		get_dbl_snan		# yes
+
+	fmov.d		L_SCR1(%a6), %fp0	# read into %fp0;convert to xprec
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+	rts
+
+load_dbl_immed:
+	lea		L_SCR1(%a6), %a1	# pass: ptr to input dbl tmp space
+	movq.l		&0x8, %d0		# pass: # bytes to read
+	bsr.l		_imem_read		# fetch src operand from memory
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		funimp_iacc		# yes
+	bra.b		load_dbl_cont
+
+# must convert dbl denorm format to an Xprec denorm fmt suitable for
+# normalization...
+# %a0 : loc. of dbl denorm
+get_dbl_denorm:
+	clr.w		FP_SRC_EX(%a6)
+	bfextu		(%a0){&12:&31}, %d0	# fetch hi(_mantissa)
+	mov.l		%d0, FP_SRC_HI(%a6)
+	bfextu		4(%a0){&11:&21}, %d0	# fetch lo(_mantissa)
+	mov.l		&0xb, %d1
+	lsl.l		%d1, %d0
+	mov.l		%d0, FP_SRC_LO(%a6)
+
+	btst		&0x7, (%a0)		# is sgn bit set?
+	beq.b		dbl_dnrm_norm
+	bset		&0x7, FP_SRC_EX(%a6)	# set sgn of xprec value
+
+dbl_dnrm_norm:
+	lea		FP_SRC(%a6), %a0
+	bsr.l		norm			# normalize number
+	mov.w		&0x3c01, %d1		# xprec exp = 0x3c01
+	sub.w		%d0, %d1		# exp = 0x3c01 - shft amt.
+	or.w		%d1, FP_SRC_EX(%a6)	# {sgn,exp}
+
+	mov.b		&NORM, STAG(%a6)	# fix src type tag
+	rts
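+
+# a quick check of the bias constants used in get_sgl_denorm above and
+# here: a single denorm is f * 2^-149 and its lowest fraction bit lands
+# at bit 40 of the 64-bit mantissa, so after a left shift of "shift" to
+# normalize, the biased extended exponent must be
+#	-149 + 16383 + 63 - 40 - shift = 0x3f81 - shift
+# a double denorm is f * 2^-1074 with its lowest bit at mantissa bit 11:
+#	-1074 + 16383 + 63 - 11 - shift = 0x3c01 - shift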
+
+# convert dbl to ext SNAN
+# %a0 : points to dbl SNAN
+get_dbl_snan:
+	mov.w		&0x7fff, FP_SRC_EX(%a6) # set exp of SNAN
+
+	bfextu		(%a0){&12:&31}, %d0	# fetch hi(_mantissa)
+	mov.l		%d0, FP_SRC_HI(%a6)
+	bfextu		4(%a0){&11:&21}, %d0	# fetch lo(_mantissa)
+	mov.l		&0xb, %d1
+	lsl.l		%d1, %d0
+	mov.l		%d0, FP_SRC_LO(%a6)
+
+	btst		&0x7, (%a0)		# see if sign of SNAN is set
+	beq.b		no_dbl_snan_sgn
+	bset		&0x7, FP_SRC_EX(%a6)
+no_dbl_snan_sgn:
+	rts
+
+#################################################
+# load a Xprec into %fp0:			#
+#	-number can't fault			#
+#	(1) calc ea				#
+#	(2) read 12 bytes into L_SCR(1,2)	#
+#	(3) fmov.x into %fp0			#
+#################################################
+load_ext:
+	mov.l		&0xc, %d0		# pass: 12 (bytes)
+	bsr.l		_dcalc_ea		# calc <ea>
+
+	lea		FP_SRC(%a6), %a1	# pass: ptr to input ext tmp space
+	mov.l		&0xc, %d0		# pass: # of bytes to read
+	bsr.l		_dmem_read		# fetch src operand from memory
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_x		# yes
+
+	lea		FP_SRC(%a6), %a0	# pass: ptr to src op
+	bsr.l		set_tag_x		# determine src type tag
+
+	cmpi.b		%d0, &UNNORM		# is the src op an UNNORM?
+	beq.b		load_ext_unnorm		# yes
+
+	mov.b		%d0, STAG(%a6)		# store the src optype tag
+	rts
+
+load_ext_unnorm:
+	bsr.l		unnorm_fix		# fix the src UNNORM
+	mov.b		%d0, STAG(%a6)		# store the src optype tag
+	rts
+
+#################################################
+# load a packed into %fp0:			#
+#	-number can't fault			#
+#	(1) calc ea				#
+#	(2) read 12 bytes into L_SCR(1,2,3)	#
+#	(3) fmov.x into %fp0			#
+#################################################
+load_packed:
+	bsr.l		get_packed
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src op
+	bsr.l		set_tag_x		# determine src type tag
+	cmpi.b		%d0,&UNNORM		# is the src op an UNNORM ZERO?
+	beq.b		load_packed_unnorm	# yes
+
+	mov.b		%d0,STAG(%a6)		# store the src optype tag
+	rts
+
+load_packed_unnorm:
+	bsr.l		unnorm_fix		# fix the UNNORM ZERO
+	mov.b		%d0,STAG(%a6)		# store the src optype tag
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fout(): move from fp register to memory or data register	#
+#									#
+# XREF ****************************************************************	#
+#	_round() - needed to create EXOP for sgl/dbl precision		#
+#	norm() - needed to create EXOP for extended precision		#
+#	ovf_res() - create default overflow result for sgl/dbl precision#
+#	unf_res() - create default underflow result for sgl/dbl prec.	#
+#	dst_dbl() - create rounded dbl precision result.		#
+#	dst_sgl() - create rounded sgl precision result.		#
+#	fetch_dreg() - fetch dynamic k-factor reg for packed.		#
+#	bindec() - convert FP binary number to packed number.		#
+#	_mem_write() - write data to memory.				#
+#	_mem_write2() - write data to memory unless supv mode -(a7) exc.#
+#	_dmem_write_{byte,word,long}() - write data to memory.		#
+#	store_dreg_{b,w,l}() - store data to data register file.	#
+#	facc_out_{b,w,l,d,x}() - data access error occurred.		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 : intermediate underflow or overflow result if		#
+#	      OVFL/UNFL occurred for a sgl or dbl operand		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This routine is accessed by many handlers that need to do an	#
+# opclass three move of an operand out to memory.			#
+#	Decode an fmove out (opclass 3) instruction to determine if	#
+# it's b,w,l,s,d,x, or p in size. b,w,l can be stored to either a data	#
+# register or memory. The algorithm uses a standard "fmove" to create	#
+# the rounded result. Also, since exceptions are disabled, this also	#
+# creates the correct OPERR default result if appropriate.		#
+#	For sgl or dbl precision, overflow or underflow can occur. If	#
+# either occurs and is enabled, the EXOP must be created.		#
+#	For extended precision, the stacked <ea> must be fixed along	#
+# w/ the address index register as appropriate w/ _calc_ea_fout(). If	#
+# the source is a denorm and if underflow is enabled, an EXOP must be	#
+# created.								#
+#	For packed, the k-factor must be fetched from the instruction	#
+# word or a data register. The <ea> must be fixed as w/ extended	#
+# precision. Then, bindec() is called to create the appropriate		#
+# packed result.							#
+#	If at any time an access error is flagged by one of the move-	#
+# to-memory routines, then a special exit must be made so that the	#
+# access error can be handled properly.					#
+#									#
+#########################################################################
+
+	global		fout
+fout:
+	bfextu		EXC_CMDREG(%a6){&3:&3},%d1 # extract dst fmt
+	mov.w		(tbl_fout.b,%pc,%d1.w*2),%a1 # use as index
+	jmp		(tbl_fout.b,%pc,%a1)	# jump to routine
+
+	swbeg		&0x8
+tbl_fout:
+	short		fout_long	-	tbl_fout
+	short		fout_sgl	-	tbl_fout
+	short		fout_ext	-	tbl_fout
+	short		fout_pack	-	tbl_fout
+	short		fout_word	-	tbl_fout
+	short		fout_dbl	-	tbl_fout
+	short		fout_byte	-	tbl_fout
+	short		fout_pack	-	tbl_fout
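+
+# the 3-bit destination format field extracted above indexes this table;
+# e.g. fmt %000 stores a long and fmt %001 a single, while fmt %011 and
+# %111 both land on fout_pack and the static vs. dynamic k-factor cases
+# are told apart later by checking bit 4 of EXC_CMDREG.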
+
+#################################################################
+# fmove.b out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_byte:
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_byte_denorm	# no
+
+	fmovm.x		SRC(%a0),&0x80		# load value
+
+fout_byte_norm:
+	fmov.l		%d0,%fpcr		# insert rnd prec,mode
+
+	fmov.b		%fp0,%d0		# exec move out w/ correct rnd mode
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch FPSR
+	or.w		%d1,2+USER_FPSR(%a6)	# save new exc,accrued bits
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_byte_dn		# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_byte	# write byte
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_b		# yes
+
+	rts
+
+fout_byte_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_b
+	rts
+
+fout_byte_denorm:
+	mov.l		SRC_EX(%a0),%d1
+	andi.l		&0x80000000,%d1		# keep DENORM sign
+	ori.l		&0x00800000,%d1		# make smallest sgl
+	fmov.s		%d1,%fp0
+	bra.b		fout_byte_norm
+
+#################################################################
+# fmove.w out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_word:
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_word_denorm	# no
+
+	fmovm.x		SRC(%a0),&0x80		# load value
+
+fout_word_norm:
+	fmov.l		%d0,%fpcr		# insert rnd prec:mode
+
+	fmov.w		%fp0,%d0		# exec move out w/ correct rnd mode
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch FPSR
+	or.w		%d1,2+USER_FPSR(%a6)	# save new exc,accrued bits
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_word_dn		# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_word	# write word
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_w		# yes
+
+	rts
+
+fout_word_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_w
+	rts
+
+fout_word_denorm:
+	mov.l		SRC_EX(%a0),%d1
+	andi.l		&0x80000000,%d1		# keep DENORM sign
+	ori.l		&0x00800000,%d1		# make smallest sgl
+	fmov.s		%d1,%fp0
+	bra.b		fout_word_norm
+
+#################################################################
+# fmove.l out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_long:
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_long_denorm	# no
+
+	fmovm.x		SRC(%a0),&0x80		# load value
+
+fout_long_norm:
+	fmov.l		%d0,%fpcr		# insert rnd prec:mode
+
+	fmov.l		%fp0,%d0		# exec move out w/ correct rnd mode
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch FPSR
+	or.w		%d1,2+USER_FPSR(%a6)	# save new exc,accrued bits
+
+fout_long_write:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_long_dn		# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	rts
+
+fout_long_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+	rts
+
+fout_long_denorm:
+	mov.l		SRC_EX(%a0),%d1
+	andi.l		&0x80000000,%d1		# keep DENORM sign
+	ori.l		&0x00800000,%d1		# make smallest sgl
+	fmov.s		%d1,%fp0
+	bra.b		fout_long_norm
+
+#################################################################
+# fmove.x out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+# The DENORM causes an Underflow exception.
+fout_ext:
+
+# we copy the extended precision result to FP_SCR0 so that the reserved
+# 16-bit field gets zeroed. we do this since we promise not to disturb
+# what's at SRC(a0).
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	clr.w		2+FP_SCR0_EX(%a6)	# clear reserved field
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	fmovm.x		SRC(%a0),&0x80		# return result
+
+	bsr.l		_calc_ea_fout		# fix stacked <ea>
+
+	mov.l		%a0,%a1			# pass: dst addr
+	lea		FP_SCR0(%a6),%a0	# pass: src addr
+	mov.l		&0xc,%d0		# pass: opsize is 12 bytes
+
+# we must not yet write the extended precision data to the stack
+# in the pre-decrement case from supervisor mode or else we'll corrupt
+# the stack frame. so, leave it in FP_SRC for now and deal with it later...
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	beq.b		fout_ext_a7
+
+	bsr.l		_dmem_write		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_ext_denorm		# no
+	rts
+
+# the number is a DENORM. must set the underflow exception bit
+fout_ext_denorm:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set underflow exc bit
+
+	mov.b		FPCR_ENABLE(%a6),%d0
+	andi.b		&0x0a,%d0		# is UNFL or INEX enabled?
+	bne.b		fout_ext_exc		# yes
+	rts
+
+# we don't want to do the write if the exception occurred in supervisor mode
+# so _mem_write2() handles this for us.
+fout_ext_a7:
+	bsr.l		_mem_write2		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_ext_denorm		# no
+	rts
+
+fout_ext_exc:
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the mantissa
+	neg.w		%d0			# new exp = -(shft amt)
+	andi.w		&0x7fff,%d0
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# keep only old sign
+	or.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+fout_ext_err:
+	mov.l		EXC_A6(%a6),(%a6)	# fix stacked a6
+	bra.l		facc_out_x
+
+#########################################################################
+# fmove.s out ###########################################################
+#########################################################################
+fout_sgl:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	mov.l		%d0,L_SCR3(%a6)		# save rnd prec,mode on stack
+
+#
+# operand is a normalized number. first, we check to see if the move out
+# would cause either an underflow or overflow. these cases are handled
+# separately. otherwise, set the FPCR to the proper rounding mode and
+# execute the move.
+#
+	mov.w		SRC_EX(%a0),%d0		# extract exponent
+	andi.w		&0x7fff,%d0		# strip sign
+
+	cmpi.w		%d0,&SGL_HI		# will operand overflow?
+	bgt.w		fout_sgl_ovfl		# yes; go handle OVFL
+	beq.w		fout_sgl_may_ovfl	# maybe; go handle possible OVFL
+	cmpi.w		%d0,&SGL_LO		# will operand underflow?
+	blt.w		fout_sgl_unfl		# yes; go handle underflow
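+
+# for example, +1.0 (biased extended exponent 0x3fff) should fall
+# between SGL_LO and SGL_HI and so drops through to the simple store
+# below, while an exponent above SGL_HI is too large for the single
+# exponent field regardless of rounding and must take the OVFL path.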
+
+#
+# NORMs(in range) can be stored out by a simple "fmov.s"
+# Unnormalized inputs can come through this point.
+#
+fout_sgl_exg:
+	fmovm.x		SRC(%a0),&0x80		# fetch fop from stack
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmov.s		%fp0,%d0		# store does convert and round
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.w		%d1,2+USER_FPSR(%a6)	# set possible inex2/ainex
+
+fout_sgl_exg_write:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_sgl_exg_write_dn	# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	rts
+
+fout_sgl_exg_write_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+	rts
+
+#
+# here, we know that the operand would UNFL if moved out to single prec,
+# so, denorm and round and then use generic store single routine to
+# write the value to memory.
+#
+fout_sgl_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.l		%a0,-(%sp)
+
+	clr.l		%d0			# pass: S.F. = 0
+
+	cmpi.b		STAG(%a6),&DENORM	# fetch src optype tag
+	bne.b		fout_sgl_unfl_cont	# let DENORMs fall through
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the DENORM
+
+fout_sgl_unfl_cont:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calc default underflow result
+
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to fop
+	bsr.l		dst_sgl			# convert to single prec
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_sgl_unfl_dn	# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.b		fout_sgl_unfl_chkexc
+
+fout_sgl_unfl_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+
+fout_sgl_unfl_chkexc:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_unfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# it's definitely an overflow so call ovf_res to get the correct answer
+#
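+# (the single mantissa keeps only the top 24 bits of the extended
+# mantissa, so the low byte of SRC_HI plus all of SRC_LO are exactly
+# the bits that get thrown away; if any of them are set, the stored
+# result is inexact as well as overflowed.)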
+fout_sgl_ovfl:
+	tst.b		3+SRC_HI(%a0)		# is result inexact?
+	bne.b		fout_sgl_ovfl_inex2
+	tst.l		SRC_LO(%a0)		# is result inexact?
+	bne.b		fout_sgl_ovfl_inex2
+	ori.w		&ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+	bra.b		fout_sgl_ovfl_cont
+fout_sgl_ovfl_inex2:
+	ori.w		&ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
+
+fout_sgl_ovfl_cont:
+	mov.l		%a0,-(%sp)
+
+# call ovf_res() w/ sgl prec and the correct rnd mode to create the default
+# overflow result. DON'T save the returned ccodes from ovf_res() since
+# fmove out doesn't alter them.
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	smi		%d1			# set if so
+	mov.l		L_SCR3(%a6),%d0		# pass: sgl prec,rnd mode
+	bsr.l		ovf_res			# calc OVFL result
+	fmovm.x		(%a0),&0x80		# load default overflow result
+	fmov.s		%fp0,%d0		# store to single
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_sgl_ovfl_dn	# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.b		fout_sgl_ovfl_chkexc
+
+fout_sgl_ovfl_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+
+fout_sgl_ovfl_chkexc:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_ovfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# move out MAY overflow:
+# (1) force the exp to 0x3fff
+# (2) do a move w/ appropriate rnd mode
+# (3) if exp still equals zero, then insert original exponent
+#	for the correct result.
+#     if exp now equals one, then it overflowed so call ovf_res.
+#
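+# example: an operand right at the single-precision overflow boundary
+# may round up to exactly 2.0 once its exponent is forced to 0x3fff;
+# the fcmp against 2.0 below catches that case (the exponent effectively
+# incremented) and reroutes it through the overflow handler.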
+fout_sgl_may_ovfl:
+	mov.w		SRC_EX(%a0),%d1		# fetch current sign
+	andi.w		&0x8000,%d1		# keep it,clear exp
+	ori.w		&0x3fff,%d1		# insert exp = 0
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert scaled exp
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# force fop to be rounded
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# need absolute value
+	fcmp.b		%fp0,&0x2		# did exponent increase?
+	fblt.w		fout_sgl_exg		# no; go finish NORM
+	bra.w		fout_sgl_ovfl		# yes; go handle overflow
+
+################
+
+fout_sd_exc_unfl:
+	mov.l		(%sp)+,%a0
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	cmpi.b		STAG(%a6),&DENORM	# was src a DENORM?
+	bne.b		fout_sd_exc_cont	# no
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm
+	neg.l		%d0
+	andi.w		&0x7fff,%d0
+	bfins		%d0,FP_SCR0_EX(%a6){&1:&15}
+	bra.b		fout_sd_exc_cont
+
+fout_sd_exc:
+fout_sd_exc_ovfl:
+	mov.l		(%sp)+,%a0		# restore a0
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+fout_sd_exc_cont:
+	bclr		&0x7,FP_SCR0_EX(%a6)	# clear sign bit
+	sne.b		2+FP_SCR0_EX(%a6)	# set internal sign bit
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to DENORM
+
+	mov.b		3+L_SCR3(%a6),%d1
+	lsr.b		&0x4,%d1
+	andi.w		&0x0c,%d1
+	swap		%d1
+	mov.b		3+L_SCR3(%a6),%d1
+	lsr.b		&0x4,%d1
+	andi.w		&0x03,%d1
+	clr.l		%d0			# pass: zero g,r,s
+	bsr.l		_round			# round the DENORM
+
+	tst.b		2+FP_SCR0_EX(%a6)	# is EXOP negative?
+	beq.b		fout_sd_exc_done	# no
+	bset		&0x7,FP_SCR0_EX(%a6)	# yes
+
+fout_sd_exc_done:
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#################################################################
+# fmove.d out ###################################################
+#################################################################
+fout_dbl:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+	mov.l		%d0,L_SCR3(%a6)		# save rnd prec,mode on stack
+
+#
+# operand is a normalized number. first, we check to see if the move out
+# would cause either an underflow or overflow. these cases are handled
+# separately. otherwise, set the FPCR to the proper rounding mode and
+# execute the move.
+#
+	mov.w		SRC_EX(%a0),%d0		# extract exponent
+	andi.w		&0x7fff,%d0		# strip sign
+
+	cmpi.w		%d0,&DBL_HI		# will operand overflow?
+	bgt.w		fout_dbl_ovfl		# yes; go handle OVFL
+	beq.w		fout_dbl_may_ovfl	# maybe; go handle possible OVFL
+	cmpi.w		%d0,&DBL_LO		# will operand underflow?
+	blt.w		fout_dbl_unfl		# yes; go handle underflow
+
+#
+# NORMs(in range) can be stored out by a simple "fmov.d"
+# Unnormalized inputs can come through this point.
+#
+fout_dbl_exg:
+	fmovm.x		SRC(%a0),&0x80		# fetch fop from stack
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmov.d		%fp0,L_SCR1(%a6)	# store does convert and round
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d0		# save FPSR
+
+	or.w		%d0,2+USER_FPSR(%a6)	# set possible inex2/ainex
+
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	lea		L_SCR1(%a6),%a0		# pass: src addr
+	movq.l		&0x8,%d0		# pass: opsize is 8 bytes
+	bsr.l		_dmem_write		# store dbl fop to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	rts					# no; so we're finished
+
+#
+# here, we know that the operand would UNFL if moved out to double prec,
+# so, denorm and round and then use generic store double routine to
+# write the value to memory.
+#
+fout_dbl_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.l		%a0,-(%sp)
+
+	clr.l		%d0			# pass: S.F. = 0
+
+	cmpi.b		STAG(%a6),&DENORM	# fetch src optype tag
+	bne.b		fout_dbl_unfl_cont	# let DENORMs fall through
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the DENORM
+
+fout_dbl_unfl_cont:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calc default underflow result
+
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to fop
+	bsr.l		dst_dbl			# convert to double prec
+	mov.l		%d0,L_SCR1(%a6)
+	mov.l		%d1,L_SCR2(%a6)
+
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	lea		L_SCR1(%a6),%a0		# pass: src addr
+	movq.l		&0x8,%d0		# pass: opsize is 8 bytes
+	bsr.l		_dmem_write		# store dbl fop to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_unfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# it's definitely an overflow so call ovf_res to get the correct answer
+#
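+# (the double mantissa keeps the top 53 bits of the extended mantissa,
+# so the low 11 bits of SRC_LO are exactly the bits that get thrown
+# away; the andi below tests them to decide whether the overflowed
+# result is also inexact.)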
+fout_dbl_ovfl:
+	mov.w		2+SRC_LO(%a0),%d0
+	andi.w		&0x7ff,%d0
+	bne.b		fout_dbl_ovfl_inex2
+
+	ori.w		&ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+	bra.b		fout_dbl_ovfl_cont
+fout_dbl_ovfl_inex2:
+	ori.w		&ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
+
+fout_dbl_ovfl_cont:
+	mov.l		%a0,-(%sp)
+
+# call ovf_res() w/ dbl prec and the correct rnd mode to create the default
+# overflow result. DON'T save the returned ccodes from ovf_res() since
+# fmove out doesn't alter them.
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	smi		%d1			# set if so
+	mov.l		L_SCR3(%a6),%d0		# pass: dbl prec,rnd mode
+	bsr.l		ovf_res			# calc OVFL result
+	fmovm.x		(%a0),&0x80		# load default overflow result
+	fmov.d		%fp0,L_SCR1(%a6)	# store to double
+
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	lea		L_SCR1(%a6),%a0		# pass: src addr
+	movq.l		&0x8,%d0		# pass: opsize is 8 bytes
+	bsr.l		_dmem_write		# store dbl fop to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_ovfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# move out MAY overflow:
+# (1) force the exp to 0x3fff
+# (2) do a move w/ appropriate rnd mode
+# (3) if exp still equals zero, then insert original exponent
+#	for the correct result.
+#     if exp now equals one, then it overflowed so call ovf_res.
+#
+fout_dbl_may_ovfl:
+	mov.w		SRC_EX(%a0),%d1		# fetch current sign
+	andi.w		&0x8000,%d1		# keep it,clear exp
+	ori.w		&0x3fff,%d1		# insert exp = 0
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert scaled exp
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# force fop to be rounded
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# need absolute value
+	fcmp.b		%fp0,&0x2		# did exponent increase?
+	fblt.w		fout_dbl_exg		# no; go finish NORM
+	bra.w		fout_dbl_ovfl		# yes; go handle overflow
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	dst_dbl(): create double precision value from extended prec.	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to source operand in extended precision		#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = hi(double precision result)				#
+#	d1 = lo(double precision result)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#  Changes extended precision to double precision.			#
+#  Note: no attempt is made to round the extended value to double.	#
+#	dbl_sign = ext_sign						#
+#	dbl_exp = ext_exp - $3fff(ext bias) + $3ff(dbl bias)		#
+#	get rid of ext integer bit					#
+#	dbl_mant = ext_mant{62:11}					#
+#									#
+#		---------------   ---------------    ---------------	#
+#  extended ->  |s|    exp    |   |1| ms mant   |    | ls mant     |	#
+#		---------------   ---------------    ---------------	#
+#		 95	    64    63 62	      32      31     11	  0	#
+#				     |			     |		#
+#				     |			     |		#
+#				     |			     |		#
+#			             v			     v		#
+#			      ---------------   ---------------		#
+#  double   ->		      |s|exp| mant  |   |  mant       |		#
+#			      ---------------   ---------------		#
+#			      63     51   32   31	       0	#
+#									#
+#########################################################################
+
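+# worked example: +1.0 in extended precision is EX=$3fff, HI=$80000000,
+# LO=$00000000.  the rebiased exponent is $3fff - $3fff + $3ff = $3ff,
+# the explicit integer bit is dropped, and the routine returns
+# d0=$3ff00000/d1=$00000000, i.e. +1.0 in double precision.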
+dst_dbl:
+	clr.l		%d0			# clear d0
+	mov.w		FTEMP_EX(%a0),%d0	# get exponent
+	subi.w		&EXT_BIAS,%d0		# subtract extended precision bias
+	addi.w		&DBL_BIAS,%d0		# add double precision bias
+	tst.b		FTEMP_HI(%a0)		# is number a denorm?
+	bmi.b		dst_get_dupper		# no
+	subq.w		&0x1,%d0		# yes; denorm bias = DBL_BIAS - 1
+dst_get_dupper:
+	swap		%d0			# d0 now in upper word
+	lsl.l		&0x4,%d0		# d0 in proper place for dbl prec exp
+	tst.b		FTEMP_EX(%a0)		# test sign
+	bpl.b		dst_get_dman		# if positive, go process mantissa
+	bset		&0x1f,%d0		# if negative, set sign
+dst_get_dman:
+	mov.l		FTEMP_HI(%a0),%d1	# get ms mantissa
+	bfextu		%d1{&1:&20},%d1		# get upper 20 bits of ms
+	or.l		%d1,%d0			# put these bits in ms word of double
+	mov.l		%d0,L_SCR1(%a6)		# put the new exp back on the stack
+	mov.l		FTEMP_HI(%a0),%d1	# get ms mantissa
+	mov.l		&21,%d0			# load shift count
+	lsl.l		%d0,%d1			# put lower 11 bits in upper bits
+	mov.l		%d1,L_SCR2(%a6)		# build lower lword in memory
+	mov.l		FTEMP_LO(%a0),%d1	# get ls mantissa
+	bfextu		%d1{&0:&21},%d0		# get ls 21 bits of double
+	mov.l		L_SCR2(%a6),%d1
+	or.l		%d0,%d1			# put them in double result
+	mov.l		L_SCR1(%a6),%d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	dst_sgl(): create single precision value from extended prec	#
+#									#
+# XREF ****************************************************************	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to source operand in extended precision		#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = single precision result					#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+# Changes extended precision to single precision.			#
+#	sgl_sign = ext_sign						#
+#	sgl_exp = ext_exp - $3fff(ext bias) + $7f(sgl bias)		#
+#	get rid of ext integer bit					#
+#	sgl_mant = ext_mant{62:40}					#
+#									#
+#		---------------   ---------------    ---------------	#
+#  extended ->  |s|    exp    |   |1| ms mant   |    | ls mant     |	#
+#		---------------   ---------------    ---------------	#
+#		 95	    64    63 62	   40 32      31     12	  0	#
+#				     |	   |				#
+#				     |	   |				#
+#				     |	   |				#
+#			             v     v				#
+#			      ---------------				#
+#  single   ->		      |s|exp| mant  |				#
+#			      ---------------				#
+#			      31     22     0				#
+#									#
+#########################################################################
+
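+# worked example: +1.0 in extended precision is EX=$3fff, HI=$80000000,
+# LO=$00000000.  the rebiased exponent is $3fff - $3fff + $7f = $7f,
+# which lands in bits 30-23 after the swap and 7-bit shift, giving
+# d0=$3f800000, i.e. +1.0 in single precision.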
+dst_sgl:
+	clr.l		%d0
+	mov.w		FTEMP_EX(%a0),%d0	# get exponent
+	subi.w		&EXT_BIAS,%d0		# subtract extended precision bias
+	addi.w		&SGL_BIAS,%d0		# add single precision bias
+	tst.b		FTEMP_HI(%a0)		# is number a denorm?
+	bmi.b		dst_get_supper		# no
+	subq.w		&0x1,%d0		# yes; denorm bias = SGL_BIAS - 1
+dst_get_supper:
+	swap		%d0			# put exp in upper word of d0
+	lsl.l		&0x7,%d0		# shift it into single exp bits
+	tst.b		FTEMP_EX(%a0)		# test sign
+	bpl.b		dst_get_sman		# if positive, continue
+	bset		&0x1f,%d0		# if negative, put in sign first
+dst_get_sman:
+	mov.l		FTEMP_HI(%a0),%d1	# get ms mantissa
+	andi.l		&0x7fffff00,%d1		# get upper 23 bits of ms
+	lsr.l		&0x8,%d1		# and put them flush right
+	or.l		%d1,%d0			# put these bits in ms word of single
+	rts
+
+##############################################################################
+fout_pack:
+	bsr.l		_calc_ea_fout		# fetch the <ea>
+	mov.l		%a0,-(%sp)
+
+	mov.b		STAG(%a6),%d0		# fetch input type
+	bne.w		fout_pack_not_norm	# input is not NORM
+
+fout_pack_norm:
+	btst		&0x4,EXC_CMDREG(%a6)	# static or dynamic?
+	beq.b		fout_pack_s		# static
+
+fout_pack_d:
+	mov.b		1+EXC_CMDREG(%a6),%d1	# fetch dynamic reg
+	lsr.b		&0x4,%d1
+	andi.w		&0x7,%d1
+
+	bsr.l		fetch_dreg		# fetch Dn w/ k-factor
+
+	bra.b		fout_pack_type
+fout_pack_s:
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch static field
+
+fout_pack_type:
+	bfexts		%d0{&25:&7},%d0		# extract k-factor
+	mov.l	%d0,-(%sp)
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to input
+
+# bindec is currently scrambling FP_SRC for denorm inputs.
+# we'll have to change this, but for now, tough luck!!!
+	bsr.l		bindec			# convert xprec to packed
+
+#	andi.l		&0xcfff000f,FP_SCR0(%a6) # clear unused fields
+	andi.l		&0xcffff00f,FP_SCR0(%a6) # clear unused fields
+
+	mov.l	(%sp)+,%d0
+
+	tst.b		3+FP_SCR0_EX(%a6)
+	bne.b		fout_pack_set
+	tst.l		FP_SCR0_HI(%a6)
+	bne.b		fout_pack_set
+	tst.l		FP_SCR0_LO(%a6)
+	bne.b		fout_pack_set
+
+# add the extra condition that only if the k-factor was zero, too, should
+# we zero the exponent
+	tst.l		%d0
+	bne.b		fout_pack_set
+# "mantissa" is all zero which means that the answer is zero. but, the '040
+# algorithm allows the exponent to be non-zero. the 881/2 do not. therefore,
+# if the mantissa is zero, I will zero the exponent, too.
+# the question now is whether the exponent's sign bit is allowed to be non-zero
+# for a zero, also...
+	andi.w		&0xf000,FP_SCR0(%a6)
+
+fout_pack_set:
+
+	lea		FP_SCR0(%a6),%a0	# pass: src addr
+
+fout_pack_write:
+	mov.l		(%sp)+,%a1		# pass: dst addr
+	mov.l		&0xc,%d0		# pass: opsize is 12 bytes
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	beq.b		fout_pack_a7
+
+	bsr.l		_dmem_write		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	rts
+
+# we don't want to do the write if the exception occurred in supervisor mode
+# so _mem_write2() handles this for us.
+fout_pack_a7:
+	bsr.l		_mem_write2		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	rts
+
+fout_pack_not_norm:
+	cmpi.b		%d0,&DENORM		# is it a DENORM?
+	beq.w		fout_pack_norm		# yes
+	lea		FP_SRC(%a6),%a0
+	clr.w		2+FP_SRC_EX(%a6)
+	cmpi.b		%d0,&SNAN		# is it an SNAN?
+	beq.b		fout_pack_snan		# yes
+	bra.b		fout_pack_write		# no
+
+fout_pack_snan:
+	ori.w		&snaniop2_mask,FPSR_EXCEPT(%a6) # set SNAN/AIOP
+	bset		&0x6,FP_SRC_HI(%a6)	# set snan bit
+	bra.b		fout_pack_write
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fetch_dreg(): fetch register according to index in d1		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d1 = index of register to fetch from				#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of register fetched					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1 which can range from zero	#
+# to fifteen, load the corresponding register file value (where		#
+# address register indexes start at 8). D0/D1/A0/A1/A6/A7 are on the	#
+# stack. The rest should still be in their original places.		#
+#									#
+#########################################################################
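+
+# example: an index of 3 dispatches to fdreg3 below and returns the
+# live value of %d3, while an index of 8 (i.e. %a0) dispatches to
+# fdreg8 and returns the copy saved at EXC_DREGS+0x8, since a0/a1 live
+# on the exception stack frame rather than in their registers here.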
+
+# this routine leaves d1 intact for subsequent store_dreg calls.
+	global		fetch_dreg
+fetch_dreg:
+	mov.w		(tbl_fdreg.b,%pc,%d1.w*2),%d0
+	jmp		(tbl_fdreg.b,%pc,%d0.w*1)
+
+tbl_fdreg:
+	short		fdreg0 - tbl_fdreg
+	short		fdreg1 - tbl_fdreg
+	short		fdreg2 - tbl_fdreg
+	short		fdreg3 - tbl_fdreg
+	short		fdreg4 - tbl_fdreg
+	short		fdreg5 - tbl_fdreg
+	short		fdreg6 - tbl_fdreg
+	short		fdreg7 - tbl_fdreg
+	short		fdreg8 - tbl_fdreg
+	short		fdreg9 - tbl_fdreg
+	short		fdrega - tbl_fdreg
+	short		fdregb - tbl_fdreg
+	short		fdregc - tbl_fdreg
+	short		fdregd - tbl_fdreg
+	short		fdrege - tbl_fdreg
+	short		fdregf - tbl_fdreg
+
+fdreg0:
+	mov.l		EXC_DREGS+0x0(%a6),%d0
+	rts
+fdreg1:
+	mov.l		EXC_DREGS+0x4(%a6),%d0
+	rts
+fdreg2:
+	mov.l		%d2,%d0
+	rts
+fdreg3:
+	mov.l		%d3,%d0
+	rts
+fdreg4:
+	mov.l		%d4,%d0
+	rts
+fdreg5:
+	mov.l		%d5,%d0
+	rts
+fdreg6:
+	mov.l		%d6,%d0
+	rts
+fdreg7:
+	mov.l		%d7,%d0
+	rts
+fdreg8:
+	mov.l		EXC_DREGS+0x8(%a6),%d0
+	rts
+fdreg9:
+	mov.l		EXC_DREGS+0xc(%a6),%d0
+	rts
+fdrega:
+	mov.l		%a2,%d0
+	rts
+fdregb:
+	mov.l		%a3,%d0
+	rts
+fdregc:
+	mov.l		%a4,%d0
+	rts
+fdregd:
+	mov.l		%a5,%d0
+	rts
+fdrege:
+	mov.l		(%a6),%d0
+	rts
+fdregf:
+	mov.l		EXC_A7(%a6),%d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_dreg_l(): store longword to data register specified by d1	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = longword value to store					#
+#	d1 = index of register to store to				#
+#									#
+# OUTPUT **************************************************************	#
+#	(data register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1, store the longword value	#
+# in d0 to the corresponding data register. D0/D1 are on the stack	#
+# while the rest are in their initial places.				#
+#									#
+#########################################################################
+
+	global		store_dreg_l
+store_dreg_l:
+	mov.w		(tbl_sdregl.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_sdregl.b,%pc,%d1.w*1)
+
+tbl_sdregl:
+	short		sdregl0 - tbl_sdregl
+	short		sdregl1 - tbl_sdregl
+	short		sdregl2 - tbl_sdregl
+	short		sdregl3 - tbl_sdregl
+	short		sdregl4 - tbl_sdregl
+	short		sdregl5 - tbl_sdregl
+	short		sdregl6 - tbl_sdregl
+	short		sdregl7 - tbl_sdregl
+
+sdregl0:
+	mov.l		%d0,EXC_DREGS+0x0(%a6)
+	rts
+sdregl1:
+	mov.l		%d0,EXC_DREGS+0x4(%a6)
+	rts
+sdregl2:
+	mov.l		%d0,%d2
+	rts
+sdregl3:
+	mov.l		%d0,%d3
+	rts
+sdregl4:
+	mov.l		%d0,%d4
+	rts
+sdregl5:
+	mov.l		%d0,%d5
+	rts
+sdregl6:
+	mov.l		%d0,%d6
+	rts
+sdregl7:
+	mov.l		%d0,%d7
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_dreg_w(): store word to data register specified by d1	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = word value to store					#
+#	d1 = index of register to store to				#
+#									#
+# OUTPUT **************************************************************	#
+#	(data register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1, store the word value	#
+# in d0 to the corresponding data register. D0/D1 are on the stack	#
+# while the rest are in their initial places.				#
+#									#
+#########################################################################
+
+	global		store_dreg_w
+store_dreg_w:
+	mov.w		(tbl_sdregw.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_sdregw.b,%pc,%d1.w*1)
+
+tbl_sdregw:
+	short		sdregw0 - tbl_sdregw
+	short		sdregw1 - tbl_sdregw
+	short		sdregw2 - tbl_sdregw
+	short		sdregw3 - tbl_sdregw
+	short		sdregw4 - tbl_sdregw
+	short		sdregw5 - tbl_sdregw
+	short		sdregw6 - tbl_sdregw
+	short		sdregw7 - tbl_sdregw
+
+sdregw0:
+	mov.w		%d0,2+EXC_DREGS+0x0(%a6)
+	rts
+sdregw1:
+	mov.w		%d0,2+EXC_DREGS+0x4(%a6)
+	rts
+sdregw2:
+	mov.w		%d0,%d2
+	rts
+sdregw3:
+	mov.w		%d0,%d3
+	rts
+sdregw4:
+	mov.w		%d0,%d4
+	rts
+sdregw5:
+	mov.w		%d0,%d5
+	rts
+sdregw6:
+	mov.w		%d0,%d6
+	rts
+sdregw7:
+	mov.w		%d0,%d7
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_dreg_b(): store byte to data register specified by d1	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = byte value to store					#
+#	d1 = index of register to store to				#
+#									#
+# OUTPUT **************************************************************	#
+#	(data register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1, store the byte value	#
+# in d0 to the corresponding data register. D0/D1 are on the stack	#
+# while the rest are in their initial places.				#
+#									#
+#########################################################################
+
+	global		store_dreg_b
+store_dreg_b:
+	mov.w		(tbl_sdregb.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_sdregb.b,%pc,%d1.w*1)
+
+tbl_sdregb:
+	short		sdregb0 - tbl_sdregb
+	short		sdregb1 - tbl_sdregb
+	short		sdregb2 - tbl_sdregb
+	short		sdregb3 - tbl_sdregb
+	short		sdregb4 - tbl_sdregb
+	short		sdregb5 - tbl_sdregb
+	short		sdregb6 - tbl_sdregb
+	short		sdregb7 - tbl_sdregb
+
+sdregb0:
+	mov.b		%d0,3+EXC_DREGS+0x0(%a6)
+	rts
+sdregb1:
+	mov.b		%d0,3+EXC_DREGS+0x4(%a6)
+	rts
+sdregb2:
+	mov.b		%d0,%d2
+	rts
+sdregb3:
+	mov.b		%d0,%d3
+	rts
+sdregb4:
+	mov.b		%d0,%d4
+	rts
+sdregb5:
+	mov.b		%d0,%d5
+	rts
+sdregb6:
+	mov.b		%d0,%d6
+	rts
+sdregb7:
+	mov.b		%d0,%d7
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	inc_areg(): increment an address register by the value in d0	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = amount to increment by					#
+#	d1 = index of address register to increment			#
+#									#
+# OUTPUT **************************************************************	#
+#	(address register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Typically used for an instruction w/ a post-increment <ea>,	#
+# this routine adds the increment value in d0 to the address register	#
+# specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside	#
+# in their original places.						#
+#	For a7, if the increment amount is one, then we have to		#
+# increment by two. For any a7 update, set the mia7_flag so that if	#
+# an access error exception occurs later in emulation, this address	#
+# register update can be undone.					#
+#									#
+#########################################################################
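+
+# example: a byte-sized post-increment through (a7)+ passes d0 = 1 but
+# a7 gets bumped by 2 (iareg7b below), presumably to keep the stack
+# pointer word aligned; mia7_flg records the update so it can be undone
+# if an access error shows up later in the emulation.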
+
+	global		inc_areg
+inc_areg:
+	mov.w		(tbl_iareg.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_iareg.b,%pc,%d1.w*1)
+
+tbl_iareg:
+	short		iareg0 - tbl_iareg
+	short		iareg1 - tbl_iareg
+	short		iareg2 - tbl_iareg
+	short		iareg3 - tbl_iareg
+	short		iareg4 - tbl_iareg
+	short		iareg5 - tbl_iareg
+	short		iareg6 - tbl_iareg
+	short		iareg7 - tbl_iareg
+
+iareg0:	add.l		%d0,EXC_DREGS+0x8(%a6)
+	rts
+iareg1:	add.l		%d0,EXC_DREGS+0xc(%a6)
+	rts
+iareg2:	add.l		%d0,%a2
+	rts
+iareg3:	add.l		%d0,%a3
+	rts
+iareg4:	add.l		%d0,%a4
+	rts
+iareg5:	add.l		%d0,%a5
+	rts
+iareg6:	add.l		%d0,(%a6)
+	rts
+iareg7:	mov.b		&mia7_flg,SPCOND_FLG(%a6)
+	cmpi.b		%d0,&0x1
+	beq.b		iareg7b
+	add.l		%d0,EXC_A7(%a6)
+	rts
+iareg7b:
+	addq.l		&0x2,EXC_A7(%a6)
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	dec_areg(): decrement an address register by the value in d0	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = amount to decrement by					#
+#	d1 = index of address register to decrement			#
+#									#
+# OUTPUT **************************************************************	#
+#	(address register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Typically used for an instruction w/ a pre-decrement <ea>,	#
+# this routine subtracts the decrement value in d0 from the address	#
+# register specified by d1. A0/A1/A6/A7 reside on the stack. The rest	#
+# reside in their original places.					#
+#	For a7, if the decrement amount is one, then we have to		#
+# decrement by two. For any a7 update, set the mda7_flag so that if	#
+# an access error exception occurs later in emulation, this address	#
+# register update can be undone.					#
+#									#
+#########################################################################
+
+	global		dec_areg
+dec_areg:
+	mov.w		(tbl_dareg.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_dareg.b,%pc,%d1.w*1)
+
+tbl_dareg:
+	short		dareg0 - tbl_dareg
+	short		dareg1 - tbl_dareg
+	short		dareg2 - tbl_dareg
+	short		dareg3 - tbl_dareg
+	short		dareg4 - tbl_dareg
+	short		dareg5 - tbl_dareg
+	short		dareg6 - tbl_dareg
+	short		dareg7 - tbl_dareg
+
+dareg0:	sub.l		%d0,EXC_DREGS+0x8(%a6)
+	rts
+dareg1:	sub.l		%d0,EXC_DREGS+0xc(%a6)
+	rts
+dareg2:	sub.l		%d0,%a2
+	rts
+dareg3:	sub.l		%d0,%a3
+	rts
+dareg4:	sub.l		%d0,%a4
+	rts
+dareg5:	sub.l		%d0,%a5
+	rts
+dareg6:	sub.l		%d0,(%a6)
+	rts
+dareg7:	mov.b		&mda7_flg,SPCOND_FLG(%a6)
+	cmpi.b		%d0,&0x1
+	beq.b		dareg7b
+	sub.l		%d0,EXC_A7(%a6)
+	rts
+dareg7b:
+	subq.l		&0x2,EXC_A7(%a6)
+	rts
+
+##############################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	load_fpn1(): load FP register value into FP_SRC(a6).		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = index of FP register to load				#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SRC(a6) = value loaded from FP register file			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Using the index in d0, load FP_SRC(a6) with a number from the	#
+# FP register file.							#
+#									#
+#########################################################################
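+
+# note on the fmovm.x masks used below: in this static register-list
+# form fp0 corresponds to $80 and fp7 to $01, so &0x20 selects fp2,
+# &0x10 selects fp3, and so on.  fp0/fp1 are copied from the exception
+# frame instead, presumably because the package uses them as scratch
+# and keeps the user's values in EXC_FP0/EXC_FP1.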
+
+	global		load_fpn1
+load_fpn1:
+	mov.w		(tbl_load_fpn1.b,%pc,%d0.w*2), %d0
+	jmp		(tbl_load_fpn1.b,%pc,%d0.w*1)
+
+tbl_load_fpn1:
+	short		load_fpn1_0 - tbl_load_fpn1
+	short		load_fpn1_1 - tbl_load_fpn1
+	short		load_fpn1_2 - tbl_load_fpn1
+	short		load_fpn1_3 - tbl_load_fpn1
+	short		load_fpn1_4 - tbl_load_fpn1
+	short		load_fpn1_5 - tbl_load_fpn1
+	short		load_fpn1_6 - tbl_load_fpn1
+	short		load_fpn1_7 - tbl_load_fpn1
+
+load_fpn1_0:
+	mov.l		0+EXC_FP0(%a6), 0+FP_SRC(%a6)
+	mov.l		4+EXC_FP0(%a6), 4+FP_SRC(%a6)
+	mov.l		8+EXC_FP0(%a6), 8+FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_1:
+	mov.l		0+EXC_FP1(%a6), 0+FP_SRC(%a6)
+	mov.l		4+EXC_FP1(%a6), 4+FP_SRC(%a6)
+	mov.l		8+EXC_FP1(%a6), 8+FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_2:
+	fmovm.x		&0x20, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_3:
+	fmovm.x		&0x10, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_4:
+	fmovm.x		&0x08, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_5:
+	fmovm.x		&0x04, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_6:
+	fmovm.x		&0x02, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_7:
+	fmovm.x		&0x01, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+
+#############################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	load_fpn2(): load FP register value into FP_DST(a6).		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = index of FP register to load				#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_DST(a6) = value loaded from FP register file			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Using the index in d0, load FP_DST(a6) with a number from the	#
+# FP register file.							#
+#									#
+#########################################################################
+
+	global		load_fpn2
+load_fpn2:
+	mov.w		(tbl_load_fpn2.b,%pc,%d0.w*2), %d0
+	jmp		(tbl_load_fpn2.b,%pc,%d0.w*1)
+
+tbl_load_fpn2:
+	short		load_fpn2_0 - tbl_load_fpn2
+	short		load_fpn2_1 - tbl_load_fpn2
+	short		load_fpn2_2 - tbl_load_fpn2
+	short		load_fpn2_3 - tbl_load_fpn2
+	short		load_fpn2_4 - tbl_load_fpn2
+	short		load_fpn2_5 - tbl_load_fpn2
+	short		load_fpn2_6 - tbl_load_fpn2
+	short		load_fpn2_7 - tbl_load_fpn2
+
+load_fpn2_0:
+	mov.l		0+EXC_FP0(%a6), 0+FP_DST(%a6)
+	mov.l		4+EXC_FP0(%a6), 4+FP_DST(%a6)
+	mov.l		8+EXC_FP0(%a6), 8+FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_1:
+	mov.l		0+EXC_FP1(%a6), 0+FP_DST(%a6)
+	mov.l		4+EXC_FP1(%a6), 4+FP_DST(%a6)
+	mov.l		8+EXC_FP1(%a6), 8+FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_2:
+	fmovm.x		&0x20, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_3:
+	fmovm.x		&0x10, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_4:
+	fmovm.x		&0x08, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_5:
+	fmovm.x		&0x04, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_6:
+	fmovm.x		&0x02, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_7:
+	fmovm.x		&0x01, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+
+#############################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_fpreg(): store an fp value to the fpreg designated d0.	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	fp0 = extended precision value to store				#
+#	d0  = index of floating-point register				#
+#									#
+# OUTPUT **************************************************************	#
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Store the value in fp0 to the FP register designated by the	#
+# value in d0. The FP number can be DENORM or SNAN so we have to be	#
+# careful that we don't take an exception here.				#
+#									#
+#########################################################################
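+
+# example: for d0 = 2, fp0 is pushed to the stack with one fmovm and
+# popped back into fp2 with another; a two-step fmovm is used rather
+# than a plain register-to-register fmove, presumably because fmovm
+# moves the raw extended bits without rounding or raising exceptions,
+# which matters when the value is an SNAN or DENORM (see the note above).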
+
+	global		store_fpreg
+store_fpreg:
+	mov.w		(tbl_store_fpreg.b,%pc,%d0.w*2), %d0
+	jmp		(tbl_store_fpreg.b,%pc,%d0.w*1)
+
+tbl_store_fpreg:
+	short		store_fpreg_0 - tbl_store_fpreg
+	short		store_fpreg_1 - tbl_store_fpreg
+	short		store_fpreg_2 - tbl_store_fpreg
+	short		store_fpreg_3 - tbl_store_fpreg
+	short		store_fpreg_4 - tbl_store_fpreg
+	short		store_fpreg_5 - tbl_store_fpreg
+	short		store_fpreg_6 - tbl_store_fpreg
+	short		store_fpreg_7 - tbl_store_fpreg
+
+store_fpreg_0:
+	fmovm.x		&0x80, EXC_FP0(%a6)
+	rts
+store_fpreg_1:
+	fmovm.x		&0x80, EXC_FP1(%a6)
+	rts
+store_fpreg_2:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x20
+	rts
+store_fpreg_3:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x10
+	rts
+store_fpreg_4:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x08
+	rts
+store_fpreg_5:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x04
+	rts
+store_fpreg_6:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x02
+	rts
+store_fpreg_7:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x01
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_denorm(): denormalize an intermediate result			#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT *************************************************************** #
+#	a0 = points to the operand to be denormalized			#
+#		(in the internal extended format)			#
+#									#
+#	d0 = rounding precision						#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = pointer to the denormalized result				#
+#		(in the internal extended format)			#
+#									#
+#	d0 = guard,round,sticky						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the exponent underflow threshold for the given	#
+# precision, shift the mantissa bits right in order to raise the	#
+# exponent of the operand to the threshold value. While shifting the	#
+# mantissa bits right, maintain the value of the guard, round, and	#
+# sticky bits.								#
+# other notes:								#
+#	(1) _denorm() is called by the underflow routines		#
+#	(2) _denorm() does NOT affect the status register		#
+#									#
+#########################################################################
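+
+# example: if the selected threshold exceeds the operand exponent by 3,
+# dnrm_lp shifts the 64-bit mantissa right by 3, the 3 bits shifted out
+# become the new guard/round/sticky (any old g,r,s collapse into the
+# sticky), and the exponent is raised to the threshold.  if the
+# difference is more than 65, every mantissa bit would end up in the
+# sticky, so the mantissa is simply cleared and the sticky bit set.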
+
+#
+# table of exponent threshold values for each precision
+#
+tbl_thresh:
+	short		0x0
+	short		sgl_thresh
+	short		dbl_thresh
+
+	global		_denorm
+_denorm:
+#
+# Load the exponent threshold for the precision selected and check
+# to see if (threshold - exponent) is > 65 in which case we can
+# simply calculate the sticky bit and zero the mantissa. otherwise
+# we have to call the denormalization routine.
+#
+	lsr.b		&0x2, %d0		# shift prec to lo bits
+	mov.w		(tbl_thresh.b,%pc,%d0.w*2), %d1 # load prec threshold
+	mov.w		%d1, %d0		# copy d1 into d0
+	sub.w		FTEMP_EX(%a0), %d0	# diff = threshold - exp
+	cmpi.w		%d0, &66		# is diff > 65? (mant + g,r bits)
+	bpl.b		denorm_set_stky		# yes; just calc sticky
+
+	clr.l		%d0			# clear g,r,s
+	btst		&inex2_bit, FPSR_EXCEPT(%a6) # yes; was INEX2 set?
+	beq.b		denorm_call		# no; don't change anything
+	bset		&29, %d0		# yes; set sticky bit
+
+denorm_call:
+	bsr.l		dnrm_lp			# denormalize the number
+	rts
+
+#
+# all bits would have been shifted off during the denorm so simply
+# calculate if the sticky should be set and clear the entire mantissa.
+#
+denorm_set_stky:
+	mov.l		&0x20000000, %d0	# set sticky bit in return value
+	mov.w		%d1, FTEMP_EX(%a0)	# load exp with threshold
+	clr.l		FTEMP_HI(%a0)		# zero hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# zero lo(mantissa)
+	rts
+
+#									#
+# dnrm_lp(): denormalize exponent/mantissa to the specified threshold	#
+#									#
+# INPUT:								#
+#	%a0	   : points to the operand to be denormalized		#
+#	%d0{31:29} : initial guard,round,sticky				#
+#	%d1{15:0}  : denormalization threshold				#
+# OUTPUT:								#
+#	%a0	   : points to the denormalized operand			#
+#	%d0{31:29} : final guard,round,sticky				#
+#									#
+
+# *** Local Equates *** #
+set	GRS,		L_SCR2			# g,r,s temp storage
+set	FTEMP_LO2,	L_SCR1			# FTEMP_LO copy
+
+	global		dnrm_lp
+dnrm_lp:
+
+#
+# make a copy of FTEMP_LO and place the g,r,s bits directly after it
+# in memory so as to make the bitfield extraction for denormalization easier.
+#
+	mov.l		FTEMP_LO(%a0), FTEMP_LO2(%a6) # make FTEMP_LO copy
+	mov.l		%d0, GRS(%a6)		# place g,r,s after it
+
+#
+# check to see how much less than the underflow threshold the operand
+# exponent is.
+#
+	mov.l		%d1, %d0		# copy the denorm threshold
+	sub.w		FTEMP_EX(%a0), %d1	# d1 = threshold - uns exponent
+	ble.b		dnrm_no_lp		# d1 <= 0
+	cmpi.w		%d1, &0x20		# is ( 0 <= d1 < 32) ?
+	blt.b		case_1			# yes
+	cmpi.w		%d1, &0x40		# is (32 <= d1 < 64) ?
+	blt.b		case_2			# yes
+	bra.w		case_3			# (d1 >= 64)
+
+#
+# No denormalization necessary
+#
+dnrm_no_lp:
+	mov.l		GRS(%a6), %d0		# restore original g,r,s
+	rts
+
+#
+# case (0<d1<32)
+#
+# %d0 = denorm threshold
+# %d1 = "n" = amt to shift
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
+#	\	   \		      \			 \
+#	 \	    \		       \		  \
+#	  \	     \			\		   \
+#	   \	      \			 \		    \
+#	    \	       \		  \		     \
+#	     \		\		   \		      \
+#	      \		 \		    \		       \
+#	       \	  \		     \			\
+#	<-(n)-><-(32 - n)-><------(32)-------><------(32)------->
+#	---------------------------------------------------------
+#	|0.....0| NEW_HI  |  NEW_FTEMP_LO     |grs		|
+#	---------------------------------------------------------
+#
+case_1:
+	mov.l		%d2, -(%sp)		# create temp storage
+
+	mov.w		%d0, FTEMP_EX(%a0)	# exponent = denorm threshold
+	mov.l		&32, %d0
+	sub.w		%d1, %d0		# %d0 = 32 - %d1
+
+	cmpi.w		%d1, &29		# is shft amt >= 29
+	blt.b		case1_extract		# no; no fix needed
+	mov.b		GRS(%a6), %d2
+	or.b		%d2, 3+FTEMP_LO2(%a6)
+
+case1_extract:
+	bfextu		FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_HI
+	bfextu		FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new FTEMP_LO
+	bfextu		FTEMP_LO2(%a6){%d0:&32}, %d0 # %d0 = new G,R,S
+
+	mov.l		%d2, FTEMP_HI(%a0)	# store new FTEMP_HI
+	mov.l		%d1, FTEMP_LO(%a0)	# store new FTEMP_LO
+
+	bftst		%d0{&2:&30}		# were bits shifted off?
+	beq.b		case1_sticky_clear	# no; go finish
+	bset		&rnd_stky_bit, %d0	# yes; set sticky bit
+
+case1_sticky_clear:
+	and.l		&0xe0000000, %d0	# clear all but G,R,S
+	mov.l		(%sp)+, %d2		# restore temp register
+	rts
+
+#
+# case (32<=d1<64)
+#
+# %d0 = denorm threshold
+# %d1 = "n" = amt to shift
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
+#	\	   \		      \
+#	 \	    \		       \
+#	  \	     \			-------------------
+#	   \	      --------------------		   \
+#	    -------------------		  \		    \
+#			       \	   \		     \
+#				\	    \		      \
+#				 \	     \		       \
+#	<-------(32)------><-(n)-><-(32 - n)-><------(32)------->
+#	---------------------------------------------------------
+#	|0...............0|0....0| NEW_LO     |grs		|
+#	---------------------------------------------------------
+#
+case_2:
+	mov.l		%d2, -(%sp)		# create temp storage
+
+	mov.w		%d0, FTEMP_EX(%a0)	# exponent = denorm threshold
+	subi.w		&0x20, %d1		# %d1 now between 0 and 32
+	mov.l		&0x20, %d0
+	sub.w		%d1, %d0		# %d0 = 32 - %d1
+
+# subtle step here; or in the g,r,s at the bottom of FTEMP_LO to minimize
+# the number of bits to check for the sticky detect.
+# it only plays a role in shift amounts of 61-63.
+	mov.b		GRS(%a6), %d2
+	or.b		%d2, 3+FTEMP_LO2(%a6)
+
+	bfextu		FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_LO
+	bfextu		FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new G,R,S
+
+	bftst		%d1{&2:&30}		# were any bits shifted off?
+	bne.b		case2_set_sticky	# yes; set sticky bit
+	bftst		FTEMP_LO2(%a6){%d0:&31}	# were any bits shifted off?
+	bne.b		case2_set_sticky	# yes; set sticky bit
+
+	mov.l		%d1, %d0		# move new G,R,S to %d0
+	bra.b		case2_end
+
+case2_set_sticky:
+	mov.l		%d1, %d0		# move new G,R,S to %d0
+	bset		&rnd_stky_bit, %d0	# set sticky bit
+
+case2_end:
+	clr.l		FTEMP_HI(%a0)		# store FTEMP_HI = 0
+	mov.l		%d2, FTEMP_LO(%a0)	# store FTEMP_LO
+	and.l		&0xe0000000, %d0	# clear all but G,R,S
+
+	mov.l		(%sp)+,%d2		# restore temp register
+	rts
+
+#
+# case (d1>=64)
+#
+# %d0 = denorm threshold
+# %d1 = amt to shift
+#
+case_3:
+	mov.w		%d0, FTEMP_EX(%a0)	# insert denorm threshold
+
+	cmpi.w		%d1, &65		# is shift amt > 65?
+	blt.b		case3_64		# no; it's == 64
+	beq.b		case3_65		# no; it's == 65
+
+#
+# case (d1>65)
+#
+# Shift value is > 65 and out of range. All bits are shifted off.
+# Return a zero mantissa with the sticky bit set
+#
+	clr.l		FTEMP_HI(%a0)		# clear hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	mov.l		&0x20000000, %d0	# set sticky bit
+	rts
+
+#
+# case (d1 == 64)
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-------(32)------>
+#	\		   \
+#	 \		    \
+#	  \		     \
+#	   \		      ------------------------------
+#	    -------------------------------		    \
+#					   \		     \
+#					    \		      \
+#					     \		       \
+#					      <-------(32)------>
+#	---------------------------------------------------------
+#	|0...............0|0................0|grs		|
+#	---------------------------------------------------------
+#
+case3_64:
+	mov.l		FTEMP_HI(%a0), %d0	# fetch hi(mantissa)
+	mov.l		%d0, %d1		# make a copy
+	and.l		&0xc0000000, %d0	# extract G,R
+	and.l		&0x3fffffff, %d1	# extract other bits
+
+	bra.b		case3_complete
+
+#
+# case (d1 == 65)
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-------(32)------>
+#	\		   \
+#	 \		    \
+#	  \		     \
+#	   \		      ------------------------------
+#	    --------------------------------		    \
+#					    \		     \
+#					     \		      \
+#					      \		       \
+#					       <-------(31)----->
+#	---------------------------------------------------------
+#	|0...............0|0................0|0rs		|
+#	---------------------------------------------------------
+#
+case3_65:
+	mov.l		FTEMP_HI(%a0), %d0	# fetch hi(mantissa)
+	mov.l		%d0, %d1		# make a copy
+	and.l		&0x80000000, %d0	# extract R bit
+	lsr.l		&0x1, %d0		# shift high bit into R bit
+	and.l		&0x7fffffff, %d1	# extract other bits
+
+case3_complete:
+# last operation done was an "and" of the bits shifted off so the condition
+# codes are already set so branch accordingly.
+	bne.b		case3_set_sticky	# yes; go set new sticky
+	tst.l		FTEMP_LO(%a0)		# were any bits shifted off?
+	bne.b		case3_set_sticky	# yes; go set new sticky
+	tst.b		GRS(%a6)		# were any bits shifted off?
+	bne.b		case3_set_sticky	# yes; go set new sticky
+
+#
+# no bits were shifted off so don't set the sticky bit.
+# the guard and round bits left in %d0 are returned unchanged, and
+# the entire mantissa is zero.
+#
+	clr.l		FTEMP_HI(%a0)		# clear hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	rts
+
+#
+# some bits were shifted off so set the sticky bit.
+# the entire mantissa is zero.
+#
+case3_set_sticky:
+	bset		&rnd_stky_bit,%d0	# set new sticky bit
+	clr.l		FTEMP_HI(%a0)		# clear hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_round(): round result according to precision/mode		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0	  = ptr to input operand in internal extended format	#
+#	d1(hi)    = contains rounding precision:			#
+#			ext = $0000xxxx					#
+#			sgl = $0004xxxx					#
+#			dbl = $0008xxxx					#
+#	d1(lo)	  = contains rounding mode:				#
+#			RN  = $xxxx0000					#
+#			RZ  = $xxxx0001					#
+#			RM  = $xxxx0002					#
+#			RP  = $xxxx0003					#
+#	d0{31:29} = contains the g,r,s bits (extended)			#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = pointer to rounded result					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	On return the value pointed to by a0 is correctly rounded,	#
+#	a0 is preserved and the g-r-s bits in d0 are cleared.		#
+#	The result is not typed - the tag field is invalid.  The	#
+#	result is still in the internal extended format.		#
+#									#
+#	The INEX bit of USER_FPSR will be set if the rounded result was	#
+#	inexact (i.e. if any of the g-r-s bits were set).		#
+#									#
+#########################################################################
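+#
+# Example (illustrative values only): once ext_grs() below has folded all
+# bits beyond the selected precision into g,r,s, a value of g,r,s = 0,1,1
+# is inexact, so inex2/ainex get set; RZ and RN (g=0) truncate, while
+# RP/RM add 1 to the lsb only when the result sign matches the rounding
+# direction.  g,r,s = 0,0,0 means the result was already exact and is
+# returned unchanged.
+#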
+
+	global		_round
+_round:
+#
+# ext_grs() looks at the rounding precision and sets the appropriate
+# G,R,S bits.
+# If (G,R,S == 0) then result is exact and round is done, else set
+# the inex flag in status reg and continue.
+#
+	bsr.l		ext_grs			# extract G,R,S
+
+	tst.l		%d0			# are G,R,S zero?
+	beq.w		truncate		# yes; round is complete
+
+	or.w		&inx2a_mask, 2+USER_FPSR(%a6) # set inex2/ainex
+
+#
+# Use rounding mode as an index into a jump table for these modes.
+# All of the following assumes grs != 0.
+#
+	mov.w		(tbl_mode.b,%pc,%d1.w*2), %a1 # load jump offset
+	jmp		(tbl_mode.b,%pc,%a1)	# jmp to rnd mode handler
+
+tbl_mode:
+	short		rnd_near - tbl_mode
+	short		truncate - tbl_mode	# RZ always truncates
+	short		rnd_mnus - tbl_mode
+	short		rnd_plus - tbl_mode
+
+#################################################################
+#	ROUND PLUS INFINITY					#
+#								#
+#	If sign of fp number = 0 (positive), then add 1 to l.	#
+#################################################################
+rnd_plus:
+	tst.b		FTEMP_SGN(%a0)		# check for sign
+	bmi.w		truncate		# if negative then truncate
+
+	mov.l		&0xffffffff, %d0	# force g,r,s to be all f's
+	swap		%d1			# set up d1 for round prec.
+
+	cmpi.b		%d1, &s_mode		# is prec = sgl?
+	beq.w		add_sgl			# yes
+	bgt.w		add_dbl			# no; it's dbl
+	bra.w		add_ext			# no; it's ext
+
+#################################################################
+#	ROUND MINUS INFINITY					#
+#								#
+#	If sign of fp number = 1 (negative), then add 1 to l.	#
+#################################################################
+rnd_mnus:
+	tst.b		FTEMP_SGN(%a0)		# check for sign
+	bpl.w		truncate		# if positive then truncate
+
+	mov.l		&0xffffffff, %d0	# force g,r,s to be all f's
+	swap		%d1			# set up d1 for round prec.
+
+	cmpi.b		%d1, &s_mode		# is prec = sgl?
+	beq.w		add_sgl			# yes
+	bgt.w		add_dbl			# no; it's dbl
+	bra.w		add_ext			# no; it's ext
+
+#################################################################
+#	ROUND NEAREST						#
+#								#
+#	If (g=1), then add 1 to l and if (r=s=0), then clear l	#
+#	Note that this will round to even in case of a tie.	#
+#################################################################
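+#
+#	Tie example (illustrative): with l = 0 and g,r,s = 1,0,0, the
+#	add sets l and the subsequent clear of l (r = s = 0) returns the
+#	even neighbor - same as truncation.  With l = 1, the add carries
+#	out of l, and the now-zero l is already even.
+#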
+rnd_near:
+	asl.l		&0x1, %d0		# shift g-bit to c-bit
+	bcc.w		truncate		# if (g=0), truncate
+
+	swap		%d1			# set up d1 for round prec.
+
+	cmpi.b		%d1, &s_mode		# is prec = sgl?
+	beq.w		add_sgl			# yes
+	bgt.w		add_dbl			# no; it's dbl
+	bra.w		add_ext			# no; it's ext
+
+# *** LOCAL EQUATES ***
+set	ad_1_sgl,	0x00000100	# constant to add 1 to l-bit in sgl prec
+set	ad_1_dbl,	0x00000800	# constant to add 1 to l-bit in dbl prec
+
+#########################
+#	ADD SINGLE	#
+#########################
+add_sgl:
+	add.l		&ad_1_sgl, FTEMP_HI(%a0)
+	bcc.b		scc_clr			# no mantissa overflow
+	roxr.w		FTEMP_HI(%a0)		# shift v-bit back in
+	roxr.w		FTEMP_HI+2(%a0)		# shift v-bit back in
+	add.w		&0x1, FTEMP_EX(%a0)	# and incr exponent
+scc_clr:
+	tst.l		%d0			# test for rs = 0
+	bne.b		sgl_done
+	and.w		&0xfe00, FTEMP_HI+2(%a0) # clear the l-bit
+sgl_done:
+	and.l		&0xffffff00, FTEMP_HI(%a0) # truncate bits beyond sgl limit
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	rts
+
+#########################
+#	ADD EXTENDED	#
+#########################
+add_ext:
+	addq.l		&1,FTEMP_LO(%a0)	# add 1 to l-bit
+	bcc.b		xcc_clr			# test for carry out
+	addq.l		&1,FTEMP_HI(%a0)	# propagate carry
+	bcc.b		xcc_clr
+	roxr.w		FTEMP_HI(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_HI+2(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_LO(%a0)
+	roxr.w		FTEMP_LO+2(%a0)
+	add.w		&0x1,FTEMP_EX(%a0)	# and inc exp
+xcc_clr:
+	tst.l		%d0			# test rs = 0
+	bne.b		add_ext_done
+	and.b		&0xfe,FTEMP_LO+3(%a0)	# clear the l bit
+add_ext_done:
+	rts
+
+#########################
+#	ADD DOUBLE	#
+#########################
+add_dbl:
+	add.l		&ad_1_dbl, FTEMP_LO(%a0) # add 1 to lsb
+	bcc.b		dcc_clr			# no carry
+	addq.l		&0x1, FTEMP_HI(%a0)	# propagate carry
+	bcc.b		dcc_clr			# no carry
+
+	roxr.w		FTEMP_HI(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_HI+2(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_LO(%a0)
+	roxr.w		FTEMP_LO+2(%a0)
+	addq.w		&0x1, FTEMP_EX(%a0)	# incr exponent
+dcc_clr:
+	tst.l		%d0			# test for rs = 0
+	bne.b		dbl_done
+	and.w		&0xf000, FTEMP_LO+2(%a0) # clear the l-bit
+
+dbl_done:
+	and.l		&0xfffff800,FTEMP_LO(%a0) # truncate bits beyond dbl limit
+	rts
+
+###########################
+# Truncate all other bits #
+###########################
+truncate:
+	swap		%d1			# select rnd prec
+
+	cmpi.b		%d1, &s_mode		# is prec sgl?
+	beq.w		sgl_done		# yes
+	bgt.b		dbl_done		# no; it's dbl
+	rts					# no; it's ext
+
+
+#
+# ext_grs(): extract guard, round and sticky bits according to
+#	     rounding precision.
+#
+# INPUT
+#	d0	   = extended precision g,r,s (in d0{31:29})
+#	d1	   = {PREC,ROUND}
+# OUTPUT
+#	d0{31:29}  = guard, round, sticky
+#
+# The ext_grs routine extracts the guard/round/sticky bits according to
+# the selected rounding precision. It is called by the round subroutine
+# only.  All registers except d0 are kept intact. d0 becomes an
+# updated guard,round,sticky in d0{31:29}.
+#
+# Notes: ext_grs uses the rounding PREC, and therefore has to swap d1
+#	 prior to usage, and needs to restore d1 to its original value.
+#	 this routine is tightly tied to the round routine and not meant
+#	 to uphold standard subroutine calling practices.
+#
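+# For instance (single precision, values chosen for illustration): the
+# guard and round bits are FTEMP_HI bits 7:6 (the two bits just past the
+# 24-bit mantissa) and the new sticky is the OR of FTEMP_HI bits 5:0, all
+# of FTEMP_LO, and the incoming extended g,r,s.  So FTEMP_HI = 0xffffff80
+# with FTEMP_LO = 0 and d0 = 0 yields g,r,s = 1,0,0 - a tie case for RN.
+#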
+
+ext_grs:
+	swap		%d1			# have d1.w point to round precision
+	tst.b		%d1			# is rnd prec = extended?
+	bne.b		ext_grs_not_ext		# no; go handle sgl or dbl
+
+#
+# %d0 actually already holds g,r,s since _round() had it before calling
+# this function. so, as long as we don't disturb it, we are "returning" it.
+#
+ext_grs_ext:
+	swap		%d1			# yes; return to correct positions
+	rts
+
+ext_grs_not_ext:
+	movm.l		&0x3000, -(%sp)		# make some temp registers {d2/d3}
+
+	cmpi.b		%d1, &s_mode		# is rnd prec = sgl?
+	bne.b		ext_grs_dbl		# no; go handle dbl
+
+#
+# sgl:
+#	96		64	  40	32		0
+#	-----------------------------------------------------
+#	| EXP	|XXXXXXX|	  |xx	|		|grs|
+#	-----------------------------------------------------
+#			<--(24)--->nn\			   /
+#				   ee ---------------------
+#				   ww		|
+#						v
+#				   gr	   new sticky
+#
+ext_grs_sgl:
+	bfextu		FTEMP_HI(%a0){&24:&2}, %d3 # sgl prec. g-r are 2 bits right
+	mov.l		&30, %d2		# of the sgl prec. limits
+	lsl.l		%d2, %d3		# shift g-r bits to MSB of d3
+	mov.l		FTEMP_HI(%a0), %d2	# get word 2 for s-bit test
+	and.l		&0x0000003f, %d2	# s bit is the or of all other
+	bne.b		ext_grs_st_stky		# bits to the right of g-r
+	tst.l		FTEMP_LO(%a0)		# test lower mantissa
+	bne.b		ext_grs_st_stky		# if any are set, set sticky
+	tst.l		%d0			# test original g,r,s
+	bne.b		ext_grs_st_stky		# if any are set, set sticky
+	bra.b		ext_grs_end_sd		# if words 3 and 4 are clr, exit
+
+#
+# dbl:
+#	96		64		32	 11	0
+#	-----------------------------------------------------
+#	| EXP	|XXXXXXX|		|	 |xx	|grs|
+#	-----------------------------------------------------
+#						  nn\	    /
+#						  ee -------
+#						  ww	|
+#							v
+#						  gr	new sticky
+#
+ext_grs_dbl:
+	bfextu		FTEMP_LO(%a0){&21:&2}, %d3 # dbl-prec. g-r are 2 bits right
+	mov.l		&30, %d2		# of the dbl prec. limits
+	lsl.l		%d2, %d3		# shift g-r bits to the MSB of d3
+	mov.l		FTEMP_LO(%a0), %d2	# get lower mantissa  for s-bit test
+	and.l		&0x000001ff, %d2	# s bit is the or-ing of all
+	bne.b		ext_grs_st_stky		# other bits to the right of g-r
+	tst.l		%d0			# test word original g,r,s
+	bne.b		ext_grs_st_stky		# if any are set, set sticky
+	bra.b		ext_grs_end_sd		# if clear, exit
+
+ext_grs_st_stky:
+	bset		&rnd_stky_bit, %d3	# set sticky bit
+ext_grs_end_sd:
+	mov.l		%d3, %d0		# return grs to d0
+
+	movm.l		(%sp)+, &0xc		# restore scratch registers {d2/d3}
+
+	swap		%d1			# restore d1 to original
+	rts
+
+#########################################################################
+# norm(): normalize the mantissa of an extended precision input. the	#
+#	  input operand should not be normalized already.		#
+#									#
+# XDEF ****************************************************************	#
+#	norm()								#
+#									#
+# XREF **************************************************************** #
+#	none								#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer fp extended precision operand to normalize		#
+#									#
+# OUTPUT ************************************************************** #
+#	d0 = number of bit positions the mantissa was shifted		#
+#	a0 = the input operand's mantissa is normalized; the exponent	#
+#	     is unchanged.						#
+#									#
+#########################################################################
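+#
+# Example (arbitrary input): hi(man) = 0x00000001, lo(man) = 0x80000000
+# has 31 leading zeroes, so the mantissa is shifted left by 31: hi(man)
+# becomes 0xc0000000, lo(man) becomes 0x00000000, and d0 returns 31.
+# The exponent is left for the caller to adjust.
+#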
+	global		norm
+norm:
+	mov.l		%d2, -(%sp)		# create some temp regs
+	mov.l		%d3, -(%sp)
+
+	mov.l		FTEMP_HI(%a0), %d0	# load hi(mantissa)
+	mov.l		FTEMP_LO(%a0), %d1	# load lo(mantissa)
+
+	bfffo		%d0{&0:&32}, %d2	# how many places to shift?
+	beq.b		norm_lo			# hi(man) is all zeroes!
+
+norm_hi:
+	lsl.l		%d2, %d0		# left shift hi(man)
+	bfextu		%d1{&0:%d2}, %d3	# extract lo bits
+
+	or.l		%d3, %d0		# create hi(man)
+	lsl.l		%d2, %d1		# create lo(man)
+
+	mov.l		%d0, FTEMP_HI(%a0)	# store new hi(man)
+	mov.l		%d1, FTEMP_LO(%a0)	# store new lo(man)
+
+	mov.l		%d2, %d0		# return shift amount
+
+	mov.l		(%sp)+, %d3		# restore temp regs
+	mov.l		(%sp)+, %d2
+
+	rts
+
+norm_lo:
+	bfffo		%d1{&0:&32}, %d2	# how many places to shift?
+	lsl.l		%d2, %d1		# shift lo(man)
+	add.l		&32, %d2		# add 32 to shft amount
+
+	mov.l		%d1, FTEMP_HI(%a0)	# store hi(man)
+	clr.l		FTEMP_LO(%a0)		# lo(man) is now zero
+
+	mov.l		%d2, %d0		# return shift amount
+
+	mov.l		(%sp)+, %d3		# restore temp regs
+	mov.l		(%sp)+, %d2
+
+	rts
+
+#########################################################################
+# unnorm_fix(): - changes an UNNORM to one of NORM, DENORM, or ZERO	#
+#		- returns corresponding optype tag			#
+#									#
+# XDEF ****************************************************************	#
+#	unnorm_fix()							#
+#									#
+# XREF **************************************************************** #
+#	norm() - normalize the mantissa					#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to unnormalized extended precision number		#
+#									#
+# OUTPUT ************************************************************** #
+#	d0 = optype tag - is corrected to one of NORM, DENORM, or ZERO	#
+#	a0 = input operand has been converted to a norm, denorm, or	#
+#	     zero; both the exponent and mantissa are changed.		#
+#									#
+#########################################################################
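+#
+# Example (illustrative): with a biased exponent of 0x0005 and a mantissa
+# needing a 3-bit shift, the operand can be fully normalized (new exponent
+# 0x0002, tag NORM).  If the same mantissa needed a 9-bit shift, only 5 of
+# those bits fit before the exponent reaches zero, so the mantissa is
+# shifted by 5, the exponent is forced to zero, and the tag is DENORM.
+#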
+
+	global		unnorm_fix
+unnorm_fix:
+	bfffo		FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
+	bne.b		unnorm_shift		# hi(man) is not all zeroes
+
+#
+# hi(man) is all zeroes so see if any bits in lo(man) are set
+#
+unnorm_chk_lo:
+	bfffo		FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
+	beq.w		unnorm_zero		# yes
+
+	add.w		&32, %d0		# no; fix shift distance
+
+#
+# d0 = # shifts needed for complete normalization
+#
+unnorm_shift:
+	clr.l		%d1			# clear top word
+	mov.w		FTEMP_EX(%a0), %d1	# extract exponent
+	and.w		&0x7fff, %d1		# strip off sgn
+
+	cmp.w		%d0, %d1		# will denorm push exp < 0?
+	bgt.b		unnorm_nrm_zero		# yes; denorm only until exp = 0
+
+#
+# exponent would not go < 0. therefore, number stays normalized
+#
+	sub.w		%d0, %d1		# shift exponent value
+	mov.w		FTEMP_EX(%a0), %d0	# load old exponent
+	and.w		&0x8000, %d0		# save old sign
+	or.w		%d0, %d1		# {sgn,new exp}
+	mov.w		%d1, FTEMP_EX(%a0)	# insert new exponent
+
+	bsr.l		norm			# normalize UNNORM
+
+	mov.b		&NORM, %d0		# return new optype tag
+	rts
+
+#
+# exponent would go < 0, so only denormalize until exp = 0
+#
+unnorm_nrm_zero:
+	cmp.b		%d1, &32		# is exp <= 32?
+	bgt.b		unnorm_nrm_zero_lrg	# no; go handle large exponent
+
+	bfextu		FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
+	mov.l		%d0, FTEMP_HI(%a0)	# save new hi(man)
+
+	mov.l		FTEMP_LO(%a0), %d0	# fetch old lo(man)
+	lsl.l		%d1, %d0		# extract new lo(man)
+	mov.l		%d0, FTEMP_LO(%a0)	# save new lo(man)
+
+	and.w		&0x8000, FTEMP_EX(%a0)	# set exp = 0
+
+	mov.b		&DENORM, %d0		# return new optype tag
+	rts
+
+#
+# only mantissa bits set are in lo(man)
+#
+unnorm_nrm_zero_lrg:
+	sub.w		&32, %d1		# adjust shft amt by 32
+
+	mov.l		FTEMP_LO(%a0), %d0	# fetch old lo(man)
+	lsl.l		%d1, %d0		# left shift lo(man)
+
+	mov.l		%d0, FTEMP_HI(%a0)	# store new hi(man)
+	clr.l		FTEMP_LO(%a0)		# lo(man) = 0
+
+	and.w		&0x8000, FTEMP_EX(%a0)	# set exp = 0
+
+	mov.b		&DENORM, %d0		# return new optype tag
+	rts
+
+#
+# whole mantissa is zero so this UNNORM is actually a zero
+#
+unnorm_zero:
+	and.w		&0x8000, FTEMP_EX(%a0)	# force exponent to zero
+
+	mov.b		&ZERO, %d0		# fix optype tag
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	set_tag_x(): return the optype of the input ext fp number	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, UNNORM, ZERO	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#	If it's an unnormalized zero, alter the operand and force it	#
+# to be a normal zero.							#
+#									#
+#########################################################################
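+#
+# A few illustrative encodings (exp = FTEMP_EX & 0x7fff; values arbitrary):
+#	exp = 0x7fff, hi = 0x00000000, lo = 0x00000000	-> INF
+#	exp = 0x7fff, hi = 0x40000000			-> QNAN (quiet bit set)
+#	exp = 0x7fff, hi = 0x00000000, lo = 0x00000001	-> SNAN
+#	exp = 0x4001, hi = 0x80000000 (j-bit set)	-> NORM
+#	exp = 0x0000, hi = 0x00000000, lo = 0x00000001	-> DENORM
+#	exp = 0x4001, hi = 0x00000000, lo = 0x00000000	-> "unnormalized zero",
+#							   converted to ZERO
+#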
+
+	global		set_tag_x
+set_tag_x:
+	mov.w		FTEMP_EX(%a0), %d0	# extract exponent
+	andi.w		&0x7fff, %d0		# strip off sign
+	cmpi.w		%d0, &0x7fff		# is (EXP == MAX)?
+	beq.b		inf_or_nan_x
+not_inf_or_nan_x:
+	btst		&0x7,FTEMP_HI(%a0)
+	beq.b		not_norm_x
+is_norm_x:
+	mov.b		&NORM, %d0
+	rts
+not_norm_x:
+	tst.w		%d0			# is exponent = 0?
+	bne.b		is_unnorm_x
+not_unnorm_x:
+	tst.l		FTEMP_HI(%a0)
+	bne.b		is_denorm_x
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_denorm_x
+is_zero_x:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_x:
+	mov.b		&DENORM, %d0
+	rts
+# we must now distinguish "unnormalized zeroes", which we
+# must convert to zero.
+is_unnorm_x:
+	tst.l		FTEMP_HI(%a0)
+	bne.b		is_unnorm_reg_x
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_unnorm_reg_x
+# it's an "unnormalized zero". let's convert it to an actual zero...
+	andi.w		&0x8000,FTEMP_EX(%a0)	# clear exponent
+	mov.b		&ZERO, %d0
+	rts
+is_unnorm_reg_x:
+	mov.b		&UNNORM, %d0
+	rts
+inf_or_nan_x:
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_nan_x
+	mov.l		FTEMP_HI(%a0), %d0
+	and.l		&0x7fffffff, %d0	# msb is a don't care!
+	bne.b		is_nan_x
+is_inf_x:
+	mov.b		&INF, %d0
+	rts
+is_nan_x:
+	btst		&0x6, FTEMP_HI(%a0)
+	beq.b		is_snan_x
+	mov.b		&QNAN, %d0
+	rts
+is_snan_x:
+	mov.b		&SNAN, %d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	set_tag_d(): return the optype of the input dbl fp number	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = points to double precision operand				#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, ZERO		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#									#
+#########################################################################
+
+	global		set_tag_d
+set_tag_d:
+	mov.l		FTEMP(%a0), %d0
+	mov.l		%d0, %d1
+
+	andi.l		&0x7ff00000, %d0
+	beq.b		zero_or_denorm_d
+
+	cmpi.l		%d0, &0x7ff00000
+	beq.b		inf_or_nan_d
+
+is_norm_d:
+	mov.b		&NORM, %d0
+	rts
+zero_or_denorm_d:
+	and.l		&0x000fffff, %d1
+	bne		is_denorm_d
+	tst.l		4+FTEMP(%a0)
+	bne		is_denorm_d
+is_zero_d:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_d:
+	mov.b		&DENORM, %d0
+	rts
+inf_or_nan_d:
+	and.l		&0x000fffff, %d1
+	bne		is_nan_d
+	tst.l		4+FTEMP(%a0)
+	bne		is_nan_d
+is_inf_d:
+	mov.b		&INF, %d0
+	rts
+is_nan_d:
+	btst		&19, %d1
+	bne		is_qnan_d
+is_snan_d:
+	mov.b		&SNAN, %d0
+	rts
+is_qnan_d:
+	mov.b		&QNAN, %d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	set_tag_s(): return the optype of the input sgl fp number	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to single precision operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, ZERO		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#									#
+#########################################################################
+
+	global		set_tag_s
+set_tag_s:
+	mov.l		FTEMP(%a0), %d0
+	mov.l		%d0, %d1
+
+	andi.l		&0x7f800000, %d0
+	beq.b		zero_or_denorm_s
+
+	cmpi.l		%d0, &0x7f800000
+	beq.b		inf_or_nan_s
+
+is_norm_s:
+	mov.b		&NORM, %d0
+	rts
+zero_or_denorm_s:
+	and.l		&0x007fffff, %d1
+	bne		is_denorm_s
+is_zero_s:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_s:
+	mov.b		&DENORM, %d0
+	rts
+inf_or_nan_s:
+	and.l		&0x007fffff, %d1
+	bne		is_nan_s
+is_inf_s:
+	mov.b		&INF, %d0
+	rts
+is_nan_s:
+	btst		&22, %d1
+	bne		is_qnan_s
+is_snan_s:
+	mov.b		&SNAN, %d0
+	rts
+is_qnan_s:
+	mov.b		&QNAN, %d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	unf_res(): routine to produce default underflow result of a	#
+#		   scaled extended precision number; this is used by	#
+#		   fadd/fdiv/fmul/etc. emulation routines.		#
+#	unf_res4(): same as above but for fsglmul/fsgldiv which use	#
+#		    single round prec and extended prec mode.		#
+#									#
+# XREF ****************************************************************	#
+#	_denorm() - denormalize according to scale factor		#
+#	_round() - round denormalized number according to rnd prec	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision operand			#
+#	d0 = scale factor						#
+#	d1 = rounding precision/mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = pointer to default underflow result in extended precision	#
+#	d0.b = result FPSR_cc which caller may or may not want to save	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Convert the input operand to "internal format" which means the	#
+# exponent is extended to 16 bits and the sign is stored in the unused	#
+# portion of the extended precision operand. Denormalize the number	#
+# according to the scale factor passed in d0. Then, round the		#
+# denormalized result.							#
+#	Set the FPSR_exc bits as appropriate but return the cc bits in	#
+# d0 in case the caller doesn't want to save them (as is the case for	#
+# fmove out).								#
+#	unf_res4() for fsglmul/fsgldiv forces the denorm to extended	#
+# precision and the rounding mode to single.				#
+#									#
+#########################################################################
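+#
+# Worked illustration (assuming d1.b carries the FPCR-style mode byte,
+# precision in bits 7:6 and rounding mode in bits 5:4): d1.b = 0x50 means
+# single precision, RZ; the code below then hands 0x4 to _denorm() as the
+# precision code and sets up d1 = {sgl,RZ} before calling _round().
+#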
+	global		unf_res
+unf_res:
+	mov.l		%d1, -(%sp)		# save rnd prec,mode on stack
+
+	btst		&0x7, FTEMP_EX(%a0)	# make "internal" format
+	sne		FTEMP_SGN(%a0)
+
+	mov.w		FTEMP_EX(%a0), %d1	# extract exponent
+	and.w		&0x7fff, %d1
+	sub.w		%d0, %d1
+	mov.w		%d1, FTEMP_EX(%a0)	# insert 16 bit exponent
+
+	mov.l		%a0, -(%sp)		# save operand ptr during calls
+
+	mov.l		0x4(%sp),%d0		# pass rnd prec.
+	andi.w		&0x00c0,%d0
+	lsr.w		&0x4,%d0
+	bsr.l		_denorm			# denorm result
+
+	mov.l		(%sp),%a0
+	mov.w		0x6(%sp),%d1		# load prec:mode into %d1
+	andi.w		&0xc0,%d1		# extract rnd prec
+	lsr.w		&0x4,%d1
+	swap		%d1
+	mov.w		0x6(%sp),%d1
+	andi.w		&0x30,%d1
+	lsr.w		&0x4,%d1
+	bsr.l		_round			# round the denorm
+
+	mov.l		(%sp)+, %a0
+
+# result is now rounded properly. convert back to normal format
+	bclr		&0x7, FTEMP_EX(%a0)	# clear sgn first; may have residue
+	tst.b		FTEMP_SGN(%a0)		# is "internal result" sign set?
+	beq.b		unf_res_chkifzero	# no; result is positive
+	bset		&0x7, FTEMP_EX(%a0)	# set result sgn
+	clr.b		FTEMP_SGN(%a0)		# clear temp sign
+
+# the number may have become zero after rounding. set ccodes accordingly.
+unf_res_chkifzero:
+	clr.l		%d0
+	tst.l		FTEMP_HI(%a0)		# is value now a zero?
+	bne.b		unf_res_cont		# no
+	tst.l		FTEMP_LO(%a0)
+	bne.b		unf_res_cont		# no
+#	bset		&z_bit, FPSR_CC(%a6)	# yes; set zero ccode bit
+	bset		&z_bit, %d0		# yes; set zero ccode bit
+
+unf_res_cont:
+
+#
+# can inex1 also be set along with unfl and inex2???
+#
+# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
+#
+	btst		&inex2_bit, FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.b		unf_res_end		# no
+	bset		&aunfl_bit, FPSR_AEXCEPT(%a6) # yes; set aunfl
+
+unf_res_end:
+	add.l		&0x4, %sp		# clear stack
+	rts
+
+# unf_res() for fsglmul() and fsgldiv().
+	global		unf_res4
+unf_res4:
+	mov.l		%d1,-(%sp)		# save rnd prec,mode on stack
+
+	btst		&0x7,FTEMP_EX(%a0)	# make "internal" format
+	sne		FTEMP_SGN(%a0)
+
+	mov.w		FTEMP_EX(%a0),%d1	# extract exponent
+	and.w		&0x7fff,%d1
+	sub.w		%d0,%d1
+	mov.w		%d1,FTEMP_EX(%a0)	# insert 16 bit exponent
+
+	mov.l		%a0,-(%sp)		# save operand ptr during calls
+
+	clr.l		%d0			# force rnd prec = ext
+	bsr.l		_denorm			# denorm result
+
+	mov.l		(%sp),%a0
+	mov.w		&s_mode,%d1		# force rnd prec = sgl
+	swap		%d1
+	mov.w		0x6(%sp),%d1		# load rnd mode
+	andi.w		&0x30,%d1		# extract rnd prec
+	lsr.w		&0x4,%d1
+	bsr.l		_round			# round the denorm
+
+	mov.l		(%sp)+,%a0
+
+# result is now rounded properly. convert back to normal format
+	bclr		&0x7,FTEMP_EX(%a0)	# clear sgn first; may have residue
+	tst.b		FTEMP_SGN(%a0)		# is "internal result" sign set?
+	beq.b		unf_res4_chkifzero	# no; result is positive
+	bset		&0x7,FTEMP_EX(%a0)	# set result sgn
+	clr.b		FTEMP_SGN(%a0)		# clear temp sign
+
+# the number may have become zero after rounding. set ccodes accordingly.
+unf_res4_chkifzero:
+	clr.l		%d0
+	tst.l		FTEMP_HI(%a0)		# is value now a zero?
+	bne.b		unf_res4_cont		# no
+	tst.l		FTEMP_LO(%a0)
+	bne.b		unf_res4_cont		# no
+#	bset		&z_bit,FPSR_CC(%a6)	# yes; set zero ccode bit
+	bset		&z_bit,%d0		# yes; set zero ccode bit
+
+unf_res4_cont:
+
+#
+# can inex1 also be set along with unfl and inex2???
+#
+# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
+#
+	btst		&inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.b		unf_res4_end		# no
+	bset		&aunfl_bit,FPSR_AEXCEPT(%a6) # yes; set aunfl
+
+unf_res4_end:
+	add.l		&0x4,%sp		# clear stack
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	ovf_res(): routine to produce the default overflow result of	#
+#		   an overflowing number.				#
+#	ovf_res2(): same as above but the rnd mode/prec are passed	#
+#		    differently.					#
+#									#
+# XREF ****************************************************************	#
+#	none								#
+#									#
+# INPUT ***************************************************************	#
+#	d1.b	= '-1' => (-); '0' => (+)				#
+#   ovf_res():								#
+#	d0	= rnd mode/prec						#
+#   ovf_res2():								#
+#	hi(d0)	= rnd prec						#
+#	lo(d0)	= rnd mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	a0	= points to extended precision result			#
+#	d0.b	= condition code bits					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The default overflow result can be determined by the sign of	#
+# the result and the rounding mode/prec in effect. These bits are	#
+# concatenated together to create an index into the default result	#
+# table. A pointer to the correct result is returned in a0. The		#
+# resulting condition codes are returned in d0 in case the caller	#
+# doesn't want FPSR_cc altered (as is the case for fmove out).		#
+#									#
+#########################################################################
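+#
+# Worked example (values for illustration, assuming d0.b holds the FPCR
+# prec/mode nibble in its upper four bits): a negative result (d1.b = -1)
+# in double precision with RM selected indexes entry 0x10 + 2*4 + 2 = 26,
+# so tbl_ovfl_cc returns 0xa (N + INF) and tbl_ovfl_result returns the
+# -INF pattern 0xffff0000,0x00000000,0x00000000 - i.e. a negative overflow
+# rounded toward minus infinity becomes -INF.
+#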
+
+	global		ovf_res
+ovf_res:
+	andi.w		&0x10,%d1		# keep result sign
+	lsr.b		&0x4,%d0		# shift prec/mode
+	or.b		%d0,%d1			# concat the two
+	mov.w		%d1,%d0			# make a copy
+	lsl.b		&0x1,%d1		# multiply d1 by 2
+	bra.b		ovf_res_load
+
+	global		ovf_res2
+ovf_res2:
+	and.w		&0x10, %d1		# keep result sign
+	or.b		%d0, %d1		# insert rnd mode
+	swap		%d0
+	or.b		%d0, %d1		# insert rnd prec
+	mov.w		%d1, %d0		# make a copy
+	lsl.b		&0x1, %d1		# shift left by 1
+
+#
+# use the rounding mode, precision, and result sign as in index into the
+# two tables below to fetch the default result and the result ccodes.
+#
+ovf_res_load:
+	mov.b		(tbl_ovfl_cc.b,%pc,%d0.w*1), %d0 # fetch result ccodes
+	lea		(tbl_ovfl_result.b,%pc,%d1.w*8), %a0 # return result ptr
+
+	rts
+
+tbl_ovfl_cc:
+	byte		0x2, 0x0, 0x0, 0x2
+	byte		0x2, 0x0, 0x0, 0x2
+	byte		0x2, 0x0, 0x0, 0x2
+	byte		0x0, 0x0, 0x0, 0x0
+	byte		0x2+0x8, 0x8, 0x2+0x8, 0x8
+	byte		0x2+0x8, 0x8, 0x2+0x8, 0x8
+	byte		0x2+0x8, 0x8, 0x2+0x8, 0x8
+
+tbl_ovfl_result:
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+	long		0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RZ
+	long		0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RM
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+	long		0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RZ
+	long		0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RM
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+	long		0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RZ
+	long		0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RM
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+	long		0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RZ
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+	long		0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RP
+
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+	long		0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RZ
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+	long		0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RP
+
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+	long		0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RZ
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+	long		0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RP
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	get_packed(): fetch a packed operand from memory and then	#
+#		      convert it to a floating-point binary number.	#
+#									#
+# XREF ****************************************************************	#
+#	_dcalc_ea() - calculate the correct <ea>			#
+#	_mem_read() - fetch the packed operand from memory		#
+#	facc_in_x() - the fetch failed so jump to special exit code	#
+#	decbin()    - convert packed to binary extended precision	#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If no failure on _mem_read():					#
+#	FP_SRC(a6) = packed operand now as a binary FP number		#
+#									#
+# ALGORITHM ***********************************************************	#
+# Get the correct <ea> which is the value on the exception stack	#
+# frame w/ maybe a correction factor if the <ea> is -(an) or (an)+.	#
+# Then, fetch the operand from memory. If the fetch fails, exit		#
+# through facc_in_x().							#
+#	If the packed operand is a ZERO,NAN, or INF, convert it to	#
+# its binary representation here. Else, call decbin() which will	#
+# convert the packed value to an extended precision binary value.	#
+#									#
+#########################################################################
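+#
+# For reference, a packed operand occupies 12 bytes in the 68881-style
+# format: SM and SE in the two msbs, a 3-digit bcd exponent in bits 91:80,
+# one bcd integer digit in bits 67:64, and 16 bcd fraction digits below
+# that.  For instance, +1.25e+3 arrives as 0x00030001,0x25000000,0x00000000:
+# its exponent field is not $7fff and its integer digit is non-zero, so the
+# checks below fall through to decbin().
+#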
+
+# the stacked <ea> for packed is correct except for -(An).
+# the base reg must be updated for both -(An) and (An)+.
+	global		get_packed
+get_packed:
+	mov.l		&0xc,%d0		# packed is 12 bytes
+	bsr.l		_dcalc_ea		# fetch <ea>; correct An
+
+	lea		FP_SRC(%a6),%a1		# pass: ptr to super dst
+	mov.l		&0xc,%d0		# pass: 12 bytes
+	bsr.l		_dmem_read		# read packed operand
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_x		# yes
+
+# The packed operand is an INF or a NAN if the exponent field is all ones.
+	bfextu		FP_SRC(%a6){&1:&15},%d0	# get exp
+	cmpi.w		%d0,&0x7fff		# INF or NAN?
+	bne.b		gp_try_zero		# no
+	rts					# operand is an INF or NAN
+
+# The packed operand is a zero if the mantissa is all zero, else it's
+# a normal packed op.
+gp_try_zero:
+	mov.b		3+FP_SRC(%a6),%d0	# get byte 4
+	andi.b		&0x0f,%d0		# clear all but last nybble
+	bne.b		gp_not_spec		# not a zero
+	tst.l		FP_SRC_HI(%a6)		# is lw 2 zero?
+	bne.b		gp_not_spec		# not a zero
+	tst.l		FP_SRC_LO(%a6)		# is lw 3 zero?
+	bne.b		gp_not_spec		# not a zero
+	rts					# operand is a ZERO
+gp_not_spec:
+	lea		FP_SRC(%a6),%a0		# pass: ptr to packed op
+	bsr.l		decbin			# convert to extended
+	fmovm.x		&0x80,FP_SRC(%a6)	# make this the srcop
+	rts
+
+#########################################################################
+# decbin(): Converts normalized packed bcd value pointed to by register	#
+#	    a0 to extended-precision value in fp0.			#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to normalized packed bcd value			#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = exact fp representation of the packed bcd value.		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Expected is a normal bcd (i.e. non-exceptional; all inf, zero,	#
+#	and NaN operands are dispatched without entering this routine)	#
+#	value in 68881/882 format at location (a0).			#
+#									#
+#	A1. Convert the bcd exponent to binary by successive adds and	#
+#	muls. Set the sign according to SE. Subtract 16 to compensate	#
+#	for the mantissa which is to be interpreted as 17 integer	#
+#	digits, rather than 1 integer and 16 fraction digits.		#
+#	Note: this operation can never overflow.			#
+#									#
+#	A2. Convert the bcd mantissa to binary by successive		#
+#	adds and muls in FP0. Set the sign according to SM.		#
+#	The mantissa digits will be converted with the decimal point	#
+#	assumed following the least-significant digit.			#
+#	Note: this operation can never overflow.			#
+#									#
+#	A3. Count the number of leading/trailing zeros in the		#
+#	bcd string.  If SE is positive, count the leading zeros;	#
+#	if negative, count the trailing zeros.  Set the adjusted	#
+#	exponent equal to the exponent from A1 and the zero count	#
+#	added if SM = 1 and subtracted if SM = 0.  Scale the		#
+#	mantissa the equivalent of forcing in the bcd value:		#
+#									#
+#	SM = 0	a non-zero digit in the integer position		#
+#	SM = 1	a non-zero digit in Mant0, lsd of the fraction		#
+#									#
+#	this will insure that any value, regardless of its		#
+#	representation (ex. 0.1E2, 1E1, 10E0, 100E-1), is converted	#
+#	consistently.							#
+#									#
+#	A4. Calculate the factor 10^exp in FP1 using a table of		#
+#	10^(2^n) values.  To reduce the error in forming factors	#
+#	greater than 10^27, a directed rounding scheme is used with	#
+#	tables rounded to RN, RM, and RP, according to the table	#
+#	in the comments of the pwrten section.				#
+#									#
+#	A5. Form the final binary number by scaling the mantissa by	#
+#	the exponent factor.  This is done by multiplying the		#
+#	mantissa in FP0 by the factor in FP1 if the adjusted		#
+#	exponent sign is positive, and dividing FP0 by FP1 if		#
+#	it is negative.							#
+#									#
+#	Clean up and return. Check if the final mul or div was inexact.	#
+#	If so, set INEX1 in USER_FPSR.					#
+#									#
+#########################################################################
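+#
+# Worked example: +1.25e+3, packed as 0x00030001,0x25000000,0x00000000.
+# A1 reads a bcd exponent of 3 and subtracts 16, giving -13; A2 gathers
+# the 17 mantissa digits into 12500000000000000 (1.25e16); |exp| <= 27 so
+# A3 strips nothing; A4/A5 then divide by 10^13, leaving exactly 1.25e+3
+# in fp0.
+#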
+
+#
+#	PTENRN, PTENRM, and PTENRP are arrays of powers of 10 rounded
+#	to nearest, minus, and plus, respectively.  The tables include
+#	10**{1,2,4,8,16,32,64,128,256,512,1024,2048,4096}.  No rounding
+#	is required until the power is greater than 27, however, all
+#	tables include the first 5 for ease of indexing.
+#
+RTABLE:
+	byte		0,0,0,0
+	byte		2,3,2,3
+	byte		2,3,3,2
+	byte		3,2,2,3
+
+	set		FNIBS,7
+	set		FSTRT,0
+
+	set		ESTRT,4
+	set		EDIGITS,2
+
+	global		decbin
+decbin:
+	mov.l		0x0(%a0),FP_SCR0_EX(%a6) # make a copy of input
+	mov.l		0x4(%a0),FP_SCR0_HI(%a6) # so we don't alter it
+	mov.l		0x8(%a0),FP_SCR0_LO(%a6)
+
+	lea		FP_SCR0(%a6),%a0
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+	fmovm.x		&0x1,-(%sp)		# save fp1
+#
+# Calculate exponent:
+#  1. Copy bcd value in memory for use as a working copy.
+#  2. Calculate absolute value of exponent in d1 by mul and add.
+#  3. Correct for exponent sign.
+#  4. Subtract 16 to compensate for interpreting the mant as all integer digits.
+#     (i.e., all digits assumed left of the decimal point.)
+#
+# Register usage:
+#
+#  calc_e:
+#	(*)  d0: temp digit storage
+#	(*)  d1: accumulator for binary exponent
+#	(*)  d2: digit count
+#	(*)  d3: offset pointer
+#	( )  d4: first word of bcd
+#	( )  a0: pointer to working bcd value
+#	( )  a6: pointer to original bcd value
+#	(*)  FP_SCR1: working copy of original bcd value
+#	(*)  L_SCR1: copy of original exponent word
+#
+calc_e:
+	mov.l		&EDIGITS,%d2		# dbf count for the 3 exponent digits
+	mov.l		&ESTRT,%d3		# counter to pick up digits
+	mov.l		(%a0),%d4		# get first word of bcd
+	clr.l		%d1			# zero d1 for accumulator
+e_gd:
+	mulu.l		&0xa,%d1		# mul partial product by one digit place
+	bfextu		%d4{%d3:&4},%d0		# get the digit and zero extend into d0
+	add.l		%d0,%d1			# d1 = d1 + d0
+	addq.b		&4,%d3			# advance d3 to the next digit
+	dbf.w		%d2,e_gd		# if we have used all 3 digits, exit loop
+	btst		&30,%d4			# get SE
+	beq.b		e_pos			# don't negate if pos
+	neg.l		%d1			# negate before subtracting
+e_pos:
+	sub.l		&16,%d1			# sub to compensate for shift of mant
+	bge.b		e_save			# if still pos, do not neg
+	neg.l		%d1			# now negative, make pos and set SE
+	or.l		&0x40000000,%d4		# set SE in d4,
+	or.l		&0x40000000,(%a0)	# and in working bcd
+e_save:
+	mov.l		%d1,-(%sp)		# save exp on stack
+#
+#
+# Calculate mantissa:
+#  1. Calculate absolute value of mantissa in fp0 by mul and add.
+#  2. Correct for mantissa sign.
+#     (i.e., all digits assumed left of the decimal point.)
+#
+# Register usage:
+#
+#  calc_m:
+#	(*)  d0: temp digit storage
+#	(*)  d1: lword counter
+#	(*)  d2: digit count
+#	(*)  d3: offset pointer
+#	( )  d4: words 2 and 3 of bcd
+#	( )  a0: pointer to working bcd value
+#	( )  a6: pointer to original bcd value
+#	(*) fp0: mantissa accumulator
+#	( )  FP_SCR1: working copy of original bcd value
+#	( )  L_SCR1: copy of original exponent word
+#
+calc_m:
+	mov.l		&1,%d1			# word counter, init to 1
+	fmov.s		&0x00000000,%fp0	# accumulator
+#
+#
+#  Since the packed number has a long word between the first & second parts,
+#  get the integer digit then skip down & get the rest of the
+#  mantissa.  We will unroll the loop once.
+#
+	bfextu		(%a0){&28:&4},%d0	# integer part is ls digit in long word
+	fadd.b		%d0,%fp0		# add digit to sum in fp0
+#
+#
+#  Get the rest of the mantissa.
+#
+loadlw:
+	mov.l		(%a0,%d1.L*4),%d4	# load mantissa longword into d4
+	mov.l		&FSTRT,%d3		# counter to pick up digits
+	mov.l		&FNIBS,%d2		# reset number of digits per a0 ptr
+md2b:
+	fmul.s		&0x41200000,%fp0	# fp0 = fp0 * 10
+	bfextu		%d4{%d3:&4},%d0		# get the digit and zero extend
+	fadd.b		%d0,%fp0		# fp0 = fp0 + digit
+#
+#
+#  If all the digits (8) in that long word have been converted (d2=0),
+#  then inc d1 (=2) to point to the next long word and reset d3 to 0
+#  to initialize the digit offset, and set d2 to 7 for the digit count;
+#  else continue with this long word.
+#
+	addq.b		&4,%d3			# advance d3 to the next digit
+	dbf.w		%d2,md2b		# check for last digit in this lw
+nextlw:
+	addq.l		&1,%d1			# inc lw pointer in mantissa
+	cmp.l		%d1,&2			# test for last lw
+	ble.b		loadlw			# if not, get last one
+#
+#  Check the sign of the mant and make the value in fp0 the same sign.
+#
+m_sign:
+	btst		&31,(%a0)		# test sign of the mantissa
+	beq.b		ap_st_z			# if clear, go to append/strip zeros
+	fneg.x		%fp0			# if set, negate fp0
+#
+# Append/strip zeros:
+#
+#  For adjusted exponents which have an absolute value greater than 27*,
+#  this routine calculates the amount needed to normalize the mantissa
+#  for the adjusted exponent.  That number is subtracted from the exp
+#  if the exp was positive, and added if it was negative.  The purpose
+#  of this is to reduce the value of the exponent and the possibility
+#  of error in calculation of pwrten.
+#
+#  1. Branch on the sign of the adjusted exponent.
+#  2p.(positive exp)
+#   2. Check M16 and the digits in lwords 2 and 3 in descending order.
+#   3. Add one for each zero encountered until a non-zero digit.
+#   4. Subtract the count from the exp.
+#   5. Check if the exp has crossed zero in #3 above; make the exp abs
+#	   and set SE.
+#	6. Multiply the mantissa by 10**count.
+#  2n.(negative exp)
+#   2. Check the digits in lwords 3 and 2 in descending order.
+#   3. Add one for each zero encountered until a non-zero digit.
+#   4. Add the count to the exp.
+#   5. Check if the exp has crossed zero in #3 above; clear SE.
+#   6. Divide the mantissa by 10**count.
+#
+#  *Why 27?  If the adjusted exponent is within -28 < expA < 28, then
+#   any adjustment due to append/strip zeros will drive the resultant
+#   exponent towards zero.  Since all pwrten constants with a power
+#   of 27 or less are exact, there is no need to use this routine to
+#   attempt to lessen the resultant exponent.
+#
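+#  Example (illustrative): if expA = +30 and the three most significant
+#  mantissa digits are zero, the zero count is 3, the exp drops to 27,
+#  and the mantissa is multiplied by 10**3 - which brings the power-of-ten
+#  factor back into the exact (<= 10**27) range.
+#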
+# Register usage:
+#
+#  ap_st_z:
+#	(*)  d0: temp digit storage
+#	(*)  d1: zero count
+#	(*)  d2: digit count
+#	(*)  d3: offset pointer
+#	( )  d4: first word of bcd
+#	(*)  d5: lword counter
+#	( )  a0: pointer to working bcd value
+#	( )  FP_SCR1: working copy of original bcd value
+#	( )  L_SCR1: copy of original exponent word
+#
+#
+# First check the absolute value of the exponent to see if this
+# routine is necessary.  If so, then check the sign of the exponent
+# and do append (+) or strip (-) zeros accordingly.
+# This section handles a positive adjusted exponent.
+#
+ap_st_z:
+	mov.l		(%sp),%d1		# load expA for range test
+	cmp.l		%d1,&27			# compare expA with 27
+	ble.w		pwrten			# if abs(expA) <28, skip ap/st zeros
+	btst		&30,(%a0)		# check sign of exp
+	bne.b		ap_st_n			# if neg, go to neg side
+	clr.l		%d1			# zero count reg
+	mov.l		(%a0),%d4		# load lword 1 to d4
+	bfextu		%d4{&28:&4},%d0		# get M16 in d0
+	bne.b		ap_p_fx			# if M16 is non-zero, go fix exp
+	addq.l		&1,%d1			# inc zero count
+	mov.l		&1,%d5			# init lword counter
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 2 to d4
+	bne.b		ap_p_cl			# if lw 2 is zero, skip it
+	addq.l		&8,%d1			# and inc count by 8
+	addq.l		&1,%d5			# inc lword counter
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 3 to d4
+ap_p_cl:
+	clr.l		%d3			# init offset reg
+	mov.l		&7,%d2			# init digit counter
+ap_p_gd:
+	bfextu		%d4{%d3:&4},%d0		# get digit
+	bne.b		ap_p_fx			# if non-zero, go to fix exp
+	addq.l		&4,%d3			# point to next digit
+	addq.l		&1,%d1			# inc digit counter
+	dbf.w		%d2,ap_p_gd		# get next digit
+ap_p_fx:
+	mov.l		%d1,%d0			# copy counter to d0
+	mov.l		(%sp),%d1		# get adjusted exp from memory
+	sub.l		%d0,%d1			# subtract count from exp
+	bge.b		ap_p_fm			# if still pos, go to pwrten
+	neg.l		%d1			# now its neg; get abs
+	mov.l		(%a0),%d4		# load lword 1 to d4
+	or.l		&0x40000000,%d4		# and set SE in d4
+	or.l		&0x40000000,(%a0)	# and in memory
+#
+# Calculate the mantissa multiplier to compensate for the stripping of
+# zeros from the mantissa.
+#
+ap_p_fm:
+	lea.l		PTENRN(%pc),%a1		# get address of power-of-ten table
+	clr.l		%d3			# init table index
+	fmov.s		&0x3f800000,%fp1	# init fp1 to 1
+	mov.l		&3,%d2			# init d2 to count bits in counter
+ap_p_el:
+	asr.l		&1,%d0			# shift lsb into carry
+	bcc.b		ap_p_en			# if 1, mul fp1 by pwrten factor
+	fmul.x		(%a1,%d3),%fp1		# mul by 10**(d3_bit_no)
+ap_p_en:
+	add.l		&12,%d3			# inc d3 to next rtable entry
+	tst.l		%d0			# check if d0 is zero
+	bne.b		ap_p_el			# if not, get next bit
+	fmul.x		%fp1,%fp0		# mul mantissa by 10**(no_bits_shifted)
+	bra.b		pwrten			# go calc pwrten
+#
+# This section handles a negative adjusted exponent.
+#
+ap_st_n:
+	clr.l		%d1			# clr counter
+	mov.l		&2,%d5			# set up d5 to point to lword 3
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 3
+	bne.b		ap_n_cl			# if not zero, check digits
+	sub.l		&1,%d5			# dec d5 to point to lword 2
+	addq.l		&8,%d1			# inc counter by 8
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 2
+ap_n_cl:
+	mov.l		&28,%d3			# point to last digit
+	mov.l		&7,%d2			# init digit counter
+ap_n_gd:
+	bfextu		%d4{%d3:&4},%d0		# get digit
+	bne.b		ap_n_fx			# if non-zero, go to exp fix
+	subq.l		&4,%d3			# point to previous digit
+	addq.l		&1,%d1			# inc digit counter
+	dbf.w		%d2,ap_n_gd		# get next digit
+ap_n_fx:
+	mov.l		%d1,%d0			# copy counter to d0
+	mov.l		(%sp),%d1		# get adjusted exp from memory
+	sub.l		%d0,%d1			# subtract count from exp
+	bgt.b		ap_n_fm			# if still pos, go fix mantissa
+	neg.l		%d1			# take abs of exp and clr SE
+	mov.l		(%a0),%d4		# load lword 1 to d4
+	and.l		&0xbfffffff,%d4		# and clr SE in d4
+	and.l		&0xbfffffff,(%a0)	# and in memory
+#
+# Calculate the mantissa multiplier to compensate for the appending of
+# zeros to the mantissa.
+#
+ap_n_fm:
+	lea.l		PTENRN(%pc),%a1		# get address of power-of-ten table
+	clr.l		%d3			# init table index
+	fmov.s		&0x3f800000,%fp1	# init fp1 to 1
+	mov.l		&3,%d2			# init d2 to count bits in counter
+ap_n_el:
+	asr.l		&1,%d0			# shift lsb into carry
+	bcc.b		ap_n_en			# if 1, mul fp1 by pwrten factor
+	fmul.x		(%a1,%d3),%fp1		# mul by 10**(d3_bit_no)
+ap_n_en:
+	add.l		&12,%d3			# inc d3 to next rtable entry
+	tst.l		%d0			# check if d0 is zero
+	bne.b		ap_n_el			# if not, get next bit
+	fdiv.x		%fp1,%fp0		# div mantissa by 10**(no_bits_shifted)
+#
+#
+# Calculate power-of-ten factor from adjusted and shifted exponent.
+#
+# Register usage:
+#
+#  pwrten:
+#	(*)  d0: temp
+#	( )  d1: exponent
+#	(*)  d2: {FPCR[6:5],SM,SE} as index in RTABLE; temp
+#	(*)  d3: FPCR work copy
+#	( )  d4: first word of bcd
+#	(*)  a1: RTABLE pointer
+#  calc_p:
+#	(*)  d0: temp
+#	( )  d1: exponent
+#	(*)  d3: PWRTxx table index
+#	( )  a0: pointer to working copy of bcd
+#	(*)  a1: PWRTxx pointer
+#	(*) fp1: power-of-ten accumulator
+#
+# Pwrten calculates the exponent factor in the selected rounding mode
+# according to the following table:
+#
+#	Sign of Mant  Sign of Exp  Rounding Mode  PWRTEN Rounding Mode
+#
+#	ANY	  ANY	RN	RN
+#
+#	 +	   +	RP	RP
+#	 -	   +	RP	RM
+#	 +	   -	RP	RM
+#	 -	   -	RP	RP
+#
+#	 +	   +	RM	RM
+#	 -	   +	RM	RP
+#	 +	   -	RM	RP
+#	 -	   -	RM	RM
+#
+#	 +	   +	RZ	RM
+#	 -	   +	RZ	RM
+#	 +	   -	RZ	RP
+#	 -	   -	RZ	RP
+#
+#
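+# Example (values for illustration): with the user's FPCR set to RM and a
+# working bcd value whose SM = 1 (negative mantissa) and SE = 0 (positive
+# exponent), the index is RM*4 + SM*2 + SE = 10 and RTABLE[10] = 3 (RP),
+# so the factor is built from the RP-rounded table PTENRP - matching the
+# "- + RM RP" row above.
+#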
+pwrten:
+	mov.l		USER_FPCR(%a6),%d3	# get user's FPCR
+	bfextu		%d3{&26:&2},%d2		# isolate rounding mode bits
+	mov.l		(%a0),%d4		# reload 1st bcd word to d4
+	asl.l		&2,%d2			# format d2 to be
+	bfextu		%d4{&0:&2},%d0		# {FPCR[6],FPCR[5],SM,SE}
+	add.l		%d0,%d2			# in d2 as index into RTABLE
+	lea.l		RTABLE(%pc),%a1		# load rtable base
+	mov.b		(%a1,%d2),%d0		# load new rounding bits from table
+	clr.l		%d3			# clear d3 to force no exc and extended
+	bfins		%d0,%d3{&26:&2}		# stuff new rounding bits in FPCR
+	fmov.l		%d3,%fpcr		# write new FPCR
+	asr.l		&1,%d0			# write correct PTENxx table
+	bcc.b		not_rp			# to a1
+	lea.l		PTENRP(%pc),%a1		# it is RP
+	bra.b		calc_p			# go to init section
+not_rp:
+	asr.l		&1,%d0			# keep checking
+	bcc.b		not_rm
+	lea.l		PTENRM(%pc),%a1		# it is RM
+	bra.b		calc_p			# go to init section
+not_rm:
+	lea.l		PTENRN(%pc),%a1		# it is RN
+calc_p:
+	mov.l		%d1,%d0			# copy exp to d0;use d0
+	bpl.b		no_neg			# if exp is negative,
+	neg.l		%d0			# invert it
+	or.l		&0x40000000,(%a0)	# and set SE bit
+no_neg:
+	clr.l		%d3			# table index
+	fmov.s		&0x3f800000,%fp1	# init fp1 to 1
+e_loop:
+	asr.l		&1,%d0			# shift next bit into carry
+	bcc.b		e_next			# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1		# mul by 10**(d3_bit_no)
+e_next:
+	add.l		&12,%d3			# inc d3 to next rtable entry
+	tst.l		%d0			# check if d0 is zero
+	bne.b		e_loop			# not zero, continue shifting
+#
+#
+#  Check the sign of the adjusted exp and make the value in fp0 the
+#  same sign. If the exp was pos then multiply fp1*fp0;
+#  else divide fp0/fp1.
+#
+# Register Usage:
+#  norm:
+#	( )  a0: pointer to working bcd value
+#	(*) fp0: mantissa accumulator
+#	( ) fp1: scaling factor - 10**(abs(exp))
+#
+pnorm:
+	btst		&30,(%a0)		# test the sign of the exponent
+	beq.b		mul			# if clear, go to multiply
+div:
+	fdiv.x		%fp1,%fp0		# exp is negative, so divide mant by 10**|exp|
+	bra.b		end_dec
+mul:
+	fmul.x		%fp1,%fp0		# exp is positive, so multiply mant by 10**exp
+#
+#
+# Clean up and return with result in fp0.
+#
+# If the final mul/div in decbin incurred an inex exception,
+# it will be inex2, but will be reported as inex1 by get_op.
+#
+end_dec:
+	fmov.l		%fpsr,%d0		# get status register
+	bclr		&inex2_bit+8,%d0	# test for inex2 and clear it
+	beq.b		no_exc			# skip this if no exc
+	ori.w		&inx1a_mask,2+USER_FPSR(%a6) # set INEX1/AINEX
+no_exc:
+	add.l		&0x4,%sp		# clear 1 lw param
+	fmovm.x		(%sp)+,&0x40		# restore fp1
+	movm.l		(%sp)+,&0x3c		# restore d2-d5
+	fmov.l		&0x0,%fpcr
+	fmov.l		&0x0,%fpsr
+	rts
+
+#########################################################################
+# bindec(): Converts an input in extended precision format to bcd format#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to the input extended precision value in memory.	#
+#	     the input may be either normalized, unnormalized, or	#
+#	     denormalized.						#
+#	d0 = contains the k-factor sign-extended to 32-bits.		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR0(a6) = bcd format result on the stack.			#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	A1.	Set RM and size ext;  Set SIGMA = sign of input.	#
+#		The k-factor is saved for use in d7. Clear the		#
+#		BINDEC_FLG for separating normalized/denormalized	#
+#		input.  If input is unnormalized or denormalized,	#
+#		normalize it.						#
+#									#
+#	A2.	Set X = abs(input).					#
+#									#
+#	A3.	Compute ILOG.						#
+#		ILOG is the log base 10 of the input value.  It is	#
+#		approximated by adding e + 0.f when the original	#
+#		value is viewed as 2^^e * 1.f in extended precision.	#
+#		This value is stored in d6.				#
+#									#
+#	A4.	Clr INEX bit.						#
+#		The operation in A3 above may have set INEX2.		#
+#									#
+#	A5.	Set ICTR = 0;						#
+#		ICTR is a flag used in A13.  It must be set before the	#
+#		loop entry A6.						#
+#									#
+#	A6.	Calculate LEN.						#
+#		LEN is the number of digits to be displayed.  The	#
+#		k-factor can dictate either the total number of digits,	#
+#		if it is a positive number, or the number of digits	#
+#		after the decimal point which are to be included as	#
+#		significant.  See the 68882 manual for examples.	#
+#		If LEN is computed to be greater than 17, set OPERR in	#
+#		USER_FPSR.  LEN is stored in d4.			#
+#									#
+#	A7.	Calculate SCALE.					#
+#		SCALE is equal to 10^ISCALE, where ISCALE is the number	#
+#		of decimal places needed to insure LEN integer digits	#
+#		in the output before conversion to bcd. LAMBDA is the	#
+#		sign of ISCALE, used in A9. Fp1 contains		#
+#		10^^(abs(ISCALE)) using a rounding mode which is a	#
+#		function of the original rounding mode and the signs	#
+#		of ISCALE and X.  A table is given in the code.		#
+#									#
+#	A8.	Clr INEX; Force RZ.					#
+#		The operation in A3 above may have set INEX2.		#
+#		RZ mode is forced for the scaling operation to insure	#
+#		only one rounding error.  The grs bits are collected in #
+#		the INEX flag for use in A10.				#
+#									#
+#	A9.	Scale X -> Y.						#
+#		The mantissa is scaled to the desired number of		#
+#		significant digits.  The excess digits are collected	#
+#		in INEX2.						#
+#									#
+#	A10.	Or in INEX.						#
+#		If INEX is set, round error occurred.  This is		#
+#		compensated for by 'or-ing' in the INEX2 flag to	#
+#		the lsb of Y.						#
+#									#
+#	A11.	Restore original FPCR; set size ext.			#
+#		Perform FINT operation in the user's rounding mode.	#
+#		Keep the size to extended.				#
+#									#
+#	A12.	Calculate YINT = FINT(Y) according to user's rounding	#
+#		mode.  The FPSP routine sintd0 is used.  The output	#
+#		is in fp0.						#
+#									#
+#	A13.	Check for LEN digits.					#
+#		If the int operation results in more than LEN digits,	#
+#		or less than LEN -1 digits, adjust ILOG and repeat from	#
+#		A6.  This test occurs only on the first pass.  If the	#
+#		result is exactly 10^LEN, decrement ILOG and divide	#
+#		the mantissa by 10.					#
+#									#
+#	A14.	Convert the mantissa to bcd.				#
+#		The binstr routine is used to convert the LEN digit	#
+#		mantissa to bcd in memory.  The input to binstr is	#
+#		to be a fraction; i.e. (mantissa)/10^LEN and adjusted	#
+#		such that the decimal point is to the left of bit 63.	#
+#		The bcd digits are stored in the correct position in	#
+#		the final string area in memory.			#
+#									#
+#	A15.	Convert the exponent to bcd.				#
+#		As in A14 above, the exp is converted to bcd and the	#
+#		digits are stored in the final string.			#
+#		Test the length of the final exponent string.  If the	#
+#		length is 4, set operr.					#
+#									#
+#	A16.	Write sign bits to final string.			#
+#									#
+#########################################################################
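+#
+# Worked example (k-factor convention per the 68882 manual): converting
+# 123.456 with k = -4 requests 4 digits to the right of the decimal point;
+# ILOG = 2, so LEN = ILOG + 1 - k = 7 and the bcd string reads
+# +1.234560E+2.  A positive k of 3 would instead request 3 significant
+# digits, giving +1.23E+2.
+#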
+
+set	BINDEC_FLG,	EXC_TEMP	# DENORM flag
+
+# Constants in extended precision
+PLOG2:
+	long		0x3FFD0000,0x9A209A84,0xFBCFF798,0x00000000
+PLOG2UP1:
+	long		0x3FFD0000,0x9A209A84,0xFBCFF799,0x00000000
+
+# Constants in single precision
+FONE:
+	long		0x3F800000,0x00000000,0x00000000,0x00000000
+FTWO:
+	long		0x40000000,0x00000000,0x00000000,0x00000000
+FTEN:
+	long		0x41200000,0x00000000,0x00000000,0x00000000
+F4933:
+	long		0x459A2800,0x00000000,0x00000000,0x00000000
+
+RBDTBL:
+	byte		0,0,0,0
+	byte		3,3,2,2
+	byte		3,2,2,3
+	byte		2,3,3,2
+
+#	Implementation Notes:
+#
+#	The registers are used as follows:
+#
+#		d0: scratch; LEN input to binstr
+#		d1: scratch
+#		d2: upper 32-bits of mantissa for binstr
+#		d3: scratch;lower 32-bits of mantissa for binstr
+#		d4: LEN
+#		d5: LAMBDA/ICTR
+#		d6: ILOG
+#		d7: k-factor
+#		a0: ptr for original operand/final result
+#		a1: scratch pointer
+#		a2: pointer to FP_X; abs(original value) in ext
+#		fp0: scratch
+#		fp1: scratch
+#		fp2: scratch
+#		F_SCR1:
+#		F_SCR2:
+#		L_SCR1:
+#		L_SCR2:
+
+	global		bindec
+bindec:
+	movm.l		&0x3f20,-(%sp)	#  {%d2-%d7/%a2}
+	fmovm.x		&0x7,-(%sp)	#  {%fp0-%fp2}
+
+# A1. Set RM and size ext. Set SIGMA = sign input;
+#     The k-factor is saved for use in d7.  Clear BINDEC_FLG for
+#     separating  normalized/denormalized input.  If the input
+#     is a denormalized number, set the BINDEC_FLG memory word
+#     to signal denorm.  If the input is unnormalized, normalize
+#     the input and test for denormalized result.
+#
+	fmov.l		&rm_mode*0x10,%fpcr	# set RM and ext
+	mov.l		(%a0),L_SCR2(%a6)	# save exponent for sign check
+	mov.l		%d0,%d7		# move k-factor to d7
+
+	clr.b		BINDEC_FLG(%a6)	# clr norm/denorm flag
+	cmpi.b		STAG(%a6),&DENORM # is input a DENORM?
+	bne.w		A2_str		# no; input is a NORM
+
+#
+# Normalize the denorm
+#
+un_de_norm:
+	mov.w		(%a0),%d0
+	and.w		&0x7fff,%d0	# strip sign of normalized exp
+	mov.l		4(%a0),%d1
+	mov.l		8(%a0),%d2
+norm_loop:
+	sub.w		&1,%d0
+	lsl.l		&1,%d2
+	roxl.l		&1,%d1
+	tst.l		%d1
+	bge.b		norm_loop
+#
+# Test if the normalized input is denormalized
+#
+	tst.w		%d0
+	bgt.b		pos_exp		# if greater than zero, it is a norm
+	st		BINDEC_FLG(%a6)	# set flag for denorm
+pos_exp:
+	and.w		&0x7fff,%d0	# strip sign of normalized exp
+	mov.w		%d0,(%a0)
+	mov.l		%d1,4(%a0)
+	mov.l		%d2,8(%a0)
+
+# A2. Set X = abs(input).
+#
+A2_str:
+	mov.l		(%a0),FP_SCR1(%a6)	# move input to work space
+	mov.l		4(%a0),FP_SCR1+4(%a6)	# move input to work space
+	mov.l		8(%a0),FP_SCR1+8(%a6)	# move input to work space
+	and.l		&0x7fffffff,FP_SCR1(%a6)	# create abs(X)
+
+# A3. Compute ILOG.
+#     ILOG is the log base 10 of the input value.  It is approx-
+#     imated by adding e + 0.f when the original value is viewed
+#     as 2^^e * 1.f in extended precision.  This value is stored
+#     in d6.
+#
+# Register usage:
+#	Input/Output
+#	d0: k-factor/exponent
+#	d2: x/x
+#	d3: x/x
+#	d4: x/x
+#	d5: x/x
+#	d6: x/ILOG
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: x/x
+#	a2: x/x
+#	fp0: x/float(ILOG)
+#	fp1: x/x
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X)/Abs(X) with $3fff exponent
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
+
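+# As a rough C sketch (illustrative only, not part of the FPSP sources;
+# names are made up), the ILOG estimate computed below is:
+#
+#	#include <math.h>
+#
+#	/* value = 2^e * frac, with 1.0 <= frac < 2.0; A1 selected RM
+#	 * rounding, so the float-to-int conversion effectively floors */
+#	static int approx_ilog(int e, double frac)
+#	{
+#		double t = (double)e + (frac - 1.0);		/* e + 0.f */
+#		return (int)floor(t * 0.30102999566398120);	/* log10(2) */
+#	}
+#
+# The assembly keeps two nearly identical log10(2) constants, PLOG2 and
+# the slightly larger PLOG2UP1, and uses PLOG2UP1 when e + 0.f is
+# negative.
+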
+	tst.b		BINDEC_FLG(%a6)	# check for denorm
+	beq.b		A3_cont		# if clr, continue with norm
+	mov.l		&-4933,%d6	# force ILOG = -4933
+	bra.b		A4_str
+A3_cont:
+	mov.w		FP_SCR1(%a6),%d0	# move exp to d0
+	mov.w		&0x3fff,FP_SCR1(%a6)	# replace exponent with 0x3fff
+	fmov.x		FP_SCR1(%a6),%fp0	# now fp0 has 1.f
+	sub.w		&0x3fff,%d0	# strip off bias
+	fadd.w		%d0,%fp0	# add in exp
+	fsub.s		FONE(%pc),%fp0	# subtract off 1.0
+	fbge.w		pos_res		# if pos, branch
+	fmul.x		PLOG2UP1(%pc),%fp0	# if neg, mul by LOG2UP1
+	fmov.l		%fp0,%d6	# put ILOG in d6 as a lword
+	bra.b		A4_str		# go move out ILOG
+pos_res:
+	fmul.x		PLOG2(%pc),%fp0	# if pos, mul by LOG2
+	fmov.l		%fp0,%d6	# put ILOG in d6 as a lword
+
+
+# A4. Clr INEX bit.
+#     The operation in A3 above may have set INEX2.
+
+A4_str:
+	fmov.l		&0,%fpsr	# zero all of fpsr - nothing needed
+
+
+# A5. Set ICTR = 0;
+#     ICTR is a flag used in A13.  It must be set before the
+#     loop entry A6. The lower word of d5 is used for ICTR.
+
+	clr.w		%d5		# clear ICTR
+
+# A6. Calculate LEN.
+#     LEN is the number of digits to be displayed.  The k-factor
+#     can dictate either the total number of digits, if it is
+#     a positive number, or the number of digits after the
+#     original decimal point which are to be included as
+#     significant.  See the 68882 manual for examples.
+#     If LEN is computed to be greater than 17, set OPERR in
+#     USER_FPSR.  LEN is stored in d4.
+#
+# Register usage:
+#	Input/Output
+#	d0: exponent/Unchanged
+#	d2: x/x/scratch
+#	d3: x/x
+#	d4: exc picture/LEN
+#	d5: ICTR/Unchanged
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: x/x
+#	a2: x/x
+#	fp0: float(ILOG)/Unchanged
+#	fp1: x/x
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
+
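+# The LEN selection below, as an illustrative C sketch (not FPSP source):
+#
+#	static int calc_len(int k, int ilog)
+#	{
+#		int len = (k > 0) ? k : ilog + 1 - k;
+#
+#		if (len < 1)
+#			len = 1;	/* minimum of one digit */
+#		if (len > 17)
+#			len = 17;	/* clamp; OPERR is set only if k > 0 */
+#		return len;
+#	}
+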
+A6_str:
+	tst.l		%d7		# branch on sign of k
+	ble.b		k_neg		# if k <= 0, LEN = ILOG + 1 - k
+	mov.l		%d7,%d4		# if k > 0, LEN = k
+	bra.b		len_ck		# skip to LEN check
+k_neg:
+	mov.l		%d6,%d4		# first load ILOG to d4
+	sub.l		%d7,%d4		# subtract off k
+	addq.l		&1,%d4		# add in the 1
+len_ck:
+	tst.l		%d4		# LEN check: branch on sign of LEN
+	ble.b		LEN_ng		# if neg, set LEN = 1
+	cmp.l		%d4,&17		# test if LEN > 17
+	ble.b		A7_str		# if not, forget it
+	mov.l		&17,%d4		# set max LEN = 17
+	tst.l		%d7		# if negative, never set OPERR
+	ble.b		A7_str		# if positive, continue
+	or.l		&opaop_mask,USER_FPSR(%a6)	# set OPERR & AIOP in USER_FPSR
+	bra.b		A7_str		# finished here
+LEN_ng:
+	mov.l		&1,%d4		# min LEN is 1
+
+
+# A7. Calculate SCALE.
+#     SCALE is equal to 10^ISCALE, where ISCALE is the number
+#     of decimal places needed to insure LEN integer digits
+#     in the output before conversion to bcd. LAMBDA is the sign
+#     of ISCALE, used in A9.  Fp1 contains 10^^(abs(ISCALE)) using
+#     the rounding mode as given in the following table (see
+#     Coonen, p. 7.23 as ref.; however, the SCALE variable is
+#     of opposite sign in bindec.sa from Coonen).
+#
+#	Initial					USE
+#	FPCR[6:5]	LAMBDA	SIGN(X)		FPCR[6:5]
+#	----------------------------------------------
+#	 RN	00	   0	   0		00/0	RN
+#	 RN	00	   0	   1		00/0	RN
+#	 RN	00	   1	   0		00/0	RN
+#	 RN	00	   1	   1		00/0	RN
+#	 RZ	01	   0	   0		11/3	RP
+#	 RZ	01	   0	   1		11/3	RP
+#	 RZ	01	   1	   0		10/2	RM
+#	 RZ	01	   1	   1		10/2	RM
+#	 RM	10	   0	   0		11/3	RP
+#	 RM	10	   0	   1		10/2	RM
+#	 RM	10	   1	   0		10/2	RM
+#	 RM	10	   1	   1		11/3	RP
+#	 RP	11	   0	   0		10/2	RM
+#	 RP	11	   0	   1		11/3	RP
+#	 RP	11	   1	   0		11/3	RP
+#	 RP	11	   1	   1		10/2	RM
+#
+# Register usage:
+#	Input/Output
+#	d0: exponent/scratch - final is 0
+#	d2: x/0 or 24 for A9
+#	d3: x/scratch - offset ptr into PTENRM array
+#	d4: LEN/Unchanged
+#	d5: 0/ICTR:LAMBDA
+#	d6: ILOG/ILOG or k if ((k<=0)&(ILOG<k))
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: x/ptr to PTENRM array
+#	a2: x/x
+#	fp0: float(ILOG)/Unchanged
+#	fp1: x/10^ISCALE
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
+
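+# Illustrative C for the remapping table above (not FPSP source; the
+# index construction mirrors the shifts performed below):
+#
+#	static const unsigned char rbdtbl[16] = {
+#		0, 0, 0, 0,	/* RN stays RN */
+#		3, 3, 2, 2,	/* RZ -> RP or RM */
+#		3, 2, 2, 3,	/* RM -> RP or RM */
+#		2, 3, 3, 2	/* RP -> RM or RP */
+#	};
+#
+#	/* rmode = FPCR[6:5], lambda = (ISCALE < 0), neg = (X < 0) */
+#	static unsigned int scale_rmode(unsigned int rmode, unsigned int lambda,
+#					unsigned int neg)
+#	{
+#		return rbdtbl[(rmode << 2) | (lambda << 1) | neg];
+#	}
+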
+A7_str:
+	tst.l		%d7		# test sign of k
+	bgt.b		k_pos		# if pos and > 0, skip this
+	cmp.l		%d7,%d6		# test k - ILOG
+	blt.b		k_pos		# if ILOG >= k, skip this
+	mov.l		%d7,%d6		# if ((k<0) & (ILOG < k)) ILOG = k
+k_pos:
+	mov.l		%d6,%d0		# calc ILOG + 1 - LEN in d0
+	addq.l		&1,%d0		# add the 1
+	sub.l		%d4,%d0		# sub off LEN
+	swap		%d5		# use upper word of d5 for LAMBDA
+	clr.w		%d5		# set it zero initially
+	clr.w		%d2		# set up d2 for very small case
+	tst.l		%d0		# test sign of ISCALE
+	bge.b		iscale		# if pos, skip next inst
+	addq.w		&1,%d5		# if neg, set LAMBDA true
+	cmp.l		%d0,&0xffffecd4	# test iscale <= -4908
+	bgt.b		no_inf		# if false, skip rest
+	add.l		&24,%d0		# add in 24 to iscale
+	mov.l		&24,%d2		# put 24 in d2 for A9
+no_inf:
+	neg.l		%d0		# and take abs of ISCALE
+iscale:
+	fmov.s		FONE(%pc),%fp1	# init fp1 to 1
+	bfextu		USER_FPCR(%a6){&26:&2},%d1	# get initial rmode bits
+	lsl.w		&1,%d1		# put them in bits 2:1
+	add.w		%d5,%d1		# add in LAMBDA
+	lsl.w		&1,%d1		# put them in bits 3:1
+	tst.l		L_SCR2(%a6)	# test sign of original x
+	bge.b		x_pos		# if pos, don't set bit 0
+	addq.l		&1,%d1		# if neg, set bit 0
+x_pos:
+	lea.l		RBDTBL(%pc),%a2	# load rbdtbl base
+	mov.b		(%a2,%d1),%d3	# load d3 with new rmode
+	lsl.l		&4,%d3		# put bits in proper position
+	fmov.l		%d3,%fpcr	# load bits into fpu
+	lsr.l		&4,%d3		# put bits in proper position
+	tst.b		%d3		# decode new rmode for pten table
+	bne.b		not_rn		# if zero, it is RN
+	lea.l		PTENRN(%pc),%a1	# load a1 with RN table base
+	bra.b		rmode		# exit decode
+not_rn:
+	lsr.b		&1,%d3		# get lsb in carry
+	bcc.b		not_rp2		# if carry clear, it is RM
+	lea.l		PTENRP(%pc),%a1	# load a1 with RP table base
+	bra.b		rmode		# exit decode
+not_rp2:
+	lea.l		PTENRM(%pc),%a1	# load a1 with RM table base
+rmode:
+	clr.l		%d3		# clr table index
+e_loop2:
+	lsr.l		&1,%d0		# shift next bit into carry
+	bcc.b		e_next2		# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1	# mul by 10**(d3_bit_no)
+e_next2:
+	add.l		&12,%d3		# inc d3 to next pwrten table entry
+	tst.l		%d0		# test if ISCALE is zero
+	bne.b		e_loop2		# if not, loop
+
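+# The loop above, in illustrative C (not FPSP source): 10^|ISCALE| is
+# built by binary exponentiation from the 12-byte-per-entry table of
+# 10^(2^n) values selected for the rounding mode:
+#
+#	static long double pten_pow(const long double *tbl, unsigned int n)
+#	{
+#		long double r = 1.0L;
+#		int bit;
+#
+#		for (bit = 0; n != 0; bit++, n >>= 1)
+#			if (n & 1)
+#				r *= tbl[bit];	/* tbl[bit] ~ 10^(2^bit) */
+#		return r;
+#	}
+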
+# A8. Clr INEX; Force RZ.
+#     The operation in A3 above may have set INEX2.
+#     RZ mode is forced for the scaling operation to insure
+#     only one rounding error.  The grs bits are collected in
+#     the INEX flag for use in A10.
+#
+# Register usage:
+#	Input/Output
+
+	fmov.l		&0,%fpsr	# clr INEX
+	fmov.l		&rz_mode*0x10,%fpcr	# set RZ rounding mode
+
+# A9. Scale X -> Y.
+#     The mantissa is scaled to the desired number of significant
+#     digits.  The excess digits are collected in INEX2.  If scaling
+#     by a multiply, check d2 for an excess power-of-ten exponent.  If
+#     it is not zero,
+#     the iscale value would have caused the pwrten calculation
+#     to overflow.  Only a negative iscale can cause this, so
+#     multiply by 10^(d2), which is now only allowed to be 24,
+#     with a multiply by 10^8 and 10^16, which is exact since
+#     10^24 is exact.  If the input was denormalized, we must
+#     create a busy stack frame with the mul command and the
+#     two operands, and allow the fpu to complete the multiply.
+#
+# Register usage:
+#	Input/Output
+#	d0: FPCR with RZ mode/Unchanged
+#	d2: 0 or 24/unchanged
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: ptr to PTENRM array/Unchanged
+#	a2: x/x
+#	fp0: float(ILOG)/X adjusted for SCALE (Y)
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
+
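+# In C terms (illustrative only), the scaling choice below is:
+#
+#	#include <math.h>
+#
+#	/* lambda != 0 means ISCALE was negative, so scale up by a multiply */
+#	static double scale_x(double x, double scale, int lambda)
+#	{
+#		return lambda ? fabs(x) * scale : fabs(x) / scale;
+#	}
+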
+A9_str:
+	fmov.x		(%a0),%fp0	# load X from memory
+	fabs.x		%fp0		# use abs(X)
+	tst.w		%d5		# LAMBDA is in lower word of d5
+	bne.b		sc_mul		# if neg (LAMBDA = 1), scale by mul
+	fdiv.x		%fp1,%fp0	# calculate X / SCALE -> Y to fp0
+	bra.w		A10_st		# branch to A10
+
+sc_mul:
+	tst.b		BINDEC_FLG(%a6)	# check for denorm
+	beq.w		A9_norm		# if norm, continue with mul
+
+# for DENORM, we must calculate:
+#	fp0 = input_op * 10^ISCALE * 10^24
+# since the input operand is a DENORM, we can't multiply it directly.
+# so, we do the multiplication of the exponents and mantissas separately.
+# in this way, we avoid underflow on intermediate stages of the
+# multiplication and guarantee a result without exception.
+	fmovm.x		&0x2,-(%sp)	# save 10^ISCALE to stack
+
+	mov.w		(%sp),%d3	# grab exponent
+	andi.w		&0x7fff,%d3	# clear sign
+	ori.w		&0x8000,(%a0)	# make DENORM exp negative
+	add.w		(%a0),%d3	# add DENORM exp to 10^ISCALE exp
+	subi.w		&0x3fff,%d3	# subtract BIAS
+	add.w		36(%a1),%d3
+	subi.w		&0x3fff,%d3	# subtract BIAS
+	add.w		48(%a1),%d3
+	subi.w		&0x3fff,%d3	# subtract BIAS
+
+	bmi.w		sc_mul_err	# if result is DENORM, punt!!!
+
+	andi.w		&0x8000,(%sp)	# keep sign
+	or.w		%d3,(%sp)	# insert new exponent
+	andi.w		&0x7fff,(%a0)	# clear sign bit on DENORM again
+	mov.l		0x8(%a0),-(%sp) # put input op mantissa on stk
+	mov.l		0x4(%a0),-(%sp)
+	mov.l		&0x3fff0000,-(%sp) # force exp to zero
+	fmovm.x		(%sp)+,&0x80	# load normalized DENORM into fp0
+	fmul.x		(%sp)+,%fp0
+
+#	fmul.x	36(%a1),%fp0	# multiply fp0 by 10^8
+#	fmul.x	48(%a1),%fp0	# multiply fp0 by 10^16
+	mov.l		36+8(%a1),-(%sp) # get 10^8 mantissa
+	mov.l		36+4(%a1),-(%sp)
+	mov.l		&0x3fff0000,-(%sp) # force exp to zero
+	mov.l		48+8(%a1),-(%sp) # get 10^16 mantissa
+	mov.l		48+4(%a1),-(%sp)
+	mov.l		&0x3fff0000,-(%sp) # force exp to zero
+	fmul.x		(%sp)+,%fp0	# multiply fp0 by 10^8
+	fmul.x		(%sp)+,%fp0	# multiply fp0 by 10^16
+	bra.b		A10_st
+
+sc_mul_err:
+	bra.b		sc_mul_err
+
+A9_norm:
+	tst.w		%d2		# test for small exp case
+	beq.b		A9_con		# if zero, continue as normal
+	fmul.x		36(%a1),%fp0	# multiply fp0 by 10^8
+	fmul.x		48(%a1),%fp0	# multiply fp0 by 10^16
+A9_con:
+	fmul.x		%fp1,%fp0	# calculate X * SCALE -> Y to fp0
+
+# A10. Or in INEX.
+#      If INEX is set, round error occurred.  This is compensated
+#      for by 'or-ing' in the INEX2 flag to the lsb of Y.
+#
+# Register usage:
+#	Input/Output
+#	d0: FPCR with RZ mode/FPSR with INEX2 isolated
+#	d2: x/x
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: x/ptr to FP_SCR1(a6)
+#	fp0: Y/Y with lsb adjusted
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/x
+
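+# Illustrative C for the adjustment below (not FPSP source; y.mant_lo is
+# just a stand-in for the low mantissa long of Y in memory): if the
+# scaling above was inexact, make Y odd so the lost digits still
+# influence the FINT rounding in A12:
+#
+#	if (fpsr & (1 << 9))	/* INEX2 */
+#		y.mant_lo |= 1;
+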
+A10_st:
+	fmov.l		%fpsr,%d0	# get FPSR
+	fmov.x		%fp0,FP_SCR1(%a6)	# move Y to memory
+	lea.l		FP_SCR1(%a6),%a2	# load a2 with ptr to FP_SCR1
+	btst		&9,%d0		# check if INEX2 set
+	beq.b		A11_st		# if clear, skip rest
+	or.l		&1,8(%a2)	# or in 1 to lsb of mantissa
+	fmov.x		FP_SCR1(%a6),%fp0	# write adjusted Y back to fpu
+
+
+# A11. Restore original FPCR; set size ext.
+#      Perform FINT operation in the user's rounding mode.  Keep
+#      the size to extended.  The sintdo entry point in the sint
+#      routine expects the FPCR value to be in USER_FPCR for
+#      mode and precision.  The original FPCR is saved in L_SCR1.
+
+A11_st:
+	mov.l		USER_FPCR(%a6),L_SCR1(%a6)	# save it for later
+	and.l		&0x00000030,USER_FPCR(%a6)	# set size to ext,
+#					;block exceptions
+
+
+# A12. Calculate YINT = FINT(Y) according to user's rounding mode.
+#      The FPSP routine sintd0 is used.  The output is in fp0.
+#
+# Register usage:
+#	Input/Output
+#	d0: FPSR with AINEX cleared/FPCR with size set to ext
+#	d2: x/x/scratch
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/Unchanged
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/src ptr for sintdo
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	a6: temp pointer to FP_SCR1(a6) - orig value saved and restored
+#	fp0: Y/YINT
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Y adjusted for inex/Y with original exponent
+#	L_SCR1:x/original USER_FPCR
+#	L_SCR2:first word of X packed/Unchanged
+
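+# A C99 sketch of the operation performed below (illustrative; the real
+# code re-applies the original operand's sign and runs fint on the
+# extended-precision value in FP_SCR1):
+#
+#	#include <fenv.h>
+#	#include <math.h>
+#
+#	static double fint_user(double y, int x_neg, int user_round)
+#	{
+#		int old = fegetround();
+#		double r;
+#
+#		fesetround(user_round);
+#		r = nearbyint(x_neg ? -y : y);	/* FINT in user's mode */
+#		fesetround(old);
+#		return r;
+#	}
+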
+A12_st:
+	movm.l	&0xc0c0,-(%sp)	# save regs used by sintd0	 {%d0-%d1/%a0-%a1}
+	mov.l	L_SCR1(%a6),-(%sp)
+	mov.l	L_SCR2(%a6),-(%sp)
+
+	lea.l		FP_SCR1(%a6),%a0	# a0 is ptr to FP_SCR1(a6)
+	fmov.x		%fp0,(%a0)	# move Y to memory at FP_SCR1(a6)
+	tst.l		L_SCR2(%a6)	# test sign of original operand
+	bge.b		do_fint12		# if pos, use Y
+	or.l		&0x80000000,(%a0)	# if neg, use -Y
+do_fint12:
+	mov.l	USER_FPSR(%a6),-(%sp)
+#	bsr	sintdo		# sint routine returns int in fp0
+
+	fmov.l	USER_FPCR(%a6),%fpcr
+	fmov.l	&0x0,%fpsr			# clear the AEXC bits!!!
+##	mov.l		USER_FPCR(%a6),%d0	# ext prec/keep rnd mode
+##	andi.l		&0x00000030,%d0
+##	fmov.l		%d0,%fpcr
+	fint.x		FP_SCR1(%a6),%fp0	# do fint()
+	fmov.l	%fpsr,%d0
+	or.w	%d0,FPSR_EXCEPT(%a6)
+##	fmov.l		&0x0,%fpcr
+##	fmov.l		%fpsr,%d0		# don't keep ccodes
+##	or.w		%d0,FPSR_EXCEPT(%a6)
+
+	mov.b	(%sp),USER_FPSR(%a6)
+	add.l	&4,%sp
+
+	mov.l	(%sp)+,L_SCR2(%a6)
+	mov.l	(%sp)+,L_SCR1(%a6)
+	movm.l	(%sp)+,&0x303	# restore regs used by sint	 {%d0-%d1/%a0-%a1}
+
+	mov.l	L_SCR2(%a6),FP_SCR1(%a6)	# restore original exponent
+	mov.l	L_SCR1(%a6),USER_FPCR(%a6)	# restore user's FPCR
+
+# A13. Check for LEN digits.
+#      If the int operation results in more than LEN digits,
+#      or less than LEN -1 digits, adjust ILOG and repeat from
+#      A6.  This test occurs only on the first pass.  If the
+#      result is exactly 10^LEN, decrement ILOG and divide
+#      the mantissa by 10.  The calculation of 10^LEN cannot
+#      be inexact, since all powers of ten up to 10^27 are exact
+#      in extended precision, so the use of a previous power-of-ten
+#      table will introduce no error.
+#
+#
+# Register usage:
+#	Input/Output
+#	d0: FPCR with size set to ext/scratch final = 0
+#	d2: x/x
+#	d3: x/scratch final = x
+#	d4: LEN/LEN adjusted
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG/ILOG adjusted
+#	d7: k-factor/Unchanged
+#	a0: pointer into memory for packed bcd string formation
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: int portion of Y/abs(YINT) adjusted
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/10^LEN
+#	F_SCR1:x/x
+#	F_SCR2:Y with original exponent/Unchanged
+#	L_SCR1:original USER_FPCR/Unchanged
+#	L_SCR2:first word of X packed/Unchanged
+
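+# The first-pass digit-count check below, as an illustrative C sketch
+# (names are made up); it returns nonzero when ILOG changed and A6 must
+# be re-entered:
+#
+#	static int check_len(double yint, double p10, int *ilog)
+#	{
+#		/* p10 = 10^(LEN-1); yint = fabs(YINT) */
+#		if (yint < p10) {		/* too few digits */
+#			(*ilog)--;
+#			return 1;
+#		}
+#		if (yint > 10.0 * p10) {	/* too many digits */
+#			(*ilog)++;
+#			return 1;
+#		}
+#		return 0;	/* == 10^LEN is handled by a divide by 10 */
+#	}
+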
+A13_st:
+	swap		%d5		# put ICTR in lower word of d5
+	tst.w		%d5		# check if ICTR = 0
+	bne		not_zr		# if non-zero, go to second test
+#
+# Compute 10^(LEN-1)
+#
+	fmov.s		FONE(%pc),%fp2	# init fp2 to 1.0
+	mov.l		%d4,%d0		# put LEN in d0
+	subq.l		&1,%d0		# d0 = LEN -1
+	clr.l		%d3		# clr table index
+l_loop:
+	lsr.l		&1,%d0		# shift next bit into carry
+	bcc.b		l_next		# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp2	# mul by 10**(d3_bit_no)
+l_next:
+	add.l		&12,%d3		# inc d3 to next pwrten table entry
+	tst.l		%d0		# test if LEN is zero
+	bne.b		l_loop		# if not, loop
+#
+# 10^LEN-1 is computed for this test and A14.  If the input was
+# denormalized, check only the case in which YINT > 10^LEN.
+#
+	tst.b		BINDEC_FLG(%a6)	# check if input was norm
+	beq.b		A13_con		# if norm, continue with checking
+	fabs.x		%fp0		# take abs of YINT
+	bra		test_2
+#
+# Compare abs(YINT) to 10^(LEN-1) and 10^LEN
+#
+A13_con:
+	fabs.x		%fp0		# take abs of YINT
+	fcmp.x		%fp0,%fp2	# compare abs(YINT) with 10^(LEN-1)
+	fbge.w		test_2		# if greater, do next test
+	subq.l		&1,%d6		# subtract 1 from ILOG
+	mov.w		&1,%d5		# set ICTR
+	fmov.l		&rm_mode*0x10,%fpcr	# set rmode to RM
+	fmul.s		FTEN(%pc),%fp2	# compute 10^LEN
+	bra.w		A6_str		# return to A6 and recompute YINT
+test_2:
+	fmul.s		FTEN(%pc),%fp2	# compute 10^LEN
+	fcmp.x		%fp0,%fp2	# compare abs(YINT) with 10^LEN
+	fblt.w		A14_st		# if less, all is ok, go to A14
+	fbgt.w		fix_ex		# if greater, fix and redo
+	fdiv.s		FTEN(%pc),%fp0	# if equal, divide by 10
+	addq.l		&1,%d6		# and inc ILOG
+	bra.b		A14_st		# and continue elsewhere
+fix_ex:
+	addq.l		&1,%d6		# increment ILOG by 1
+	mov.w		&1,%d5		# set ICTR
+	fmov.l		&rm_mode*0x10,%fpcr	# set rmode to RM
+	bra.w		A6_str		# return to A6 and recompute YINT
+#
+# Since ICTR <> 0, we have already been through one adjustment,
+# and shouldn't have another; this is to check if abs(YINT) = 10^LEN.
+# 10^LEN is again computed using whatever table is in a1 since the
+# value calculated cannot be inexact.
+#
+not_zr:
+	fmov.s		FONE(%pc),%fp2	# init fp2 to 1.0
+	mov.l		%d4,%d0		# put LEN in d0
+	clr.l		%d3		# clr table index
+z_loop:
+	lsr.l		&1,%d0		# shift next bit into carry
+	bcc.b		z_next		# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp2	# mul by 10**(d3_bit_no)
+z_next:
+	add.l		&12,%d3		# inc d3 to next pwrten table entry
+	tst.l		%d0		# test if LEN is zero
+	bne.b		z_loop		# if not, loop
+	fabs.x		%fp0		# get abs(YINT)
+	fcmp.x		%fp0,%fp2	# check if abs(YINT) = 10^LEN
+	fbneq.w		A14_st		# if not, skip this
+	fdiv.s		FTEN(%pc),%fp0	# divide abs(YINT) by 10
+	addq.l		&1,%d6		# and inc ILOG by 1
+	addq.l		&1,%d4		# and inc LEN
+	fmul.s		FTEN(%pc),%fp2	# if LEN++, then get 10^^LEN
+
+# A14. Convert the mantissa to bcd.
+#      The binstr routine is used to convert the LEN digit
+#      mantissa to bcd in memory.  The input to binstr is
+#      to be a fraction; i.e. (mantissa)/10^LEN and adjusted
+#      such that the decimal point is to the left of bit 63.
+#      The bcd digits are stored in the correct position in
+#      the final string area in memory.
+#
+#
+# Register usage:
+#	Input/Output
+#	d0: x/LEN call to binstr - final is 0
+#	d1: x/0
+#	d2: x/ms 32-bits of mant of abs(YINT)
+#	d3: x/ls 32-bits of mant of abs(YINT)
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG
+#	d7: k-factor/Unchanged
+#	a0: pointer into memory for packed bcd string formation
+#	    /ptr to first mantissa byte in result string
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: int portion of Y/abs(YINT) adjusted
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: 10^LEN/Unchanged
+#	F_SCR1:x/Work area for final result
+#	F_SCR2:Y with original exponent/Unchanged
+#	L_SCR1:original USER_FPCR/Unchanged
+#	L_SCR2:first word of X packed/Unchanged
+
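+# Illustrative C for the fraction set-up below (the assembly keeps the
+# 64-bit mantissa split across d2:d3; "mant" and "exp" are stand-ins
+# for the fields of abs(YINT)/10^LEN in extended format):
+#
+#	static unsigned long long to_fraction(unsigned long long mant, int exp)
+#	{
+#		int shift = 0x3ffe - exp;	/* align value as mant / 2^64 */
+#
+#		if (exp == 0)
+#			return 0;		/* zero mantissa case */
+#		if (shift > 0)
+#			mant >>= shift;
+#		mant += 0x80;			/* round at bit 7 */
+#		mant &= ~0x7fULL;		/* low bits unused by the 882 */
+#		return mant;
+#	}
+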
+A14_st:
+	fmov.l		&rz_mode*0x10,%fpcr	# force rz for conversion
+	fdiv.x		%fp2,%fp0	# divide abs(YINT) by 10^LEN
+	lea.l		FP_SCR0(%a6),%a0
+	fmov.x		%fp0,(%a0)	# move abs(YINT)/10^LEN to memory
+	mov.l		4(%a0),%d2	# move 2nd word of FP_RES to d2
+	mov.l		8(%a0),%d3	# move 3rd word of FP_RES to d3
+	clr.l		4(%a0)		# zero word 2 of FP_RES
+	clr.l		8(%a0)		# zero word 3 of FP_RES
+	mov.l		(%a0),%d0	# move exponent to d0
+	swap		%d0		# put exponent in lower word
+	beq.b		no_sft		# if zero, don't shift
+	sub.l		&0x3ffd,%d0	# sub bias less 2 to make fract
+	tst.l		%d0		# check if > 1
+	bgt.b		no_sft		# if so, don't shift
+	neg.l		%d0		# make exp positive
+m_loop:
+	lsr.l		&1,%d2		# shift d2:d3 right, add 0s
+	roxr.l		&1,%d3		# the number of places
+	dbf.w		%d0,m_loop	# given in d0
+no_sft:
+	tst.l		%d2		# check for mantissa of zero
+	bne.b		no_zr		# if not, go on
+	tst.l		%d3		# continue zero check
+	beq.b		zer_m		# if zero, go directly to binstr
+no_zr:
+	clr.l		%d1		# put zero in d1 for addx
+	add.l		&0x00000080,%d3	# inc at bit 7
+	addx.l		%d1,%d2		# continue inc
+	and.l		&0xffffff80,%d3	# strip off lsb not used by 882
+zer_m:
+	mov.l		%d4,%d0		# put LEN in d0 for binstr call
+	addq.l		&3,%a0		# a0 points to M16 byte in result
+	bsr		binstr		# call binstr to convert mant
+
+
+# A15. Convert the exponent to bcd.
+#      As in A14 above, the exp is converted to bcd and the
+#      digits are stored in the final string.
+#
+#      Digits are stored in L_SCR1(a6) on return from BINDEC as:
+#
+#	 32               16 15                0
+#	-----------------------------------------
+#	|  0 | e3 | e2 | e1 | e4 |  X |  X |  X |
+#	-----------------------------------------
+#
+# And are moved into their proper places in FP_SCR0.  If digit e4
+# is non-zero, OPERR is signaled.  In all cases, all 4 digits are
+# written as specified in the 881/882 manual for packed decimal.
+#
+# Register usage:
+#	Input/Output
+#	d0: x/LEN call to binstr - final is 0
+#	d1: x/scratch (0);shift count for final exponent packing
+#	d2: x/ms 32-bits of exp fraction/scratch
+#	d3: x/ls 32-bits of exp fraction
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG
+#	d7: k-factor/Unchanged
+#	a0: ptr to result string/ptr to L_SCR1(a6)
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: abs(YINT) adjusted/float(ILOG)
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: 10^LEN/Unchanged
+#	F_SCR1:Work area for final result/BCD result
+#	F_SCR2:Y with original exponent/ILOG/10^4
+#	L_SCR1:original USER_FPCR/Exponent digits on return from binstr
+#	L_SCR2:first word of X packed/Unchanged
+
+A15_st:
+	tst.b		BINDEC_FLG(%a6)	# check for denorm
+	beq.b		not_denorm
+	ftest.x		%fp0		# test for zero
+	fbeq.w		den_zero	# if zero, use k-factor or 4933
+	fmov.l		%d6,%fp0	# float ILOG
+	fabs.x		%fp0		# get abs of ILOG
+	bra.b		convrt
+den_zero:
+	tst.l		%d7		# check sign of the k-factor
+	blt.b		use_ilog	# if negative, use ILOG
+	fmov.s		F4933(%pc),%fp0	# force exponent to 4933
+	bra.b		convrt		# do it
+use_ilog:
+	fmov.l		%d6,%fp0	# float ILOG
+	fabs.x		%fp0		# get abs of ILOG
+	bra.b		convrt
+not_denorm:
+	ftest.x		%fp0		# test for zero
+	fbneq.w		not_zero	# if zero, force exponent
+	fmov.s		FONE(%pc),%fp0	# force exponent to 1
+	bra.b		convrt		# do it
+not_zero:
+	fmov.l		%d6,%fp0	# float ILOG
+	fabs.x		%fp0		# get abs of ILOG
+convrt:
+	fdiv.x		24(%a1),%fp0	# compute ILOG/10^4
+	fmov.x		%fp0,FP_SCR1(%a6)	# store fp0 in memory
+	mov.l		4(%a2),%d2	# move word 2 to d2
+	mov.l		8(%a2),%d3	# move word 3 to d3
+	mov.w		(%a2),%d0	# move exp to d0
+	beq.b		x_loop_fin	# if zero, skip the shift
+	sub.w		&0x3ffd,%d0	# subtract off bias
+	neg.w		%d0		# make exp positive
+x_loop:
+	lsr.l		&1,%d2		# shift d2:d3 right
+	roxr.l		&1,%d3		# the number of places
+	dbf.w		%d0,x_loop	# given in d0
+x_loop_fin:
+	clr.l		%d1		# put zero in d1 for addx
+	add.l		&0x00000080,%d3	# inc at bit 7
+	addx.l		%d1,%d2		# continue inc
+	and.l		&0xffffff80,%d3	# strip off lsb not used by 882
+	mov.l		&4,%d0		# put 4 in d0 for binstr call
+	lea.l		L_SCR1(%a6),%a0	# a0 is ptr to L_SCR1 for exp digits
+	bsr		binstr		# call binstr to convert exp
+	mov.l		L_SCR1(%a6),%d0	# load L_SCR1 lword to d0
+	mov.l		&12,%d1		# use d1 for shift count
+	lsr.l		%d1,%d0		# shift d0 right by 12
+	bfins		%d0,FP_SCR0(%a6){&4:&12}	# put e3:e2:e1 in FP_SCR0
+	lsr.l		%d1,%d0		# shift d0 right by 12
+	bfins		%d0,FP_SCR0(%a6){&16:&4}	# put e4 in FP_SCR0
+	tst.b		%d0		# check if e4 is zero
+	beq.b		A16_st		# if zero, skip rest
+	or.l		&opaop_mask,USER_FPSR(%a6)	# set OPERR & AIOP in USER_FPSR
+
+
+# A16. Write sign bits to final string.
+#	   Sigma is bit 31 of initial value; RHO is bit 31 of d6 (ILOG).
+#
+# Register usage:
+#	Input/Output
+#	d0: x/scratch - final is x
+#	d2: x/x
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG/ILOG adjusted
+#	d7: k-factor/Unchanged
+#	a0: ptr to L_SCR1(a6)/Unchanged
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: float(ILOG)/Unchanged
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: 10^LEN/Unchanged
+#	F_SCR1:BCD result with correct signs
+#	F_SCR2:ILOG/10^4
+#	L_SCR1:Exponent digits on return from binstr
+#	L_SCR2:first word of X packed/Unchanged
+
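+# Illustrative C for the sign packing below (byte0 stands for the first
+# byte of the packed result in FP_SCR0):
+#
+#	static unsigned char pack_signs(unsigned char byte0, int x_neg, int ilog)
+#	{
+#		unsigned int sm_se = (x_neg ? 2u : 0u) | (ilog < 0 ? 1u : 0u);
+#
+#		return (byte0 & 0x0f) | (sm_se << 6);	/* SM = bit 7, SE = bit 6 */
+#	}
+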
+A16_st:
+	clr.l		%d0		# clr d0 for collection of signs
+	and.b		&0x0f,FP_SCR0(%a6)	# clear first nibble of FP_SCR0
+	tst.l		L_SCR2(%a6)	# check sign of original mantissa
+	bge.b		mant_p		# if pos, don't set SM
+	mov.l		&2,%d0		# move 2 in to d0 for SM
+mant_p:
+	tst.l		%d6		# check sign of ILOG
+	bge.b		wr_sgn		# if pos, don't set SE
+	addq.l		&1,%d0		# set bit 0 in d0 for SE
+wr_sgn:
+	bfins		%d0,FP_SCR0(%a6){&0:&2}	# insert SM and SE into FP_SCR0
+
+# Clean up and restore all registers used.
+
+	fmov.l		&0,%fpsr	# clear possible inex2/ainex bits
+	fmovm.x		(%sp)+,&0xe0	#  {%fp0-%fp2}
+	movm.l		(%sp)+,&0x4fc	#  {%d2-%d7/%a2}
+	rts
+
+	global		PTENRN
+PTENRN:
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
+
+	global		PTENRP
+PTENRP:
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D6	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C18	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
+
+	global		PTENRM
+PTENRM:
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59D	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CDF	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8D	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C6	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE4	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979A	# 10 ^ 4096
+
+#########################################################################
+# binstr(): Converts a 64-bit binary integer to bcd.			#
+#									#
+# INPUT *************************************************************** #
+#	d2:d3 = 64-bit binary integer					#
+#	d0    = desired length (LEN)					#
+#	a0    = pointer to start in memory for bcd characters		#
+#		(This pointer must point to byte 4 of the first		#
+#		 lword of the packed decimal memory string.)		#
+#									#
+# OUTPUT ************************************************************** #
+#	a0 = pointer to LEN bcd digits representing the 64-bit integer.	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The 64-bit binary is assumed to have a decimal point before	#
+#	bit 63.  The fraction is multiplied by 10 using a mul by 2	#
+#	shift and a mul by 8 shift.  The bits shifted out of the	#
+#	msb form a decimal digit.  This process is iterated until	#
+#	LEN digits are formed.						#
+#									#
+# A1. Init d7 to 1.  D7 is the byte digit counter, and if 1, the	#
+#     digit formed will be assumed the least significant.  This is	#
+#     to force the first byte formed to have a 0 in the upper 4 bits.	#
+#									#
+# A2. Beginning of the loop:						#
+#     Copy the fraction in d2:d3 to d4:d5.				#
+#									#
+# A3. Multiply the fraction in d2:d3 by 8 using bit-field		#
+#     extracts and shifts.  The three msbs from d2 will go into d1.	#
+#									#
+# A4. Multiply the fraction in d4:d5 by 2 using shifts.  The msb	#
+#     will be collected by the carry.					#
+#									#
+# A5. Add using the carry the 64-bit quantities in d2:d3 and d4:d5	#
+#     into d2:d3.  D1 will contain the bcd digit formed.		#
+#									#
+# A6. Test d7.  If zero, the digit formed is the ms digit.  If non-	#
+#     zero, it is the ls digit.  Put the digit in its place in the	#
+#     upper word of d0.  If it is the ls digit, write the word		#
+#     from d0 to memory.						#
+#									#
+# A7. Decrement d0 (LEN counter) and repeat the loop until zero.	#
+#									#
+#########################################################################
+
+#	Implementation Notes:
+#
+#	The registers are used as follows:
+#
+#		d0: LEN counter
+#		d1: temp used to form the digit
+#		d2: upper 32-bits of fraction for mul by 8
+#		d3: lower 32-bits of fraction for mul by 8
+#		d4: upper 32-bits of fraction for mul by 2
+#		d5: lower 32-bits of fraction for mul by 2
+#		d6: temp for bit-field extracts
+#		d7: byte digit formation word;digit count {0,1}
+#		a0: pointer into memory for packed bcd string formation
+#
+
+	global		binstr
+binstr:
+	movm.l		&0xff00,-(%sp)	#  {%d0-%d7}
+
+#
+# A1: Init d7
+#
+	mov.l		&1,%d7		# init d7 for second digit
+	subq.l		&1,%d0		# for dbf d0 would have LEN+1 passes
+#
+# A2. Copy d2:d3 to d4:d5.  Start loop.
+#
+loop:
+	mov.l		%d2,%d4		# copy the fraction before muls
+	mov.l		%d3,%d5		# to d4:d5
+#
+# A3. Multiply d2:d3 by 8; extract msbs into d1.
+#
+	bfextu		%d2{&0:&3},%d1	# copy 3 msbs of d2 into d1
+	asl.l		&3,%d2		# shift d2 left by 3 places
+	bfextu		%d3{&0:&3},%d6	# copy 3 msbs of d3 into d6
+	asl.l		&3,%d3		# shift d3 left by 3 places
+	or.l		%d6,%d2		# or in msbs from d3 into d2
+#
+# A4. Multiply d4:d5 by 2; add carry out to d1.
+#
+	asl.l		&1,%d5		# mul d5 by 2
+	roxl.l		&1,%d4		# mul d4 by 2
+	swap		%d6		# put 0 in d6 lower word
+	addx.w		%d6,%d1		# add in extend from mul by 2
+#
+# A5. Add mul by 8 to mul by 2.  D1 contains the digit formed.
+#
+	add.l		%d5,%d3		# add lower 32 bits
+	nop				# ERRATA FIX #13 (Rev. 1.2 6/6/90)
+	addx.l		%d4,%d2		# add with extend upper 32 bits
+	nop				# ERRATA FIX #13 (Rev. 1.2 6/6/90)
+	addx.w		%d6,%d1		# add in extend from add to d1
+	swap		%d6		# with d6 = 0; put 0 in upper word
+#
+# A6. Test d7 and branch.
+#
+	tst.w		%d7		# if zero, store digit & to loop
+	beq.b		first_d		# if non-zero, form byte & write
+sec_d:
+	swap		%d7		# bring first digit to word d7b
+	asl.w		&4,%d7		# first digit in upper 4 bits d7b
+	add.w		%d1,%d7		# add in ls digit to d7b
+	mov.b		%d7,(%a0)+	# store d7b byte in memory
+	swap		%d7		# put LEN counter in word d7a
+	clr.w		%d7		# set d7a to signal no digits done
+	dbf.w		%d0,loop	# do loop some more!
+	bra.b		end_bstr	# finished, so exit
+first_d:
+	swap		%d7		# put digit word in d7b
+	mov.w		%d1,%d7		# put new digit in d7b
+	swap		%d7		# put LEN counter in word d7a
+	addq.w		&1,%d7		# set d7a to signal first digit done
+	dbf.w		%d0,loop	# do loop some more!
+	swap		%d7		# put last digit in string
+	lsl.w		&4,%d7		# move it to upper 4 bits
+	mov.b		%d7,(%a0)+	# store it in memory string
+#
+# Clean up and return with result in fp0.
+#
+end_bstr:
+	movm.l		(%sp)+,&0xff	#  {%d0-%d7}
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	facc_in_b(): dmem_read_byte failed				#
+#	facc_in_w(): dmem_read_word failed				#
+#	facc_in_l(): dmem_read_long failed				#
+#	facc_in_d(): dmem_read of dbl prec failed			#
+#	facc_in_x(): dmem_read of ext prec failed			#
+#									#
+#	facc_out_b(): dmem_write_byte failed				#
+#	facc_out_w(): dmem_write_word failed				#
+#	facc_out_l(): dmem_write_long failed				#
+#	facc_out_d(): dmem_write of dbl prec failed			#
+#	facc_out_x(): dmem_write of ext prec failed			#
+#									#
+# XREF ****************************************************************	#
+#	_real_access() - exit through access error handler		#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Flow jumps here when an FP data fetch call gets an error	#
+# result. This means the operating system wants an access error frame	#
+# made out of the current exception stack frame.			#
+#	So, we first call restore() which makes sure that any updated	#
+# -(an)+ register gets returned to its pre-exception value and then	#
+# we change the stack to an access error stack frame.			#
+#									#
+#########################################################################
+
+facc_in_b:
+	movq.l		&0x1,%d0			# one byte
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0121,EXC_VOFF(%a6)		# set FSLW
+	bra.w		facc_finish
+
+facc_in_w:
+	movq.l		&0x2,%d0			# two bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0141,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_in_l:
+	movq.l		&0x4,%d0			# four bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0101,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_in_d:
+	movq.l		&0x8,%d0			# eight bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0161,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_in_x:
+	movq.l		&0xc,%d0			# twelve bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0161,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+################################################################
+
+facc_out_b:
+	movq.l		&0x1,%d0			# one byte
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00a1,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_w:
+	movq.l		&0x2,%d0			# two bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00c1,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_l:
+	movq.l		&0x4,%d0			# four bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x0081,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_d:
+	movq.l		&0x8,%d0			# eight bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00e1,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_x:
+	mov.l		&0xc,%d0			# twelve bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00e1,EXC_VOFF(%a6)		# set FSLW
+
+# here's where we actually create the access error frame from the
+# current exception stack frame.
+facc_finish:
+	mov.l		USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	mov.l		(%sp),-(%sp)		# store SR, hi(PC)
+	mov.l		0x8(%sp),0x4(%sp)	# store lo(PC)
+	mov.l		0xc(%sp),0x8(%sp)	# store EA
+	mov.l		&0x00000001,0xc(%sp)	# store FSLW
+	mov.w		0x6(%sp),0xc(%sp)	# fix FSLW (size)
+	mov.w		&0x4008,0x6(%sp)	# store voff
+
+	btst		&0x5,(%sp)		# supervisor or user mode?
+	beq.b		facc_out2		# user
+	bset		&0x2,0xd(%sp)		# set supervisor TM bit
+
+facc_out2:
+	bra.l		_real_access
+
+##################################################################
+
+# if the effective addressing mode was predecrement or postincrement,
+# the emulation has already changed its value to the correct post-
+# instruction value. but since we're exiting to the access error
+# handler, then AN must be returned to its pre-instruction value.
+# we do that here.
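+#
+# roughly, in C (illustrative; 0x18/0x20 are the (An)+ / -(An) opmode
+# values tested below):
+#
+#	static void restore_an(unsigned int opmode, long nbytes, long *an)
+#	{
+#		if (opmode == 0x18)		/* (An)+ : undo the increment */
+#			*an -= nbytes;
+#		else if (opmode == 0x20)	/* -(An) : undo the decrement */
+#			*an += nbytes;
+#	}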
+restore:
+	mov.b		EXC_OPWORD+0x1(%a6),%d1
+	andi.b		&0x38,%d1		# extract opmode
+	cmpi.b		%d1,&0x18		# postinc?
+	beq.w		rest_inc
+	cmpi.b		%d1,&0x20		# predec?
+	beq.w		rest_dec
+	rts
+
+rest_inc:
+	mov.b		EXC_OPWORD+0x1(%a6),%d1
+	andi.w		&0x0007,%d1		# fetch An
+
+	mov.w		(tbl_rest_inc.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_rest_inc.b,%pc,%d1.w*1)
+
+tbl_rest_inc:
+	short		ri_a0 - tbl_rest_inc
+	short		ri_a1 - tbl_rest_inc
+	short		ri_a2 - tbl_rest_inc
+	short		ri_a3 - tbl_rest_inc
+	short		ri_a4 - tbl_rest_inc
+	short		ri_a5 - tbl_rest_inc
+	short		ri_a6 - tbl_rest_inc
+	short		ri_a7 - tbl_rest_inc
+
+ri_a0:
+	sub.l		%d0,EXC_DREGS+0x8(%a6)	# fix stacked a0
+	rts
+ri_a1:
+	sub.l		%d0,EXC_DREGS+0xc(%a6)	# fix stacked a1
+	rts
+ri_a2:
+	sub.l		%d0,%a2			# fix a2
+	rts
+ri_a3:
+	sub.l		%d0,%a3			# fix a3
+	rts
+ri_a4:
+	sub.l		%d0,%a4			# fix a4
+	rts
+ri_a5:
+	sub.l		%d0,%a5			# fix a5
+	rts
+ri_a6:
+	sub.l		%d0,(%a6)		# fix stacked a6
+	rts
+# if it's an fmove out instruction, we don't have to fix a7
+# because we hadn't changed it yet. if it's an opclass two
+# instruction (data moved in) and the exception was in supervisor
+# mode, then a7 also wasn't updated. if it was user mode, then
+# restore the correct a7 which is in the USP currently.
+ri_a7:
+	cmpi.b		EXC_VOFF(%a6),&0x30	# move in or out?
+	bne.b		ri_a7_done		# out
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.b		ri_a7_done		# supervisor
+	movc		%usp,%a0		# restore USP
+	sub.l		%d0,%a0
+	movc		%a0,%usp
+ri_a7_done:
+	rts
+
+# need to invert adjustment value if the <ea> was predec
+rest_dec:
+	neg.l		%d0
+	bra.b		rest_inc
diff --git a/arch/m68k/ifpsp060/src/ftest.S b/arch/m68k/ifpsp060/src/ftest.S
new file mode 100644
index 0000000..2edcbae
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/ftest.S
@@ -0,0 +1,1456 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#############################################
+set	SREGS,		-64
+set	IREGS,		-128
+set	IFPREGS,	-224
+set	SFPREGS,	-320
+set	IFPCREGS,	-332
+set	SFPCREGS,	-344
+set	ICCR,		-346
+set	SCCR,		-348
+set	TESTCTR,	-352
+set	DATA,		-384
+
+#############################################
+TESTTOP:
+	bra.l		_060TESTS_
+	short		0x0000
+
+	bra.l		_060TESTS_unimp
+	short		0x0000
+
+	bra.l		_060TESTS_enable
+	short		0x0000
+
+start_str:
+	string		"Testing 68060 FPSP started:\n"
+
+start_str_unimp:
+	string		"Testing 68060 FPSP unimplemented instruction started:\n"
+
+start_str_enable:
+	string		"Testing 68060 FPSP exception enabled started:\n"
+
+pass_str:
+	string		"passed\n"
+
+fail_str:
+	string		" failed\n"
+
+	align		0x4
+chk_test:
+	tst.l		%d0
+	bne.b		test_fail
+test_pass:
+	pea		pass_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+	rts
+test_fail:
+	mov.l		%d1,-(%sp)
+	bsr.l		_print_num
+	addq.l		&0x4,%sp
+
+	pea		fail_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+	rts
+
+#############################################
+_060TESTS_:
+	link		%a6,&-384
+
+	movm.l		&0x3f3c,-(%sp)
+	fmovm.x		&0xff,-(%sp)
+
+	pea		start_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+### effadd
+	clr.l		TESTCTR(%a6)
+	pea		effadd_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		effadd_0
+
+	bsr.l		chk_test
+
+### unsupp
+	clr.l		TESTCTR(%a6)
+	pea		unsupp_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		unsupp_0
+
+	bsr.l		chk_test
+
+### ovfl non-maskable
+	clr.l		TESTCTR(%a6)
+	pea		ovfl_nm_str(%pc)
+	bsr.l		_print_str
+	bsr.l		ovfl_nm_0
+
+	bsr.l		chk_test
+
+### unfl non-maskable
+	clr.l		TESTCTR(%a6)
+	pea		unfl_nm_str(%pc)
+	bsr.l		_print_str
+	bsr.l		unfl_nm_0
+
+	bsr.l		chk_test
+
+	movm.l		(%sp)+,&0x3cfc
+	fmovm.x		(%sp)+,&0xff
+
+	unlk		%a6
+	rts
+
+_060TESTS_unimp:
+	link		%a6,&-384
+
+	movm.l		&0x3f3c,-(%sp)
+	fmovm.x		&0xff,-(%sp)
+
+	pea		start_str_unimp(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+### unimp
+	clr.l		TESTCTR(%a6)
+	pea		unimp_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		unimp_0
+
+	bsr.l		chk_test
+
+	movm.l		(%sp)+,&0x3cfc
+	fmovm.x		(%sp)+,&0xff
+
+	unlk		%a6
+	rts
+
+_060TESTS_enable:
+	link		%a6,&-384
+
+	movm.l		&0x3f3c,-(%sp)
+	fmovm.x		&0xff,-(%sp)
+
+	pea		start_str_enable(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+### snan
+	clr.l		TESTCTR(%a6)
+	pea		snan_str(%pc)
+	bsr.l		_print_str
+	bsr.l		snan_0
+
+	bsr.l		chk_test
+
+### operr
+	clr.l		TESTCTR(%a6)
+	pea		operr_str(%pc)
+	bsr.l		_print_str
+	bsr.l		operr_0
+
+	bsr.l		chk_test
+
+### ovfl
+	clr.l		TESTCTR(%a6)
+	pea		ovfl_str(%pc)
+	bsr.l		_print_str
+	bsr.l		ovfl_0
+
+	bsr.l		chk_test
+
+### unfl
+	clr.l		TESTCTR(%a6)
+	pea		unfl_str(%pc)
+	bsr.l		_print_str
+	bsr.l		unfl_0
+
+	bsr.l		chk_test
+
+### dz
+	clr.l		TESTCTR(%a6)
+	pea		dz_str(%pc)
+	bsr.l		_print_str
+	bsr.l		dz_0
+
+	bsr.l		chk_test
+
+### inexact
+	clr.l		TESTCTR(%a6)
+	pea		inex_str(%pc)
+	bsr.l		_print_str
+	bsr.l		inex_0
+
+	bsr.l		chk_test
+
+	movm.l		(%sp)+,&0x3cfc
+	fmovm.x		(%sp)+,&0xff
+
+	unlk		%a6
+	rts
+
+#############################################
+#############################################
+
+unimp_str:
+	string		"\tUnimplemented FP instructions..."
+
+	align		0x4
+unimp_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x40000000,DATA+0x0(%a6)
+	mov.l		&0xc90fdaa2,DATA+0x4(%a6)
+	mov.l		&0x2168c235,DATA+0x8(%a6)
+
+	mov.w		&0x0000,%cc
+unimp_0_pc:
+	fsin.x		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0xbfbf0000,IFPREGS+0x0(%a6)
+	mov.l		&0x80000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x08000208,IFPCREGS+0x4(%a6)
+	lea		unimp_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+unimp_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x3ffe0000,DATA+0x0(%a6)
+	mov.l		&0xc90fdaa2,DATA+0x4(%a6)
+	mov.l		&0x2168c235,DATA+0x8(%a6)
+
+	mov.w		&0x0000,%cc
+unimp_1_pc:
+	ftan.x		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x3fff0000,IFPREGS+0x0(%a6)
+	mov.l		&0x80000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x00000208,IFPCREGS+0x4(%a6)
+	lea		unimp_1_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# fmovecr
+unimp_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.w		&0x0000,%cc
+unimp_2_pc:
+	fmovcr.x	&0x31,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x40000000,IFPREGS+0x0(%a6)
+	mov.l		&0x935d8ddd,IFPREGS+0x4(%a6)
+	mov.l		&0xaaa8ac17,IFPREGS+0x8(%a6)
+	mov.l		&0x00000208,IFPCREGS+0x4(%a6)
+	lea		unimp_2_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# fscc
+unimp_3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	fmov.l		&0x0f000000,%fpsr
+	mov.l		&0x00,%d7
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.w		&0x0000,%cc
+unimp_3_pc:
+	fsgt		%d7
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.l		&0x0f008080,IFPCREGS+0x4(%a6)
+	lea		unimp_3_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# fdbcc
+unimp_4:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	fmov.l		&0x0f000000,%fpsr
+	mov.l		&0x2,%d7
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.w		&0x0000,%cc
+unimp_4_pc:
+	fdbgt.w		%d7,unimp_4_pc
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.w		&0xffff,IREGS+28+2(%a6)
+	mov.l		&0x0f008080,IFPCREGS+0x4(%a6)
+	lea		unimp_4_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# ftrapcc
+unimp_5:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	fmov.l		&0x0f000000,%fpsr
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.w		&0x0000,%cc
+unimp_5_pc:
+	ftpgt.l		&0xabcdef01
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.l		&0x0f008080,IFPCREGS+0x4(%a6)
+	lea		unimp_5_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#############################################
+
+effadd_str:
+	string		"\tUnimplemented <ea>..."
+
+	align		0x4
+effadd_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmov.b		&0x2,%fp0
+
+	mov.w		&0x0000,%cc
+effadd_0_pc:
+	fmul.x		&0xc00000008000000000000000,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0xc0010000,IFPREGS+0x0(%a6)
+	mov.l		&0x80000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x08000000,IFPCREGS+0x4(%a6)
+	lea		effadd_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+effadd_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.w		&0x0000,%cc
+effadd_1_pc:
+	fabs.p		&0xc12300012345678912345678,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x3e660000,IFPREGS+0x0(%a6)
+	mov.l		&0xd0ed23e8,IFPREGS+0x4(%a6)
+	mov.l		&0xd14035bc,IFPREGS+0x8(%a6)
+	mov.l		&0x00000108,IFPCREGS+0x4(%a6)
+	lea		effadd_1_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+fmovml_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmovm.l		&0xffffffffffffffff,%fpcr,%fpsr
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.l		&0x0000fff0,IFPCREGS+0x0(%a6)
+	mov.l		&0x0ffffff8,IFPCREGS+0x4(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+fmovml_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmovm.l		&0xffffffffffffffff,%fpcr,%fpiar
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.l		&0x0000fff0,IFPCREGS+0x0(%a6)
+	mov.l		&0xffffffff,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+fmovml_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmovm.l		&0xffffffffffffffff,%fpsr,%fpiar
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.l		&0x0ffffff8,IFPCREGS+0x4(%a6)
+	mov.l		&0xffffffff,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+fmovml_3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmovm.l		&0xffffffffffffffffffffffff,%fpcr,%fpsr,%fpiar
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.l		&0x0000fff0,IFPCREGS+0x0(%a6)
+	mov.l		&0x0ffffff8,IFPCREGS+0x4(%a6)
+	mov.l		&0xffffffff,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# fmovmx dynamic
+fmovmx_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	fmov.b		&0x1,%fp0
+	fmov.b		&0x2,%fp1
+	fmov.b		&0x3,%fp2
+	fmov.b		&0x4,%fp3
+	fmov.b		&0x5,%fp4
+	fmov.b		&0x6,%fp5
+	fmov.b		&0x7,%fp6
+	fmov.b		&0x8,%fp7
+
+	fmov.l		&0x0,%fpiar
+	mov.l		&0xffffffaa,%d0
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0xffff,IREGS(%a6)
+
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+
+	mov.w		&0x0000,%cc
+
+	fmovm.x		%d0,-(%sp)
+
+	mov.w		%cc,SCCR(%a6)
+
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	fmov.s		&0x7f800000,%fp1
+	fmov.s		&0x7f800000,%fp3
+	fmov.s		&0x7f800000,%fp5
+	fmov.s		&0x7f800000,%fp7
+
+	fmov.x		(%sp)+,%fp1
+	fmov.x		(%sp)+,%fp3
+	fmov.x		(%sp)+,%fp5
+	fmov.x		(%sp)+,%fp7
+
+	movm.l		&0xffff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+fmovmx_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	fmov.b		&0x1,%fp0
+	fmov.b		&0x2,%fp1
+	fmov.b		&0x3,%fp2
+	fmov.b		&0x4,%fp3
+	fmov.b		&0x5,%fp4
+	fmov.b		&0x6,%fp5
+	fmov.b		&0x7,%fp6
+	fmov.b		&0x8,%fp7
+
+	fmov.x		%fp6,-(%sp)
+	fmov.x		%fp4,-(%sp)
+	fmov.x		%fp2,-(%sp)
+	fmov.x		%fp0,-(%sp)
+
+	fmovm.x		&0xff,IFPREGS(%a6)
+
+	fmov.s		&0x7f800000,%fp6
+	fmov.s		&0x7f800000,%fp4
+	fmov.s		&0x7f800000,%fp2
+	fmov.s		&0x7f800000,%fp0
+
+	fmov.l		&0x0,%fpiar
+	fmov.l		&0x0,%fpsr
+	mov.l		&0xffffffaa,%d0
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0xffff,IREGS(%a6)
+
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.w		&0x0000,%cc
+
+	fmovm.x		(%sp)+,%d0
+
+	mov.w		%cc,SCCR(%a6)
+
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	movm.l		&0xffff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+fmovmx_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	fmov.b		&0x1,%fp0
+	fmov.b		&0x2,%fp1
+	fmov.b		&0x3,%fp2
+	fmov.b		&0x4,%fp3
+	fmov.b		&0x5,%fp4
+	fmov.b		&0x6,%fp5
+	fmov.b		&0x7,%fp6
+	fmov.b		&0x8,%fp7
+
+	fmov.l		&0x0,%fpiar
+	mov.l		&0xffffff00,%d0
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0xffff,IREGS(%a6)
+
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+
+	mov.w		&0x0000,%cc
+
+	fmovm.x		%d0,-(%sp)
+
+	mov.w		%cc,SCCR(%a6)
+
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	movm.l		&0xffff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+###########################################################
+
+# This test will take a non-maskable overflow directly.
+ovfl_nm_str:
+	string		"\tNon-maskable overflow..."
+
+	align		0x4
+ovfl_nm_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmov.b		&0x2,%fp0
+	mov.l		&0x7ffe0000,DATA+0x0(%a6)
+	mov.l		&0x80000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+
+	mov.w		&0x0000,%cc
+ovfl_nm_0_pc:
+	fmul.x		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x7fff0000,IFPREGS+0x0(%a6)
+	mov.l		&0x00000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x02001048,IFPCREGS+0x4(%a6)
+	lea		ovfl_nm_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+###########################################################
+
+# This test will take an overflow directly.
+ovfl_str:
+	string		"\tEnabled overflow..."
+
+	align		0x4
+ovfl_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmov.l		&0x00001000,%fpcr
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmov.b		&0x2,%fp0
+	mov.l		&0x7ffe0000,DATA+0x0(%a6)
+	mov.l		&0x80000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+
+	mov.w		&0x0000,%cc
+ovfl_0_pc:
+	fmul.x		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x7fff0000,IFPREGS+0x0(%a6)
+	mov.l		&0x00000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x02001048,IFPCREGS+0x4(%a6)
+	lea		ovfl_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+# This test will take an underflow directly.
+unfl_str:
+	string		"\tEnabled underflow..."
+
+	align		0x4
+unfl_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmov.l		&0x00000800,%fpcr
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x00000000,DATA+0x0(%a6)
+	mov.l		&0x80000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+	fmovm.x		DATA(%a6),&0x80
+
+	mov.w		&0x0000,%cc
+unfl_0_pc:
+	fdiv.b		&0x2,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x00000000,IFPREGS+0x0(%a6)
+	mov.l		&0x40000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x00000800,IFPCREGS+0x4(%a6)
+	lea		unfl_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+# This test will take a non-maskable underflow directly.
+unfl_nm_str:
+	string		"\tNon-maskable underflow..."
+
+	align		0x4
+unfl_nm_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x00000000,DATA+0x0(%a6)
+	mov.l		&0x80000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+	fmovm.x		DATA(%a6),&0x80
+
+	mov.w		&0x0000,%cc
+unfl_nm_0_pc:
+	fdiv.b		&0x2,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x00000000,IFPREGS+0x0(%a6)
+	mov.l		&0x40000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x00000800,IFPCREGS+0x4(%a6)
+	lea		unfl_nm_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+inex_str:
+	string		"\tEnabled inexact..."
+
+	align		0x4
+inex_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmov.l		&0x00000200,%fpcr		# enable inexact
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x50000000,DATA+0x0(%a6)
+	mov.l		&0x80000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+	fmovm.x		DATA(%a6),&0x80
+
+	mov.w		&0x0000,%cc
+inex_0_pc:
+	fadd.b		&0x2,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x50000000,IFPREGS+0x0(%a6)
+	mov.l		&0x80000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x00000208,IFPCREGS+0x4(%a6)
+	lea		inex_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+snan_str:
+	string		"\tEnabled SNAN..."
+
+	align		0x4
+snan_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmov.l		&0x00004000,%fpcr		# enable SNAN
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0xffff0000,DATA+0x0(%a6)
+	mov.l		&0x00000000,DATA+0x4(%a6)
+	mov.l		&0x00000001,DATA+0x8(%a6)
+	fmovm.x		DATA(%a6),&0x80
+
+	mov.w		&0x0000,%cc
+snan_0_pc:
+	fadd.b		&0x2,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0xffff0000,IFPREGS+0x0(%a6)
+	mov.l		&0x00000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000001,IFPREGS+0x8(%a6)
+	mov.l		&0x09004080,IFPCREGS+0x4(%a6)
+	lea		snan_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+operr_str:
+	string		"\tEnabled OPERR..."
+
+	align		0x4
+operr_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmov.l		&0x00002000,%fpcr		# enable OPERR
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0xffff0000,DATA+0x0(%a6)
+	mov.l		&0x00000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+	fmovm.x		DATA(%a6),&0x80
+
+	mov.w		&0x0000,%cc
+operr_0_pc:
+	fadd.s		&0x7f800000,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0xffff0000,IFPREGS+0x0(%a6)
+	mov.l		&0x00000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x01002080,IFPCREGS+0x4(%a6)
+	lea		operr_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+dz_str:
+	string		"\tEnabled DZ..."
+
+	align		0x4
+dz_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmov.l		&0x00000400,%fpcr		# enable DZ
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x40000000,DATA+0x0(%a6)
+	mov.l		&0x80000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+	fmovm.x		DATA(%a6),&0x80
+
+	mov.w		&0x0000,%cc
+dz_0_pc:
+	fdiv.b		&0x0,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x40000000,IFPREGS+0x0(%a6)
+	mov.l		&0x80000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x02000410,IFPCREGS+0x4(%a6)
+	lea		dz_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+unsupp_str:
+	string		"\tUnimplemented data type/format..."
+
+# an unnormalized number
+	align		0x4
+unsupp_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0xc03f0000,DATA+0x0(%a6)
+	mov.l		&0x00000000,DATA+0x4(%a6)
+	mov.l		&0x00000001,DATA+0x8(%a6)
+	fmov.b		&0x2,%fp0
+	mov.w		&0x0000,%cc
+unsupp_0_pc:
+	fmul.x		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0xc0010000,IFPREGS+0x0(%a6)
+	mov.l		&0x80000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x08000000,IFPCREGS+0x4(%a6)
+	lea		unsupp_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# a denormalized number
+unsupp_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x80000000,DATA+0x0(%a6)
+	mov.l		&0x01000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+	fmov.l		&0x7fffffff,%fp0
+
+	mov.w		&0x0000,%cc
+unsupp_1_pc:
+	fmul.x		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x80170000,IFPREGS+0x0(%a6)
+	mov.l		&0xfffffffe,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x08000000,IFPCREGS+0x4(%a6)
+	lea		unsupp_1_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# packed
+unsupp_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0xc1230001,DATA+0x0(%a6)
+	mov.l		&0x23456789,DATA+0x4(%a6)
+	mov.l		&0x12345678,DATA+0x8(%a6)
+
+	mov.w		&0x0000,%cc
+unsupp_2_pc:
+	fabs.p		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x3e660000,IFPREGS+0x0(%a6)
+	mov.l		&0xd0ed23e8,IFPREGS+0x4(%a6)
+	mov.l		&0xd14035bc,IFPREGS+0x8(%a6)
+	mov.l		&0x00000108,IFPCREGS+0x4(%a6)
+	lea		unsupp_2_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+###########################################################
+###########################################################
+
+chkregs:
+	lea		IREGS(%a6),%a0
+	lea		SREGS(%a6),%a1
+	mov.l		&14,%d0
+chkregs_loop:
+	cmp.l		(%a0)+,(%a1)+
+	bne.l		chkregs_error
+	dbra.w		%d0,chkregs_loop
+
+	mov.w		ICCR(%a6),%d0
+	mov.w		SCCR(%a6),%d1
+	cmp.w		%d0,%d1
+	bne.l		chkregs_error
+
+	clr.l		%d0
+	rts
+
+chkregs_error:
+	movq.l		&0x1,%d0
+	rts
+
+error:
+	mov.l		TESTCTR(%a6),%d1
+	movq.l		&0x1,%d0
+	rts
+
+chkfpregs:
+	lea		IFPREGS(%a6),%a0
+	lea		SFPREGS(%a6),%a1
+	mov.l		&23,%d0
+chkfpregs_loop:
+	cmp.l		(%a0)+,(%a1)+
+	bne.l		chkfpregs_error
+	dbra.w		%d0,chkfpregs_loop
+
+	lea		IFPCREGS(%a6),%a0
+	lea		SFPCREGS(%a6),%a1
+	cmp.l		(%a0)+,(%a1)+
+	bne.l		chkfpregs_error
+	cmp.l		(%a0)+,(%a1)+
+	bne.l		chkfpregs_error
+	cmp.l		(%a0)+,(%a1)+
+	bne.l		chkfpregs_error
+
+	clr.l		%d0
+	rts
+
+chkfpregs_error:
+	movq.l		&0x1,%d0
+	rts
+
+DEF_REGS:
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+
+DEF_FPREGS:
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+
+DEF_FPCREGS:
+	long		0x00000000, 0x00000000, 0x00000000
+
+############################################################
+
+_print_str:
+	mov.l		%d0,-(%sp)
+	mov.l		(TESTTOP-0x80+0x0,%pc),%d0
+	pea		(TESTTOP-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+_print_num:
+	mov.l		%d0,-(%sp)
+	mov.l		(TESTTOP-0x80+0x4,%pc),%d0
+	pea		(TESTTOP-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+############################################################
diff --git a/arch/m68k/ifpsp060/src/ilsp.S b/arch/m68k/ifpsp060/src/ilsp.S
new file mode 100644
index 0000000..afa7422
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/ilsp.S
@@ -0,0 +1,932 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# litop.s:
+#	This file is appended to the top of the 060FPLSP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located here.
+#
+
+	bra.l	_060LSP__idivs64_
+	short	0x0000
+	bra.l	_060LSP__idivu64_
+	short	0x0000
+
+	bra.l	_060LSP__imuls64_
+	short	0x0000
+	bra.l	_060LSP__imulu64_
+	short	0x0000
+
+	bra.l	_060LSP__cmp2_Ab_
+	short	0x0000
+	bra.l	_060LSP__cmp2_Aw_
+	short	0x0000
+	bra.l	_060LSP__cmp2_Al_
+	short	0x0000
+	bra.l	_060LSP__cmp2_Db_
+	short	0x0000
+	bra.l	_060LSP__cmp2_Dw_
+	short	0x0000
+	bra.l	_060LSP__cmp2_Dl_
+	short	0x0000
+
+# leave room for possible future additions.
+	align	0x200
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_060LSP__idivu64_(): Emulate 64-bit unsigned div instruction.	#
+#	_060LSP__idivs64_(): Emulate 64-bit signed div instruction.	#
+#									#
+#	This is the library version which is accessed as a subroutine	#
+#	and therefore does not work exactly like the 680X0 div{s,u}.l	#
+#	64-bit divide instruction.					#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	0x4(sp)  = divisor						#
+#	0x8(sp)  = hi(dividend)						#
+#	0xc(sp)  = lo(dividend)						#
+#	0x10(sp) = pointer to location to place quotient/remainder	#
+#									#
+# OUTPUT **************************************************************	#
+#	0x10(sp) = points to location of remainder/quotient.		#
+#		   remainder is in first longword, quotient is in 2nd.	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If the operands are signed, make them unsigned and save the	#
+# sign info for later. Separate out special cases like divide-by-zero	#
+# or 32-bit divides if possible. Else, use a special math algorithm	#
+# to calculate the result.						#
+#	Restore sign info if signed instruction. Set the condition	#
+# codes before performing the final "rts". If the divisor was equal to	#
+# zero, then perform a divide-by-zero using a 16-bit implemented	#
+# divide instruction. This way, the operating system can record that	#
+# the event occurred even though it may not point to the correct place.	#
+#									#
+#########################################################################
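As a reading aid, here is a hedged C model of the calling convention documented in the INPUT/OUTPUT block above (the names are illustrative, not part of the package; the divide-by-zero and overflow exits are only noted in comments):

	#include <stdint.h>

	struct lsp_div64_result {		/* remainder in the first longword, */
		uint32_t rem;			/* quotient in the second */
		uint32_t quot;
	};

	/* models _060LSP__idivu64_(divisor, hi(dividend), lo(dividend), &result) */
	static void divu64_model(uint32_t divisor, uint32_t hi, uint32_t lo,
				 struct lsp_div64_result *res)
	{
		uint64_t dividend = ((uint64_t)hi << 32) | lo;

		if (divisor == 0)
			return;		/* real code stores the dividend, then traps via a word divide by zero */

		if (hi >= divisor)
			return;		/* unsigned overflow: the unchanged dividend is stored back and 'V' is set */

		res->quot = (uint32_t)(dividend / divisor);
		res->rem  = (uint32_t)(dividend % divisor);
	}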
+
+set	POSNEG,		-1
+set	NDIVISOR,	-2
+set	NDIVIDEND,	-3
+set	DDSECOND,	-4
+set	DDNORMAL,	-8
+set	DDQUOTIENT,	-12
+set	DIV64_CC,	-16
+
+##########
+# divs.l #
+##########
+	global		_060LSP__idivs64_
+_060LSP__idivs64_:
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-16
+	movm.l		&0x3f00,-(%sp)		# save d2-d7
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,DIV64_CC(%a6)
+	st		POSNEG(%a6)		# signed operation
+	bra.b		ldiv64_cont
+
+##########
+# divu.l #
+##########
+	global		_060LSP__idivu64_
+_060LSP__idivu64_:
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-16
+	movm.l		&0x3f00,-(%sp)		# save d2-d7
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,DIV64_CC(%a6)
+	sf		POSNEG(%a6)		# unsigned operation
+
+ldiv64_cont:
+	mov.l		0x8(%a6),%d7		# fetch divisor
+
+	beq.w		ldiv64eq0		# divisor is = 0!!!
+
+	mov.l		0xc(%a6), %d5		# get dividend hi
+	mov.l		0x10(%a6), %d6		# get dividend lo
+
+# separate signed and unsigned divide
+	tst.b		POSNEG(%a6)		# signed or unsigned?
+	beq.b		ldspecialcases		# use positive divide
+
+# save the sign of the divisor
+# make divisor unsigned if it's negative
+	tst.l		%d7			# chk sign of divisor
+	slt		NDIVISOR(%a6)		# save sign of divisor
+	bpl.b		ldsgndividend
+	neg.l		%d7			# complement negative divisor
+
+# save the sign of the dividend
+# make dividend unsigned if it's negative
+ldsgndividend:
+	tst.l		%d5			# chk sign of hi(dividend)
+	slt		NDIVIDEND(%a6)		# save sign of dividend
+	bpl.b		ldspecialcases
+
+	mov.w		&0x0, %cc		# clear 'X' cc bit
+	negx.l		%d6			# complement signed dividend
+	negx.l		%d5
+
+# extract some special cases:
+#	- is (dividend == 0) ?
+#	- is (hi(dividend) == 0 && (divisor <= lo(dividend))) ? (32-bit div)
+ldspecialcases:
+	tst.l		%d5			# is (hi(dividend) == 0)
+	bne.b		ldnormaldivide		# no, so try it the long way
+
+	tst.l		%d6			# is (lo(dividend) == 0), too
+	beq.w		lddone			# yes, so (dividend == 0)
+
+	cmp.l		%d7,%d6			# is (divisor <= lo(dividend))
+	bls.b		ld32bitdivide		# yes, so use 32 bit divide
+
+	exg		%d5,%d6			# q = 0, r = dividend
+	bra.w		ldivfinish		# can't divide, we're done.
+
+ld32bitdivide:
+	tdivu.l		%d7, %d5:%d6		# it's only a 32/32 bit div!
+
+	bra.b		ldivfinish
+
+ldnormaldivide:
+# last special case:
+#	- is hi(dividend) >= divisor ? if yes, then overflow
+	cmp.l		%d7,%d5
+	bls.b		lddovf			# answer won't fit in 32 bits
+
+# perform the divide algorithm:
+	bsr.l		ldclassical		# do int divide
+
+# separate into signed and unsigned finishes.
+ldivfinish:
+	tst.b		POSNEG(%a6)		# do divs, divu separately
+	beq.b		lddone			# divu has no processing!!!
+
+# it was a divs.l, so ccode setting is a little more complicated...
+	tst.b		NDIVIDEND(%a6)		# remainder has same sign
+	beq.b		ldcc			# as dividend.
+	neg.l		%d5			# sgn(rem) = sgn(dividend)
+ldcc:
+	mov.b		NDIVISOR(%a6), %d0
+	eor.b		%d0, NDIVIDEND(%a6)	# chk if quotient is negative
+	beq.b		ldqpos			# branch to quot positive
+
+# 0x80000000 is the largest number representable as a 32-bit negative
+# number. the negative of 0x80000000 is 0x80000000.
+	cmpi.l		%d6, &0x80000000	# will (-quot) fit in 32 bits?
+	bhi.b		lddovf
+
+	neg.l		%d6			# make (-quot) 2's comp
+
+	bra.b		lddone
+
+ldqpos:
+	btst		&0x1f, %d6		# will (+quot) fit in 32 bits?
+	bne.b		lddovf
+
+lddone:
+# if the register numbers are the same, only the quotient gets saved.
+# so, if we always save the quotient second, we save ourselves a cmp&beq
+	andi.w		&0x10,DIV64_CC(%a6)
+	mov.w		DIV64_CC(%a6),%cc
+	tst.l		%d6			# may set 'N' ccode bit
+
+# here, the result is in %d5 and %d6. the current strategy is to save
+# the values at the location pointed to by the stacked result pointer.
+# use movm here to not disturb the condition codes.
+ldexit:
+	movm.l		&0x0060,([0x14,%a6])	# save result
+
+# EPILOGUE BEGIN ########################################################
+#	fmovm.l		(%sp)+,&0x0		# restore no fpregs
+	movm.l		(%sp)+,&0x00fc		# restore d2-d7
+	unlk		%a6
+# EPILOGUE END ##########################################################
+
+	rts
+
+# the result should be the unchanged dividend
+lddovf:
+	mov.l		0xc(%a6), %d5		# get dividend hi
+	mov.l		0x10(%a6), %d6		# get dividend lo
+
+	andi.w		&0x1c,DIV64_CC(%a6)
+	ori.w		&0x02,DIV64_CC(%a6)	# set 'V' ccode bit
+	mov.w		DIV64_CC(%a6),%cc
+
+	bra.b		ldexit
+
+ldiv64eq0:
+	mov.l		0xc(%a6),([0x14,%a6])
+	mov.l		0x10(%a6),([0x14,%a6],0x4)
+
+	mov.w		DIV64_CC(%a6),%cc
+
+# EPILOGUE BEGIN ########################################################
+#	fmovm.l		(%sp)+,&0x0		# restore no fpregs
+	movm.l		(%sp)+,&0x00fc		# restore d2-d7
+	unlk		%a6
+# EPILOGUE END ##########################################################
+
+	divu.w		&0x0,%d0		# force a divbyzero exception
+	rts
+
+###########################################################################
+#########################################################################
+# This routine uses the 'classical' Algorithm D from Donald Knuth's	#
+# Art of Computer Programming, vol II, Seminumerical Algorithms.	#
+# For this implementation b=2**16, and the target is U1U2U3U4/V1V2,	#
+# where U,V are words of the quadword dividend and longword divisor,	#
+# and U1, V1 are the most significant words.				#
+#									#
+# The most sig. longword of the 64 bit dividend must be in %d5, least	#
+# in %d6. The divisor must be in the variable ddivisor, and the		#
+# signed/unsigned flag ddusign must be set (0=unsigned,1=signed).	#
+# The quotient is returned in %d6, remainder in %d5, unless the		#
+# v (overflow) bit is set in the saved %ccr. If overflow, the dividend	#
+# is unchanged.								#
+#########################################################################
+ldclassical:
+# if the divisor msw is 0, use a simpler algorithm than the full-blown
+# one at lddknuth:
+
+	cmpi.l		%d7, &0xffff
+	bhi.b		lddknuth		# go use D. Knuth algorithm
+
+# Since the divisor is only a word (and larger than the mslw of the dividend),
+# a simpler algorithm may be used :
+# In the general case, four quotient words would be created by
+# dividing the divisor word into each dividend word. In this case,
+# the first two quotient words must be zero, or overflow would occur.
+# Since we already checked this case above, we can treat the most significant
+# longword of the dividend as (0) remainder (see Knuth) and merely complete
+# the last two divisions to get a quotient longword and word remainder:
+
+	clr.l		%d1
+	swap		%d5			# same as r*b if previous step rqd
+	swap		%d6			# get u3 to lsw position
+	mov.w		%d6, %d5		# rb + u3
+
+	divu.w		%d7, %d5
+
+	mov.w		%d5, %d1		# first quotient word
+	swap		%d6			# get u4
+	mov.w		%d6, %d5		# rb + u4
+
+	divu.w		%d7, %d5
+
+	swap		%d1
+	mov.w		%d5, %d1		# 2nd quotient 'digit'
+	clr.w		%d5
+	swap		%d5			# now remainder
+	mov.l		%d1, %d6		# and quotient
+
+	rts
+
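A hedged C sketch of the word-divisor fast path just completed, expressed as two chained 32-by-16 divides (the function name is illustrative, not from the package):

	#include <stdint.h>

	/* preconditions from the callers above: v fits in 16 bits and hi < v,
	   so each partial dividend is < v * 2^16 and neither divide overflows */
	static uint32_t divu64_by_word(uint32_t hi, uint32_t lo, uint16_t v,
				       uint16_t *rem)
	{
		uint32_t d1 = (hi << 16) | (lo >> 16);			/* r*b + u3 */
		uint16_t q1 = (uint16_t)(d1 / v);
		uint32_t d2 = ((d1 % v) << 16) | (lo & 0xffff);		/* r*b + u4 */
		uint16_t q2 = (uint16_t)(d2 / v);

		*rem = (uint16_t)(d2 % v);
		return ((uint32_t)q1 << 16) | q2;			/* quotient longword */
	}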
+lddknuth:
+# In this algorithm, the divisor is treated as a 2 digit (word) number
+# which is divided into a 3 digit (word) dividend to get one quotient
+# digit (word). After subtraction, the dividend is shifted and the
+# process repeated. Before beginning, the divisor and quotient are
+# 'normalized' so that the process of estimating the quotient digit
+# will yield verifiably correct results.
+
+	clr.l		DDNORMAL(%a6)		# count of shifts for normalization
+	clr.b		DDSECOND(%a6)		# clear flag for quotient digits
+	clr.l		%d1			# %d1 will hold trial quotient
+lddnchk:
+	btst		&31, %d7		# must we normalize? first word of
+	bne.b		lddnormalized		# divisor (V1) must be >= 65536/2
+	addq.l		&0x1, DDNORMAL(%a6)	# count normalization shifts
+	lsl.l		&0x1, %d7		# shift the divisor
+	lsl.l		&0x1, %d6		# shift u4,u3 with overflow to u2
+	roxl.l		&0x1, %d5		# shift u1,u2
+	bra.w		lddnchk
+lddnormalized:
+
+# Now calculate an estimate of the quotient words (msw first, then lsw).
+# The comments use subscripts for the first quotient digit determination.
+	mov.l		%d7, %d3		# divisor
+	mov.l		%d5, %d2		# dividend mslw
+	swap		%d2
+	swap		%d3
+	cmp.w		%d2, %d3		# V1 = U1 ?
+	bne.b		lddqcalc1
+	mov.w		&0xffff, %d1		# use max trial quotient word
+	bra.b		lddadj0
+lddqcalc1:
+	mov.l		%d5, %d1
+
+	divu.w		%d3, %d1		# use quotient of mslw/msw
+
+	andi.l		&0x0000ffff, %d1	# zero any remainder
+lddadj0:
+
+# now test the trial quotient and adjust. This step plus the
+# normalization assures (according to Knuth) that the trial
+# quotient will be at worst 1 too large.
+	mov.l		%d6, -(%sp)
+	clr.w		%d6			# word u3 left
+	swap		%d6			# in lsw position
+lddadj1: mov.l		%d7, %d3
+	mov.l		%d1, %d2
+	mulu.w		%d7, %d2		# V2q
+	swap		%d3
+	mulu.w		%d1, %d3		# V1q
+	mov.l		%d5, %d4		# U1U2
+	sub.l		%d3, %d4		# U1U2 - V1q
+
+	swap		%d4
+
+	mov.w		%d4,%d0
+	mov.w		%d6,%d4			# insert lower word (U3)
+
+	tst.w		%d0			# is upper word set?
+	bne.w		lddadjd1
+
+#	add.l		%d6, %d4		# (U1U2 - V1q) + U3
+
+	cmp.l		%d2, %d4
+	bls.b		lddadjd1		# is V2q > (U1U2-V1q) + U3 ?
+	subq.l		&0x1, %d1		# yes, decrement and recheck
+	bra.b		lddadj1
+lddadjd1:
+# now test the word by multiplying it by the divisor (V1V2) and comparing
+# the 3 digit (word) result with the current dividend words
+	mov.l		%d5, -(%sp)		# save %d5 (%d6 already saved)
+	mov.l		%d1, %d6
+	swap		%d6			# shift answer to ms 3 words
+	mov.l		%d7, %d5
+	bsr.l		ldmm2
+	mov.l		%d5, %d2		# now %d2,%d3 are trial*divisor
+	mov.l		%d6, %d3
+	mov.l		(%sp)+, %d5		# restore dividend
+	mov.l		(%sp)+, %d6
+	sub.l		%d3, %d6
+	subx.l		%d2, %d5		# subtract double precision
+	bcc		ldd2nd			# no carry, do next quotient digit
+	subq.l		&0x1, %d1		# q is one too large
+# need to add back divisor longword to current ms 3 digits of dividend
+# - according to Knuth, this is done only 2 out of 65536 times for random
+# divisor, dividend selection.
+	clr.l		%d2
+	mov.l		%d7, %d3
+	swap		%d3
+	clr.w		%d3			# %d3 now ls word of divisor
+	add.l		%d3, %d6		# aligned with 3rd word of dividend
+	addx.l		%d2, %d5
+	mov.l		%d7, %d3
+	clr.w		%d3			# %d3 now ms word of divisor
+	swap		%d3			# aligned with 2nd word of dividend
+	add.l		%d3, %d5
+ldd2nd:
+	tst.b		DDSECOND(%a6)	# both q words done?
+	bne.b		lddremain
+# first quotient digit now correct. store digit and shift the
+# (subtracted) dividend
+	mov.w		%d1, DDQUOTIENT(%a6)
+	clr.l		%d1
+	swap		%d5
+	swap		%d6
+	mov.w		%d6, %d5
+	clr.w		%d6
+	st		DDSECOND(%a6)		# second digit
+	bra.w		lddnormalized
+lddremain:
+# add 2nd word to quotient, get the remainder.
+	mov.w		%d1, DDQUOTIENT+2(%a6)
+# shift down one word/digit to renormalize remainder.
+	mov.w		%d5, %d6
+	swap		%d6
+	swap		%d5
+	mov.l		DDNORMAL(%a6), %d7	# get norm shift count
+	beq.b		lddrn
+	subq.l		&0x1, %d7		# set for loop count
+lddnlp:
+	lsr.l		&0x1, %d5		# shift into %d6
+	roxr.l		&0x1, %d6
+	dbf		%d7, lddnlp
+lddrn:
+	mov.l		%d6, %d5		# remainder
+	mov.l		DDQUOTIENT(%a6), %d6	# quotient
+
+	rts
+ldmm2:
+# factors for the 32X32->64 multiplication are in %d5 and %d6.
+# returns 64 bit result in %d5 (hi) %d6(lo).
+# destroys %d2,%d3,%d4.
+
+# multiply hi,lo words of each factor to get 4 intermediate products
+	mov.l		%d6, %d2
+	mov.l		%d6, %d3
+	mov.l		%d5, %d4
+	swap		%d3
+	swap		%d4
+	mulu.w		%d5, %d6		# %d6 <- lsw*lsw
+	mulu.w		%d3, %d5		# %d5 <- msw-dest*lsw-source
+	mulu.w		%d4, %d2		# %d2 <- msw-source*lsw-dest
+	mulu.w		%d4, %d3		# %d3 <- msw*msw
+# now use swap and addx to consolidate to two longwords
+	clr.l		%d4
+	swap		%d6
+	add.w		%d5, %d6		# add msw of l*l to lsw of m*l product
+	addx.w		%d4, %d3		# add any carry to m*m product
+	add.w		%d2, %d6		# add in lsw of other m*l product
+	addx.w		%d4, %d3		# add any carry to m*m product
+	swap		%d6			# %d6 is low 32 bits of final product
+	clr.w		%d5
+	clr.w		%d2			# lsw of two mixed products used,
+	swap		%d5			# now use msws of longwords
+	swap		%d2
+	add.l		%d2, %d5
+	add.l		%d3, %d5	# %d5 now ms 32 bits of final product
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_060LSP__imulu64_(): Emulate 64-bit unsigned mul instruction	#
+#	_060LSP__imuls64_(): Emulate 64-bit signed mul instruction.	#
+#									#
+#	This is the library version which is accessed as a subroutine	#
+#	and therefore does not work exactly like the 680X0 mul{s,u}.l	#
+#	64-bit multiply instruction.					#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	0x4(sp) = multiplier						#
+#	0x8(sp) = multiplicand						#
+#	0xc(sp) = pointer to location to place 64-bit result		#
+#									#
+# OUTPUT **************************************************************	#
+#	0xc(sp) = points to location of 64-bit result			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Perform the multiply in pieces using 16x16->32 unsigned		#
+# multiplies and "add" instructions.					#
+#	Set the condition codes as appropriate before performing an	#
+# "rts".								#
+#									#
+#########################################################################
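The ALGORITHM note above (and the ldmm2 helper earlier in this file) describe the classic four-partial-product scheme, laid out in the diagram further down; a hedged C sketch, assuming only 16x16->32 multiplies are available (names are illustrative):

	#include <stdint.h>

	static void mulu32x32_sketch(uint32_t mr, uint32_t md,
				     uint32_t *hi, uint32_t *lo)
	{
		uint32_t p1 = (mr & 0xffff) * (md & 0xffff);	/* [1] lo(mr) * lo(md) */
		uint32_t p2 = (mr >> 16)    * (md & 0xffff);	/* [2] hi(mr) * lo(md) */
		uint32_t p3 = (mr & 0xffff) * (md >> 16);	/* [3] lo(mr) * hi(md) */
		uint32_t p4 = (mr >> 16)    * (md >> 16);	/* [4] hi(mr) * hi(md) */
		/* fold the low words of [2],[3] into the high word of [1];
		   carries out of that sum belong to the upper longword */
		uint32_t mid = (p1 >> 16) + (p2 & 0xffff) + (p3 & 0xffff);

		*lo = (mid << 16) | (p1 & 0xffff);
		*hi = p4 + (p2 >> 16) + (p3 >> 16) + (mid >> 16);
	}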
+
+set MUL64_CC, -4
+
+	global		_060LSP__imulu64_
+_060LSP__imulu64_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,MUL64_CC(%a6)	# save incoming ccodes
+
+	mov.l		0x8(%a6),%d0		# store multiplier in d0
+	beq.w		mulu64_zero		# handle zero separately
+
+	mov.l		0xc(%a6),%d1		# get multiplicand in d1
+	beq.w		mulu64_zero		# handle zero separately
+
+#########################################################################
+#	63			   32				0	#
+#	----------------------------					#
+#	| hi(mplier) * hi(mplicand)|					#
+#	----------------------------					#
+#		     -----------------------------			#
+#		     | hi(mplier) * lo(mplicand) |			#
+#		     -----------------------------			#
+#		     -----------------------------			#
+#		     | lo(mplier) * hi(mplicand) |			#
+#		     -----------------------------			#
+#	  |			   -----------------------------	#
+#	--|--			   | lo(mplier) * lo(mplicand) |	#
+#	  |			   -----------------------------	#
+#	========================================================	#
+#	--------------------------------------------------------	#
+#	|	hi(result)	   |	    lo(result)         |	#
+#	--------------------------------------------------------	#
+#########################################################################
+mulu64_alg:
+# load temp registers with operands
+	mov.l		%d0,%d2			# mr in d2
+	mov.l		%d0,%d3			# mr in d3
+	mov.l		%d1,%d4			# md in d4
+	swap		%d3			# hi(mr) in lo d3
+	swap		%d4			# hi(md) in lo d4
+
+# complete necessary multiplies:
+	mulu.w		%d1,%d0			# [1] lo(mr) * lo(md)
+	mulu.w		%d3,%d1			# [2] hi(mr) * lo(md)
+	mulu.w		%d4,%d2			# [3] lo(mr) * hi(md)
+	mulu.w		%d4,%d3			# [4] hi(mr) * hi(md)
+
+# add lo portions of [2],[3] to hi portion of [1].
+# add carries produced from these adds to [4].
+# lo([1]) is the final lo 16 bits of the result.
+	clr.l		%d4			# load d4 w/ zero value
+	swap		%d0			# hi([1]) <==> lo([1])
+	add.w		%d1,%d0			# hi([1]) + lo([2])
+	addx.l		%d4,%d3			#    [4]  + carry
+	add.w		%d2,%d0			# hi([1]) + lo([3])
+	addx.l		%d4,%d3			#    [4]  + carry
+	swap		%d0			# lo([1]) <==> hi([1])
+
+# lo portions of [2],[3] have been added in to final result.
+# now, clear lo, put hi in lo reg, and add to [4]
+	clr.w		%d1			# clear lo([2])
+	clr.w		%d2			# clear hi([3])
+	swap		%d1			# hi([2]) in lo d1
+	swap		%d2			# hi([3]) in lo d2
+	add.l		%d2,%d1			#    [4]  + hi([2])
+	add.l		%d3,%d1			#    [4]  + hi([3])
+
+# now, grab the condition codes. only one that can be set is 'N'.
+# 'N' CAN be set, even though the operation is unsigned, if bit 63 is set.
+	mov.w		MUL64_CC(%a6),%d4
+	andi.b		&0x10,%d4		# keep old 'X' bit
+	tst.l		%d1			# may set 'N' bit
+	bpl.b		mulu64_ddone
+	ori.b		&0x8,%d4		# set 'N' bit
+mulu64_ddone:
+	mov.w		%d4,%cc
+
+# here, the result is in d1 and d0. the current strategy is to save
+# the values at the location pointed to by a0.
+# use movm here to not disturb the condition codes.
+mulu64_end:
+	exg		%d1,%d0
+	movm.l		&0x0003,([0x10,%a6])		# save result
+
+# EPILOGUE BEGIN ########################################################
+#	fmovm.l		(%sp)+,&0x0		# restore no fpregs
+	movm.l		(%sp)+,&0x001c		# restore d2-d4
+	unlk		%a6
+# EPILOGUE END ##########################################################
+
+	rts
+
+# one or both of the operands is zero so the result is also zero.
+# save the zero result to the register file and set the 'Z' ccode bit.
+mulu64_zero:
+	clr.l		%d0
+	clr.l		%d1
+
+	mov.w		MUL64_CC(%a6),%d4
+	andi.b		&0x10,%d4
+	ori.b		&0x4,%d4
+	mov.w		%d4,%cc			# set 'Z' ccode bit
+
+	bra.b		mulu64_end
+
+##########
+# muls.l #
+##########
+	global		_060LSP__imuls64_
+_060LSP__imuls64_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,MUL64_CC(%a6)	# save incoming ccodes
+
+	mov.l		0x8(%a6),%d0		# store multiplier in d0
+	beq.b		mulu64_zero		# handle zero separately
+
+	mov.l		0xc(%a6),%d1		# get multiplicand in d1
+	beq.b		mulu64_zero		# handle zero separately
+
+	clr.b		%d5			# clear sign tag
+	tst.l		%d0			# is multiplier negative?
+	bge.b		muls64_chk_md_sgn	# no
+	neg.l		%d0			# make multiplier positive
+
+	ori.b		&0x1,%d5		# save multiplier sgn
+
+# the result sign is the exclusive or of the operand sign bits.
+muls64_chk_md_sgn:
+	tst.l		%d1			# is multiplicand negative?
+	bge.b		muls64_alg		# no
+	neg.l		%d1			# make multiplicand positive
+
+	eori.b		&0x1,%d5		# calculate correct sign
+
+#########################################################################
+#	63			   32				0	#
+#	----------------------------					#
+#	| hi(mplier) * hi(mplicand)|					#
+#	----------------------------					#
+#		     -----------------------------			#
+#		     | hi(mplier) * lo(mplicand) |			#
+#		     -----------------------------			#
+#		     -----------------------------			#
+#		     | lo(mplier) * hi(mplicand) |			#
+#		     -----------------------------			#
+#	  |			   -----------------------------	#
+#	--|--			   | lo(mplier) * lo(mplicand) |	#
+#	  |			   -----------------------------	#
+#	========================================================	#
+#	--------------------------------------------------------	#
+#	|	hi(result)	   |	    lo(result)         |	#
+#	--------------------------------------------------------	#
+#########################################################################
+muls64_alg:
+# load temp registers with operands
+	mov.l		%d0,%d2			# mr in d2
+	mov.l		%d0,%d3			# mr in d3
+	mov.l		%d1,%d4			# md in d4
+	swap		%d3			# hi(mr) in lo d3
+	swap		%d4			# hi(md) in lo d4
+
+# complete necessary multiplies:
+	mulu.w		%d1,%d0			# [1] lo(mr) * lo(md)
+	mulu.w		%d3,%d1			# [2] hi(mr) * lo(md)
+	mulu.w		%d4,%d2			# [3] lo(mr) * hi(md)
+	mulu.w		%d4,%d3			# [4] hi(mr) * hi(md)
+
+# add lo portions of [2],[3] to hi portion of [1].
+# add carries produced from these adds to [4].
+# lo([1]) is the final lo 16 bits of the result.
+	clr.l		%d4			# load d4 w/ zero value
+	swap		%d0			# hi([1]) <==> lo([1])
+	add.w		%d1,%d0			# hi([1]) + lo([2])
+	addx.l		%d4,%d3			#    [4]  + carry
+	add.w		%d2,%d0			# hi([1]) + lo([3])
+	addx.l		%d4,%d3			#    [4]  + carry
+	swap		%d0			# lo([1]) <==> hi([1])
+
+# lo portions of [2],[3] have been added in to final result.
+# now, clear lo, put hi in lo reg, and add to [4]
+	clr.w		%d1			# clear lo([2])
+	clr.w		%d2			# clear hi([3])
+	swap		%d1			# hi([2]) in lo d1
+	swap		%d2			# hi([3]) in lo d2
+	add.l		%d2,%d1			#    [4]  + hi([2])
+	add.l		%d3,%d1			#    [4]  + hi([3])
+
+	tst.b		%d5			# should result be signed?
+	beq.b		muls64_done		# no
+
+# result should be a signed negative number.
+# compute 2's complement of the unsigned number:
+#   -negate all bits and add 1
+muls64_neg:
+	not.l		%d0			# negate lo(result) bits
+	not.l		%d1			# negate hi(result) bits
+	addq.l		&1,%d0			# add 1 to lo(result)
+	addx.l		%d4,%d1			# add carry to hi(result)
+
+muls64_done:
+	mov.w		MUL64_CC(%a6),%d4
+	andi.b		&0x10,%d4		# keep old 'X' bit
+	tst.l		%d1			# may set 'N' bit
+	bpl.b		muls64_ddone
+	ori.b		&0x8,%d4		# set 'N' bit
+muls64_ddone:
+	mov.w		%d4,%cc
+
+# here, the result is in d1 and d0. the current strategy is to save
+# the values at the location pointed to by a0.
+# use movm here to not disturb the condition codes.
+muls64_end:
+	exg		%d1,%d0
+	movm.l		&0x0003,([0x10,%a6])	# save result at (a0)
+
+# EPILOGUE BEGIN ########################################################
+#	fmovm.l		(%sp)+,&0x0		# restore no fpregs
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	unlk		%a6
+# EPILOGUE END ##########################################################
+
+	rts
+
+# one or both of the operands is zero so the result is also zero.
+# save the zero result to the register file and set the 'Z' ccode bit.
+muls64_zero:
+	clr.l		%d0
+	clr.l		%d1
+
+	mov.w		MUL64_CC(%a6),%d4
+	andi.b		&0x10,%d4
+	ori.b		&0x4,%d4
+	mov.w		%d4,%cc			# set 'Z' ccode bit
+
+	bra.b		muls64_end
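The muls64_neg step above takes the two's complement of the 64-bit magnitude one half at a time; a minimal C equivalent of that negate (a hypothetical helper, shown only to make the carry propagation explicit):

	#include <stdint.h>

	static void neg64(uint32_t *hi, uint32_t *lo)
	{
		*lo = ~*lo;			/* complement both halves ... */
		*hi = ~*hi;
		if (++*lo == 0)			/* ... then add 1; a carry out of the */
			++*hi;			/* low half propagates into the high half */
	}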
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_060LSP__cmp2_Ab_(): Emulate "cmp2.b An,<ea>".			#
+#	_060LSP__cmp2_Aw_(): Emulate "cmp2.w An,<ea>".			#
+#	_060LSP__cmp2_Al_(): Emulate "cmp2.l An,<ea>".			#
+#	_060LSP__cmp2_Db_(): Emulate "cmp2.b Dn,<ea>".			#
+#	_060LSP__cmp2_Dw_(): Emulate "cmp2.w Dn,<ea>".			#
+#	_060LSP__cmp2_Dl_(): Emulate "cmp2.l Dn,<ea>".			#
+#									#
+#	This is the library version which is accessed as a subroutine	#
+#	and therefore does not work exactly like the 680X0 "cmp2"	#
+#	instruction.							#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	0x4(sp) = Rn							#
+#	0x8(sp) = pointer to boundary pair				#
+#									#
+# OUTPUT **************************************************************	#
+#	cc = condition codes are set correctly				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In the interest of simplicity, all operands are converted to	#
+# longword size whether the operation is byte, word, or long. The	#
+# bounds are sign extended accordingly. If Rn is a data register, Rn is #
+# also sign extended. If Rn is an address register, it need not be sign #
+# extended since the full register is always used.			#
+#	The condition codes are set correctly before the final "rts".	#
+#									#
+#########################################################################
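Once every operand has been widened to 32 bits as described above, the bounds test reduces to a single unsigned comparison; a hedged C model (the name is illustrative, and the CCR packing done at l_cmp2_cmp is deliberately not modeled):

	#include <stdint.h>

	/* Rn is in [lo, hi] iff (Rn - lo) <= (hi - lo) with both differences
	   taken as unsigned 32-bit values; hitting either bound makes one of
	   the two compares in l_cmp2_cmp come out zero, which feeds 'Z' */
	static int cmp2_in_bounds(int32_t rn, int32_t lo, int32_t hi)
	{
		uint32_t a = (uint32_t)rn - (uint32_t)lo;
		uint32_t b = (uint32_t)hi - (uint32_t)lo;

		return a <= b;
	}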
+
+set	CMP2_CC,	-4
+
+	global		_060LSP__cmp2_Ab_
+_060LSP__cmp2_Ab_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,CMP2_CC(%a6)
+	mov.l		0x8(%a6), %d2		# get regval
+
+	mov.b		([0xc,%a6],0x0),%d0
+	mov.b		([0xc,%a6],0x1),%d1
+
+	extb.l		%d0			# sign extend lo bnd
+	extb.l		%d1			# sign extend hi bnd
+	bra.w		l_cmp2_cmp		# go do the compare emulation
+
+	global		_060LSP__cmp2_Aw_
+_060LSP__cmp2_Aw_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,CMP2_CC(%a6)
+	mov.l		0x8(%a6), %d2		# get regval
+
+	mov.w		([0xc,%a6],0x0),%d0
+	mov.w		([0xc,%a6],0x2),%d1
+
+	ext.l		%d0			# sign extend lo bnd
+	ext.l		%d1			# sign extend hi bnd
+	bra.w		l_cmp2_cmp		# go do the compare emulation
+
+	global		_060LSP__cmp2_Al_
+_060LSP__cmp2_Al_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,CMP2_CC(%a6)
+	mov.l		0x8(%a6), %d2		# get regval
+
+	mov.l		([0xc,%a6],0x0),%d0
+	mov.l		([0xc,%a6],0x4),%d1
+	bra.w		l_cmp2_cmp		# go do the compare emulation
+
+	global		_060LSP__cmp2_Db_
+_060LSP__cmp2_Db_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,CMP2_CC(%a6)
+	mov.l		0x8(%a6), %d2		# get regval
+
+	mov.b		([0xc,%a6],0x0),%d0
+	mov.b		([0xc,%a6],0x1),%d1
+
+	extb.l		%d0			# sign extend lo bnd
+	extb.l		%d1			# sign extend hi bnd
+
+# operation is a data register compare.
+# sign extend byte to long so we can do simple longword compares.
+	extb.l		%d2			# sign extend data byte
+	bra.w		l_cmp2_cmp		# go do the compare emulation
+
+	global		_060LSP__cmp2_Dw_
+_060LSP__cmp2_Dw_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,CMP2_CC(%a6)
+	mov.l		0x8(%a6), %d2		# get regval
+
+	mov.w		([0xc,%a6],0x0),%d0
+	mov.w		([0xc,%a6],0x2),%d1
+
+	ext.l		%d0			# sign extend lo bnd
+	ext.l		%d1			# sign extend hi bnd
+
+# operation is a data register compare.
+# sign extend word to long so we can do simple longword compares.
+	ext.l		%d2			# sign extend data word
+	bra.w		l_cmp2_cmp		# go emulate compare
+
+	global		_060LSP__cmp2_Dl_
+_060LSP__cmp2_Dl_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,CMP2_CC(%a6)
+	mov.l		0x8(%a6), %d2		# get regval
+
+	mov.l		([0xc,%a6],0x0),%d0
+	mov.l		([0xc,%a6],0x4),%d1
+
+#
+# To set the ccodes correctly:
+#	(1) save 'Z' bit from (Rn - lo)
+#	(2) save 'Z' and 'C' bits from ((hi - lo) - (Rn - lo))
+#	(3) keep 'X', 'N', and 'V' from before instruction
+#	(4) combine ccodes
+#
+l_cmp2_cmp:
+	sub.l		%d0, %d2		# (Rn - lo)
+	mov.w		%cc, %d3		# fetch resulting ccodes
+	andi.b		&0x4, %d3		# keep 'Z' bit
+	sub.l		%d0, %d1		# (hi - lo)
+	cmp.l		%d1,%d2			# ((hi - lo) - (Rn - lo))
+
+	mov.w		%cc, %d4		# fetch resulting ccodes
+	or.b		%d4, %d3		# combine w/ earlier ccodes
+	andi.b		&0x5, %d3		# keep 'Z' and 'C'
+
+	mov.w		CMP2_CC(%a6), %d4	# fetch old ccodes
+	andi.b		&0x1a, %d4		# keep 'X','N','V' bits
+	or.b		%d3, %d4		# insert new ccodes
+	mov.w		%d4,%cc			# save new ccodes
+
+# EPILOGUE BEGIN ########################################################
+#	fmovm.l		(%sp)+,&0x0		# restore no fpregs
+	movm.l		(%sp)+,&0x001c		# restore d2-d4
+	unlk		%a6
+# EPILOGUE END ##########################################################
+
+	rts
diff --git a/arch/m68k/ifpsp060/src/isp.S b/arch/m68k/ifpsp060/src/isp.S
new file mode 100644
index 0000000..b269091
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/isp.S
@@ -0,0 +1,4299 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# ireal.s:
+#	This file is appended to the top of the 060ISP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located
+# after _060ISP_TABLE.
+#	Also, subroutine stubs exist in this file (_isp_done for
+# example) that are referenced by the ISP package itself in order
+# to call a given routine. The stub routine actually performs the
+# callout. The ISP code does a "bsr" to the stub routine. This
+# extra layer of hierarchy adds a slight performance penalty but
+# it makes the ISP code easier to read and more maintainable.
+#
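Functionally, each stub below behaves like an indirect call through a slot that the integrator fills in; a hedged C analogue of that layering (the struct and names are illustrative only — the real table lives 0x80 bytes in front of _060ISP_TABLE and holds pc-relative offsets, not pointers):

	/* one slot per callout, in the order of the _off_* offsets below */
	struct isp_callouts {
		void (*real_chk)(void);
		void (*real_divbyzero)(void);
		void (*real_trace)(void);
		void (*real_access)(void);
		void (*isp_done)(void);
		/* ... cas/cas2, lock/unlock and the _mem_{read,write} entries follow */
	};

	static void stub_real_trace(const struct isp_callouts *t)
	{
		t->real_trace();	/* roughly what the _real_trace stub amounts to */
	}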
+
+set	_off_chk,	0x00
+set	_off_divbyzero,	0x04
+set	_off_trace,	0x08
+set	_off_access,	0x0c
+set	_off_done,	0x10
+
+set	_off_cas,	0x14
+set	_off_cas2,	0x18
+set	_off_lock,	0x1c
+set	_off_unlock,	0x20
+
+set	_off_imr,	0x40
+set	_off_dmr,	0x44
+set	_off_dmw,	0x48
+set	_off_irw,	0x4c
+set	_off_irl,	0x50
+set	_off_drb,	0x54
+set	_off_drw,	0x58
+set	_off_drl,	0x5c
+set	_off_dwb,	0x60
+set	_off_dww,	0x64
+set	_off_dwl,	0x68
+
+_060ISP_TABLE:
+
+# Here's the table of ENTRY POINTS for those linking the package.
+	bra.l		_isp_unimp
+	short		0x0000
+
+	bra.l		_isp_cas
+	short		0x0000
+
+	bra.l		_isp_cas2
+	short		0x0000
+
+	bra.l		_isp_cas_finish
+	short		0x0000
+
+	bra.l		_isp_cas2_finish
+	short		0x0000
+
+	bra.l		_isp_cas_inrange
+	short		0x0000
+
+	bra.l		_isp_cas_terminate
+	short		0x0000
+
+	bra.l		_isp_cas_restart
+	short		0x0000
+
+	space		64
+
+#############################################################
+
+	global		_real_chk
+_real_chk:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_chk,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_divbyzero
+_real_divbyzero:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_divbyzero,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_trace
+_real_trace:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_trace,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_access
+_real_access:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_access,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_isp_done
+_isp_done:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_done,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#######################################
+
+	global		_real_cas
+_real_cas:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_cas,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_cas2
+_real_cas2:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_cas2,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_lock_page
+_real_lock_page:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_lock,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_unlock_page
+_real_unlock_page:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_unlock,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#######################################
+
+	global		_imem_read
+_imem_read:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_imr,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read
+_dmem_read:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_dmr,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write
+_dmem_write:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_dmw,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_imem_read_word
+_imem_read_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_irw,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_imem_read_long
+_imem_read_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_irl,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_byte
+_dmem_read_byte:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_drb,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_word
+_dmem_read_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_drw,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_long
+_dmem_read_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_drl,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_byte
+_dmem_write_byte:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_dwb,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_word
+_dmem_write_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_dww,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_long
+_dmem_write_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_dwl,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#
+# This file contains a set of define statements for constants
+# in order to promote readability within the core code itself.
+#
+
+set LOCAL_SIZE,		96			# stack frame size(bytes)
+set LV,			-LOCAL_SIZE		# stack offset
+
+set EXC_ISR,		0x4			# stack status register
+set EXC_IPC,		0x6			# stack pc
+set EXC_IVOFF,		0xa			# stacked vector offset
+
+set EXC_AREGS,		LV+64			# offset of all address regs
+set EXC_DREGS,		LV+32			# offset of all data regs
+
+set EXC_A7,		EXC_AREGS+(7*4)		# offset of a7
+set EXC_A6,		EXC_AREGS+(6*4)		# offset of a6
+set EXC_A5,		EXC_AREGS+(5*4)		# offset of a5
+set EXC_A4,		EXC_AREGS+(4*4)		# offset of a4
+set EXC_A3,		EXC_AREGS+(3*4)		# offset of a3
+set EXC_A2,		EXC_AREGS+(2*4)		# offset of a2
+set EXC_A1,		EXC_AREGS+(1*4)		# offset of a1
+set EXC_A0,		EXC_AREGS+(0*4)		# offset of a0
+set EXC_D7,		EXC_DREGS+(7*4)		# offset of d7
+set EXC_D6,		EXC_DREGS+(6*4)		# offset of d6
+set EXC_D5,		EXC_DREGS+(5*4)		# offset of d5
+set EXC_D4,		EXC_DREGS+(4*4)		# offset of d4
+set EXC_D3,		EXC_DREGS+(3*4)		# offset of d3
+set EXC_D2,		EXC_DREGS+(2*4)		# offset of d2
+set EXC_D1,		EXC_DREGS+(1*4)		# offset of d1
+set EXC_D0,		EXC_DREGS+(0*4)		# offset of d0
+
+set EXC_TEMP,		LV+16			# offset of temp stack space
+
+set EXC_SAVVAL,		LV+12			# offset of old areg value
+set EXC_SAVREG,		LV+11			# offset of old areg index
+
+set SPCOND_FLG,		LV+10			# offset of spc condition flg
+
+set EXC_CC,		LV+8			# offset of cc register
+set EXC_EXTWPTR,	LV+4			# offset of current PC
+set EXC_EXTWORD,	LV+2			# offset of current ext opword
+set EXC_OPWORD,		LV+0			# offset of current opword
+
+###########################
+# SPecial CONDition FLaGs #
+###########################
+set mia7_flg,		0x04			# (a7)+ flag
+set mda7_flg,		0x08			# -(a7) flag
+set ichk_flg,		0x10			# chk exception flag
+set idbyz_flg,		0x20			# divbyzero flag
+set restore_flg,	0x40			# restore -(an)+ flag
+set immed_flg,		0x80			# immediate data flag
+
+set mia7_bit,		0x2			# (a7)+ bit
+set mda7_bit,		0x3			# -(a7) bit
+set ichk_bit,		0x4			# chk exception bit
+set idbyz_bit,		0x5			# divbyzero bit
+set restore_bit,	0x6			# restore -(a7)+ bit
+set immed_bit,		0x7			# immediate data bit
+
+#########
+# Misc. #
+#########
+set BYTE,		1			# len(byte) == 1 byte
+set WORD,		2			# len(word) == 2 bytes
+set LONG,		4			# len(longword) == 4 bytes
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_isp_unimp(): 060ISP entry point for Unimplemented Instruction	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	"Unimplemented Integer Instruction" exception in an operating	#
+#	system.								#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_{word,long}() - read instruction word/longword	#
+#	_mul64() - emulate 64-bit multiply				#
+#	_div64() - emulate 64-bit divide				#
+#	_moveperipheral() - emulate "movep"				#
+#	_compandset() - emulate misaligned "cas"			#
+#	_compandset2() - emulate "cas2"					#
+#	_chk2_cmp2() - emulate "cmp2" and "chk2"			#
+#	_isp_done() - "callout" for normal final exit			#
+#	_real_trace() - "callout" for Trace exception			#
+#	_real_chk() - "callout" for Chk exception			#
+#	_real_divbyzero() - "callout" for DZ exception			#
+#	_real_access() - "callout" for access error exception		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the Unimp Int Instr stack frame	#
+#									#
+# OUTPUT **************************************************************	#
+#	If Trace exception:						#
+#	- The system stack changed to contain Trace exc stack frame	#
+#	If Chk exception:						#
+#	- The system stack changed to contain Chk exc stack frame	#
+#	If DZ exception:						#
+#	- The system stack changed to contain DZ exc stack frame	#
+#	If access error exception:					#
+#	- The system stack changed to contain access err exc stk frame	#
+#	Else:								#
+#	- Results saved as appropriate					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This handler fetches the first instruction longword from	#
+# memory and decodes it to determine which of the unimplemented		#
+# integer instructions caused this exception. This handler then calls	#
+# one of _mul64(), _div64(), _moveperipheral(), _compandset(),		#
+# _compandset2(), or _chk2_cmp2() as appropriate.			#
+#	Some of these instructions, by their nature, may produce other	#
+# types of exceptions. "div" can produce a divide-by-zero exception,	#
+# and "chk2" can cause a "Chk" exception. In both cases, the current	#
+# exception stack frame must be converted to an exception stack frame	#
+# of the correct exception type and an exit must be made through	#
+# _real_divbyzero() or _real_chk() as appropriate. In addition, all	#
+# instructions may be executing while Trace is enabled. If so, then	#
+# a Trace exception stack frame must be created and an exit made	#
+# through _real_trace().						#
+#	Meanwhile, if any read or write to memory using the		#
+# _mem_{read,write}() "callout"s returns a failing value, then an	#
+# access error frame must be created and an exit made through		#
+# _real_access().							#
+#	If none of these occur, then a normal exit is made through	#
+# _isp_done().								#
+#									#
+#	This handler, upon entry, saves almost all user-visible		#
+# address and data registers to the stack. Although this may seem to	#
+# cause excess memory traffic, it was found that due to having to	#
+# access these register files for things like data retrieval and <ea>	#
+# calculations, it was more efficient to have them on the stack where	#
+# they could be accessed by indexing rather than to make subroutine	#
+# calls to retrieve a register of a particular index.			#
+#									#
+#########################################################################
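Stripped of the stack-frame handling, the decode described above starts with two bit tests; a hedged C outline follows (only the emulation entry points named in the XREF block are real, the rest is illustrative, and only the mul64/div64 split visible in this section is sketched — the group-2 decode continues past it):

	#include <stdint.h>

	extern void _mul64(void);	/* emulation routines from the XREF list; they */
	extern void _div64(void);	/* take their operands from the saved exception frame */

	static void uieh_decode_sketch(uint16_t opword)
	{
		if (opword & 0x4000) {			/* bit 14 set: mul64/div64 group */
			if (opword & 0x0040)		/* bit 6 separates div64 from mul64 */
				_div64();		/* may flag a divide-by-zero special case */
			else
				_mul64();		/* may bump a7 via (a7)+ addressing */
		}
		/* bit 14 clear: movep/cas/cas2/chk2/cmp2 group, handled by the group2 path */
	}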
+
+	global		_isp_unimp
+_isp_unimp:
+	link.w		%a6,&-LOCAL_SIZE	# create room for stack frame
+
+	movm.l		&0x3fff,EXC_DREGS(%a6)	# store d0-d7/a0-a5
+	mov.l		(%a6),EXC_A6(%a6)	# store a6
+
+	btst		&0x5,EXC_ISR(%a6)	# from s or u mode?
+	bne.b		uieh_s			# supervisor mode
+uieh_u:
+	mov.l		%usp,%a0		# fetch user stack pointer
+	mov.l		%a0,EXC_A7(%a6)		# store a7
+	bra.b		uieh_cont
+uieh_s:
+	lea		0xc(%a6),%a0
+	mov.l		%a0,EXC_A7(%a6)		# store corrected sp
+
+###############################################################################
+
+uieh_cont:
+	clr.b		SPCOND_FLG(%a6)		# clear "special case" flag
+
+	mov.w		EXC_ISR(%a6),EXC_CC(%a6) # store cc copy on stack
+	mov.l		EXC_IPC(%a6),EXC_EXTWPTR(%a6) # store extwptr on stack
+
+#
+# fetch the opword and first extension word pointed to by the stacked pc
+# and store them to the stack for now
+#
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch opword & extword
+	mov.l		%d0,EXC_OPWORD(%a6)	# store opword & extword on stack
+
+
+#########################################################################
+# muls.l	0100 1100 00 |<ea>|	0*** 1100 0000 0***		#
+# mulu.l	0100 1100 00 |<ea>|	0*** 0100 0000 0***		#
+#									#
+# divs.l	0100 1100 01 |<ea>|	0*** 1100 0000 0***		#
+# divu.l	0100 1100 01 |<ea>|	0*** 0100 0000 0***		#
+#									#
+# movep.w m2r	0000 ***1 00 001***	| <displacement>  |		#
+# movep.l m2r	0000 ***1 01 001***	| <displacement>  |		#
+# movep.w r2m	0000 ***1 10 001***	| <displacement>  |		#
+# movep.l r2m	0000 ***1 11 001***	| <displacement>  |		#
+#									#
+# cas.w		0000 1100 11 |<ea>|	0000 000* **00 0***		#
+# cas.l		0000 1110 11 |<ea>|	0000 000* **00 0***		#
+#									#
+# cas2.w	0000 1100 11 111100	**** 000* **00 0***		#
+#					**** 000* **00 0***		#
+# cas2.l	0000 1110 11 111100	**** 000* **00 0***		#
+#					**** 000* **00 0***		#
+#									#
+# chk2.b	0000 0000 11 |<ea>|	**** 1000 0000 0000		#
+# chk2.w	0000 0010 11 |<ea>|	**** 1000 0000 0000		#
+# chk2.l	0000 0100 11 |<ea>|	**** 1000 0000 0000		#
+#									#
+# cmp2.b	0000 0000 11 |<ea>|	**** 0000 0000 0000		#
+# cmp2.w	0000 0010 11 |<ea>|	**** 0000 0000 0000		#
+# cmp2.l	0000 0100 11 |<ea>|	**** 0000 0000 0000		#
+#########################################################################
+
+#
+# using bit 14 of the operation word, separate into 2 groups:
+# (group1) mul64, div64
+# (group2) movep, chk2, cmp2, cas2, cas
+#
+	btst		&0x1e,%d0		# group1 or group2
+	beq.b		uieh_group2		# go handle group2
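+
+# e.g. (using the encodings listed above): "divu.l (%a0),%d1:%d0" has
+# opword 0x4c50, so bit 14 of the opword (bit 0x1e of the fetched longword
+# tested above) is set and decoding stays in group1; "movep.w %d0,(0x0,%a1)"
+# has opword 0x0189, bit 14 is clear, and decoding branches to group2.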
+
+#
+# now, w/ group1, make mul64's decode the fastest since it will
+# most likely be used the most.
+#
+uieh_group1:
+	btst		&0x16,%d0		# test for div64
+	bne.b		uieh_div64		# go handle div64
+
+uieh_mul64:
+# mul64() may use ()+ addressing and may, therefore, alter a7
+
+	bsr.l		_mul64			# _mul64()
+
+	btst		&0x5,EXC_ISR(%a6)	# supervisor mode?
+	beq.w		uieh_done
+	btst		&mia7_bit,SPCOND_FLG(%a6) # was a7 changed?
+	beq.w		uieh_done		# no
+	btst		&0x7,EXC_ISR(%a6)	# is trace enabled?
+	bne.w		uieh_trace_a7		# yes
+	bra.w		uieh_a7			# no
+
+uieh_div64:
+# div64() may use ()+ addressing and may, therefore, alter a7.
+# div64() may take a divide by zero exception.
+
+	bsr.l		_div64			# _div64()
+
+# here, we sort out all of the special cases that may have happened.
+	btst		&mia7_bit,SPCOND_FLG(%a6) # was a7 changed?
+	bne.b		uieh_div64_a7		# yes
+uieh_div64_dbyz:
+	btst		&idbyz_bit,SPCOND_FLG(%a6) # did divide-by-zero occur?
+	bne.w		uieh_divbyzero		# yes
+	bra.w		uieh_done		# no
+uieh_div64_a7:
+	btst		&0x5,EXC_ISR(%a6)	# supervisor mode?
+	beq.b		uieh_div64_dbyz		# no
+# here, a7 has been incremented by 4 bytes in supervisor mode. we still
+# may have the following 3 cases:
+#	(i)	(a7)+
+#	(ii)	(a7)+; trace
+#	(iii)	(a7)+; divide-by-zero
+#
+	btst		&idbyz_bit,SPCOND_FLG(%a6) # did divide-by-zero occur?
+	bne.w		uieh_divbyzero_a7	# yes
+	tst.b		EXC_ISR(%a6)		# no; is trace enabled?
+	bmi.w		uieh_trace_a7		# yes
+	bra.w		uieh_a7			# no
+
+#
+# now, w/ group2, make movep's decode the fastest since it will
+# most likely be used the most.
+#
+uieh_group2:
+	btst		&0x18,%d0		# test for not movep
+	beq.b		uieh_not_movep
+
+
+	bsr.l		_moveperipheral		# _movep()
+	bra.w		uieh_done
+
+uieh_not_movep:
+	btst		&0x1b,%d0		# test for chk2,cmp2
+	beq.b		uieh_chk2cmp2		# go handle chk2,cmp2
+
+	swap		%d0			# put opword in lo word
+	cmpi.b		%d0,&0xfc		# test for cas2
+	beq.b		uieh_cas2		# go handle cas2
+
+uieh_cas:
+
+	bsr.l		_compandset		# _cas()
+
+# the cases of "cas Dc,Du,(a7)+" and "cas Dc,Du,-(a7)" used from supervisor
+# mode are simply not considered valid and therefore are not handled.
+
+	bra.w		uieh_done
+
+uieh_cas2:
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# read extension word
+
+	tst.l		%d1			# ifetch error?
+	bne.w		isp_iacc		# yes
+
+	bsr.l		_compandset2		# _cas2()
+	bra.w		uieh_done
+
+uieh_chk2cmp2:
+# chk2 may take a chk exception
+
+	bsr.l		_chk2_cmp2		# _chk2_cmp2()
+
+# here we check to see if a chk trap should be taken
+	cmpi.b		SPCOND_FLG(%a6),&ichk_flg
+	bne.w		uieh_done
+	bra.b		uieh_chk_trap
+
+###########################################################################
+
+#
+# the required emulation has been completed. now, clean up the necessary stack
+# info and prepare for rte
+#
+uieh_done:
+	mov.b		EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+
+# if exception occurred in user mode, then we have to restore a7 in case it
+# changed. we don't have to update a7 for supervisor mode because that case
+# doesn't flow through here
+	btst		&0x5,EXC_ISR(%a6)	# user or supervisor?
+	bne.b		uieh_finish		# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# fetch user stack pointer
+	mov.l		%a0,%usp		# restore it
+
+uieh_finish:
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+
+	btst		&0x7,EXC_ISR(%a6)	# is trace mode on?
+	bne.b		uieh_trace		# yes;go handle trace mode
+
+	mov.l		EXC_EXTWPTR(%a6),EXC_IPC(%a6) # new pc on stack frame
+	mov.l		EXC_A6(%a6),(%a6)	# prepare new a6 for unlink
+	unlk		%a6			# unlink stack frame
+	bra.l		_isp_done
+
+#
+# The instruction that was just emulated was also being traced. The trace
+# trap for this instruction will be lost unless we jump to the trace handler.
+# So, here we create a Trace Exception format number two exception stack
+# frame from the Unimplemented Integer Instruction Exception stack frame
+# format number zero and jump to the user supplied hook "_real_trace()".
+#
+#		   UIEH FRAME		   TRACE FRAME
+#		*****************	*****************
+#		* 0x0 *  0x0f4	*	*    Current	*
+#		*****************	*      PC	*
+#		*    Current	*	*****************
+#		*      PC	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*      SR	*	*     Next	*
+#		*****************	*      PC	*
+#	      ->*     Old	*	*****************
+#  from link -->*      A6	*	*      SR	*
+#	        *****************	*****************
+#	       /*      A7	*	*      New	* <-- for final unlink
+#	      / *		*	*      A6	*
+# link frame <  *****************	*****************
+#	      \ ~		~	~		~
+#	       \*****************	*****************
+#
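+# the format/vector words written below (0x2024, 0x2018, 0x2014) pack the
+# frame format into their upper four bits and the vector offset into the
+# lower twelve: 0x2024 is format 0x2, vector offset 0x024 (vector 9, Trace);
+# 0x2018 is offset 0x018 (vector 6, CHK); 0x2014 is offset 0x014 (vector 5,
+# integer divide-by-zero).
+#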
+uieh_trace:
+	mov.l		EXC_A6(%a6),-0x4(%a6)
+	mov.w		EXC_ISR(%a6),0x0(%a6)
+	mov.l		EXC_IPC(%a6),0x8(%a6)
+	mov.l		EXC_EXTWPTR(%a6),0x2(%a6)
+	mov.w		&0x2024,0x6(%a6)
+	sub.l		&0x4,%a6
+	unlk		%a6
+	bra.l		_real_trace
+
+#
+#	   UIEH FRAME		    CHK FRAME
+#	*****************	*****************
+#	* 0x0 *  0x0f4	*	*    Current	*
+#	*****************	*      PC	*
+#	*    Current	*	*****************
+#	*      PC	*	* 0x2 *  0x018	*
+#	*****************	*****************
+#	*      SR	*	*     Next	*
+#	*****************	*      PC	*
+#	    (4 words)		*****************
+#				*      SR	*
+#				*****************
+#				    (6 words)
+#
+# the chk2 instruction should take a chk trap. so, here we must create a
+# chk stack frame from an unimplemented integer instruction exception frame
+# and jump to the user supplied entry point "_real_chk()".
+#
+uieh_chk_trap:
+	mov.b		EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+
+	mov.w		EXC_ISR(%a6),(%a6)	# put new SR on stack
+	mov.l		EXC_IPC(%a6),0x8(%a6)	# put "Current PC" on stack
+	mov.l		EXC_EXTWPTR(%a6),0x2(%a6) # put "Next PC" on stack
+	mov.w		&0x2018,0x6(%a6)	# put Vector Offset on stack
+
+	mov.l		EXC_A6(%a6),%a6		# restore a6
+	add.l		&LOCAL_SIZE,%sp		# clear stack frame
+
+	bra.l		_real_chk
+
+#
+#	   UIEH FRAME		 DIVBYZERO FRAME
+#	*****************	*****************
+#	* 0x0 *  0x0f4	*	*    Current	*
+#	*****************	*      PC	*
+#	*    Current	*	*****************
+#	*      PC	*	* 0x2 *  0x014	*
+#	*****************	*****************
+#	*      SR	*	*     Next	*
+#	*****************	*      PC	*
+#	    (4 words)		*****************
+#				*      SR	*
+#				*****************
+#				    (6 words)
+#
+# the divide instruction should take an integer divide by zero trap. so, here
+# we must create a divbyzero stack frame from an unimplemented integer
+# instruction exception frame and jump to the user supplied entry point
+# "_real_divbyzero()".
+#
+uieh_divbyzero:
+	mov.b		EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+
+	mov.w		EXC_ISR(%a6),(%a6)	# put new SR on stack
+	mov.l		EXC_IPC(%a6),0x8(%a6)	# put "Current PC" on stack
+	mov.l		EXC_EXTWPTR(%a6),0x2(%a6) # put "Next PC" on stack
+	mov.w		&0x2014,0x6(%a6)	# put Vector Offset on stack
+
+	mov.l		EXC_A6(%a6),%a6		# restore a6
+	add.l		&LOCAL_SIZE,%sp		# clear stack frame
+
+	bra.l		_real_divbyzero
+
+#
+#				 DIVBYZERO FRAME
+#				*****************
+#				*    Current	*
+#	   UIEH FRAME		*      PC	*
+#	*****************	*****************
+#	* 0x0 *  0x0f4	*	* 0x2 * 0x014	*
+#	*****************	*****************
+#	*    Current	*	*     Next	*
+#	*      PC	*	*      PC	*
+#	*****************	*****************
+#	*      SR	*	*      SR	*
+#	*****************	*****************
+#	    (4 words)		    (6 words)
+#
+# the divide instruction should take an integer divide by zero trap. so, here
+# we must create a divbyzero stack frame from an unimplemented integer
+# instruction exception frame and jump to the user supplied entry point
+# "_real_divbyzero()".
+#
+# However, we must also deal with the fact that (a7)+ was used from supervisor
+# mode, thereby shifting the stack frame up 4 bytes.
+#
+uieh_divbyzero_a7:
+	mov.b		EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+
+	mov.l		EXC_IPC(%a6),0xc(%a6)	# put "Current PC" on stack
+	mov.w		&0x2014,0xa(%a6)	# put Vector Offset on stack
+	mov.l		EXC_EXTWPTR(%a6),0x6(%a6) # put "Next PC" on stack
+
+	mov.l		EXC_A6(%a6),%a6		# restore a6
+	add.l		&4+LOCAL_SIZE,%sp	# clear stack frame
+
+	bra.l		_real_divbyzero
+
+#
+#				   TRACE FRAME
+#				*****************
+#				*    Current	*
+#	   UIEH FRAME		*      PC	*
+#	*****************	*****************
+#	* 0x0 *  0x0f4	*	* 0x2 * 0x024	*
+#	*****************	*****************
+#	*    Current	*	*     Next	*
+#	*      PC	*	*      PC	*
+#	*****************	*****************
+#	*      SR	*	*      SR	*
+#	*****************	*****************
+#	    (4 words)		    (6 words)
+#
+#
+# The instruction that was just emulated was also being traced. The trace
+# trap for this instruction will be lost unless we jump to the trace handler.
+# So, here we create a Trace Exception format number two exception stack
+# frame from the Unimplemented Integer Instruction Exception stack frame
+# format number zero and jump to the user supplied hook "_real_trace()".
+#
+# However, we must also deal with the fact that (a7)+ was used from supervisor
+# mode, thereby shifting the stack frame up 4 bytes.
+#
+uieh_trace_a7:
+	mov.b		EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+
+	mov.l		EXC_IPC(%a6),0xc(%a6)	# put "Current PC" on stack
+	mov.w		&0x2024,0xa(%a6)	# put Vector Offset on stack
+	mov.l		EXC_EXTWPTR(%a6),0x6(%a6) # put "Next PC" on stack
+
+	mov.l		EXC_A6(%a6),%a6		# restore a6
+	add.l		&4+LOCAL_SIZE,%sp	# clear stack frame
+
+	bra.l		_real_trace
+
+#
+#				   UIEH FRAME
+#				*****************
+#				* 0x0 * 0x0f4	*
+#	   UIEH FRAME		*****************
+#	*****************	*     Next	*
+#	* 0x0 *  0x0f4	*	*      PC	*
+#	*****************	*****************
+#	*    Current	*	*      SR	*
+#	*      PC	*	*****************
+#	*****************	    (4 words)
+#	*      SR	*
+#	*****************
+#	    (4 words)
+uieh_a7:
+	mov.b		EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+
+	mov.w		&0x00f4,0xe(%a6)	# put Vector Offset on stack
+	mov.l		EXC_EXTWPTR(%a6),0xa(%a6) # put "Next PC" on stack
+	mov.w		EXC_ISR(%a6),0x8(%a6)	# put SR on stack
+
+	mov.l		EXC_A6(%a6),%a6		# restore a6
+	add.l		&8+LOCAL_SIZE,%sp	# clear stack frame
+	bra.l		_isp_done
+
+##########
+
+# this is the exit point if a data read or write fails.
+# a0 = failing address
+# d0 = fslw
+isp_dacc:
+	mov.l		%a0,(%a6)		# save address
+	mov.l		%d0,-0x4(%a6)		# save partial fslw
+
+	lea		-64(%a6),%sp
+	movm.l		(%sp)+,&0x7fff		# restore d0-d7/a0-a6
+
+	mov.l		0xc(%sp),-(%sp)		# move voff,hi(pc)
+	mov.l		0x4(%sp),0x10(%sp)	# store fslw
+	mov.l		0xc(%sp),0x4(%sp)	# store sr,lo(pc)
+	mov.l		0x8(%sp),0xc(%sp)	# store address
+	mov.l		(%sp)+,0x4(%sp)		# store voff,hi(pc)
+	mov.w		&0x4008,0x6(%sp)	# store new voff
+
+	bra.b		isp_acc_exit
+
+# this is the exit point if an instruction word read fails.
+# FSLW:
+#	misaligned = true
+#	read = true
+#	size = word
+#	instruction = true
+#	software emulation error = true
+isp_iacc:
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+	unlk		%a6			# unlink frame
+	sub.w		&0x8,%sp		# make room for acc frame
+	mov.l		0x8(%sp),(%sp)		# store sr,lo(pc)
+	mov.w		0xc(%sp),0x4(%sp)	# store hi(pc)
+	mov.w		&0x4008,0x6(%sp)	# store new voff
+	mov.l		0x2(%sp),0x8(%sp)	# store address (=pc)
+	mov.l		&0x09428001,0xc(%sp)	# store fslw
+
+isp_acc_exit:
+	btst		&0x5,(%sp)		# user or supervisor?
+	beq.b		isp_acc_exit2		# user
+	bset		&0x2,0xd(%sp)		# set supervisor TM bit
+isp_acc_exit2:
+	bra.l		_real_access
+
+# if the addressing mode was (an)+ or -(an), the address register must
+# be restored to its pre-exception value before entering _real_access.
+isp_restore:
+	cmpi.b		SPCOND_FLG(%a6),&restore_flg # do we need a restore?
+	bne.b		isp_restore_done	# no
+	clr.l		%d0
+	mov.b		EXC_SAVREG(%a6),%d0	# regno to restore
+	mov.l		EXC_SAVVAL(%a6),(EXC_AREGS,%a6,%d0.l*4) # restore value
+isp_restore_done:
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_calc_ea(): routine to calculate effective address		#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_word() - read instruction word			#
+#	_imem_read_long() - read instruction longword			#
+#	_dmem_read_long() - read data longword (for memory indirect)	#
+#	isp_iacc() - handle instruction access error exception		#
+#	isp_dacc() - handle data access error exception			#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = number of bytes related to effective address (w,l)		#
+#									#
+# OUTPUT **************************************************************	#
+#	If exiting through isp_dacc...					#
+#		a0 = failing address					#
+#		d0 = FSLW						#
+#	elsif exiting through isp_iacc...				#
+#		none							#
+#	else								#
+#		a0 = effective address					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The effective address type is decoded from the opword residing	#
+# on the stack. A jump table is used to vector to a routine for the	#
+# appropriate mode. Since none of the emulated integer instructions	#
+# uses byte-sized operands, only handle word and long operations.	#
+#									#
+#	Dn,An	- shouldn't enter here					#
+#	(An)	- fetch An value from stack				#
+#	-(An)	- fetch An value from stack; return decr value;		#
+#		  place decr value on stack; store old value in case of	#
+#		  future access error; if -(a7), set mda7_flg in	#
+#		  SPCOND_FLG						#
+#	(An)+	- fetch An value from stack; return value;		#
+#		  place incr value on stack; store old value in case of	#
+#		  future access error; if (a7)+, set mia7_flg in	#
+#		  SPCOND_FLG						#
+#	(d16,An) - fetch An value from stack; read d16 using		#
+#		  _imem_read_word(); fetch may fail -> branch to	#
+#		  isp_iacc()						#
+#	(xxx).w,(xxx).l - use _imem_read_{word,long}() to fetch		#
+#		  address; fetch may fail				#
+#	#<data> - return address of immediate value; set immed_flg	#
+#		  in SPCOND_FLG						#
+#	(d16,PC) - fetch stacked PC value; read d16 using		#
+#		  _imem_read_word(); fetch may fail -> branch to	#
+#		  isp_iacc()						#
+#	everything else - read needed displacements as appropriate w/	#
+#		  _imem_read_{word,long}(); read may fail; if memory	#
+#		  indirect, read indirect address using			#
+#		  _dmem_read_long() which may also fail			#
+#									#
+#########################################################################
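+
+# e.g. for the effective address (0x10,%a3) the low six bits of the opword
+# are 101 011 (mode 5, register 3), giving table index 0x2b; the entry at
+# 2*0x2b(tbl_ea_mode) vectors to addr_ind_disp_a3 below, which reads the
+# 16-bit displacement with _imem_read_word() and adds it to the stacked a3.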
+
+	global		_calc_ea
+_calc_ea:
+	mov.l		%d0,%a0			# move # bytes to a0
+
+# MODE and REG are taken from the EXC_OPWORD.
+	mov.w		EXC_OPWORD(%a6),%d0	# fetch opcode word
+	mov.w		%d0,%d1			# make a copy
+
+	andi.w		&0x3f,%d0		# extract mode field
+	andi.l		&0x7,%d1		# extract reg  field
+
+# jump to the corresponding function for each {MODE,REG} pair.
+	mov.w		(tbl_ea_mode.b,%pc,%d0.w*2), %d0 # fetch jmp distance
+	jmp		(tbl_ea_mode.b,%pc,%d0.w*1) # jmp to correct ea mode
+
+	swbeg		&64
+tbl_ea_mode:
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+
+	short		addr_ind_a0	-	tbl_ea_mode
+	short		addr_ind_a1	-	tbl_ea_mode
+	short		addr_ind_a2	-	tbl_ea_mode
+	short		addr_ind_a3	-	tbl_ea_mode
+	short		addr_ind_a4	-	tbl_ea_mode
+	short		addr_ind_a5	-	tbl_ea_mode
+	short		addr_ind_a6	-	tbl_ea_mode
+	short		addr_ind_a7	-	tbl_ea_mode
+
+	short		addr_ind_p_a0	-	tbl_ea_mode
+	short		addr_ind_p_a1	-	tbl_ea_mode
+	short		addr_ind_p_a2	-	tbl_ea_mode
+	short		addr_ind_p_a3	-	tbl_ea_mode
+	short		addr_ind_p_a4	-	tbl_ea_mode
+	short		addr_ind_p_a5	-	tbl_ea_mode
+	short		addr_ind_p_a6	-	tbl_ea_mode
+	short		addr_ind_p_a7	-	tbl_ea_mode
+
+	short		addr_ind_m_a0		-	tbl_ea_mode
+	short		addr_ind_m_a1		-	tbl_ea_mode
+	short		addr_ind_m_a2		-	tbl_ea_mode
+	short		addr_ind_m_a3		-	tbl_ea_mode
+	short		addr_ind_m_a4		-	tbl_ea_mode
+	short		addr_ind_m_a5		-	tbl_ea_mode
+	short		addr_ind_m_a6		-	tbl_ea_mode
+	short		addr_ind_m_a7		-	tbl_ea_mode
+
+	short		addr_ind_disp_a0	-	tbl_ea_mode
+	short		addr_ind_disp_a1	-	tbl_ea_mode
+	short		addr_ind_disp_a2	-	tbl_ea_mode
+	short		addr_ind_disp_a3	-	tbl_ea_mode
+	short		addr_ind_disp_a4	-	tbl_ea_mode
+	short		addr_ind_disp_a5	-	tbl_ea_mode
+	short		addr_ind_disp_a6	-	tbl_ea_mode
+	short		addr_ind_disp_a7	-	tbl_ea_mode
+
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+
+	short		abs_short		-	tbl_ea_mode
+	short		abs_long		-	tbl_ea_mode
+	short		pc_ind			-	tbl_ea_mode
+	short		pc_ind_ext		-	tbl_ea_mode
+	short		immediate		-	tbl_ea_mode
+	short		tbl_ea_mode		-	tbl_ea_mode
+	short		tbl_ea_mode		-	tbl_ea_mode
+	short		tbl_ea_mode		-	tbl_ea_mode
+
+###################################
+# Address register indirect: (An) #
+###################################
+addr_ind_a0:
+	mov.l		EXC_A0(%a6),%a0		# Get current a0
+	rts
+
+addr_ind_a1:
+	mov.l		EXC_A1(%a6),%a0		# Get current a1
+	rts
+
+addr_ind_a2:
+	mov.l		EXC_A2(%a6),%a0		# Get current a2
+	rts
+
+addr_ind_a3:
+	mov.l		EXC_A3(%a6),%a0		# Get current a3
+	rts
+
+addr_ind_a4:
+	mov.l		EXC_A4(%a6),%a0		# Get current a4
+	rts
+
+addr_ind_a5:
+	mov.l		EXC_A5(%a6),%a0		# Get current a5
+	rts
+
+addr_ind_a6:
+	mov.l		EXC_A6(%a6),%a0		# Get current a6
+	rts
+
+addr_ind_a7:
+	mov.l		EXC_A7(%a6),%a0		# Get current a7
+	rts
+
+#####################################################
+# Address register indirect w/ postincrement: (An)+ #
+#####################################################
+addr_ind_p_a0:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A0(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A0(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x0,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a1:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A1(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A1(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x1,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a2:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A2(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A2(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x2,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a3:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A3(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A3(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x3,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a4:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A4(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A4(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x4,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a5:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A5(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A5(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x5,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a6:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A6(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A6(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x6,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a7:
+	mov.b		&mia7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A7(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A7(%a6)		# save incremented value
+	rts
+
+####################################################
+# Address register indirect w/ predecrement: -(An) #
+####################################################
+addr_ind_m_a0:
+	mov.l		EXC_A0(%a6),%d0		# Get current a0
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A0(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x0,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a1:
+	mov.l		EXC_A1(%a6),%d0		# Get current a1
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A1(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x1,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a2:
+	mov.l		EXC_A2(%a6),%d0		# Get current a2
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A2(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x2,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a3:
+	mov.l		EXC_A3(%a6),%d0		# Get current a3
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A3(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x3,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a4:
+	mov.l		EXC_A4(%a6),%d0		# Get current a4
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A4(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x4,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a5:
+	mov.l		EXC_A5(%a6),%d0		# Get current a5
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A5(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x5,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a6:
+	mov.l		EXC_A6(%a6),%d0		# Get current a6
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A6(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x6,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a7:
+	mov.b		&mda7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+	mov.l		EXC_A7(%a6),%d0		# Get current a7
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A7(%a6)		# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+########################################################
+# Address register indirect w/ displacement: (d16, An) #
+########################################################
+addr_ind_disp_a0:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A0(%a6),%a0		# a0 + d16
+	rts
+
+addr_ind_disp_a1:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A1(%a6),%a0		# a1 + d16
+	rts
+
+addr_ind_disp_a2:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A2(%a6),%a0		# a2 + d16
+	rts
+
+addr_ind_disp_a3:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A3(%a6),%a0		# a3 + d16
+	rts
+
+addr_ind_disp_a4:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A4(%a6),%a0		# a4 + d16
+	rts
+
+addr_ind_disp_a5:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A5(%a6),%a0		# a5 + d16
+	rts
+
+addr_ind_disp_a6:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A6(%a6),%a0		# a6 + d16
+	rts
+
+addr_ind_disp_a7:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A7(%a6),%a0		# a7 + d16
+	rts
+
+########################################################################
+# Address register indirect w/ index(8-bit displacement): (d8, An, Xn) #
+#    "       "         "    w/   "  (base displacement): (bd, An, Xn)  #
+# Memory indirect postindexed: ([bd, An], Xn, od)		       #
+# Memory indirect preindexed: ([bd, An, Xn], od)		       #
+########################################################################
+_addr_ind_ext:
+	mov.l		%d1,-(%sp)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch extword in d0
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.l		(%sp)+,%d1
+
+	mov.l		(EXC_AREGS,%a6,%d1.w*4),%a0 # put base in a0
+
+	btst		&0x8,%d0
+	beq.b		addr_ind_index_8bit	# brief or full extension word?
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+
+	mov.l		%d0,%d5			# put extword in d5
+	mov.l		%a0,%d3			# put base in d3
+
+	bra.l		calc_mem_ind		# calc memory indirect
+
+addr_ind_index_8bit:
+	mov.l		%d2,-(%sp)		# save old d2
+
+	mov.l		%d0,%d1
+	rol.w		&0x4,%d1
+	andi.w		&0xf,%d1		# extract index regno
+
+	mov.l		(EXC_DREGS,%a6,%d1.w*4),%d1 # fetch index reg value
+
+	btst		&0xb,%d0		# is it word or long?
+	bne.b		aii8_long
+	ext.l		%d1			# sign extend word index
+aii8_long:
+	mov.l		%d0,%d2
+	rol.w		&0x7,%d2
+	andi.l		&0x3,%d2		# extract scale value
+
+	lsl.l		%d2,%d1			# shift index by scale
+
+	extb.l		%d0			# sign extend displacement
+	add.l		%d1,%d0			# index + disp
+	add.l		%d0,%a0			# An + (index + disp)
+
+	mov.l		(%sp)+,%d2		# restore old d2
+	rts
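+
+# e.g. a brief-format extension word of 0xa234 decodes as: index register
+# a2 (bits 15-12 = 1010), word-sized index (bit 11 = 0), scale field 01
+# (multiply by 2) and 8-bit displacement 0x34, so the code above returns
+# <ea> = An + (a2.w * 2) + 0x34.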
+
+######################
+# Immediate: #<data> #
+#########################################################################
+# word, long: <ea> of the data is the current extension word		#
+#	pointer value. new extension word pointer is simply the old	#
+#	plus the number of bytes in the data type(2 or 4).		#
+#########################################################################
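+# e.g. for a divu.l or mulu.l with an immediate source, the <ea> returned
+# here is the address of the immediate longword in the instruction stream;
+# as noted in the _div64() header below, such operands are then fetched
+# with _imem_read_long() rather than a data-space read.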
+immediate:
+	mov.b		&immed_flg,SPCOND_FLG(%a6) # set immediate flag
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch extension word ptr
+	rts
+
+###########################
+# Absolute short: (XXX).W #
+###########################
+abs_short:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch short address
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# return <ea> in a0
+	rts
+
+##########################
+# Absolute long: (XXX).L #
+##########################
+abs_long:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch long address
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.l		%d0,%a0			# return <ea> in a0
+	rts
+
+#######################################################
+# Program counter indirect w/ displacement: (d16, PC) #
+#######################################################
+pc_ind:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch word displacement
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_EXTWPTR(%a6),%a0	# pc + d16
+
+# _imem_read_word() increased the extwptr by 2. need to adjust here.
+	subq.l		&0x2,%a0		# adjust <ea>
+
+	rts
+
+##########################################################
+# PC indirect w/ index(8-bit displacement): (d8, PC, An) #
+# "     "     w/   "  (base displacement): (bd, PC, An)  #
+# PC memory indirect postindexed: ([bd, PC], Xn, od)     #
+# PC memory indirect preindexed: ([bd, PC, Xn], od)      #
+##########################################################
+pc_ind_ext:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch ext word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# put base in a0
+	subq.l		&0x2,%a0		# adjust base
+
+	btst		&0x8,%d0		# is disp only 8 bits?
+	beq.b		pc_ind_index_8bit	# yes
+
+# the indexed addressing mode uses a base displacement of size
+# word or long
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+
+	mov.l		%d0,%d5			# put extword in d5
+	mov.l		%a0,%d3			# put base in d3
+
+	bra.l		calc_mem_ind		# calc memory indirect
+
+pc_ind_index_8bit:
+	mov.l		%d2,-(%sp)		# create a temp register
+
+	mov.l		%d0,%d1			# make extword copy
+	rol.w		&0x4,%d1		# rotate reg num into place
+	andi.w		&0xf,%d1		# extract register number
+
+	mov.l		(EXC_DREGS,%a6,%d1.w*4),%d1 # fetch index reg value
+
+	btst		&0xb,%d0		# is index word or long?
+	bne.b		pii8_long		# long
+	ext.l		%d1			# sign extend word index
+pii8_long:
+	mov.l		%d0,%d2			# make extword copy
+	rol.w		&0x7,%d2		# rotate scale value into place
+	andi.l		&0x3,%d2		# extract scale value
+
+	lsl.l		%d2,%d1			# shift index by scale
+
+	extb.l		%d0			# sign extend displacement
+	add.l		%d1,%d0			# index + disp
+	add.l		%d0,%a0			# An + (index + disp)
+
+	mov.l		(%sp)+,%d2		# restore temp register
+
+	rts
+
+# a5 = exc_extwptr	(global to uaeh)
+# a4 = exc_opword	(global to uaeh)
+# a3 = exc_dregs	(global to uaeh)
+
+# d2 = index		(internal "     "    )
+# d3 = base		(internal "     "    )
+# d4 = od		(internal "     "    )
+# d5 = extword		(internal "     "    )
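+#
+# e.g. (illustrative values) for the postindexed form
+# ([0x40,%a1],%d2.w*2,0x8) with a1 = 0x1000, d2 = 3 and the longword at
+# 0x1040 = 0x2000, the code below reads 0x2000 with _dmem_read_long() and
+# returns <ea> = 0x2000 + 6 + 8 = 0x200e.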
+calc_mem_ind:
+	btst		&0x6,%d5		# is the index suppressed?
+	beq.b		calc_index
+	clr.l		%d2			# yes, so index = 0
+	bra.b		base_supp_ck
+calc_index:
+	bfextu		%d5{&16:&4},%d2
+	mov.l		(EXC_DREGS,%a6,%d2.w*4),%d2
+	btst		&0xb,%d5		# is index word or long?
+	bne.b		no_ext
+	ext.l		%d2
+no_ext:
+	bfextu		%d5{&21:&2},%d0
+	lsl.l		%d0,%d2
+base_supp_ck:
+	btst		&0x7,%d5		# is the bd suppressed?
+	beq.b		no_base_sup
+	clr.l		%d3
+no_base_sup:
+	bfextu		%d5{&26:&2},%d0	# get bd size
+#	beq.l		_error			# if (size == 0) it's reserved
+	cmpi.b		%d0,&2
+	blt.b		no_bd
+	beq.b		get_word_bd
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	bra.b		chk_ind
+get_word_bd:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	ext.l		%d0			# sign extend bd
+
+chk_ind:
+	add.l		%d0,%d3			# base += bd
+no_bd:
+	bfextu		%d5{&30:&2},%d0		# is od suppressed?
+	beq.w		aii_bd
+	cmpi.b		%d0,&0x2
+	blt.b		null_od
+	beq.b		word_od
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	bra.b		add_them
+
+word_od:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	ext.l		%d0			# sign extend od
+	bra.b		add_them
+
+null_od:
+	clr.l		%d0
+add_them:
+	mov.l		%d0,%d4
+	btst		&0x2,%d5		# pre or post indexing?
+	beq.b		pre_indexed
+
+	mov.l		%d3,%a0
+	bsr.l		_dmem_read_long
+
+	tst.l		%d1			# dfetch error?
+	bne.b		calc_ea_err		# yes
+
+	add.l		%d2,%d0			# <ea> += index
+	add.l		%d4,%d0			# <ea> += od
+	bra.b		done_ea
+
+pre_indexed:
+	add.l		%d2,%d3			# preindexing
+	mov.l		%d3,%a0
+	bsr.l		_dmem_read_long
+
+	tst.l		%d1			# ifetch error?
+	bne.b		calc_ea_err		# yes
+
+	add.l		%d4,%d0			# ea += od
+	bra.b		done_ea
+
+aii_bd:
+	add.l		%d2,%d3			# ea = (base + bd) + index
+	mov.l		%d3,%d0
+done_ea:
+	mov.l		%d0,%a0
+
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	rts
+
+# if dmem_read_long() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# FSLW:
+#	read = true
+#	size = longword
+#	TM = data
+#	software emulation error = true
+calc_ea_err:
+	mov.l		%d3,%a0			# pass failing address
+	mov.l		&0x01010001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+#########################################################################
+# XDEF **************************************************************** #
+#	_moveperipheral(): routine to emulate movep instruction		#
+#									#
+# XREF **************************************************************** #
+#	_dmem_read_byte() - read byte from memory			#
+#	_dmem_write_byte() - write byte to memory			#
+#	isp_dacc() - handle data access error exception			#
+#									#
+# INPUT *************************************************************** #
+#	none								#
+#									#
+# OUTPUT ************************************************************** #
+#	If exiting through isp_dacc...					#
+#		a0 = failing address					#
+#		d0 = FSLW						#
+#	else								#
+#		none							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Decode the movep instruction words stored at EXC_OPWORD and	#
+# either read or write the required bytes from/to memory. Use the	#
+# _dmem_{read,write}_byte() routines. If one of the memory routines	#
+# returns a failing value, we must pass the failing address and	a FSLW	#
+# to the _isp_dacc() routine.						#
+#	Since this instruction is used to access peripherals, make sure	#
+# to only access the required bytes.					#
+#									#
+#########################################################################
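+
+# e.g. "movep.w %d0,(0x0,%a1)" with d0 = 0x1234 writes 0x12 to (a1) and
+# 0x34 to (a1+2); the long forms transfer four bytes at (a1), (a1+2),
+# (a1+4) and (a1+6), so only alternate byte addresses are ever accessed.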
+
+###########################
+# movep.(w,l)	Dx,(d,Ay) #
+# movep.(w,l)	(d,Ay),Dx #
+###########################
+	global		_moveperipheral
+_moveperipheral:
+	mov.w		EXC_OPWORD(%a6),%d1	# fetch the opcode word
+
+	mov.b		%d1,%d0
+	and.w		&0x7,%d0		# extract Ay from opcode word
+
+	mov.l		(EXC_AREGS,%a6,%d0.w*4),%a0 # fetch ay
+
+	add.w		EXC_EXTWORD(%a6),%a0	# add: an + sgn_ext(disp)
+
+	btst		&0x7,%d1		# (reg 2 mem) or (mem 2 reg)
+	beq.w		mem2reg
+
+# reg2mem: fetch dx, then write it to memory
+reg2mem:
+	mov.w		%d1,%d0
+	rol.w		&0x7,%d0
+	and.w		&0x7,%d0		# extract Dx from opcode word
+
+	mov.l		(EXC_DREGS,%a6,%d0.w*4), %d0 # fetch dx
+
+	btst		&0x6,%d1		# word or long operation?
+	beq.b		r2mwtrans
+
+# a0 = dst addr
+# d0 = Dx
+r2mltrans:
+	mov.l		%d0,%d2			# store data
+	mov.l		%a0,%a2			# store addr
+	rol.l		&0x8,%d2
+	mov.l		%d2,%d0
+
+	bsr.l		_dmem_write_byte	# os  : write hi
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_write_err		# yes
+
+	add.w		&0x2,%a2		# incr addr
+	mov.l		%a2,%a0
+	rol.l		&0x8,%d2
+	mov.l		%d2,%d0
+
+	bsr.l		_dmem_write_byte	# os  : write 2nd byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_write_err		# yes
+
+	add.w		&0x2,%a2		# incr addr
+	mov.l		%a2,%a0
+	rol.l		&0x8,%d2
+	mov.l		%d2,%d0
+
+	bsr.l		_dmem_write_byte	# os  : write 3rd byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_write_err		# yes
+
+	add.w		&0x2,%a2		# incr addr
+	mov.l		%a2,%a0
+	rol.l		&0x8,%d2
+	mov.l		%d2,%d0
+
+	bsr.l		_dmem_write_byte	# os  : write lo (4th byte)
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_write_err		# yes
+
+	rts
+
+# a0 = dst addr
+# d0 = Dx
+r2mwtrans:
+	mov.l		%d0,%d2			# store data
+	mov.l		%a0,%a2			# store addr
+	lsr.w		&0x8,%d0
+
+	bsr.l		_dmem_write_byte	# os  : write hi
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_write_err		# yes
+
+	add.w		&0x2,%a2
+	mov.l		%a2,%a0
+	mov.l		%d2,%d0
+
+	bsr.l		_dmem_write_byte	# os  : write lo
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_write_err		# yes
+
+	rts
+
+# mem2reg: read the bytes from memory, determine the destination data
+# register, and then write the bytes into it.
+mem2reg:
+	btst		&0x6,%d1		# word or long operation?
+	beq.b		m2rwtrans
+
+# a0 = dst addr
+m2rltrans:
+	mov.l		%a0,%a2			# store addr
+
+	bsr.l		_dmem_read_byte		# read first byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_read_err		# yes
+
+	mov.l		%d0,%d2
+
+	add.w		&0x2,%a2		# incr addr by 2 bytes
+	mov.l		%a2,%a0
+
+	bsr.l		_dmem_read_byte		# read second byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_read_err		# yes
+
+	lsl.w		&0x8,%d2
+	mov.b		%d0,%d2			# append bytes
+
+	add.w		&0x2,%a2		# incr addr by 2 bytes
+	mov.l		%a2,%a0
+
+	bsr.l		_dmem_read_byte		# read third byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_read_err		# yes
+
+	lsl.l		&0x8,%d2
+	mov.b		%d0,%d2			# append bytes
+
+	add.w		&0x2,%a2		# incr addr by 2 bytes
+	mov.l		%a2,%a0
+
+	bsr.l		_dmem_read_byte		# read fourth byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_read_err		# yes
+
+	lsl.l		&0x8,%d2
+	mov.b		%d0,%d2			# append bytes
+
+	mov.b		EXC_OPWORD(%a6),%d1
+	lsr.b		&0x1,%d1
+	and.w		&0x7,%d1		# extract Dx from opcode word
+
+	mov.l		%d2,(EXC_DREGS,%a6,%d1.w*4) # store dx
+
+	rts
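+
+# e.g. for "movep.l (0x0,%a1),%d3" with memory bytes 0x12, 0x34, 0x56 and
+# 0x78 at (a1), (a1+2), (a1+4) and (a1+6), the code above assembles
+# 0x12345678 and stores it in the stacked d3 image.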
+
+# a0 = dst addr
+m2rwtrans:
+	mov.l		%a0,%a2			# store addr
+
+	bsr.l		_dmem_read_byte		# read first byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_read_err		# yes
+
+	mov.l		%d0,%d2
+
+	add.w		&0x2,%a2		# incr addr by 2 bytes
+	mov.l		%a2,%a0
+
+	bsr.l		_dmem_read_byte		# read second byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_read_err		# yes
+
+	lsl.w		&0x8,%d2
+	mov.b		%d0,%d2			# append bytes
+
+	mov.b		EXC_OPWORD(%a6),%d1
+	lsr.b		&0x1,%d1
+	and.w		&0x7,%d1		# extract Dx from opcode word
+
+	mov.w		%d2,(EXC_DREGS+2,%a6,%d1.w*4) # store dx
+
+	rts
+
+# if dmem_{read,write}_byte() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# FSLW:
+#	write = true
+#	size = byte
+#	TM = data
+#	software emulation error = true
+movp_write_err:
+	mov.l		%a2,%a0			# pass failing address
+	mov.l		&0x00a10001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+# FSLW:
+#	read = true
+#	size = byte
+#	TM = data
+#	software emulation error = true
+movp_read_err:
+	mov.l		%a2,%a0			# pass failing address
+	mov.l		&0x01210001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_chk2_cmp2(): routine to emulate chk2/cmp2 instructions		#
+#									#
+# XREF ****************************************************************	#
+#	_calc_ea(): calculate effective address				#
+#	_dmem_read_long(): read operands				#
+#	_dmem_read_word(): read operands				#
+#	isp_dacc(): handle data access error exception			#
+#									#
+# INPUT ***************************************************************	#
+#	none								#
+#									#
+# OUTPUT **************************************************************	#
+#	If exiting through isp_dacc...					#
+#		a0 = failing address					#
+#		d0 = FSLW						#
+#	else								#
+#		none							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	First, calculate the effective address, then fetch the byte,	#
+# word, or longword sized operands. Then, in the interest of		#
+# simplicity, all operands are converted to longword size whether the	#
+# operation is byte, word, or long. The bounds are sign extended	#
+# accordingly. If Rn is a data register, Rn is also sign extended. If	#
+# Rn is an address register, it need not be sign extended since the	#
+# full register is always used.						#
+#	The comparisons are made and the condition codes calculated.	#
+# If the instruction is chk2 and the Rn value is out-of-bounds, set	#
+# the ichk_flg in SPCOND_FLG.						#
+#	If the memory fetch returns a failing value, pass the failing	#
+# address and FSLW to the isp_dacc() routine.				#
+#									#
+#########################################################################
+
+	global		_chk2_cmp2
+_chk2_cmp2:
+
+# the size parameter passed doesn't matter since chk2 & cmp2 can't use
+# predecrement, postincrement, or immediate addressing.
+	bsr.l		_calc_ea		# calculate <ea>
+
+	mov.b		EXC_EXTWORD(%a6), %d0	# fetch hi extension word
+	rol.b		&0x4, %d0		# rotate reg bits into lo
+	and.w		&0xf, %d0		# extract reg bits
+
+	mov.l		(EXC_DREGS,%a6,%d0.w*4), %d2 # get regval
+
+	cmpi.b		EXC_OPWORD(%a6), &0x2	# what size is operation?
+	blt.b		chk2_cmp2_byte		# size == byte
+	beq.b		chk2_cmp2_word		# size == word
+
+# the bounds are longword size. call routine to read the lower
+# bound into d0 and the higher bound into d1.
+chk2_cmp2_long:
+	mov.l		%a0,%a2			# save copy of <ea>
+	bsr.l		_dmem_read_long		# fetch long lower bound
+
+	tst.l		%d1			# dfetch error?
+	bne.w		chk2_cmp2_err_l		# yes
+
+	mov.l		%d0,%d3			# save long lower bound
+	addq.l		&0x4,%a2
+	mov.l		%a2,%a0			# pass <ea> of long upper bound
+	bsr.l		_dmem_read_long		# fetch long upper bound
+
+	tst.l		%d1			# dfetch error?
+	bne.w		chk2_cmp2_err_l		# yes
+
+	mov.l		%d0,%d1			# long upper bound in d1
+	mov.l		%d3,%d0			# long lower bound in d0
+	bra.w		chk2_cmp2_compare	# go do the compare emulation
+
+# the bounds are word size. fetch them in one subroutine call by
+# reading a longword. sign extend both. if it's a data operation,
+# sign extend Rn to long, also.
+chk2_cmp2_word:
+	mov.l		%a0,%a2
+	bsr.l		_dmem_read_long		# fetch 2 word bounds
+
+	tst.l		%d1			# dfetch error?
+	bne.w		chk2_cmp2_err_l		# yes
+
+	mov.w		%d0, %d1		# place hi in %d1
+	swap		%d0			# place lo in %d0
+
+	ext.l		%d0			# sign extend lo bnd
+	ext.l		%d1			# sign extend hi bnd
+
+	btst		&0x7, EXC_EXTWORD(%a6)	# address compare?
+	bne.w		chk2_cmp2_compare	# yes; don't sign extend
+
+# operation is a data register compare.
+# sign extend word to long so we can do simple longword compares.
+	ext.l		%d2			# sign extend data word
+	bra.w		chk2_cmp2_compare	# go emulate compare
+
+# the bounds are byte size. fetch them in one subroutine call by
+# reading a word. sign extend both. if it's a data operation,
+# sign extend Rn to long, also.
+chk2_cmp2_byte:
+	mov.l		%a0,%a2
+	bsr.l		_dmem_read_word		# fetch 2 byte bounds
+
+	tst.l		%d1			# dfetch error?
+	bne.w		chk2_cmp2_err_w		# yes
+
+	mov.b		%d0, %d1		# place hi in %d1
+	lsr.w		&0x8, %d0		# place lo in %d0
+
+	extb.l		%d0			# sign extend lo bnd
+	extb.l		%d1			# sign extend hi bnd
+
+	btst		&0x7, EXC_EXTWORD(%a6)	# address compare?
+	bne.b		chk2_cmp2_compare	# yes; don't sign extend
+
+# operation is a data register compare.
+# sign extend byte to long so we can do simple longword compares.
+	extb.l		%d2			# sign extend data byte
+
+#
+# To set the ccodes correctly:
+#	(1) save 'Z' bit from (Rn - lo)
+#	(2) save 'Z' and 'C' bits from ((hi - lo) - (Rn - lo))
+#	(3) keep 'X', 'N', and 'V' from before instruction
+#	(4) combine ccodes
+#
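+# e.g. with bounds lo = 0x10 and hi = 0x20: for Rn = 0x18, (Rn-lo) = 0x8
+# and (hi-lo) = 0x10, so the compare below does not borrow and 'C' ends up
+# clear (in bounds); for Rn = 0x30, (Rn-lo) = 0x20 exceeds (hi-lo), the
+# borrow sets 'C', and chk2_finish will flag the chk2 trap.
+#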
+chk2_cmp2_compare:
+	sub.l		%d0, %d2		# (Rn - lo)
+	mov.w		%cc, %d3		# fetch resulting ccodes
+	andi.b		&0x4, %d3		# keep 'Z' bit
+	sub.l		%d0, %d1		# (hi - lo)
+	cmp.l		%d1,%d2			# ((hi - lo) - (Rn - lo))
+
+	mov.w		%cc, %d4		# fetch resulting ccodes
+	or.b		%d4, %d3		# combine w/ earlier ccodes
+	andi.b		&0x5, %d3		# keep 'Z' and 'C'
+
+	mov.w		EXC_CC(%a6), %d4	# fetch old ccodes
+	andi.b		&0x1a, %d4		# keep 'X','N','V' bits
+	or.b		%d3, %d4		# insert new ccodes
+	mov.w		%d4, EXC_CC(%a6)	# save new ccodes
+
+	btst		&0x3, EXC_EXTWORD(%a6)	# separate chk2,cmp2
+	bne.b		chk2_finish		# it's a chk2
+
+	rts
+
+# this code handles the only difference between chk2 and cmp2. chk2 would
+# have trapped out if the value was out of bounds. we check this by seeing
+# if the 'C' bit was set by the operation.
+chk2_finish:
+	btst		&0x0, %d4		# is 'C' bit set?
+	bne.b		chk2_trap		# yes;chk2 should trap
+	rts
+chk2_trap:
+	mov.b		&ichk_flg,SPCOND_FLG(%a6) # set "special case" flag
+	rts
+
+# if dmem_read_{long,word}() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# FSLW:
+#	read = true
+#	size = longword
+#	TM = data
+#	software emulation error = true
+chk2_cmp2_err_l:
+	mov.l		%a2,%a0			# pass failing address
+	mov.l		&0x01010001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+# FSLW:
+#	read = true
+#	size = word
+#	TM = data
+#	software emulation error = true
+chk2_cmp2_err_w:
+	mov.l		%a2,%a0			# pass failing address
+	mov.l		&0x01410001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_div64(): routine to emulate div{u,s}.l <ea>,Dr:Dq		#
+#							64/32->32r:32q	#
+#									#
+# XREF ****************************************************************	#
+#	_calc_ea() - calculate effective address			#
+#	isp_iacc() - handle instruction access error exception		#
+#	isp_dacc() - handle data access error exception			#
+#	isp_restore() - restore An on access error w/ -() or ()+	#
+#									#
+# INPUT ***************************************************************	#
+#	none								#
+#									#
+# OUTPUT **************************************************************	#
+#	If exiting through isp_dacc...					#
+#		a0 = failing address					#
+#		d0 = FSLW						#
+#	else								#
+#		none							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	First, decode the operand location. If it's in Dn, fetch from	#
+# the stack. If it's in memory, use _calc_ea() to calculate the		#
+# effective address. Use _dmem_read_long() to fetch at that address.	#
+# Unless the operand is immediate data. Then use _imem_read_long().	#
+# Send failures to isp_dacc() or isp_iacc() as appropriate.		#
+#	If the operands are signed, make them unsigned and save	the	#
+# sign info for later. Separate out special cases like divide-by-zero	#
+# or 32-bit divides if possible. Else, use a special math algorithm	#
+# to calculate the result.						#
+#	Restore sign info if signed instruction. Set the condition	#
+# codes. Set idbyz_flg in SPCOND_FLG if divisor was zero. Store the	#
+# quotient and remainder in the appropriate data registers on the stack.#
+#									#
+#########################################################################
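+
+# e.g. a divu.l of the 64-bit dividend 0x0000000000000064 (100) by 7 finds
+# hi(dividend) == 0 and divisor <= lo(dividend), so the special-case path
+# below does a single 32-bit tdivu and returns quotient 14 in Dq and
+# remainder 2 in Dr.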
+
+set	NDIVISOR,	EXC_TEMP+0x0
+set	NDIVIDEND,	EXC_TEMP+0x1
+set	NDRSAVE,	EXC_TEMP+0x2
+set	NDQSAVE,	EXC_TEMP+0x4
+set	DDSECOND,	EXC_TEMP+0x6
+set	DDQUOTIENT,	EXC_TEMP+0x8
+set	DDNORMAL,	EXC_TEMP+0xc
+
+	global		_div64
+#############
+# div(u,s)l #
+#############
+_div64:
+	mov.b		EXC_OPWORD+1(%a6), %d0
+	andi.b		&0x38, %d0		# extract src mode
+
+	bne.w		dcontrolmodel_s		# divisor in %dn or in memory?
+
+	mov.b		EXC_OPWORD+1(%a6), %d0	# extract Dn from opcode
+	andi.w		&0x7, %d0
+	mov.l		(EXC_DREGS,%a6,%d0.w*4), %d7 # fetch divisor from register
+
+dgotsrcl:
+	beq.w		div64eq0		# divisor is = 0!!!
+
+	mov.b		EXC_EXTWORD+1(%a6), %d0	# extract Dr from extword
+	mov.b		EXC_EXTWORD(%a6), %d1	# extract Dq from extword
+	and.w		&0x7, %d0
+	lsr.b		&0x4, %d1
+	and.w		&0x7, %d1
+	mov.w		%d0, NDRSAVE(%a6)	# save Dr for later
+	mov.w		%d1, NDQSAVE(%a6)	# save Dq for later
+
+# fetch %dr and %dq directly off stack since all regs are saved there
+	mov.l		(EXC_DREGS,%a6,%d0.w*4), %d5 # get dividend hi
+	mov.l		(EXC_DREGS,%a6,%d1.w*4), %d6 # get dividend lo
+
+# separate signed and unsigned divide
+	btst		&0x3, EXC_EXTWORD(%a6)	# signed or unsigned?
+	beq.b		dspecialcases		# use positive divide
+
+# save the sign of the divisor
+# make divisor unsigned if it's negative
+	tst.l		%d7			# chk sign of divisor
+	slt		NDIVISOR(%a6)		# save sign of divisor
+	bpl.b		dsgndividend
+	neg.l		%d7			# complement negative divisor
+
+# save the sign of the dividend
+# make dividend unsigned if it's negative
+dsgndividend:
+	tst.l		%d5			# chk sign of hi(dividend)
+	slt		NDIVIDEND(%a6)		# save sign of dividend
+	bpl.b		dspecialcases
+
+	mov.w		&0x0, %cc		# clear 'X' cc bit
+	negx.l		%d6			# complement signed dividend
+	negx.l		%d5
+
+# extract some special cases:
+#	- is (dividend == 0) ?
+#	- is (hi(dividend) == 0 && (divisor <= lo(dividend))) ? (32-bit div)
+dspecialcases:
+	tst.l		%d5			# is (hi(dividend) == 0)
+	bne.b		dnormaldivide		# no, so try it the long way
+
+	tst.l		%d6			# is (lo(dividend) == 0), too
+	beq.w		ddone			# yes, so (dividend == 0)
+
+	cmp.l		%d7,%d6			# is (divisor <= lo(dividend))
+	bls.b		d32bitdivide		# yes, so use 32 bit divide
+
+	exg		%d5,%d6			# q = 0, r = dividend
+	bra.w		divfinish		# can't divide, we're done.
+
+d32bitdivide:
+	tdivu.l		%d7, %d5:%d6		# it's only a 32/32 bit div!
+
+	bra.b		divfinish
+
+dnormaldivide:
+# last special case:
+#	- is hi(dividend) >= divisor ? if yes, then overflow
+	cmp.l		%d7,%d5
+	bls.b		ddovf			# answer won't fit in 32 bits
+
+# perform the divide algorithm:
+	bsr.l		dclassical		# do int divide
+
+# separate into signed and unsigned finishes.
+divfinish:
+	btst		&0x3, EXC_EXTWORD(%a6)	# do divs, divu separately
+	beq.b		ddone			# divu has no processing!!!
+
+# it was a divs.l, so ccode setting is a little more complicated...
+	tst.b		NDIVIDEND(%a6)		# remainder has same sign
+	beq.b		dcc			# as dividend.
+	neg.l		%d5			# sgn(rem) = sgn(dividend)
+dcc:
+	mov.b		NDIVISOR(%a6), %d0
+	eor.b		%d0, NDIVIDEND(%a6)	# chk if quotient is negative
+	beq.b		dqpos			# branch to quot positive
+
+# 0x80000000 is the largest number representable as a 32-bit negative
+# number. the negative of 0x80000000 is 0x80000000.
+	cmpi.l		%d6, &0x80000000	# will (-quot) fit in 32 bits?
+	bhi.b		ddovf
+
+	neg.l		%d6			# make (-quot) 2's comp
+
+	bra.b		ddone
+
+dqpos:
+	btst		&0x1f, %d6		# will (+quot) fit in 32 bits?
+	bne.b		ddovf
+
+ddone:
+# at this point, result is normal so ccodes are set based on result.
+	mov.w		EXC_CC(%a6), %cc
+	tst.l		%d6			# set %ccode bits
+	mov.w		%cc, EXC_CC(%a6)
+
+	mov.w		NDRSAVE(%a6), %d0	# get Dr off stack
+	mov.w		NDQSAVE(%a6), %d1	# get Dq off stack
+
+# if the register numbers are the same, only the quotient gets saved.
+# so, if we always save the quotient second, we save ourselves a cmp&beq
+	mov.l		%d5, (EXC_DREGS,%a6,%d0.w*4) # save remainder
+	mov.l		%d6, (EXC_DREGS,%a6,%d1.w*4) # save quotient
+
+	rts
+
+ddovf:
+	bset		&0x1, EXC_CC+1(%a6)	# 'V' set on overflow
+	bclr		&0x0, EXC_CC+1(%a6)	# 'C' cleared on overflow
+
+	rts
+
+div64eq0:
+	andi.b		&0x1e, EXC_CC+1(%a6)	# clear 'C' bit on divbyzero
+	ori.b		&idbyz_flg,SPCOND_FLG(%a6) # set "special case" flag
+	rts
+
+###########################################################################
+#########################################################################
+# This routine uses the 'classical' Algorithm D from Donald Knuth's	#
+# Art of Computer Programming, vol II, Seminumerical Algorithms.	#
+# For this implementation b=2**16, and the target is U1U2U3U4/V1V2,	#
+# where U,V are words of the quadword dividend and longword divisor,	#
+# and U1, V1 are the most significant words.				#
+#									#
+# The most sig. longword of the 64 bit dividend must be in %d5, least	#
+# in %d6, and the divisor in %d7. Signed operands have already been	#
+# made positive by the caller, so this routine sees unsigned values.	#
+# The quotient is returned in %d6, remainder in %d5, unless the		#
+# v (overflow) bit is set in the saved %ccr. If overflow, the dividend	#
+# is unchanged.								#
+#########################################################################
+dclassical:
+# if the divisor msw is 0, use a simpler algorithm than the full-blown
+# one at ddknuth:
+
+	cmpi.l		%d7, &0xffff
+	bhi.b		ddknuth			# go use D. Knuth algorithm
+
+# Since the divisor is only a word (and larger than the mslw of the dividend),
+# a simpler algorithm may be used :
+# In the general case, four quotient words would be created by
+# dividing the divisor word into each dividend word. In this case,
+# the first two quotient words must be zero, or overflow would occur.
+# Since we already checked this case above, we can treat the most significant
+# longword of the dividend as (0) remainder (see Knuth) and merely complete
+# the last two divisions to get a quotient longword and word remainder:
+
+	clr.l		%d1
+	swap		%d5			# same as r*b if previous step rqd
+	swap		%d6			# get u3 to lsw position
+	mov.w		%d6, %d5		# rb + u3
+
+	divu.w		%d7, %d5
+
+	mov.w		%d5, %d1		# first quotient word
+	swap		%d6			# get u4
+	mov.w		%d6, %d5		# rb + u4
+
+	divu.w		%d7, %d5
+
+	swap		%d1
+	mov.w		%d5, %d1		# 2nd quotient 'digit'
+	clr.w		%d5
+	swap		%d5			# now remainder
+	mov.l		%d1, %d6		# and quotient
+
+	rts
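+
+# For reference only -- not part of the Motorola sources: the word-divisor
+# shortcut above, restated as a hedged C sketch. It relies on the earlier
+# overflow and divide-by-zero checks having guaranteed that v != 0 and that
+# the upper 32 bits of the dividend are smaller than the divisor; the names
+# below are ours.
+#
+#	#include <stdint.h>
+#
+#	static void div64_by_word(uint64_t u, uint32_t v,  /* 0 < v <= 0xffff */
+#				  uint32_t *quot, uint32_t *rem)
+#	{
+#		uint32_t u2 = (uint32_t)(u >> 32);		/* known < v  */
+#		uint32_t u3 = ((uint32_t)u >> 16) & 0xffff;
+#		uint32_t u4 = (uint32_t)u & 0xffff;
+#		uint32_t t1 = (u2 << 16) | u3;			/* r*b + u3   */
+#		uint32_t q1 = t1 / v, r1 = t1 % v;
+#		uint32_t t2 = (r1 << 16) | u4;			/* r*b + u4   */
+#
+#		*quot = (q1 << 16) | (t2 / v);
+#		*rem  = t2 % v;
+#	}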
+
+ddknuth:
+# In this algorithm, the divisor is treated as a 2 digit (word) number
+# which is divided into a 3 digit (word) dividend to get one quotient
+# digit (word). After subtraction, the dividend is shifted and the
+# process repeated. Before beginning, the divisor and quotient are
+# 'normalized' so that the process of estimating the quotient digit
+# will yield verifiably correct results.
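+#
+# A hedged C restatement (ours, not the package's) of the normalization
+# loop at ddnchk just below: shift divisor and dividend left until the
+# divisor's msb is set, counting the shifts so the remainder can be
+# scaled back afterwards.
+#
+#	#include <stdint.h>
+#
+#	static int normalize(uint32_t *v, uint64_t *u)
+#	{
+#		int shifts = 0;
+#
+#		while (!(*v & 0x80000000u)) {
+#			*v <<= 1;
+#			*u <<= 1;	/* no bit is lost: hi(u) < v here */
+#			shifts++;
+#		}
+#		return shifts;		/* kept in DDNORMAL by the asm */
+#	}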
+
+	clr.l		DDNORMAL(%a6)		# count of shifts for normalization
+	clr.b		DDSECOND(%a6)		# clear flag for quotient digits
+	clr.l		%d1			# %d1 will hold trial quotient
+ddnchk:
+	btst		&31, %d7		# must we normalize? first word of
+	bne.b		ddnormalized		# divisor (V1) must be >= 65536/2
+	addq.l		&0x1, DDNORMAL(%a6)	# count normalization shifts
+	lsl.l		&0x1, %d7		# shift the divisor
+	lsl.l		&0x1, %d6		# shift u4,u3 with overflow to u2
+	roxl.l		&0x1, %d5		# shift u1,u2
+	bra.w		ddnchk
+ddnormalized:
+
+# Now calculate an estimate of the quotient words (msw first, then lsw).
+# The comments use subscripts for the first quotient digit determination.
+	mov.l		%d7, %d3		# divisor
+	mov.l		%d5, %d2		# dividend mslw
+	swap		%d2
+	swap		%d3
+	cmp.w		%d2, %d3		# V1 = U1 ?
+	bne.b		ddqcalc1
+	mov.w		&0xffff, %d1		# use max trial quotient word
+	bra.b		ddadj0
+ddqcalc1:
+	mov.l		%d5, %d1
+
+	divu.w		%d3, %d1		# use quotient of mslw/msw
+
+	andi.l		&0x0000ffff, %d1	# zero any remainder
+ddadj0:
+
+# now test the trial quotient and adjust. This step plus the
+# normalization assures (according to Knuth) that the trial
+# quotient will be at worst 1 too large.
+	mov.l		%d6, -(%sp)
+	clr.w		%d6			# word u3 left
+	swap		%d6			# in lsw position
+ddadj1: mov.l		%d7, %d3
+	mov.l		%d1, %d2
+	mulu.w		%d7, %d2		# V2q
+	swap		%d3
+	mulu.w		%d1, %d3		# V1q
+	mov.l		%d5, %d4		# U1U2
+	sub.l		%d3, %d4		# U1U2 - V1q
+
+	swap		%d4
+
+	mov.w		%d4,%d0
+	mov.w		%d6,%d4			# insert lower word (U3)
+
+	tst.w		%d0			# is upper word set?
+	bne.w		ddadjd1
+
+#	add.l		%d6, %d4		# (U1U2 - V1q) + U3
+
+	cmp.l		%d2, %d4
+	bls.b		ddadjd1			# is V2q > (U1U2-V1q) + U3 ?
+	subq.l		&0x1, %d1		# yes, decrement and recheck
+	bra.b		ddadj1
+ddadjd1:
+# now test the word by multiplying it by the divisor (V1V2) and comparing
+# the 3 digit (word) result with the current dividend words
+	mov.l		%d5, -(%sp)		# save %d5 (%d6 already saved)
+	mov.l		%d1, %d6
+	swap		%d6			# shift answer to ms 3 words
+	mov.l		%d7, %d5
+	bsr.l		dmm2
+	mov.l		%d5, %d2		# now %d2,%d3 are trial*divisor
+	mov.l		%d6, %d3
+	mov.l		(%sp)+, %d5		# restore dividend
+	mov.l		(%sp)+, %d6
+	sub.l		%d3, %d6
+	subx.l		%d2, %d5		# subtract double precision
+	bcc		dd2nd			# no carry, do next quotient digit
+	subq.l		&0x1, %d1		# q is one too large
+# need to add back divisor longword to current ms 3 digits of dividend
+# - according to Knuth, this is done only 2 out of 65536 times for random
+# divisor, dividend selection.
+	clr.l		%d2
+	mov.l		%d7, %d3
+	swap		%d3
+	clr.w		%d3			# %d3 now ls word of divisor
+	add.l		%d3, %d6		# aligned with 3rd word of dividend
+	addx.l		%d2, %d5
+	mov.l		%d7, %d3
+	clr.w		%d3			# %d3 now ms word of divisor
+	swap		%d3			# aligned with 2nd word of dividend
+	add.l		%d3, %d5
+dd2nd:
+	tst.b		DDSECOND(%a6)		# both q words done?
+	bne.b		ddremain
+# first quotient digit now correct. store digit and shift the
+# (subtracted) dividend
+	mov.w		%d1, DDQUOTIENT(%a6)
+	clr.l		%d1
+	swap		%d5
+	swap		%d6
+	mov.w		%d6, %d5
+	clr.w		%d6
+	st		DDSECOND(%a6)		# second digit
+	bra.w		ddnormalized
+ddremain:
+# add 2nd word to quotient, get the remainder.
+	mov.w		%d1, DDQUOTIENT+2(%a6)
+# shift down one word/digit to renormalize remainder.
+	mov.w		%d5, %d6
+	swap		%d6
+	swap		%d5
+	mov.l		DDNORMAL(%a6), %d7	# get norm shift count
+	beq.b		ddrn
+	subq.l		&0x1, %d7		# set for loop count
+ddnlp:
+	lsr.l		&0x1, %d5		# shift into %d6
+	roxr.l		&0x1, %d6
+	dbf		%d7, ddnlp
+ddrn:
+	mov.l		%d6, %d5		# remainder
+	mov.l		DDQUOTIENT(%a6), %d6	# quotient
+
+	rts
+dmm2:
+# factors for the 32X32->64 multiplication are in %d5 and %d6.
+# returns 64 bit result in %d5 (hi) %d6(lo).
+# destroys %d2,%d3,%d4.
+
+# multiply hi,lo words of each factor to get 4 intermediate products
+	mov.l		%d6, %d2
+	mov.l		%d6, %d3
+	mov.l		%d5, %d4
+	swap		%d3
+	swap		%d4
+	mulu.w		%d5, %d6		# %d6 <- lsw*lsw
+	mulu.w		%d3, %d5		# %d5 <- msw-dest*lsw-source
+	mulu.w		%d4, %d2		# %d2 <- msw-source*lsw-dest
+	mulu.w		%d4, %d3		# %d3 <- msw*msw
+# now use swap and addx to consolidate to two longwords
+	clr.l		%d4
+	swap		%d6
+	add.w		%d5, %d6		# add msw of l*l to lsw of m*l product
+	addx.w		%d4, %d3		# add any carry to m*m product
+	add.w		%d2, %d6		# add in lsw of other m*l product
+	addx.w		%d4, %d3		# add any carry to m*m product
+	swap		%d6			# %d6 is low 32 bits of final product
+	clr.w		%d5
+	clr.w		%d2			# lsw of two mixed products used,
+	swap		%d5			# now use msws of longwords
+	swap		%d2
+	add.l		%d2, %d5
+	add.l		%d3, %d5		# %d5 now ms 32 bits of final product
+	rts
+
+##########
+dcontrolmodel_s:
+	movq.l		&LONG,%d0
+	bsr.l		_calc_ea		# calc <ea>
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg # immediate addressing mode?
+	beq.b		dimmed			# yes
+
+	mov.l		%a0,%a2
+	bsr.l		_dmem_read_long		# fetch divisor from <ea>
+
+	tst.l		%d1			# dfetch error?
+	bne.b		div64_err		# yes
+
+	mov.l		%d0, %d7
+	bra.w		dgotsrcl
+
+# we have to split out immediate data here because it must be read using
+# imem_read() instead of dmem_read(). this becomes especially important
+# if the fetch runs into some deadly fault.
+dimmed:
+	addq.l		&0x4,EXC_EXTWPTR(%a6)
+	bsr.l		_imem_read_long		# read immediate value
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.l		%d0,%d7
+	bra.w		dgotsrcl
+
+##########
+
+# if dmem_read_long() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# also, we call isp_restore in case the effective addressing mode was
+# (an)+ or -(an) in which case the previous "an" value must be restored.
+# FSLW:
+#	read = true
+#	size = longword
+#	TM = data
+#	software emulation error = true
+div64_err:
+	bsr.l		isp_restore		# restore addr reg
+	mov.l		%a2,%a0			# pass failing address
+	mov.l		&0x01010001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_mul64(): routine to emulate mul{u,s}.l <ea>,Dh:Dl 32x32->64	#
+#									#
+# XREF ****************************************************************	#
+#	_calc_ea() - calculate effective address			#
+#	isp_iacc() - handle instruction access error exception		#
+#	isp_dacc() - handle data access error exception			#
+#	isp_restore() - restore An on access error w/ -() or ()+	#
+#									#
+# INPUT ***************************************************************	#
+#	none								#
+#									#
+# OUTPUT **************************************************************	#
+#	If exiting through isp_dacc...					#
+#		a0 = failing address					#
+#		d0 = FSLW						#
+#	else								#
+#		none							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	First, decode the operand location. If it's in Dn, fetch from	#
+# the stack. If it's in memory, use _calc_ea() to calculate the		#
+# effective address. Use _dmem_read_long() to fetch at that address.	#
+# Unless the operand is immediate data. Then use _imem_read_long().	#
+# Send failures to isp_dacc() or isp_iacc() as appropriate.		#
+#	If the operands are signed, make them unsigned and save the	#
+# sign info for later. Perform the multiplication using 16x16->32	#
+# unsigned multiplies and "add" instructions. Store the high and low	#
+# portions of the result in the appropriate data registers on the	#
+# stack. Calculate the condition codes, also.				#
+#									#
+#########################################################################
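+
+# Illustrative only (not from the Motorola sources): the same 32x32->64
+# decomposition into 16x16->32 partial products, sketched in C. The names
+# are ours; the register-file bookkeeping of the real code is omitted.
+#
+#	#include <stdint.h>
+#
+#	static void mul32x32(uint32_t mr, uint32_t md,
+#			     uint32_t *hi, uint32_t *lo)
+#	{
+#		uint32_t ll = (mr & 0xffff) * (md & 0xffff);	/* [1] */
+#		uint32_t hl = (mr >> 16)   * (md & 0xffff);	/* [2] */
+#		uint32_t lh = (mr & 0xffff) * (md >> 16);	/* [3] */
+#		uint32_t hh = (mr >> 16)   * (md >> 16);	/* [4] */
+#		uint32_t mid = (ll >> 16) + (hl & 0xffff) + (lh & 0xffff);
+#
+#		*lo = (mid << 16) | (ll & 0xffff);
+#		*hi = hh + (hl >> 16) + (lh >> 16) + (mid >> 16);
+#	}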
+
+#############
+# mul(u,s)l #
+#############
+	global		_mul64
+_mul64:
+	mov.b		EXC_OPWORD+1(%a6), %d0	# extract src {mode,reg}
+	cmpi.b		%d0, &0x7		# is src mode Dn or other?
+	bgt.w		mul64_memop		# src is in memory
+
+# multiplier operand in the data register file.
+# must extract the register number and fetch the operand from the stack.
+mul64_regop:
+	andi.w		&0x7, %d0		# extract Dn
+	mov.l		(EXC_DREGS,%a6,%d0.w*4), %d3 # fetch multiplier
+
+# multiplier is in %d3. now, extract Dl and Dh fields and fetch the
+# multiplicand from the data register specified by Dl.
+mul64_multiplicand:
+	mov.w		EXC_EXTWORD(%a6), %d2	# fetch ext word
+	clr.w		%d1			# clear Dh reg
+	mov.b		%d2, %d1		# grab Dh
+	rol.w		&0x4, %d2		# align Dl byte
+	andi.w		&0x7, %d2		# extract Dl
+
+	mov.l		(EXC_DREGS,%a6,%d2.w*4), %d4 # get multiplicand
+
+# check for the case of "zero" result early
+	tst.l		%d4			# test multiplicand
+	beq.w		mul64_zero		# handle zero separately
+	tst.l		%d3			# test multiplier
+	beq.w		mul64_zero		# handle zero separately
+
+# multiplier is in %d3 and multiplicand is in %d4.
+# if the operation is to be signed, then the operands are converted
+# to unsigned and the result sign is saved for the end.
+	clr.b		EXC_TEMP(%a6)		# clear temp space
+	btst		&0x3, EXC_EXTWORD(%a6)	# signed or unsigned?
+	beq.b		mul64_alg		# unsigned; skip sgn calc
+
+	tst.l		%d3			# is multiplier negative?
+	bge.b		mul64_chk_md_sgn	# no
+	neg.l		%d3			# make multiplier positive
+	ori.b		&0x1, EXC_TEMP(%a6)	# save multiplier sgn
+
+# the result sign is the exclusive or of the operand sign bits.
+mul64_chk_md_sgn:
+	tst.l		%d4			# is multiplicand negative?
+	bge.b		mul64_alg		# no
+	neg.l		%d4			# make multiplicand positive
+	eori.b		&0x1, EXC_TEMP(%a6)	# calculate correct sign
+
+#########################################################################
+#	63			   32				0	#
+#	----------------------------					#
+#	| hi(mplier) * hi(mplicand)|					#
+#	----------------------------					#
+#		     -----------------------------			#
+#		     | hi(mplier) * lo(mplicand) |			#
+#		     -----------------------------			#
+#		     -----------------------------			#
+#		     | lo(mplier) * hi(mplicand) |			#
+#		     -----------------------------			#
+#	  |			   -----------------------------	#
+#	--|--			   | lo(mplier) * lo(mplicand) |	#
+#	  |			   -----------------------------	#
+#	========================================================	#
+#	--------------------------------------------------------	#
+#	|	hi(result)	   |	    lo(result)         |	#
+#	--------------------------------------------------------	#
+#########################################################################
+mul64_alg:
+# load temp registers with operands
+	mov.l		%d3, %d5		# mr in %d5
+	mov.l		%d3, %d6		# mr in %d6
+	mov.l		%d4, %d7		# md in %d7
+	swap		%d6			# hi(mr) in lo %d6
+	swap		%d7			# hi(md) in lo %d7
+
+# complete necessary multiplies:
+	mulu.w		%d4, %d3		# [1] lo(mr) * lo(md)
+	mulu.w		%d6, %d4		# [2] hi(mr) * lo(md)
+	mulu.w		%d7, %d5		# [3] lo(mr) * hi(md)
+	mulu.w		%d7, %d6		# [4] hi(mr) * hi(md)
+
+# add lo portions of [2],[3] to hi portion of [1].
+# add carries produced from these adds to [4].
+# lo([1]) is the final lo 16 bits of the result.
+	clr.l		%d7			# load %d7 w/ zero value
+	swap		%d3			# hi([1]) <==> lo([1])
+	add.w		%d4, %d3		# hi([1]) + lo([2])
+	addx.l		%d7, %d6		#    [4]  + carry
+	add.w		%d5, %d3		# hi([1]) + lo([3])
+	addx.l		%d7, %d6		#    [4]  + carry
+	swap		%d3			# lo([1]) <==> hi([1])
+
+# lo portions of [2],[3] have been added in to final result.
+# now, clear lo, put hi in lo reg, and add to [4]
+	clr.w		%d4			# clear lo([2])
+	clr.w		%d5			# clear hi([3])
+	swap		%d4			# hi([2]) in lo %d4
+	swap		%d5			# hi([3]) in lo %d5
+	add.l		%d5, %d4		#    [4]  + hi([2])
+	add.l		%d6, %d4		#    [4]  + hi([3])
+
+# unsigned result is now in {%d4,%d3}
+	tst.b		EXC_TEMP(%a6)		# should result be signed?
+	beq.b		mul64_done		# no
+
+# result should be a signed negative number.
+# compute 2's complement of the unsigned number:
+#   -negate all bits and add 1
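+#
+# In C terms (a hedged sketch, not part of the package), the negation done
+# below is the usual 64-bit two's complement built from 32-bit halves:
+#
+#	#include <stdint.h>
+#
+#	static void neg64(uint32_t *hi, uint32_t *lo)
+#	{
+#		*hi = ~*hi;
+#		*lo = ~*lo;
+#		if (++*lo == 0)		/* the addx below propagates this carry */
+#			++*hi;
+#	}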
+mul64_neg:
+	not.l		%d3			# negate lo(result) bits
+	not.l		%d4			# negate hi(result) bits
+	addq.l		&1, %d3			# add 1 to lo(result)
+	addx.l		%d7, %d4		# add carry to hi(result)
+
+# the result is saved to the register file.
+# for '040 compatibility, if Dl == Dh then only the hi(result) is
+# saved. so, saving hi after lo accomplishes this without need to
+# check Dl,Dh equality.
+mul64_done:
+	mov.l		%d3, (EXC_DREGS,%a6,%d2.w*4) # save lo(result)
+	mov.w		&0x0, %cc
+	mov.l		%d4, (EXC_DREGS,%a6,%d1.w*4) # save hi(result)
+
+# now, grab the condition codes. only one that can be set is 'N'.
+# 'N' CAN be set even for an unsigned operation, if bit 63 of the result is set.
+	mov.w		%cc, %d7		# fetch %ccr to see if 'N' set
+	andi.b		&0x8, %d7		# extract 'N' bit
+
+mul64_ccode_set:
+	mov.b		EXC_CC+1(%a6), %d6	# fetch previous %ccr
+	andi.b		&0x10, %d6		# all but 'X' bit changes
+
+	or.b		%d7, %d6		# group 'X' and 'N'
+	mov.b		%d6, EXC_CC+1(%a6)	# save new %ccr
+
+	rts
+
+# one or both of the operands is zero so the result is also zero.
+# save the zero result to the register file and set the 'Z' ccode bit.
+mul64_zero:
+	clr.l		(EXC_DREGS,%a6,%d2.w*4) # save lo(result)
+	clr.l		(EXC_DREGS,%a6,%d1.w*4) # save hi(result)
+
+	movq.l		&0x4, %d7		# set 'Z' ccode bit
+	bra.b		mul64_ccode_set		# finish ccode set
+
+##########
+
+# multiplier operand is in memory at the effective address.
+# must calculate the <ea> and go fetch the 32-bit operand.
+mul64_memop:
+	movq.l		&LONG, %d0		# pass # of bytes
+	bsr.l		_calc_ea		# calculate <ea>
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg # immediate addressing mode?
+	beq.b		mul64_immed		# yes
+
+	mov.l		%a0,%a2
+	bsr.l		_dmem_read_long		# fetch src from addr (%a0)
+
+	tst.l		%d1			# dfetch error?
+	bne.w		mul64_err		# yes
+
+	mov.l		%d0, %d3		# store multiplier in %d3
+
+	bra.w		mul64_multiplicand
+
+# we have to split out immediate data here because it must be read using
+# imem_read() instead of dmem_read(). this becomes especially important
+# if the fetch runs into some deadly fault.
+mul64_immed:
+	addq.l		&0x4,EXC_EXTWPTR(%a6)
+	bsr.l		_imem_read_long		# read immediate value
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.l		%d0,%d3
+	bra.w		mul64_multiplicand
+
+##########
+
+# if dmem_read_long() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# also, we call isp_restore in case the effective addressing mode was
+# (an)+ or -(an) in which case the previous "an" value must be restored.
+# FSLW:
+#	read = true
+#	size = longword
+#	TM = data
+#	software emulation error = true
+mul64_err:
+	bsr.l		isp_restore		# restore addr reg
+	mov.l		%a2,%a0			# pass failing address
+	mov.l		&0x01010001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_compandset2(): routine to emulate cas2()			#
+#			(internal to package)				#
+#									#
+#	_isp_cas2_finish(): store ccodes, store compare regs		#
+#			    (external to package)			#
+#									#
+# XREF ****************************************************************	#
+#	_real_lock_page() - "callout" to lock op's page from page-outs	#
+#	_cas_terminate2() - access error exit				#
+#	_real_cas2() - "callout" to core cas2 emulation code		#
+#	_real_unlock_page() - "callout" to unlock page			#
+#									#
+# INPUT ***************************************************************	#
+# _compandset2():							#
+#	d0 = instruction extension word					#
+#									#
+# _isp_cas2_finish():							#
+#	see cas2 core emulation code					#
+#									#
+# OUTPUT **************************************************************	#
+# _compandset2():							#
+#	see cas2 core emulation code					#
+#									#
+# _isp_cas2_finish():							#
+#	None (register file or memory changed as appropriate)		#
+#									#
+# ALGORITHM ***********************************************************	#
+# compandset2():							#
+#	Decode the instruction and fetch the appropriate Update and	#
+# Compare operands. Then call the "callout" _real_lock_page() for each	#
+# memory operand address so that the operating system can keep these	#
+# pages from being paged out. If either _real_lock_page() fails, exit	#
+# through _cas_terminate2(). Don't forget to unlock the 1st locked page	#
+# using _real_unlock_page() if the 2nd lock-page fails.		#
+# Finally, branch to the core cas2 emulation code by calling the	#
+# "callout" _real_cas2().						#
+#									#
+# _isp_cas2_finish():							#
+#	Re-perform the comparison so we can determine the condition	#
+# codes which were too much trouble to keep around during the locked	#
+# emulation. Then unlock each operand's page by calling the "callout"	#
+# _real_unlock_page().							#
+#									#
+#########################################################################
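+
+# The locking protocol described above, restated as a hedged C sketch. The
+# prototypes are our stand-ins for the "callout" routines named in the XREF
+# section; the argument order and the wrapper itself are assumptions.
+#
+#	extern int  real_lock_page(void *addr, int size, int mode);
+#	extern void real_unlock_page(void *addr, int size, int mode);
+#	extern void real_cas2(void);
+#	extern void cas_terminate2(void *addr, int fslw);
+#
+#	static void compandset2_sketch(void *addr1, void *addr2,
+#				       int size, int mode)
+#	{
+#		int err = real_lock_page(addr1, size, mode);
+#
+#		if (err) {
+#			cas_terminate2(addr1, err);	/* 1st lock failed */
+#			return;
+#		}
+#		err = real_lock_page(addr2, size, mode);
+#		if (err) {
+#			real_unlock_page(addr1, size, mode); /* back out 1st lock */
+#			cas_terminate2(addr2, err);	/* then bail out */
+#			return;
+#		}
+#		real_cas2();	/* core code; re-enters at _isp_cas2_finish */
+#	}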
+
+set ADDR1,	EXC_TEMP+0xc
+set ADDR2,	EXC_TEMP+0x0
+set DC2,	EXC_TEMP+0xa
+set DC1,	EXC_TEMP+0x8
+
+	global		_compandset2
+_compandset2:
+	mov.l		%d0,EXC_TEMP+0x4(%a6)		# store for possible restart
+	mov.l		%d0,%d1			# extension word in d0
+
+	rol.w		&0x4,%d0
+	andi.w		&0xf,%d0		# extract Rn2
+	mov.l		(EXC_DREGS,%a6,%d0.w*4),%a1 # fetch ADDR2
+	mov.l		%a1,ADDR2(%a6)
+
+	mov.l		%d1,%d0
+
+	lsr.w		&0x6,%d1
+	andi.w		&0x7,%d1		# extract Du2
+	mov.l		(EXC_DREGS,%a6,%d1.w*4),%d5 # fetch Update2 Op
+
+	andi.w		&0x7,%d0		# extract Dc2
+	mov.l		(EXC_DREGS,%a6,%d0.w*4),%d3 # fetch Compare2 Op
+	mov.w		%d0,DC2(%a6)
+
+	mov.w		EXC_EXTWORD(%a6),%d0
+	mov.l		%d0,%d1
+
+	rol.w		&0x4,%d0
+	andi.w		&0xf,%d0		# extract Rn1
+	mov.l		(EXC_DREGS,%a6,%d0.w*4),%a0 # fetch ADDR1
+	mov.l		%a0,ADDR1(%a6)
+
+	mov.l		%d1,%d0
+
+	lsr.w		&0x6,%d1
+	andi.w		&0x7,%d1		# extract Du1
+	mov.l		(EXC_DREGS,%a6,%d1.w*4),%d4 # fetch Update1 Op
+
+	andi.w		&0x7,%d0		# extract Dc1
+	mov.l		(EXC_DREGS,%a6,%d0.w*4),%d2 # fetch Compare1 Op
+	mov.w		%d0,DC1(%a6)
+
+	btst		&0x1,EXC_OPWORD(%a6)	# word or long?
+	sne		%d7
+
+	btst		&0x5,EXC_ISR(%a6)	# user or supervisor?
+	sne		%d6
+
+	mov.l		%a0,%a2
+	mov.l		%a1,%a3
+
+	mov.l		%d7,%d1			# pass size
+	mov.l		%d6,%d0			# pass mode
+	bsr.l		_real_lock_page		# lock page
+	mov.l		%a2,%a0
+	tst.l		%d0			# error?
+	bne.l		_cas_terminate2		# yes
+
+	mov.l		%d7,%d1			# pass size
+	mov.l		%d6,%d0			# pass mode
+	mov.l		%a3,%a0			# pass addr
+	bsr.l		_real_lock_page		# lock page
+	mov.l		%a3,%a0
+	tst.l		%d0			# error?
+	bne.b		cas_preterm		# yes
+
+	mov.l		%a2,%a0
+	mov.l		%a3,%a1
+
+	bra.l		_real_cas2
+
+# if the 2nd lock attempt fails, then we must still unlock the
+# first page(s).
+cas_preterm:
+	mov.l		%d0,-(%sp)		# save FSLW
+	mov.l		%d7,%d1			# pass size
+	mov.l		%d6,%d0			# pass mode
+	mov.l		%a2,%a0			# pass ADDR1
+	bsr.l		_real_unlock_page	# unlock first page(s)
+	mov.l		(%sp)+,%d0		# restore FSLW
+	mov.l		%a3,%a0			# pass failing addr
+	bra.l		_cas_terminate2
+
+#############################################################
+
+	global		_isp_cas2_finish
+_isp_cas2_finish:
+	btst		&0x1,EXC_OPWORD(%a6)
+	bne.b		cas2_finish_l
+
+	mov.w		EXC_CC(%a6),%cc		# load old ccodes
+	cmp.w		%d0,%d2
+	bne.b		cas2_finish_w_save
+	cmp.w		%d1,%d3
+cas2_finish_w_save:
+	mov.w		%cc,EXC_CC(%a6)		# save new ccodes
+
+	tst.b		%d4			# update compare reg?
+	bne.b		cas2_finish_w_done	# no
+
+	mov.w		DC2(%a6),%d3		# fetch Dc2
+	mov.w		%d1,(2+EXC_DREGS,%a6,%d3.w*4) # store new Compare2 Op
+
+	mov.w		DC1(%a6),%d2		# fetch Dc1
+	mov.w		%d0,(2+EXC_DREGS,%a6,%d2.w*4) # store new Compare1 Op
+
+cas2_finish_w_done:
+	btst		&0x5,EXC_ISR(%a6)
+	sne		%d2
+	mov.l		%d2,%d0			# pass mode
+	sf		%d1			# pass size
+	mov.l		ADDR1(%a6),%a0		# pass ADDR1
+	bsr.l		_real_unlock_page	# unlock page
+
+	mov.l		%d2,%d0			# pass mode
+	sf		%d1			# pass size
+	mov.l		ADDR2(%a6),%a0		# pass ADDR2
+	bsr.l		_real_unlock_page	# unlock page
+	rts
+
+cas2_finish_l:
+	mov.w		EXC_CC(%a6),%cc		# load old ccodes
+	cmp.l		%d0,%d2
+	bne.b		cas2_finish_l_save
+	cmp.l		%d1,%d3
+cas2_finish_l_save:
+	mov.w		%cc,EXC_CC(%a6)		# save new ccodes
+
+	tst.b		%d4			# update compare reg?
+	bne.b		cas2_finish_l_done	# no
+
+	mov.w		DC2(%a6),%d3		# fetch Dc2
+	mov.l		%d1,(EXC_DREGS,%a6,%d3.w*4) # store new Compare2 Op
+
+	mov.w		DC1(%a6),%d2		# fetch Dc1
+	mov.l		%d0,(EXC_DREGS,%a6,%d2.w*4) # store new Compare1 Op
+
+cas2_finish_l_done:
+	btst		&0x5,EXC_ISR(%a6)
+	sne		%d2
+	mov.l		%d2,%d0			# pass mode
+	st		%d1			# pass size
+	mov.l		ADDR1(%a6),%a0		# pass ADDR1
+	bsr.l		_real_unlock_page	# unlock page
+
+	mov.l		%d2,%d0			# pass mode
+	st		%d1			# pass size
+	mov.l		ADDR2(%a6),%a0		# pass ADDR2
+	bsr.l		_real_unlock_page	# unlock page
+	rts
+
+########
+	global		cr_cas2
+cr_cas2:
+	mov.l		EXC_TEMP+0x4(%a6),%d0
+	bra.w		_compandset2
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_compandset(): routine to emulate cas w/ misaligned <ea>	#
+#		       (internal to package)				#
+#	_isp_cas_finish(): routine called when cas emulation completes	#
+#			   (external and internal to package)		#
+#	_isp_cas_restart(): restart cas emulation after a fault		#
+#			    (external to package)			#
+#	_isp_cas_terminate(): create access error stack frame on fault	#
+#			      (external and internal to package)	#
+#	_isp_cas_inrange(): checks whether instr address is within range	#
+#			    of the core cas/cas2 emulation code		#
+#			    (external to package)			#
+#									#
+# XREF ****************************************************************	#
+#	_calc_ea(): calculate effective address				#
+#									#
+# INPUT ***************************************************************	#
+# compandset():								#
+#	none								#
+# _isp_cas_restart():							#
+#	d6 = previous sfc/dfc						#
+# _isp_cas_finish():							#
+# _isp_cas_terminate():							#
+#	a0 = failing address						#
+#	d0 = FSLW							#
+#	d6 = previous sfc/dfc						#
+# _isp_cas_inrange():							#
+#	a0 = instruction address to be checked				#
+#									#
+# OUTPUT **************************************************************	#
+# compandset():								#
+#		none							#
+# _isp_cas_restart():							#
+#	a0 = effective address						#
+#	d7 = word or longword flag					#
+# _isp_cas_finish():							#
+#	a0 = effective address						#
+# _isp_cas_terminate():							#
+#	initial register set before emulation exception			#
+# _isp_cas_inrange():							#
+#	d0 = 0 => in range; -1 => out of range				#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+# compandset():								#
+#	First, calculate the effective address. Then, decode the	#
+# instruction word and fetch the "compare" (DC) and "update" (Du)	#
+# operands.								#
+#	Next, call the external routine _real_lock_page() so that the	#
+# operating system can keep this page from being paged out while we're	#
+# in this routine. If this call fails, jump to _cas_terminate2().	#
+#	The routine then branches to _real_cas(). This external routine	#
+# that actually emulates cas can be supplied by the external os or	#
+# made to point directly back into the 060ISP which has a routine for	#
+# this purpose.								#
+#									#
+# _isp_cas_finish():							#
+#	Either way, after emulation, the package is re-entered at	#
+# _isp_cas_finish(). This routine re-compares the operands in order to	#
+# set the condition codes. Finally, these routines will call		#
+# _real_unlock_page() in order to unlock the pages that were previously	#
+# locked.								#
+#									#
+# _isp_cas_restart():							#
+#	This routine can be entered from an access error handler where	#
+# the emulation sequence should be re-started from the beginning.	#
+#									#
+# _isp_cas_terminate():							#
+#	This routine can be entered from an access error handler where	#
+# an emulation operand access failed and the operating system would	#
+# like an access error stack frame created instead of the current	#
+# unimplemented integer instruction frame.				#
+#	Also, the package enters here if a call to _real_lock_page()	#
+# fails.								#
+#									#
+# _isp_cas_inrange():							#
+#	Checks to see whether the instruction address passed to it in	#
+# a0 is within the software package cas/cas2 emulation routines. This	#
+# can be helpful for an operating system to determine whether an access	#
+# error during emulation was due to a cas/cas2 emulation access.	#
+#									#
+#########################################################################
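+
+# The _isp_cas_inrange() check described above amounts to a bounds test
+# against the _CASLO/_CASHI labels that bracket the core emulation code.
+# A hedged C sketch (ours); exact boundary inclusivity is our assumption.
+#
+#	extern char _CASLO[], _CASHI[];	/* bracket the cas/cas2 core code */
+#
+#	static int isp_cas_inrange(const char *pc)
+#	{
+#		if (pc >= _CASLO && pc <= _CASHI)
+#			return 0;	/* in range */
+#		return -1;		/* out of range */
+#	}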
+
+set DC,		EXC_TEMP+0x8
+set ADDR,	EXC_TEMP+0x4
+
+	global		_compandset
+_compandset:
+	btst		&0x1,EXC_OPWORD(%a6)	# word or long operation?
+	bne.b		compandsetl		# long
+
+compandsetw:
+	movq.l		&0x2,%d0		# size = 2 bytes
+	bsr.l		_calc_ea		# a0 = calculated <ea>
+	mov.l		%a0,ADDR(%a6)		# save <ea> for possible restart
+	sf		%d7			# clear d7 for word size
+	bra.b		compandsetfetch
+
+compandsetl:
+	movq.l		&0x4,%d0		# size = 4 bytes
+	bsr.l		_calc_ea		# a0 = calculated <ea>
+	mov.l		%a0,ADDR(%a6)		# save <ea> for possible restart
+	st		%d7			# set d7 for longword size
+
+compandsetfetch:
+	mov.w		EXC_EXTWORD(%a6),%d0	# fetch cas extension word
+	mov.l		%d0,%d1			# make a copy
+
+	lsr.w		&0x6,%d0
+	andi.w		&0x7,%d0		# extract Du
+	mov.l		(EXC_DREGS,%a6,%d0.w*4),%d2 # get update operand
+
+	andi.w		&0x7,%d1		# extract Dc
+	mov.l		(EXC_DREGS,%a6,%d1.w*4),%d4 # get compare operand
+	mov.w		%d1,DC(%a6)		# save Dc
+
+	btst		&0x5,EXC_ISR(%a6)	# which mode for exception?
+	sne		%d6			# set on supervisor mode
+
+	mov.l		%a0,%a2			# save temporarily
+	mov.l		%d7,%d1			# pass size
+	mov.l		%d6,%d0			# pass mode
+	bsr.l		_real_lock_page		# lock page
+	tst.l		%d0			# did error occur?
+	bne.w		_cas_terminate2		# yes, clean up the mess
+	mov.l		%a2,%a0			# pass addr in a0
+
+	bra.l		_real_cas
+
+########
+	global		_isp_cas_finish
+_isp_cas_finish:
+	btst		&0x1,EXC_OPWORD(%a6)
+	bne.b		cas_finish_l
+
+# just do the compare again since it's faster than saving the ccodes
+# from the locked routine...
+cas_finish_w:
+	mov.w		EXC_CC(%a6),%cc		# restore cc
+	cmp.w		%d0,%d4			# do word compare
+	mov.w		%cc,EXC_CC(%a6)		# save cc
+
+	tst.b		%d1			# update compare reg?
+	bne.b		cas_finish_w_done	# no
+
+	mov.w		DC(%a6),%d3
+	mov.w		%d0,(EXC_DREGS+2,%a6,%d3.w*4) # Dc = destination
+
+cas_finish_w_done:
+	mov.l		ADDR(%a6),%a0		# pass addr
+	sf		%d1			# pass size
+	btst		&0x5,EXC_ISR(%a6)
+	sne		%d0			# pass mode
+	bsr.l		_real_unlock_page	# unlock page
+	rts
+
+# just do the compare again since it's faster than saving the ccodes
+# from the locked routine...
+cas_finish_l:
+	mov.w		EXC_CC(%a6),%cc		# restore cc
+	cmp.l		%d0,%d4			# do longword compare
+	mov.w		%cc,EXC_CC(%a6)		# save cc
+
+	tst.b		%d1			# update compare reg?
+	bne.b		cas_finish_l_done	# no
+
+	mov.w		DC(%a6),%d3
+	mov.l		%d0,(EXC_DREGS,%a6,%d3.w*4) # Dc = destination
+
+cas_finish_l_done:
+	mov.l		ADDR(%a6),%a0		# pass addr
+	st		%d1			# pass size
+	btst		&0x5,EXC_ISR(%a6)
+	sne		%d0			# pass mode
+	bsr.l		_real_unlock_page	# unlock page
+	rts
+
+########
+
+	global		_isp_cas_restart
+_isp_cas_restart:
+	mov.l		%d6,%sfc		# restore previous sfc
+	mov.l		%d6,%dfc		# restore previous dfc
+
+	cmpi.b		EXC_OPWORD+1(%a6),&0xfc	# cas or cas2?
+	beq.l		cr_cas2			# cas2
+cr_cas:
+	mov.l		ADDR(%a6),%a0		# load <ea>
+	btst		&0x1,EXC_OPWORD(%a6)	# word or long operation?
+	sne		%d7			# set d7 accordingly
+	bra.w		compandsetfetch
+
+########
+
+# At this stage, it would be nice if d0 held the FSLW.
+	global		_isp_cas_terminate
+_isp_cas_terminate:
+	mov.l		%d6,%sfc		# restore previous sfc
+	mov.l		%d6,%dfc		# restore previous dfc
+
+	global		_cas_terminate2
+_cas_terminate2:
+	mov.l		%a0,%a2			# copy failing addr to a2
+
+	mov.l		%d0,-(%sp)
+	bsr.l		isp_restore		# restore An (if ()+ or -())
+	mov.l		(%sp)+,%d0
+
+	addq.l		&0x4,%sp		# remove sub return addr
+	subq.l		&0x8,%sp		# make room for bigger stack
+	subq.l		&0x8,%a6		# shift frame ptr down, too
+	mov.l		&26,%d1			# want to move 51 longwords
+	lea		0x8(%sp),%a0		# get address of old stack
+	lea		0x0(%sp),%a1		# get address of new stack
+cas_term_cont:
+	mov.l		(%a0)+,(%a1)+		# move a longword
+	dbra.w		%d1,cas_term_cont	# keep going
+
+	mov.w		&0x4008,EXC_IVOFF(%a6)	# put new stk fmt, voff
+	mov.l		%a2,EXC_IVOFF+0x2(%a6)	# put faulting addr on stack
+	mov.l		%d0,EXC_IVOFF+0x6(%a6)	# put FSLW on stack
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore user regs
+	unlk		%a6			# unlink stack frame
+	bra.l		_real_access
+
+########
+
+	global		_isp_cas_inrange
+_isp_cas_inrange:
+	clr.l		%d0			# clear return result
+	lea		_CASHI(%pc),%a1		# load end of CAS core code
+	cmp.l		%a1,%a0			# is PC in range?
+	blt.b		cin_no			# no
+	lea		_CASLO(%pc),%a1		# load begin of CAS core code
+	cmp.l		%a0,%a1			# is PC in range?
+	blt.b		cin_no			# no
+	rts					# yes; return d0 = 0
+cin_no:
+	mov.l		&-0x1,%d0		# out of range; return d0 = -1
+	rts
+
+#################################################################
+#################################################################
+#################################################################
+# This is the start of the cas and cas2 "core" emulation code.	#
+# This is the section that may need to be replaced by the host	#
+# OS if it is too operating system-specific.			#
+# Please refer to the package documentation to see how to	#
+# "replace" this section, if necessary.				#
+#################################################################
+#################################################################
+#################################################################
+
+#       ######      ##      ######     ####
+#       #	   #  #     #         #    #
+#	#	  ######    ######        #
+#	#	  #    #         #      #
+#       ######    #    #    ######    ######
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_isp_cas2(): "core" emulation code for the cas2 instruction	#
+#									#
+# XREF ****************************************************************	#
+#	_isp_cas2_finish() - only exit point for this emulation code;	#
+#			     do clean-up; calculate ccodes; store	#
+#			     Compare Ops if appropriate.		#
+#									#
+# INPUT ***************************************************************	#
+#	*see chart below*						#
+#									#
+# OUTPUT **************************************************************	#
+#	*see chart below*						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	(1) Make several copies of the effective address.		#
+#	(2) Save current SR; Then mask off all maskable interrupts.	#
+#	(3) Save current SFC/DFC (ASSUMED TO BE EQUAL!!!); Then set	#
+#	    according to whether exception occurred in user or		#
+#	    supervisor mode.						#
+#	(4) Use "plpaw" instruction to pre-load ATC with effective	#
+#	    address page(s). THIS SHOULD NOT FAULT!!! The relevant	#
+#	    page(s) should have already been made resident prior to	#
+#	    entering this routine.					#
+#	(5) Push the operand lines from the cache w/ "cpushl".		#
+#	    In the 68040, this was done within the locked region. In	#
+#	    the 68060, it is done outside of the locked region.		#
+#	(6) Use "plpar" instruction to do a re-load of ATC entries for	#
+#	    ADDR1 since ADDR2 entries may have pushed ADDR1 out of the	#
+#	    ATC.							#
+#	(7) Pre-fetch the core emulation instructions by executing	#
+#	    one branch within each physical line (16 bytes) of the code	#
+#	    before actually executing the code.				#
+#	(8) Load the BUSCR w/ the bus lock value.			#
+#	(9) Fetch the source operands using "moves".			#
+#	(10)Do the compares. If both equal, go to step (13).		#
+#	(11)Unequal. No update occurs. But, we do write the DST1 op	#
+#	    back to itself (as w/ the '040) so we can gracefully unlock	#
+#	    the bus (and assert LOCKE*) using BUSCR and the final move.	#
+#	(12)Exit.							#
+#	(13)Write update operand to the DST locations. Use BUSCR to	#
+#	    assert LOCKE* for the final write operation.		#
+#	(14)Exit.							#
+#									#
+#	The algorithm is actually implemented slightly differently	#
+# depending on the size of the operation and the misalignment of the	#
+# operands. A misaligned operand must be written in aligned chunks or	#
+# else the BUSCR register control gets confused.			#
+#									#
+#########################################################################
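+
+# For orientation only (ours, not Motorola's): ignoring the bus-locking and
+# cache details, the locked sequences below implement the architectural
+# cas2 effect sketched here in C for the longword case.
+#
+#	#include <stdint.h>
+#
+#	/* returns nonzero if memory was updated (the d4 flag on exit) */
+#	static int cas2_effect(uint32_t *addr1, uint32_t *addr2,
+#			       uint32_t cmp1, uint32_t cmp2,
+#			       uint32_t upd1, uint32_t upd2)
+#	{
+#		if (*addr1 == cmp1 && *addr2 == cmp2) {
+#			*addr2 = upd2;
+#			*addr1 = upd1;
+#			return 1;
+#		}
+#		/* no update: the real code still rewrites DEST1 to itself so
+#		 * the bus can be unlocked with an aligned LOCKE* write */
+#		return 0;
+#	}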
+
+#################################################################
+# THIS IS THE STATE OF THE INTEGER REGISTER FILE UPON		#
+# ENTERING _isp_cas2().						#
+#								#
+# D0 = xxxxxxxx							#
+# D1 = xxxxxxxx							#
+# D2 = cmp operand 1						#
+# D3 = cmp operand 2						#
+# D4 = update oper 1						#
+# D5 = update oper 2						#
+# D6 = 'xxxxxxff if supervisor mode; 'xxxxxx00 if user mode	#
+# D7 = 'xxxxxxff if longword operation; 'xxxxxx00 if word	#
+# A0 = ADDR1							#
+# A1 = ADDR2							#
+# A2 = xxxxxxxx							#
+# A3 = xxxxxxxx							#
+# A4 = xxxxxxxx							#
+# A5 = xxxxxxxx							#
+# A6 = frame pointer						#
+# A7 = stack pointer						#
+#################################################################
+
+#	align		0x1000
+# beginning label used by _isp_cas_inrange()
+	global		_CASLO
+_CASLO:
+
+	global		_isp_cas2
+_isp_cas2:
+	tst.b		%d6			# user or supervisor mode?
+	bne.b		cas2_supervisor		# supervisor
+cas2_user:
+	movq.l		&0x1,%d0		# load user data fc
+	bra.b		cas2_cont
+cas2_supervisor:
+	movq.l		&0x5,%d0		# load supervisor data fc
+cas2_cont:
+	tst.b		%d7			# word or longword?
+	beq.w		cas2w			# word
+
+####
+cas2l:
+	mov.l		%a0,%a2			# copy ADDR1
+	mov.l		%a1,%a3			# copy ADDR2
+	mov.l		%a0,%a4			# copy ADDR1
+	mov.l		%a1,%a5			# copy ADDR2
+
+	addq.l		&0x3,%a4		# ADDR1+3
+	addq.l		&0x3,%a5		# ADDR2+3
+	mov.l		%a2,%d1			# ADDR1
+
+# mask interrupts levels 0-6. save old mask value.
+	mov.w		%sr,%d7			# save current SR
+	ori.w		&0x0700,%sr		# inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+	movc		%sfc,%d6		# save old SFC/DFC
+	movc		%d0,%sfc		# store new SFC
+	movc		%d0,%dfc		# store new DFC
+
+# pre-load the operand ATC. no page faults should occur here because
+# _real_lock_page() should have taken care of this.
+	plpaw		(%a2)			# load atc for ADDR1
+	plpaw		(%a4)			# load atc for ADDR1+3
+	plpaw		(%a3)			# load atc for ADDR2
+	plpaw		(%a5)			# load atc for ADDR2+3
+
+# push the operand lines from the cache if they exist.
+	cpushl		%dc,(%a2)		# push line for ADDR1
+	cpushl		%dc,(%a4)		# push line for ADDR1+3
+	cpushl		%dc,(%a3)		# push line for ADDR2
+	cpushl		%dc,(%a5)		# push line for ADDR2+3
+
+	mov.l		%d1,%a2			# ADDR1
+	addq.l		&0x3,%d1
+	mov.l		%d1,%a4			# ADDR1+3
+# if ADDR1 was ATC resident before the above "plpaw" instructions were
+# executed, and it was the next entry scheduled for replacement, and ADDR2
+# shares the same set, then the "plpaw" for ADDR2 can push the ADDR1
+# entries from the ATC. so, we do a second set of "plpa"s.
+	plpar		(%a2)			# load atc for ADDR1
+	plpar		(%a4)			# load atc for ADDR1+3
+
+# load the BUSCR values.
+	mov.l		&0x80000000,%a2		# assert LOCK* buscr value
+	mov.l		&0xa0000000,%a3		# assert LOCKE* buscr value
+	mov.l		&0x00000000,%a4		# buscr unlock value
+
+# there are three possible mis-aligned cases for longword cas2. they
+# are separated because the final write which asserts LOCKE* must
+# be aligned.
+	mov.l		%a0,%d0			# is ADDR1 misaligned?
+	andi.b		&0x3,%d0
+	beq.b		CAS2L_ENTER		# no
+	cmpi.b		%d0,&0x2
+	beq.w		CAS2L2_ENTER		# yes; word misaligned
+	bra.w		CAS2L3_ENTER		# yes; byte misaligned
+
+#
+# D0 = dst operand 1 <-
+# D1 = dst operand 2 <-
+# D2 = cmp operand 1
+# D3 = cmp operand 2
+# D4 = update oper 1
+# D5 = update oper 2
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR1
+# A1 = ADDR2
+# A2 = bus LOCK*  value
+# A3 = bus LOCKE* value
+# A4 = bus unlock value
+# A5 = xxxxxxxx
+#
+	align		0x10
+CAS2L_START:
+	movc		%a2,%buscr		# assert LOCK*
+	movs.l		(%a1),%d1		# fetch Dest2[31:0]
+	movs.l		(%a0),%d0		# fetch Dest1[31:0]
+	bra.b		CAS2L_CONT
+CAS2L_ENTER:
+	bra.b		~+16
+
+CAS2L_CONT:
+	cmp.l		%d0,%d2			# Dest1 - Compare1
+	bne.b		CAS2L_NOUPDATE
+	cmp.l		%d1,%d3			# Dest2 - Compare2
+	bne.b		CAS2L_NOUPDATE
+	movs.l		%d5,(%a1)		# Update2[31:0] -> DEST2
+	bra.b		CAS2L_UPDATE
+	bra.b		~+16
+
+CAS2L_UPDATE:
+	movc		%a3,%buscr		# assert LOCKE*
+	movs.l		%d4,(%a0)		# Update1[31:0] -> DEST1
+	movc		%a4,%buscr		# unlock the bus
+	bra.b		cas2l_update_done
+	bra.b		~+16
+
+CAS2L_NOUPDATE:
+	movc		%a3,%buscr		# assert LOCKE*
+	movs.l		%d0,(%a0)		# Dest1[31:0] -> DEST1
+	movc		%a4,%buscr		# unlock the bus
+	bra.b		cas2l_noupdate_done
+	bra.b		~+16
+
+CAS2L_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CAS2L_START
+
+####
+
+#################################################################
+# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON	#
+# CALLING _isp_cas2_finish().					#
+#								#
+# D0 = destination[31:0] operand 1				#
+# D1 = destination[31:0] operand 2				#
+# D2 = cmp[31:0] operand 1					#
+# D3 = cmp[31:0] operand 2					#
+# D4 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required	#
+# D5 = xxxxxxxx							#
+# D6 = xxxxxxxx							#
+# D7 = xxxxxxxx							#
+# A0 = xxxxxxxx							#
+# A1 = xxxxxxxx							#
+# A2 = xxxxxxxx							#
+# A3 = xxxxxxxx							#
+# A4 = xxxxxxxx							#
+# A5 = xxxxxxxx							#
+# A6 = frame pointer						#
+# A7 = stack pointer						#
+#################################################################
+
+cas2l_noupdate_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	sf		%d4			# indicate no update was done
+	bra.l		_isp_cas2_finish
+
+cas2l_update_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	st		%d4			# indicate update was done
+	bra.l		_isp_cas2_finish
+####
+
+	align		0x10
+CAS2L2_START:
+	movc		%a2,%buscr		# assert LOCK*
+	movs.l		(%a1),%d1		# fetch Dest2[31:0]
+	movs.l		(%a0),%d0		# fetch Dest1[31:0]
+	bra.b		CAS2L2_CONT
+CAS2L2_ENTER:
+	bra.b		~+16
+
+CAS2L2_CONT:
+	cmp.l		%d0,%d2			# Dest1 - Compare1
+	bne.b		CAS2L2_NOUPDATE
+	cmp.l		%d1,%d3			# Dest2 - Compare2
+	bne.b		CAS2L2_NOUPDATE
+	movs.l		%d5,(%a1)		# Update2[31:0] -> Dest2
+	bra.b		CAS2L2_UPDATE
+	bra.b		~+16
+
+CAS2L2_UPDATE:
+	swap		%d4			# get Update1[31:16]
+	movs.w		%d4,(%a0)+		# Update1[31:16] -> DEST1
+	movc		%a3,%buscr		# assert LOCKE*
+	swap		%d4			# get Update1[15:0]
+	bra.b		CAS2L2_UPDATE2
+	bra.b		~+16
+
+CAS2L2_UPDATE2:
+	movs.w		%d4,(%a0)		# Update1[15:0] -> DEST1+0x2
+	movc		%a4,%buscr		# unlock the bus
+	bra.w		cas2l_update_done
+	nop
+	bra.b		~+16
+
+CAS2L2_NOUPDATE:
+	swap		%d0			# get Dest1[31:16]
+	movs.w		%d0,(%a0)+		# Dest1[31:16] -> DEST1
+	movc		%a3,%buscr		# assert LOCKE*
+	swap		%d0			# get Dest1[15:0]
+	bra.b		CAS2L2_NOUPDATE2
+	bra.b		~+16
+
+CAS2L2_NOUPDATE2:
+	movs.w		%d0,(%a0)		# Dest1[15:0] -> DEST1+0x2
+	movc		%a4,%buscr		# unlock the bus
+	bra.w		cas2l_noupdate_done
+	nop
+	bra.b		~+16
+
+CAS2L2_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CAS2L2_START
+
+#################################
+
+	align		0x10
+CAS2L3_START:
+	movc		%a2,%buscr		# assert LOCK*
+	movs.l		(%a1),%d1		# fetch Dest2[31:0]
+	movs.l		(%a0),%d0		# fetch Dest1[31:0]
+	bra.b		CAS2L3_CONT
+CAS2L3_ENTER:
+	bra.b		~+16
+
+CAS2L3_CONT:
+	cmp.l		%d0,%d2			# Dest1 - Compare1
+	bne.b		CAS2L3_NOUPDATE
+	cmp.l		%d1,%d3			# Dest2 - Compare2
+	bne.b		CAS2L3_NOUPDATE
+	movs.l		%d5,(%a1)		# Update2[31:0] -> DEST2
+	bra.b		CAS2L3_UPDATE
+	bra.b		~+16
+
+CAS2L3_UPDATE:
+	rol.l		&0x8,%d4		# get Update1[31:24]
+	movs.b		%d4,(%a0)+		# Update1[31:24] -> DEST1
+	swap		%d4			# get Update1[23:8]
+	movs.w		%d4,(%a0)+		# Update1[23:8] -> DEST1+0x1
+	bra.b		CAS2L3_UPDATE2
+	bra.b		~+16
+
+CAS2L3_UPDATE2:
+	rol.l		&0x8,%d4		# get Update1[7:0]
+	movc		%a3,%buscr		# assert LOCKE*
+	movs.b		%d4,(%a0)		# Update1[7:0] -> DEST1+0x3
+	bra.b		CAS2L3_UPDATE3
+	nop
+	bra.b		~+16
+
+CAS2L3_UPDATE3:
+	movc		%a4,%buscr		# unlock the bus
+	bra.w		cas2l_update_done
+	nop
+	nop
+	nop
+	bra.b		~+16
+
+CAS2L3_NOUPDATE:
+	rol.l		&0x8,%d0		# get Dest1[31:24]
+	movs.b		%d0,(%a0)+		# Dest1[31:24] -> DEST1
+	swap		%d0			# get Dest1[23:8]
+	movs.w		%d0,(%a0)+		# Dest1[23:8] -> DEST1+0x1
+	bra.b		CAS2L3_NOUPDATE2
+	bra.b		~+16
+
+CAS2L3_NOUPDATE2:
+	rol.l		&0x8,%d0		# get Dest1[7:0]
+	movc		%a3,%buscr		# assert LOCKE*
+	movs.b		%d0,(%a0)		# Dest1[7:0] -> DEST1+0x3
+	bra.b		CAS2L3_NOUPDATE3
+	nop
+	bra.b		~+16
+
+CAS2L3_NOUPDATE3:
+	movc		%a4,%buscr		# unlock the bus
+	bra.w		cas2l_noupdate_done
+	nop
+	nop
+	nop
+	bra.b		~+14
+
+CAS2L3_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.w		CAS2L3_START
+
+#############################################################
+#############################################################
+
+cas2w:
+	mov.l		%a0,%a2			# copy ADDR1
+	mov.l		%a1,%a3			# copy ADDR2
+	mov.l		%a0,%a4			# copy ADDR1
+	mov.l		%a1,%a5			# copy ADDR2
+
+	addq.l		&0x1,%a4		# ADDR1+1
+	addq.l		&0x1,%a5		# ADDR2+1
+	mov.l		%a2,%d1			# ADDR1
+
+# mask interrupt levels 0-6. save old mask value.
+	mov.w		%sr,%d7			# save current SR
+	ori.w		&0x0700,%sr		# inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+	movc		%sfc,%d6		# save old SFC/DFC
+	movc		%d0,%sfc		# store new SFC
+	movc		%d0,%dfc		# store new DFC
+
+# pre-load the operand ATC. no page faults should occur because
+# _real_lock_page() should have taken care of this.
+	plpaw		(%a2)			# load atc for ADDR1
+	plpaw		(%a4)			# load atc for ADDR1+1
+	plpaw		(%a3)			# load atc for ADDR2
+	plpaw		(%a5)			# load atc for ADDR2+1
+
+# push the operand cache lines from the cache if they exist.
+	cpushl		%dc,(%a2)		# push line for ADDR1
+	cpushl		%dc,(%a4)		# push line for ADDR1+1
+	cpushl		%dc,(%a3)		# push line for ADDR2
+	cpushl		%dc,(%a5)		# push line for ADDR2+1
+
+	mov.l		%d1,%a2			# ADDR1
+	addq.l		&0x3,%d1
+	mov.l		%d1,%a4			# ADDR1+3
+# if ADDR1 was ATC resident before the above "plpaw" instructions were
+# executed, and it was the next entry scheduled for replacement, and ADDR2
+# shares the same set, then the "plpaw" for ADDR2 can push the ADDR1
+# entries from the ATC. so, we do a second set of "plpa"s.
+	plpar		(%a2)			# load atc for ADDR1
+	plpar		(%a4)			# load atc for ADDR1+3
+
+# load the BUSCR values.
+	mov.l		&0x80000000,%a2		# assert LOCK* buscr value
+	mov.l		&0xa0000000,%a3		# assert LOCKE* buscr value
+	mov.l		&0x00000000,%a4		# buscr unlock value
+
+# there are two possible mis-aligned cases for word cas2. they
+# are separated because the final write which asserts LOCKE* must
+# be aligned.
+	mov.l		%a0,%d0			# is ADDR1 misaligned?
+	btst		&0x0,%d0
+	bne.w		CAS2W2_ENTER		# yes
+	bra.b		CAS2W_ENTER		# no
+
+#
+# D0 = dst operand 1 <-
+# D1 = dst operand 2 <-
+# D2 = cmp operand 1
+# D3 = cmp operand 2
+# D4 = update oper 1
+# D5 = update oper 2
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR1
+# A1 = ADDR2
+# A2 = bus LOCK*  value
+# A3 = bus LOCKE* value
+# A4 = bus unlock value
+# A5 = xxxxxxxx
+#
+	align		0x10
+CAS2W_START:
+	movc		%a2,%buscr		# assert LOCK*
+	movs.w		(%a1),%d1		# fetch Dest2[15:0]
+	movs.w		(%a0),%d0		# fetch Dest1[15:0]
+	bra.b		CAS2W_CONT2
+CAS2W_ENTER:
+	bra.b		~+16
+
+CAS2W_CONT2:
+	cmp.w		%d0,%d2			# Dest1 - Compare1
+	bne.b		CAS2W_NOUPDATE
+	cmp.w		%d1,%d3			# Dest2 - Compare2
+	bne.b		CAS2W_NOUPDATE
+	movs.w		%d5,(%a1)		# Update2[15:0] -> DEST2
+	bra.b		CAS2W_UPDATE
+	bra.b		~+16
+
+CAS2W_UPDATE:
+	movc		%a3,%buscr		# assert LOCKE*
+	movs.w		%d4,(%a0)		# Update1[15:0] -> DEST1
+	movc		%a4,%buscr		# unlock the bus
+	bra.b		cas2w_update_done
+	bra.b		~+16
+
+CAS2W_NOUPDATE:
+	movc		%a3,%buscr		# assert LOCKE*
+	movs.w		%d0,(%a0)		# Dest1[15:0] -> DEST1
+	movc		%a4,%buscr		# unlock the bus
+	bra.b		cas2w_noupdate_done
+	bra.b		~+16
+
+CAS2W_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CAS2W_START
+
+####
+
+#################################################################
+# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON	#
+# CALLING _isp_cas2_finish().					#
+#								#
+# D0 = destination[15:0] operand 1				#
+# D1 = destination[15:0] operand 2				#
+# D2 = cmp[15:0] operand 1					#
+# D3 = cmp[15:0] operand 2					#
+# D4 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required	#
+# D5 = xxxxxxxx							#
+# D6 = xxxxxxxx							#
+# D7 = xxxxxxxx							#
+# A0 = xxxxxxxx							#
+# A1 = xxxxxxxx							#
+# A2 = xxxxxxxx							#
+# A3 = xxxxxxxx							#
+# A4 = xxxxxxxx							#
+# A5 = xxxxxxxx							#
+# A6 = frame pointer						#
+# A7 = stack pointer						#
+#################################################################
+
+cas2w_noupdate_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	sf		%d4			# indicate no update was done
+	bra.l		_isp_cas2_finish
+
+cas2w_update_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	st		%d4			# indicate update was done
+	bra.l		_isp_cas2_finish
+####
+
+	align		0x10
+CAS2W2_START:
+	movc		%a2,%buscr		# assert LOCK*
+	movs.w		(%a1),%d1		# fetch Dest2[15:0]
+	movs.w		(%a0),%d0		# fetch Dest1[15:0]
+	bra.b		CAS2W2_CONT2
+CAS2W2_ENTER:
+	bra.b		~+16
+
+CAS2W2_CONT2:
+	cmp.w		%d0,%d2			# Dest1 - Compare1
+	bne.b		CAS2W2_NOUPDATE
+	cmp.w		%d1,%d3			# Dest2 - Compare2
+	bne.b		CAS2W2_NOUPDATE
+	movs.w		%d5,(%a1)		# Update2[15:0] -> DEST2
+	bra.b		CAS2W2_UPDATE
+	bra.b		~+16
+
+CAS2W2_UPDATE:
+	ror.l		&0x8,%d4		# get Update1[15:8]
+	movs.b		%d4,(%a0)+		# Update1[15:8] -> DEST1
+	movc		%a3,%buscr		# assert LOCKE*
+	rol.l		&0x8,%d4		# get Update1[7:0]
+	bra.b		CAS2W2_UPDATE2
+	bra.b		~+16
+
+CAS2W2_UPDATE2:
+	movs.b		%d4,(%a0)		# Update1[7:0] -> DEST1+0x1
+	movc		%a4,%buscr		# unlock the bus
+	bra.w		cas2w_update_done
+	nop
+	bra.b		~+16
+
+CAS2W2_NOUPDATE:
+	ror.l		&0x8,%d0		# get Dest1[15:8]
+	movs.b		%d0,(%a0)+		# Dest1[15:8] -> DEST1
+	movc		%a3,%buscr		# assert LOCKE*
+	rol.l		&0x8,%d0		# get Dest1[7:0]
+	bra.b		CAS2W2_NOUPDATE2
+	bra.b		~+16
+
+CAS2W2_NOUPDATE2:
+	movs.b		%d0,(%a0)		# Dest1[7:0] -> DEST1+0x1
+	movc		%a4,%buscr		# unlock the bus
+	bra.w		cas2w_noupdate_done
+	nop
+	bra.b		~+16
+
+CAS2W2_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CAS2W2_START
+
+#       ######      ##      ######
+#       #	   #  #     #
+#	#	  ######    ######
+#	#	  #    #         #
+#       ######    #    #    ######
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_isp_cas(): "core" emulation code for the cas instruction	#
+#									#
+# XREF ****************************************************************	#
+#	_isp_cas_finish() - only exit point for this emulation code;	#
+#			    do clean-up					#
+#									#
+# INPUT ***************************************************************	#
+#	*see entry chart below*						#
+#									#
+# OUTPUT **************************************************************	#
+#	*see exit chart below*						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	(1) Make several copies of the effective address.		#
+#	(2) Save current SR; Then mask off all maskable interrupts.	#
+#	(3) Save current DFC/SFC (ASSUMED TO BE EQUAL!!!); Then set	#
+#	    SFC/DFC according to whether exception occurred in user or	#
+#	    supervisor mode.						#
+#	(4) Use "plpaw" instruction to pre-load ATC with efective	#
+#	    address page(s). THIS SHOULD NOT FAULT!!! The relevant	#
+#	    page(s) should have been made resident prior to entering	#
+#	    this routine.						#
+#	(5) Push the operand lines from the cache w/ "cpushl".		#
+#	    In the 68040, this was done within the locked region. In	#
+#	    the 68060, it is done outside of the locked region.		#
+#	(6) Pre-fetch the core emulation instructions by executing one	#
+#	    branch within each physical line (16 bytes) of the code	#
+#	    before actually executing the code.				#
+#	(7) Load the BUSCR with the bus lock value.			#
+#	(8) Fetch the source operand.					#
+#	(9) Do the compare. If equal, go to step (12).			#
+#	(10)Unequal. No update occurs. But, we do write the DST op back	#
+#	    to itself (as w/ the '040) so we can gracefully unlock	#
+#	    the bus (and assert LOCKE*) using BUSCR and the final move.	#
+#	(11)Exit.							#
+#	(12)Write update operand to the DST location. Use BUSCR to	#
+#	    assert LOCKE* for the final write operation.		#
+#	(13)Exit.							#
+#									#
+#	The algorithm is actually implemented slightly differently	#
+# depending on the size of the operation and the misalignment of the	#
+# operand. A misaligned operand must be written in aligned chunks or	#
+# else the BUSCR register control gets confused.			#
+#									#
+#########################################################################
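+
+# For orientation only (ours, not Motorola's): stripped of the bus-locking
+# details, the sequences below implement the cas effect sketched here in C
+# for the longword case; the word case is identical with 16-bit operands.
+#
+#	#include <stdint.h>
+#
+#	/* returns nonzero if memory was updated (the d1 flag on exit) */
+#	static int cas_effect(uint32_t *addr, uint32_t cmp, uint32_t upd,
+#			      uint32_t *dest_out)
+#	{
+#		*dest_out = *addr;	/* always handed back for ccodes/Dc */
+#		if (*dest_out == cmp) {
+#			*addr = upd;
+#			return 1;
+#		}
+#		return 0;
+#	}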
+
+#########################################################
+# THIS IS THE STATE OF THE INTEGER REGISTER FILE UPON	#
+# ENTERING _isp_cas().					#
+#							#
+# D0 = xxxxxxxx						#
+# D1 = xxxxxxxx						#
+# D2 = update operand					#
+# D3 = xxxxxxxx						#
+# D4 = compare operand					#
+# D5 = xxxxxxxx						#
+# D6 = supervisor ('xxxxxxff) or user mode ('xxxxxx00)	#
+# D7 = longword ('xxxxxxff) or word size ('xxxxxx00)	#
+# A0 = ADDR						#
+# A1 = xxxxxxxx						#
+# A2 = xxxxxxxx						#
+# A3 = xxxxxxxx						#
+# A4 = xxxxxxxx						#
+# A5 = xxxxxxxx						#
+# A6 = frame pointer					#
+# A7 = stack pointer					#
+#########################################################
+
+	global		_isp_cas
+_isp_cas:
+	tst.b		%d6			# user or supervisor mode?
+	bne.b		cas_super		# supervisor
+cas_user:
+	movq.l		&0x1,%d0		# load user data fc
+	bra.b		cas_cont
+cas_super:
+	movq.l		&0x5,%d0		# load supervisor data fc
+
+cas_cont:
+	tst.b		%d7			# word or longword?
+	bne.w		casl			# longword
+
+####
+casw:
+	mov.l		%a0,%a1			# make copy for plpaw1
+	mov.l		%a0,%a2			# make copy for plpaw2
+	addq.l		&0x1,%a2		# plpaw2 points to end of word
+
+	mov.l		%d2,%d3			# d3 = update[7:0]
+	lsr.w		&0x8,%d2		# d2 = update[15:8]
+
+# mask interrupt levels 0-6. save old mask value.
+	mov.w		%sr,%d7			# save current SR
+	ori.w		&0x0700,%sr		# inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+	movc		%sfc,%d6		# save old SFC/DFC
+	movc		%d0,%sfc		# load new sfc
+	movc		%d0,%dfc		# load new dfc
+
+# pre-load the operand ATC. no page faults should occur here because
+# _real_lock_page() should have taken care of this.
+	plpaw		(%a1)			# load atc for ADDR
+	plpaw		(%a2)			# load atc for ADDR+1
+
+# push the operand lines from the cache if they exist.
+	cpushl		%dc,(%a1)		# push dirty data
+	cpushl		%dc,(%a2)		# push dirty data
+
+# load the BUSCR values.
+	mov.l		&0x80000000,%a1		# assert LOCK* buscr value
+	mov.l		&0xa0000000,%a2		# assert LOCKE* buscr value
+	mov.l		&0x00000000,%a3		# buscr unlock value
+
+# pre-load the instruction cache for the following algorithm.
+# this will minimize the number of cycles that LOCK* will be asserted.
+	bra.b		CASW_ENTER		# start pre-loading icache
+
+#
+# D0 = dst operand <-
+# D1 = xxxxxxxx
+# D2 = update[15:8] operand
+# D3 = update[7:0]  operand
+# D4 = compare[15:0] operand
+# D5 = xxxxxxxx
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR
+# A1 = bus LOCK*  value
+# A2 = bus LOCKE* value
+# A3 = bus unlock value
+# A4 = xxxxxxxx
+# A5 = xxxxxxxx
+#
+	align		0x10
+CASW_START:
+	movc		%a1,%buscr		# assert LOCK*
+	movs.w		(%a0),%d0		# fetch Dest[15:0]
+	cmp.w		%d0,%d4			# Dest - Compare
+	bne.b		CASW_NOUPDATE
+	bra.b		CASW_UPDATE
+CASW_ENTER:
+	bra.b		~+16
+
+CASW_UPDATE:
+	movs.b		%d2,(%a0)+		# Update[15:8] -> DEST
+	movc		%a2,%buscr		# assert LOCKE*
+	movs.b		%d3,(%a0)		# Update[7:0] -> DEST+0x1
+	bra.b		CASW_UPDATE2
+	bra.b		~+16
+
+CASW_UPDATE2:
+	movc		%a3,%buscr		# unlock the bus
+	bra.b		casw_update_done
+	nop
+	nop
+	nop
+	nop
+	bra.b		~+16
+
+CASW_NOUPDATE:
+	ror.l		&0x8,%d0		# get Dest[15:8]
+	movs.b		%d0,(%a0)+		# Dest[15:8] -> DEST
+	movc		%a2,%buscr		# assert LOCKE*
+	rol.l		&0x8,%d0		# get Dest[7:0]
+	bra.b		CASW_NOUPDATE2
+	bra.b		~+16
+
+CASW_NOUPDATE2:
+	movs.b		%d0,(%a0)		# Dest[7:0] -> DEST+0x1
+	movc		%a3,%buscr		# unlock the bus
+	bra.b		casw_noupdate_done
+	nop
+	nop
+	bra.b		~+16
+
+CASW_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CASW_START
+
+#################################################################
+# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON	#
+# CALLING _isp_cas_finish().					#
+#								#
+# D0 = destination[15:0] operand				#
+# D1 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required	#
+# D2 = xxxxxxxx							#
+# D3 = xxxxxxxx							#
+# D4 = compare[15:0] operand					#
+# D5 = xxxxxxxx							#
+# D6 = xxxxxxxx							#
+# D7 = xxxxxxxx							#
+# A0 = xxxxxxxx							#
+# A1 = xxxxxxxx							#
+# A2 = xxxxxxxx							#
+# A3 = xxxxxxxx							#
+# A4 = xxxxxxxx							#
+# A5 = xxxxxxxx							#
+# A6 = frame pointer						#
+# A7 = stack pointer						#
+#################################################################
+
+casw_noupdate_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	sf		%d1			# indicate no update was done
+	bra.l		_isp_cas_finish
+
+casw_update_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	st		%d1			# indicate update was done
+	bra.l		_isp_cas_finish
+
+################
+
+# there are two possible mis-aligned cases for longword cas. they
+# are separated because the final write which asserts LOCKE* must
+# be an aligned write.
+casl:
+	mov.l		%a0,%a1			# make copy for plpaw1
+	mov.l		%a0,%a2			# make copy for plpaw2
+	addq.l		&0x3,%a2		# plpaw2 points to end of longword
+
+	mov.l		%a0,%d1			# byte or word misaligned?
+	btst		&0x0,%d1
+	bne.w		casl2			# byte misaligned
+
+	mov.l		%d2,%d3			# d3 = update[15:0]
+	swap		%d2			# d2 = update[31:16]
+
+# mask interrupts levels 0-6. save old mask value.
+	mov.w		%sr,%d7			# save current SR
+	ori.w		&0x0700,%sr		# inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+	movc		%sfc,%d6		# save old SFC/DFC
+	movc		%d0,%sfc		# load new sfc
+	movc		%d0,%dfc		# load new dfc
+
+# pre-load the operand ATC. no page faults should occur here because
+# _real_lock_page() should have taken care of this.
+	plpaw		(%a1)			# load atc for ADDR
+	plpaw		(%a2)			# load atc for ADDR+3
+
+# push the operand lines from the cache if they exist.
+	cpushl		%dc,(%a1)		# push dirty data
+	cpushl		%dc,(%a2)		# push dirty data
+
+# load the BUSCR values.
+	mov.l		&0x80000000,%a1		# assert LOCK* buscr value
+	mov.l		&0xa0000000,%a2		# assert LOCKE* buscr value
+	mov.l		&0x00000000,%a3		# buscr unlock value
+
+	bra.b		CASL_ENTER		# start pre-loading icache
+
+#
+# D0 = dst operand <-
+# D1 = xxxxxxxx
+# D2 = update[31:16] operand
+# D3 = update[15:0]  operand
+# D4 = compare[31:0] operand
+# D5 = xxxxxxxx
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR
+# A1 = bus LOCK*  value
+# A2 = bus LOCKE* value
+# A3 = bus unlock value
+# A4 = xxxxxxxx
+# A5 = xxxxxxxx
+#
+	align		0x10
+CASL_START:
+	movc		%a1,%buscr		# assert LOCK*
+	movs.l		(%a0),%d0		# fetch Dest[31:0]
+	cmp.l		%d0,%d4			# Dest - Compare
+	bne.b		CASL_NOUPDATE
+	bra.b		CASL_UPDATE
+CASL_ENTER:
+	bra.b		~+16
+
+CASL_UPDATE:
+	movs.w		%d2,(%a0)+		# Update[31:16] -> DEST
+	movc		%a2,%buscr		# assert LOCKE*
+	movs.w		%d3,(%a0)		# Update[15:0] -> DEST+0x2
+	bra.b		CASL_UPDATE2
+	bra.b		~+16
+
+CASL_UPDATE2:
+	movc		%a3,%buscr		# unlock the bus
+	bra.b		casl_update_done
+	nop
+	nop
+	nop
+	nop
+	bra.b		~+16
+
+CASL_NOUPDATE:
+	swap		%d0			# get Dest[31:16]
+	movs.w		%d0,(%a0)+		# Dest[31:16] -> DEST
+	swap		%d0			# get Dest[15:0]
+	movc		%a2,%buscr		# assert LOCKE*
+	bra.b		CASL_NOUPDATE2
+	bra.b		~+16
+
+CASL_NOUPDATE2:
+	movs.w		%d0,(%a0)		# Dest[15:0] -> DEST+0x2
+	movc		%a3,%buscr		# unlock the bus
+	bra.b		casl_noupdate_done
+	nop
+	nop
+	bra.b		~+16
+
+CASL_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CASL_START
+
+#################################################################
+# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON	#
+# CALLING _isp_cas_finish().					#
+#								#
+# D0 = destination[31:0] operand				#
+# D1 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required	#
+# D2 = xxxxxxxx							#
+# D3 = xxxxxxxx							#
+# D4 = compare[31:0] operand					#
+# D5 = xxxxxxxx							#
+# D6 = xxxxxxxx							#
+# D7 = xxxxxxxx							#
+# A0 = xxxxxxxx							#
+# A1 = xxxxxxxx							#
+# A2 = xxxxxxxx							#
+# A3 = xxxxxxxx							#
+# A4 = xxxxxxxx							#
+# A5 = xxxxxxxx							#
+# A6 = frame pointer						#
+# A7 = stack pointer						#
+#################################################################
+
+casl_noupdate_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	sf		%d1			# indicate no update was done
+	bra.l		_isp_cas_finish
+
+casl_update_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	st		%d1			# indicate update was done
+	bra.l		_isp_cas_finish
+
+#######################################
+casl2:
+	mov.l		%d2,%d5			# d5 = Update[7:0]
+	lsr.l		&0x8,%d2
+	mov.l		%d2,%d3			# d3 = Update[23:8]
+	swap		%d2			# d2 = Update[31:24]
+
+# mask interrupts levels 0-6. save old mask value.
+	mov.w		%sr,%d7			# save current SR
+	ori.w		&0x0700,%sr		# inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+	movc		%sfc,%d6		# save old SFC/DFC
+	movc		%d0,%sfc		# load new sfc
+	movc		%d0,%dfc		# load new dfc
+
+# pre-load the operand ATC. no page faults should occur here because
+# _real_lock_page() should have taken care of this already.
+	plpaw		(%a1)			# load atc for ADDR
+	plpaw		(%a2)			# load atc for ADDR+3
+
+# push the operand lines from the cache if they exist.
+	cpushl		%dc,(%a1)		# push dirty data
+	cpushl		%dc,(%a2)		# push dirty data
+
+# load the BUSCR values.
+	mov.l		&0x80000000,%a1		# assert LOCK* buscr value
+	mov.l		&0xa0000000,%a2		# assert LOCKE* buscr value
+	mov.l		&0x00000000,%a3		# buscr unlock value
+
+# pre-load the instruction cache for the following algorithm.
+# this will minimize the number of cycles that LOCK* will be asserted.
+	bra.b		CASL2_ENTER		# start pre-loading icache
+
+#
+# D0 = dst operand <-
+# D1 = xxxxxxxx
+# D2 = update[31:24] operand
+# D3 = update[23:8]  operand
+# D4 = compare[31:0] operand
+# D5 = update[7:0]  operand
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR
+# A1 = bus LOCK*  value
+# A2 = bus LOCKE* value
+# A3 = bus unlock value
+# A4 = xxxxxxxx
+# A5 = xxxxxxxx
+#
+	align		0x10
+CASL2_START:
+	movc		%a1,%buscr		# assert LOCK*
+	movs.l		(%a0),%d0		# fetch Dest[31:0]
+	cmp.l		%d0,%d4			# Dest - Compare
+	bne.b		CASL2_NOUPDATE
+	bra.b		CASL2_UPDATE
+CASL2_ENTER:
+	bra.b		~+16
+
+CASL2_UPDATE:
+	movs.b		%d2,(%a0)+		# Update[31:24] -> DEST
+	movs.w		%d3,(%a0)+		# Update[23:8] -> DEST+0x1
+	movc		%a2,%buscr		# assert LOCKE*
+	bra.b		CASL2_UPDATE2
+	bra.b		~+16
+
+CASL2_UPDATE2:
+	movs.b		%d5,(%a0)		# Update[7:0] -> DEST+0x3
+	movc		%a3,%buscr		# unlock the bus
+	bra.w		casl_update_done
+	nop
+	bra.b		~+16
+
+CASL2_NOUPDATE:
+	rol.l		&0x8,%d0		# get Dest[31:24]
+	movs.b		%d0,(%a0)+		# Dest[31:24] -> DEST
+	swap		%d0			# get Dest[23:8]
+	movs.w		%d0,(%a0)+		# Dest[23:8] -> DEST+0x1
+	bra.b		CASL2_NOUPDATE2
+	bra.b		~+16
+
+CASL2_NOUPDATE2:
+	rol.l		&0x8,%d0		# get Dest[7:0]
+	movc		%a2,%buscr		# assert LOCKE*
+	movs.b		%d0,(%a0)		# Dest[7:0] -> DEST+0x3
+	bra.b		CASL2_NOUPDATE3
+	nop
+	bra.b		~+16
+
+CASL2_NOUPDATE3:
+	movc		%a3,%buscr		# unlock the bus
+	bra.w		casl_noupdate_done
+	nop
+	nop
+	nop
+	bra.b		~+16
+
+CASL2_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CASL2_START
+
+####
+####
+# end label used by _isp_cas_inrange()
+	global		_CASHI
+_CASHI:
diff --git a/arch/m68k/ifpsp060/src/itest.S b/arch/m68k/ifpsp060/src/itest.S
new file mode 100644
index 0000000..ba4a30c
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/itest.S
@@ -0,0 +1,6386 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#############################################
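+# frame offsets (from %a6) used by the tests below:
+#   IREGS/ICCR hold the expected register image and condition codes,
+#   SREGS/SCCR capture the actual results for chkregs to compare against,
+#   TESTCTR counts the tests; EAMEM, EASTORE and DATA are operand scratch areas.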
+set	SREGS,		-64
+set	IREGS,		-128
+set	SCCR,		-130
+set	ICCR,		-132
+set	TESTCTR,	-136
+set	EAMEM,		-140
+set	EASTORE,	-144
+set	DATA,		-160
+
+#############################################
+TESTTOP:
+	bra.l		_060TESTS_
+
+start_str:
+	string		"Testing 68060 ISP started:\n"
+
+pass_str:
+	string		"passed\n"
+fail_str:
+	string		" failed\n"
+
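+# chk_test: report the result of the preceding test group.
+# a non-zero %d0 indicates a failure and %d1 holds the failing test's number.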
+	align		0x4
+chk_test:
+	tst.l		%d0
+	bne.b		test_fail
+test_pass:
+	pea		pass_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+	rts
+test_fail:
+	mov.l		%d1,-(%sp)
+	bsr.l		_print_num
+	addq.l		&0x4,%sp
+
+	pea		fail_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+	rts
+
+#############################################
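+# _060TESTS_: top-level driver. runs each group of ISP tests
+# (mul, div, cmp2/chk2, movep, effective addresses, cas, cas2) and
+# reports "passed" or the failing test number after each group.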
+_060TESTS_:
+	link		%a6,&-160
+
+	movm.l		&0x3f3c,-(%sp)
+
+	pea		start_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+### mul
+	clr.l		TESTCTR(%a6)
+	pea		mulul_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		mulul_0
+
+	bsr.l		chk_test
+
+### div
+	clr.l		TESTCTR(%a6)
+	pea		divul_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		divul_0
+
+	bsr.l		chk_test
+
+### cmp2
+	clr.l		TESTCTR(%a6)
+	pea		cmp2_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		cmp2_1
+
+	bsr.l		chk_test
+
+### movp
+	clr.l		TESTCTR(%a6)
+	pea		movp_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		movp_0
+
+	bsr.l		chk_test
+
+### ea
+	clr.l		TESTCTR(%a6)
+	pea		ea_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	mov.l		&0x2,EAMEM(%a6)
+	bsr.l		ea_0
+
+	bsr.l		chk_test
+
+### cas
+	clr.l		TESTCTR(%a6)
+	pea		cas_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		cas0
+
+	bsr.l		chk_test
+
+### cas2
+	clr.l		TESTCTR(%a6)
+	pea		cas2_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		cas20
+
+	bsr.l		chk_test
+
+###
+	movm.l		(%sp)+,&0x3cfc
+
+	unlk		%a6
+	rts
+
+#############################################
+#############################################
+
+mulul_str:
+	string		"\t64-bit multiply..."
+
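+# each test below follows the same pattern: set up the operands, record the
+# expected condition codes in ICCR and the expected register image in IREGS,
+# execute the instruction under test, capture the results in SCCR/SREGS, and
+# call chkregs to compare them (a non-zero %d0 on return means a mismatch).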
+	align		0x4
+mulul_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d1
+	mov.l		&0x99999999,%d2
+	mov.l		&0x88888888,%d3
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	clr.l		IREGS+0x8(%a6)
+	clr.l		IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x77777777,%d1
+	mov.l		&0x99999999,%d2
+	mov.l		&0x00000000,%d3
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	clr.l		IREGS+0x8(%a6)
+	clr.l		IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x00000010,%d1
+	mov.l		&0x66666666,%d2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		%d1,%d2:%d2
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000006,IREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x55555555,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x00000003,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000000,IREGS+0x8(%a6)
+	mov.l		&0xffffffff,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_4:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x40000000,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x00000004,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000001,IREGS+0x8(%a6)
+	mov.l		&0x00000000,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_5:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xffffffff,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0xffffffff,%d3
+
+	mov.w		&0x0008,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0xfffffffe,IREGS+0x8(%a6)
+	mov.l		&0x00000001,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_6:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x80000000,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0xffffffff,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	muls.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000000,IREGS+0x8(%a6)
+	mov.l		&0x80000000,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_7:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x80000000,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x00000001,%d3
+
+	mov.w		&0x0008,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	muls.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0xffffffff,IREGS+0x8(%a6)
+	mov.l		&0x80000000,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_8:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x00000001,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x80000000,%d3
+
+	mov.w		&0x0008,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	muls.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0xffffffff,IREGS+0x8(%a6)
+	mov.l		&0x80000000,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	mov.l		TESTCTR(%a6),%d1
+	clr.l		%d0
+	rts
+
+#############################################
+
+movp_str:
+	string	"\tmovep..."
+
+	align	0x4
+###############################
+# movep.w	%d0,(0x0,%a0) #
+###############################
+movp_0:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.w	&0xaaaa,%d0
+	clr.b	0x0(%a0)
+	clr.b	0x2(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d0,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x2(%a0),%d1
+	lsl.w	&0x8,%d1
+	mov.b	0x0(%a0),%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	%d0,(0x0,%a0) #
+###############################
+movp_1:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA+0x4(%a6),%a0
+	mov.w	&0xaaaa,%d0
+	clr.l	-0x4(%a0)
+	clr.l	(%a0)
+	clr.l	0x4(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d0,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	tst.l	-0x4(%a0)
+	bne.l	error
+	tst.l	0x4(%a0)
+	bne.l	error
+	cmpi.l	(%a0),&0xaa00aa00
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+#####################################################
+# movep.w	%d0,(0x0,%a0)			    #
+#	- this test has %cc initially equal to zero #
+#####################################################
+movp_2:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.w	&0xaaaa,%d0
+	clr.b	0x0(%a0)
+	clr.b	0x2(%a0)
+
+	mov.w	&0x0000,ICCR(%a6)
+	mov.w	&0x0000,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d0,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x2(%a0),%d1
+	lsl.w	&0x8,%d1
+	mov.b	0x0(%a0),%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	(0x0,%a0),%d0 #
+###############################
+movp_3:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.b	&0xaa,0x0(%a0)
+	mov.b	&0xaa,0x2(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	(0x0,%a0),%d0
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.w	&0xaaaa,IREGS+0x2(%a6)
+
+	mov.w	&0xaaaa,%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.l	%d0,(0x0,%a0) #
+###############################
+movp_4:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.l	&0xaaaaaaaa,%d0
+	clr.b	0x0(%a0)
+	clr.b	0x2(%a0)
+	clr.b	0x4(%a0)
+	clr.b	0x6(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	%d0,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x6(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x4(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x2(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x0(%a0),%d1
+
+	cmp.l	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.l	%d0,(0x0,%a0) #
+###############################
+movp_5:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA+0x4(%a6),%a0
+	mov.l	&0xaaaaaaaa,%d0
+	clr.l	-0x4(%a0)
+	clr.l	(%a0)
+	clr.l	0x4(%a0)
+	clr.l	0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	%d0,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	tst.l	-0x4(%a0)
+	bne.l	error
+	tst.l	0x8(%a0)
+	bne.l	error
+	cmpi.l	(%a0),&0xaa00aa00
+	bne.l	error
+	cmpi.l	0x4(%a0),&0xaa00aa00
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.l	(0x0,%a0),%d0 #
+###############################
+movp_6:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.b	&0xaa,0x0(%a0)
+	mov.b	&0xaa,0x2(%a0)
+	mov.b	&0xaa,0x4(%a0)
+	mov.b	&0xaa,0x6(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	(0x0,%a0),%d0
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.l	&0xaaaaaaaa,IREGS(%a6)
+
+	mov.l	&0xaaaaaaaa,%d1
+
+	cmp.l	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	%d7,(0x0,%a0) #
+###############################
+movp_7:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.w	&0xaaaa,%d7
+	clr.b	0x0(%a0)
+	clr.b	0x2(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d7,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x2(%a0),%d1
+	lsl.w	&0x8,%d1
+	mov.b	0x0(%a0),%d1
+
+	cmp.w	%d7,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	(0x0,%a0),%d7 #
+###############################
+movp_8:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.b	&0xaa,0x0(%a0)
+	mov.b	&0xaa,0x2(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	(0x0,%a0),%d7
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.w	&0xaaaa,IREGS+30(%a6)
+
+	mov.w	&0xaaaa,%d1
+
+	cmp.w	%d7,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	%d0,(0x0,%a0) #
+###############################
+movp_9:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.w	&0xaaaa,%d0
+	clr.b	0x0(%a0)
+	clr.b	0x2(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d0,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x2(%a0),%d1
+	lsl.w	&0x8,%d1
+	mov.b	0x0(%a0),%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	%d0,(0x8,%a0) #
+###############################
+movp_10:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.w	&0xaaaa,%d0
+	clr.b	0x0+0x8(%a0)
+	clr.b	0x2+0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d0,(0x8,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x2+0x8(%a0),%d1
+	lsl.w	&0x8,%d1
+	mov.b	0x0+0x8(%a0),%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	(0x8,%a0),%d0 #
+###############################
+movp_11:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.b	&0xaa,0x0+0x8(%a0)
+	mov.b	&0xaa,0x2+0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	(0x8,%a0),%d0
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.w	&0xaaaa,IREGS+0x2(%a6)
+
+	mov.w	&0xaaaa,%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.l	%d0,(0x8,%a0) #
+###############################
+movp_12:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.l	&0xaaaaaaaa,%d0
+	clr.b	0x0+0x8(%a0)
+	clr.b	0x2+0x8(%a0)
+	clr.b	0x4+0x8(%a0)
+	clr.b	0x6+0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	%d0,(0x8,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x6+0x8(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x4+0x8(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x2+0x8(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x0+0x8(%a0),%d1
+
+	cmp.l	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.l	(0x8,%a0),%d0 #
+###############################
+movp_13:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.b	&0xaa,0x0+0x8(%a0)
+	mov.b	&0xaa,0x2+0x8(%a0)
+	mov.b	&0xaa,0x4+0x8(%a0)
+	mov.b	&0xaa,0x6+0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	(0x8,%a0),%d0
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.l	&0xaaaaaaaa,IREGS(%a6)
+
+	mov.l	&0xaaaaaaaa,%d1
+
+	cmp.l	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+################################
+# movep.w	%d0,(-0x8,%a0) #
+################################
+movp_14:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA+0x8(%a6),%a0
+	mov.w	&0xaaaa,%d0
+	clr.b	0x0-0x8(%a0)
+	clr.b	0x2-0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d0,(-0x8,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x2-0x8(%a0),%d1
+	lsl.w	&0x8,%d1
+	mov.b	0x0-0x8(%a0),%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+################################
+# movep.w	(-0x8,%a0),%d0 #
+################################
+movp_15:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA+0x8(%a6),%a0
+	mov.b	&0xaa,0x0-0x8(%a0)
+	mov.b	&0xaa,0x2-0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	(-0x8,%a0),%d0
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.w	&0xaaaa,IREGS+0x2(%a6)
+
+	mov.w	&0xaaaa,%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+################################
+# movep.l	%d0,(-0x8,%a0) #
+################################
+movp_16:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA+0x8(%a6),%a0
+	mov.l	&0xaaaaaaaa,%d0
+	clr.b	0x0-0x8(%a0)
+	clr.b	0x2-0x8(%a0)
+	clr.b	0x4-0x8(%a0)
+	clr.b	0x8-0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	%d0,(-0x8,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x6-0x8(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x4-0x8(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x2-0x8(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x0-0x8(%a0),%d1
+
+	cmp.l	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+################################
+# movep.l	(-0x8,%a0),%d0 #
+################################
+movp_17:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA+0x8(%a6),%a0
+	mov.b	&0xaa,0x0-0x8(%a0)
+	mov.b	&0xaa,0x2-0x8(%a0)
+	mov.b	&0xaa,0x4-0x8(%a0)
+	mov.b	&0xaa,0x8-0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	(-0x8,%a0),%d0
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.l	&0xaaaaaaaa,IREGS(%a6)
+
+	mov.l	&0xaaaaaaaa,%d1
+
+	cmp.l	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+	mov.l	TESTCTR(%a6),%d1
+	clr.l	%d0
+	rts
+
+###########################################################
+
+divul_str:
+	string		"\t64-bit divide..."
+
+	align		0x4
+divul_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
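+# note: the divide-by-zero case below is commented out; divul_0 only
+# advances the test counter.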
+#	movm.l		DEF_REGS(%pc),&0x3fff
+
+#	clr.l		%d1
+#	mov.l		&0x99999999,%d2
+#	mov.l		&0x88888888,%d3
+
+#	mov.w		&0x001e,ICCR(%a6)
+#	mov.w		&0x001f,%cc
+#	movm.l		&0x7fff,IREGS(%a6)
+
+#	divu.l		%d1,%d2:%d3
+
+#	mov.w		%cc,SCCR(%a6)
+#	movm.l		&0x7fff,SREGS(%a6)
+
+#	bsr.l		chkregs
+#	tst.b		%d0
+#	bne.l		error
+
+divul_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x00000001,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x00000000,%d3
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x001f,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x44444444,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x55555555,%d3
+
+	mov.w		&0x0010,ICCR(%a6)
+	mov.w		&0x001f,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x11111111,IREGS+0x8(%a6)
+	mov.l		&0x00000001,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x55555555,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x44444444,%d3
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x001f,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x44444444,IREGS+0x8(%a6)
+	mov.l		&0x00000000,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_4:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x11111111,%d1
+	mov.l		&0x44444444,%d2
+	mov.l		&0x44444444,%d3
+
+	mov.w		&0x001e,ICCR(%a6)
+	mov.w		&0x001d,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_5:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xfffffffe,%d1
+	mov.l		&0x00000001,%d2
+	mov.l		&0x00000002,%d3
+
+	mov.w		&0x001e,ICCR(%a6)
+	mov.w		&0x001d,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divs.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_6:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xfffffffe,%d1
+	mov.l		&0x00000001,%d2
+	mov.l		&0x00000000,%d3
+
+	mov.w		&0x0018,ICCR(%a6)
+	mov.w		&0x001d,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divs.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000000,IREGS+0x8(%a6)
+	mov.l		&0x80000000,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_7:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x00000002,%d1
+	mov.l		&0x00000001,%d2
+	mov.l		&0x00000000,%d3
+
+	mov.w		&0x001e,ICCR(%a6)
+	mov.w		&0x001d,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divs.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_8:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xffffffff,%d1
+	mov.l		&0xfffffffe,%d2
+	mov.l		&0xffffffff,%d3
+
+	mov.w		&0x0008,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_9:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xffffffff,%d1
+	mov.l		&0xfffffffe,%d2
+	mov.l		&0xffffffff,%d3
+
+	mov.w		&0x0008,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		&0xffffffff,%d2:%d2
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0xffffffff,IREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_10:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x0000ffff,%d1
+	mov.l		&0x00000001,%d2
+	mov.l		&0x55555555,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x0000aaab,IREGS+0x8(%a6)
+	mov.l		&0x00015556,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	mov.l		TESTCTR(%a6),%d1
+	clr.l		%d0
+	rts
+
+###########################################################
+
+cas_str:
+	string		"\tcas..."
+
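+# the cas tests below use deliberately misaligned operands (DATA+0x1 and
+# DATA+0x2), i.e. the forms that the ISP's cas emulation has to handle.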
+	align		0x4
+cas0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+
+	mov.w		&0xaaaa,(%a0)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas.w		%d1,%d2,(%a0)			# Dc,Du,<ea>
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d3
+	mov.w		&0xbbbb,IREGS+0xc+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+
+	mov.w		&0xeeee,(%a0)
+
+	mov.w		&0x0000aaaa,%d1
+	mov.w		&0x0000bbbb,%d2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas.w		%d1,%d2,(%a0)			# Dc,Du,<ea>
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d3
+	mov.w		&0xeeee,IREGS+0x4+0x2(%a6)
+	mov.w		&0xeeee,IREGS+0xc+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x2(%a6),%a0
+
+	mov.l		&0xaaaaaaaa,(%a0)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas.l		%d1,%d2,(%a0)			# Dc,Du,<ea>
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d3
+	mov.l		&0xbbbbbbbb,IREGS+0xc(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x2(%a6),%a0
+
+	mov.l		&0xeeeeeeee,(%a0)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas.l		%d1,%d2,(%a0)			# Dc,Du,<ea>
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d3
+	mov.l		&0xeeeeeeee,IREGS+0x4(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0xc(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas4:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+
+	mov.l		&0xaaaaaaaa,(%a0)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas.l		%d1,%d2,(%a0)			# Dc,Du,<ea>
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d3
+	mov.l		&0xbbbbbbbb,IREGS+0xc(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas5:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+
+	mov.l		&0x7fffffff,(%a0)
+
+	mov.l		&0x80000000,%d1
+	mov.l		&0xbbbbbbbb,%d2
+
+	mov.w		&0x001b,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas.l		%d1,%d2,(%a0)			# Dc,Du,<ea>
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d3
+	mov.l		&0x7fffffff,IREGS+0x4(%a6)
+	mov.l		&0x7fffffff,IREGS+0xc(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	mov.l		TESTCTR(%a6),%d1
+	clr.l		%d0
+	rts
+
+###########################################################
+
+cas2_str:
+	string		"\tcas2..."
+
+	align		0x4
+cas20:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x0(%a6),%a0
+	lea		DATA+0x4(%a6),%a1
+
+	mov.l		&0xaaaaaaaa,(%a0)
+	mov.l		&0xbbbbbbbb,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xcccccccc,IREGS+0x14(%a6)
+	mov.l		&0xdddddddd,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas21:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+	lea		DATA+0x5(%a6),%a1
+
+	mov.l		&0xaaaaaaaa,(%a0)
+	mov.l		&0xbbbbbbbb,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xcccccccc,IREGS+0x14(%a6)
+	mov.l		&0xdddddddd,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas22:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x2(%a6),%a0
+	lea		DATA+0x6(%a6),%a1
+
+	mov.l		&0xaaaaaaaa,(%a0)
+	mov.l		&0xbbbbbbbb,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xcccccccc,IREGS+0x14(%a6)
+	mov.l		&0xdddddddd,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas23:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x0(%a6),%a0
+	lea		DATA+0x4(%a6),%a1
+
+	mov.l		&0xeeeeeeee,(%a0)
+	mov.l		&0xbbbbbbbb,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xeeeeeeee,IREGS+0x4(%a6)
+	mov.l		&0xbbbbbbbb,IREGS+0x8(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x14(%a6)
+	mov.l		&0xbbbbbbbb,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas24:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+	lea		DATA+0x5(%a6),%a1
+
+	mov.l		&0xeeeeeeee,(%a0)
+	mov.l		&0xbbbbbbbb,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xeeeeeeee,IREGS+0x4(%a6)
+	mov.l		&0xbbbbbbbb,IREGS+0x8(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x14(%a6)
+	mov.l		&0xbbbbbbbb,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas25:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x2(%a6),%a0
+	lea		DATA+0x6(%a6),%a1
+
+	mov.l		&0xeeeeeeee,(%a0)
+	mov.l		&0xbbbbbbbb,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xeeeeeeee,IREGS+0x4(%a6)
+	mov.l		&0xbbbbbbbb,IREGS+0x8(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x14(%a6)
+	mov.l		&0xbbbbbbbb,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas26:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x0(%a6),%a0
+	lea		DATA+0x4(%a6),%a1
+
+	mov.l		&0xaaaaaaaa,(%a0)
+	mov.l		&0xeeeeeeee,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xaaaaaaaa,IREGS+0x4(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x8(%a6)
+	mov.l		&0xaaaaaaaa,IREGS+0x14(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas27:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+	lea		DATA+0x5(%a6),%a1
+
+	mov.l		&0xaaaaaaaa,(%a0)
+	mov.l		&0xeeeeeeee,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xaaaaaaaa,IREGS+0x4(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x8(%a6)
+	mov.l		&0xaaaaaaaa,IREGS+0x14(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas28:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x2(%a6),%a0
+	lea		DATA+0x6(%a6),%a1
+
+	mov.l		&0xaaaaaaaa,(%a0)
+	mov.l		&0x7fffffff,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0x80000000,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x000b,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xaaaaaaaa,IREGS+0x4(%a6)
+	mov.l		&0x7fffffff,IREGS+0x8(%a6)
+	mov.l		&0xaaaaaaaa,IREGS+0x14(%a6)
+	mov.l		&0x7fffffff,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+##################################
+cas29:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x0(%a6),%a0
+	lea		DATA+0x4(%a6),%a1
+
+	mov.w		&0xaaaa,(%a0)
+	mov.w		&0xbbbb,(%a1)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+	mov.w		&0xcccc,%d3
+	mov.w		&0xdddd,%d4
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.w		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d5
+	mov.w		(%a1),%d6
+	mov.w		&0xcccc,IREGS+0x14+0x2(%a6)
+	mov.w		&0xdddd,IREGS+0x18+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas210:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+	lea		DATA+0x5(%a6),%a1
+
+	mov.w		&0xaaaa,(%a0)
+	mov.w		&0xbbbb,(%a1)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+	mov.w		&0xcccc,%d3
+	mov.w		&0xdddd,%d4
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.w		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d5
+	mov.w		(%a1),%d6
+	mov.w		&0xcccc,IREGS+0x14+0x2(%a6)
+	mov.w		&0xdddd,IREGS+0x18+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas211:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x0(%a6),%a0
+	lea		DATA+0x4(%a6),%a1
+
+	mov.w		&0xeeee,(%a0)
+	mov.w		&0xbbbb,(%a1)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+	mov.w		&0xcccc,%d3
+	mov.w		&0xdddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.w		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d5
+	mov.w		(%a1),%d6
+	mov.w		&0xeeee,IREGS+0x4+0x2(%a6)
+	mov.w		&0xbbbb,IREGS+0x8+0x2(%a6)
+	mov.w		&0xeeee,IREGS+0x14+0x2(%a6)
+	mov.w		&0xbbbb,IREGS+0x18+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas212:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+	lea		DATA+0x5(%a6),%a1
+
+	mov.w		&0xeeee,(%a0)
+	mov.w		&0xbbbb,(%a1)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+	mov.w		&0xcccc,%d3
+	mov.w		&0xdddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.w		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d5
+	mov.w		(%a1),%d6
+	mov.w		&0xeeee,IREGS+0x4+0x2(%a6)
+	mov.w		&0xbbbb,IREGS+0x8+0x2(%a6)
+	mov.w		&0xeeee,IREGS+0x14+0x2(%a6)
+	mov.w		&0xbbbb,IREGS+0x18+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas213:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x0(%a6),%a0
+	lea		DATA+0x4(%a6),%a1
+
+	mov.w		&0xaaaa,(%a0)
+	mov.w		&0xeeee,(%a1)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+	mov.w		&0xcccc,%d3
+	mov.w		&0xdddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.w		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d5
+	mov.w		(%a1),%d6
+	mov.w		&0xaaaa,IREGS+0x4+0x2(%a6)
+	mov.w		&0xeeee,IREGS+0x8+0x2(%a6)
+	mov.w		&0xaaaa,IREGS+0x14+0x2(%a6)
+	mov.w		&0xeeee,IREGS+0x18+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas214:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+	lea		DATA+0x5(%a6),%a1
+
+	mov.w		&0xaaaa,(%a0)
+	mov.w		&0x7fff,(%a1)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0x8000,%d2
+	mov.w		&0xcccc,%d3
+	mov.w		&0xdddd,%d4
+
+	mov.w		&0x001b,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.w		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d5
+	mov.w		(%a1),%d6
+	mov.w		&0xaaaa,IREGS+0x4+0x2(%a6)
+	mov.w		&0x7fff,IREGS+0x8+0x2(%a6)
+	mov.w		&0xaaaa,IREGS+0x14+0x2(%a6)
+	mov.w		&0x7fff,IREGS+0x18+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	mov.l		TESTCTR(%a6),%d1
+	clr.l		%d0
+	rts
+
+###########################################################
+
+cmp2_str:
+	string		"\tcmp2,chk2..."
+
+	align		0x4
+# unsigned - small,small
+cmp2_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0x2040,DATA(%a6)
+	mov.l		&0x11111120,%d1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0x2040,DATA(%a6)
+	mov.l		&0x00000040,%a1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0x2040,DATA(%a6)
+	mov.l		&0x11111130,%d1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	chk2.b		DATA(%a6),%d1
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_4:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0x2040,DATA(%a6)
+	mov.l		&0x00000010,%a1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_5:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0x2040,DATA(%a6)
+	mov.l		&0x11111150,%d1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_6:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0x2040,DATA(%a6)
+	mov.l		&0x00000090,%a1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+# unsigned - small,large
+cmp2_7:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x2000a000,DATA(%a6)
+	mov.l		&0x11112000,%d1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.w		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_8:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x2000a000,DATA(%a6)
+	mov.l		&0xffffa000,%a1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.w		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_9:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x2000a000,DATA(%a6)
+	mov.l		&0x11113000,%d1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	chk2.w		DATA(%a6),%d1
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_10:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x2000a000,DATA(%a6)
+	mov.l		&0xffff9000,%a1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.w		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_11:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x2000a000,DATA(%a6)
+	mov.l		&0x11111000,%d1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.w		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_12:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x2000a000,DATA(%a6)
+	mov.l		&0xffffb000,%a1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.w		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+# unsigned - large,large
+cmp2_13:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xa0000000,DATA(%a6)
+	mov.l		&0xc0000000,DATA+0x4(%a6)
+	mov.l		&0xa0000000,%d1
+
+	mov.w		&0x000c,ICCR(%a6)
+	mov.w		&0x0008,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.l		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_14:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xa0000000,DATA(%a6)
+	mov.l		&0xc0000000,DATA+0x4(%a6)
+	mov.l		&0xc0000000,%a1
+
+	mov.w		&0x000c,ICCR(%a6)
+	mov.w		&0x0008,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.l		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_15:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xa0000000,DATA(%a6)
+	mov.l		&0xc0000000,DATA+0x4(%a6)
+	mov.l		&0xb0000000,%d1
+
+	mov.w		&0x0008,ICCR(%a6)
+	mov.w		&0x0008,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	chk2.l		DATA(%a6),%d1
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_16:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xa0000000,DATA(%a6)
+	mov.l		&0xc0000000,DATA+0x4(%a6)
+	mov.l		&0x10000000,%a1
+
+	mov.w		&0x0009,ICCR(%a6)
+	mov.w		&0x0008,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.l		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_17:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xa0000000,DATA(%a6)
+	mov.l		&0xc0000000,DATA+0x4(%a6)
+	mov.l		&0x90000000,%d1
+
+	mov.w		&0x0009,ICCR(%a6)
+	mov.w		&0x0008,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.l		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_18:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xa0000000,DATA(%a6)
+	mov.l		&0xc0000000,DATA+0x4(%a6)
+	mov.l		&0xd0000000,%a1
+
+	mov.w		&0x0009,ICCR(%a6)
+	mov.w		&0x0008,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.l		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+# signed - negative,positive
+cmp2_19:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa040,DATA(%a6)
+	mov.l		&0x111111a0,%d1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_20:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa040,DATA(%a6)
+	mov.l		&0x00000040,%a1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	chk2.b		DATA(%a6),%a1
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_21:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa040,DATA(%a6)
+	mov.l		&0x111111b0,%d1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_22:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa040,DATA(%a6)
+	mov.l		&0x00000010,%a1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_23:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa040,DATA(%a6)
+	mov.l		&0x11111190,%d1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_24:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa040,DATA(%a6)
+	mov.l		&0x00000050,%a1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+# signed - negative,negative
+cmp2_25:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa0c0,DATA(%a6)
+	mov.l		&0x111111a0,%d1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_26:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa0c0,DATA(%a6)
+	mov.l		&0xffffffc0,%a1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_27:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa0c0,DATA(%a6)
+	mov.l		&0x111111b0,%d1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	chk2.b		DATA(%a6),%d1
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_28:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa0c0,DATA(%a6)
+	mov.l		&0x11111190,%a1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_29:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa0c0,DATA(%a6)
+	mov.l		&0x111111d0,%d1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_30:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa0c0,DATA(%a6)
+	mov.l		&0x00000050,%a1
+
+	mov.w		&0x001b,ICCR(%a6)
+	mov.w		&0x001f,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	mov.l		TESTCTR(%a6),%d1
+	clr.l		%d0
+	rts
+
+###########################################################
+
+ea_str:
+	string		"\tEffective addresses..."
+
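+# the effective-address tests repeat a 64-bit mulu.l through a range of
+# addressing modes; EAMEM holds the longword source operand (the driver
+# sets it to 2, so the expected product in %d3 is 4).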
+	align		0x4
+ea_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a0),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a0)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a0
+	mov.l		%a0,IREGS+0x20(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a6),%a0
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a0),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a0
+	mov.l		%a0,IREGS+0x20(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x1000(%a6),%a0
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(-0x1000,%a0),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_4:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a0
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a0),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_5:
+	addq.l		&0x1,TESTCTR(%a6)
+
+#	movm.l		DEF_REGS(%pc),&0x3fff
+
+#	clr.l		%d2
+#	mov.l		&0x00000002,%d3
+
+#	mov.w		&0x0000,ICCR(%a6)
+#	mov.w		&0x0000,%cc
+#	movm.l		&0xffff,IREGS(%a6)
+
+#	mulu.l		EAMEM.w,%d2:%d3
+
+#	mov.w		%cc,SCCR(%a6)
+#	movm.l		&0xffff,SREGS(%a6)
+#	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+#	bsr.l		chkregs
+#	tst.b		%d0
+#	bne.l		error
+
+ea_6:
+	addq.l		&0x1,TESTCTR(%a6)
+
+#	movm.l		DEF_REGS(%pc),&0x3fff
+
+#	clr.l		%d2
+#	mov.l		&0x00000002,%d3
+
+#	mov.w		&0x0000,ICCR(%a6)
+#	mov.w		&0x0000,%cc
+#	movm.l		&0xffff,IREGS(%a6)
+
+#	mulu.l		EAMEM.l,%d2:%d3
+
+#	mov.w		%cc,SCCR(%a6)
+#	movm.l		&0xffff,SREGS(%a6)
+#	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+#	bsr.l		chkregs
+#	tst.b		%d0
+#	bne.l		error
+
+ea_7:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		&0x00000002,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_8:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_8_next
+ea_8_mem:
+	long		0x00000002
+ea_8_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_8_mem.w,%pc),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_9:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a6),%a1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a0
+	mov.l		%a0,IREGS+0x24(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_10:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a6),%a2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a0
+	mov.l		%a0,IREGS+0x28(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_11:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a6),%a3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a3),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a0
+	mov.l		%a0,IREGS+0x2c(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_12:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a6),%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a0
+	mov.l		%a0,IREGS+0x30(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_13:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a6),%a5
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a5),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a0
+	mov.l		%a0,IREGS+0x34(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_14:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a1),%a6
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		-(%a6),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+	lea		EAMEM(%a1),%a0
+	mov.l		%a0,IREGS+0x38(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_15:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	mov.l		%a7,%a0
+	lea		EAMEM+0x4(%a6),%a7
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a7),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a1
+	mov.l		%a1,IREGS+0x3c(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
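+# ea_16 through ea_32 exercise the (d8,An,Xn.SIZE*SCALE) indexed mode,
+# varying the index size (word/long), the scale factor (1/2/4/8) and the
+# base/index registers used, so that each form resolves to the same source
+# longword at EAMEM.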
+ea_16:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_17:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.w*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_18:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.w*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_19:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.w*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_20:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.l*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_21:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.l*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_22:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.l*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_23:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_24:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%a4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_25:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(-0x10.b,%a0,%a4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_26:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a1
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a1,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_27:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a2
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a2,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_28:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a3,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_29:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a4
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a4,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_30:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a5
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a5,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_31:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a1),%a6
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		(0x10.b,%a6,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_32:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	mov.l		%a7,%a0
+	lea		EAMEM(%a6),%a7
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a7,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_33:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_34:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_35:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a3),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_36:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_37:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a5
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a5),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_38:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a1),%a6
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		(%a6),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_39:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	mov.l		%a7,%a0
+	lea		EAMEM(%a6),%a7
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a7),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_40:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a1)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a0
+	mov.l		%a0,IREGS+0x24(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_41:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a2)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a0
+	mov.l		%a0,IREGS+0x28(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_42:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a3)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a0
+	mov.l		%a0,IREGS+0x2c(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_43:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a4)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a0
+	mov.l		%a0,IREGS+0x30(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_44:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a5
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a5)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a0
+	mov.l		%a0,IREGS+0x34(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_45:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a1),%a6
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		(%a6)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+	lea		EAMEM+0x4(%a1),%a0
+	mov.l		%a0,IREGS+0x38(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_46:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	mov.l		%a7,%a0
+	lea		EAMEM(%a6),%a7
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a7)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a1
+	mov.l		%a1,IREGS+0x3c(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_47:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_48:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_49:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a3),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_50:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_51:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a5
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a5),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_52:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a1),%a6
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		(0x1000,%a6),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_53:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	mov.l		%a7,%a0
+	lea		EAMEM-0x1000(%a6),%a7
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a7),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_54:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x1000(%a6),%a0
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(-0x1000,%a0),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_55:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_55_next
+
+ea_55_data:
+	long		0x00000002
+ea_55_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_55_data.w,%pc),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_56:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_57:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.w*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_58:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.w*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_59:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.w*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_60:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.l*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_61:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.l*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_62:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.l*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_63:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_64:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(-0x10.w,%a3,%a4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_65:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x00.w,%a3,%za4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_66:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		%a3,%a4
+	add.l		&0x10,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(-0x10.w,%za3,%a4.l*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_67:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(-0x10.l,%a3,%a4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_68:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_68_next
+ea_68_mem:
+	long		0x00000002
+ea_68_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_68_mem+0x10.w,%pc,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_69:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_69_next
+ea_69_mem:
+	long		0x00000002
+ea_69_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_69_mem+0x10.w,%pc,%d4.w*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_70:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_70_next
+ea_70_mem:
+	long		0x00000002
+ea_70_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_70_mem+0x10.w,%pc,%d4.w*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_71:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_71_next
+ea_71_mem:
+	long		0x00000002
+ea_71_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_71_mem+0x10.w,%pc,%d4.w*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_72:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_72_next
+ea_72_mem:
+	long		0x00000002
+ea_72_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_72_mem+0x10.w,%pc,%d4.l*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_73:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_73_next
+ea_73_mem:
+	long		0x00000002
+ea_73_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_73_mem+0x10.w,%pc,%d4.l*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_74:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_74_next
+ea_74_mem:
+	long		0x00000002
+ea_74_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_74_mem+0x10.w,%pc,%d4.l*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_75:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_75_next
+ea_75_mem:
+	long		0x00000002
+ea_75_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_75_mem+0x10.w,%pc,%d4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_76:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_76_next
+ea_76_mem:
+	long		0x00000002
+ea_76_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_76_mem+0x10.w,%pc,%a4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_77:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_77_next
+ea_77_mem:
+	long		0x00000002
+ea_77_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_77_mem+0x00.w,%pc,%za4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_78:
+	addq.l		&0x1,TESTCTR(%a6)
+
+#	movm.l		DEF_REGS(%pc),&0x3fff
+
+#	clr.l		%d2
+#	mov.l		&0x00000002,%d3
+#	lea		EAMEM,%a3
+#	mov.l		%a3,%a4
+#	add.l		&0x10,%a4
+
+#	mov.w		&0x0000,ICCR(%a6)
+#	mov.w		&0x0000,%cc
+#	movm.l		&0xffff,IREGS(%a6)
+
+#	mulu.l		(EAMEM-0x10.w,%zpc,%a4.l*1),%d2:%d3
+
+#	mov.w		%cc,SCCR(%a6)
+#	movm.l		&0xffff,SREGS(%a6)
+#	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+#	bsr.l		chkregs
+#	tst.b		%d0
+#	bne.l		error
+
+ea_79:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM,%a3
+	mov.l		&0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_79_mem-0x10.l,%pc,%a4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bra.b		ea_79_next
+ea_79_mem:
+	long		0x00000002
+ea_79_next:
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_80:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_80_next
+ea_80_mem:
+	long		0x00000002
+ea_80_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a1
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_80_mem+0x10.b,%pc,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_81:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_81_next
+ea_81_mem:
+	long		0x00000002
+ea_81_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_81_mem+0x10.b,%pc,%d4.w*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_82:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_82_next
+ea_82_mem:
+	long		0x00000002
+ea_82_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_82_mem+0x10.b,%pc,%d4.w*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_83:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_83_next
+ea_83_mem:
+	long		0x00000002
+ea_83_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_83_mem+0x10.b,%pc,%d4.w*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_84:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_84_next
+ea_84_mem:
+	long		0x00000002
+ea_84_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_84_mem+0x10.b,%pc,%d4.l*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_85:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_85_next
+ea_85_mem:
+	long		0x00000002
+ea_85_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_85_mem+0x10.b,%pc,%d4.l*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_86:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_86_next
+ea_86_mem:
+	long		0x00000002
+ea_86_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_86_mem+0x10.b,%pc,%d4.l*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_87:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_87_next
+ea_87_mem:
+	long		0x00000002
+ea_87_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_87_mem+0x10.b,%pc,%d4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_88:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_88_mem+0x10.b,%pc,%d4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bra.b		ea_88_next
+ea_88_mem:
+	long		0x00000002
+ea_88_next:
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
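+# ea_89 onward use the memory-indirect pre-indexed mode,
+# ([bd,An,Xn.SIZE*SCALE],od): the pointer stored at EASTORE is fetched
+# first and the outer displacement is then added to reach the operand.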
+ea_89:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.w*1],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_90:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.w*2],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_91:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.w*4],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_92:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.w*8],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_93:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.l*1],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_94:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.l*2],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_95:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.l*4],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_96:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.l*8],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_97:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.l,%a4,%d4.l*8],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_98:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x00.l,%a4,%zd4.l*8],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_99:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([%a4,%zd4.l*8],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_100:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+	add.l		%a4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.l,%za4,%d4.l*1],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_101:
+	addq.l		&0x1,TESTCTR(%a6)
+
+#	movm.l		DEF_REGS(%pc),&0x3fff
+
+#	clr.l		%d2
+#	mov.l		&0x00000002,%d3
+#	lea		EAMEM(%a6),%a3
+#	lea		EASTORE(%a6),%a4
+#	mov.l		%a3,(%a4)
+#	mov.l		&-0x10,%d4
+
+#	mov.w		&0x0000,ICCR(%a6)
+#	mov.w		&0x0000,%cc
+#	movm.l		&0xffff,IREGS(%a6)
+
+#	mulu.l		([EASTORE.l,%za4,%zd4.l*1]),%d2:%d3
+
+#	mov.w		%cc,SCCR(%a6)
+#	movm.l		&0xffff,SREGS(%a6)
+#	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+#	bsr.l		chkregs
+#	tst.b		%d0
+#	bne.l		error
+
+ea_102:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x1000(%a1),%a3
+	lea		EASTORE(%a1),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%a6
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		([0x10.w,%a4,%a6.l*8],-0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_103:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x1000(%a1),%a3
+	lea		EASTORE(%a1),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&0x2,%a6
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		([-0x10.w,%a4,%a6.l*8],-0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
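+# ea_104 onward switch to the memory-indirect post-indexed mode,
+# ([bd,An],Xn.SIZE*SCALE,od): the index register and outer displacement
+# are added after the indirect pointer has been fetched.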
+ea_104:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.w*1,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_105:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.w*2,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_106:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.w*4,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_107:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.w*8,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_108:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.l*1,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_109:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.w*2,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_110:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.l*4,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_111:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.l*8,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_112:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.l,%a4],%d4.l*8,0x10.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_113:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x00.l,%a4],%zd4.l*8,0x20.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_114:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a7,%a0
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a7
+	mov.l		%a3,(%a7)
+	mov.l		&0x20,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([%a7],%d4.l*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_115:
+	addq.l		&0x1,TESTCTR(%a6)
+
+#	movm.l		DEF_REGS(%pc),&0x3fff
+
+#	clr.l		%d2
+#	mov.l		&0x00000002,%d3
+#	lea		EAMEM-0x20(%pc),%a3
+#	lea		EASTORE(%pc),%a4
+#	mov.l		%a3,(%a4)
+#	mov.l		&0x2,%d4
+
+#	mov.w		&0x0000,ICCR(%a6)
+#	mov.w		&0x0000,%cc
+#	movm.l		&0xffff,IREGS(%a6)
+
+#	mulu.l		([EASTORE.l,%za4],%zd4.l*8,0x20.l),%d2:%d3
+
+#	mov.w		%cc,SCCR(%a6)
+#	movm.l		&0xffff,SREGS(%a6)
+#	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+#	bsr.l		chkregs
+#	tst.b		%d0
+#	bne.l		error
+
+ea_116:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a1),%a3
+	lea		EASTORE(%a1),%a6
+	mov.l		%a3,(%a6)
+	add.l		&0x10,%a6
+	mov.l		&-0x2,%a5
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		([-0x10.w,%a6],%a5.l*8,0x10.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	mov.l		TESTCTR(%a6),%d1
+	clr.l		%d0
+	rts
+
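+# ea_117 onward repeat the memory-indirect pre-indexed tests with a
+# PC-relative base, ([bd,PC,Xn.SIZE*SCALE],od).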
+ea_117:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.w*1],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_118:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.w*2],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_119:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.w*4],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_120:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.w*8],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_121:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.l*1],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_122:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.l*2],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_123:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.l*4],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_124:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.l*8],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_125:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+	mulu.l		([EASTORE+0x10.l,%pc,%d4.l*8],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_126:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x00.l,%pc,%zd4.l*8],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_127:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		%a4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([%zpc,%d4.l*1],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_128:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+	add.l		%a4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.l,%zpc,%d4.l*1],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_129:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.l,%zpc,%zd4.l*1]),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_130:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&0x2,%a6
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE-0x10.w,%pc,%a6.l*8],-0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_131:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a7,%a0
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&0x2,%a7
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE-0x10.w,%pc,%a7.l*8],-0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
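+# ea_132 onward cover the PC-relative memory-indirect post-indexed form,
+# ([bd,PC],Xn.SIZE*SCALE,od).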
+ea_132:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.w*1,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_133:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.w*2,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_134:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.w*4,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_135:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.w*8,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_136:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.l*1,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_137:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.w*2,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_138:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.l*4,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_139:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.l*8,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_140:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.l,%pc],%d4.l*8,0x10.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_141:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.l,%pc],%zd4.l*8,0x20.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_142:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.l,%zpc],%d4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_143:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a7,%a0
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%pc),%a3
+	lea		EASTORE(%pc),%a6
+	mov.l		%a3,(%a6)
+	add.l		&0x10,%a6
+	mov.l		&-0x2,%a7
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%a7.l*8,0x10.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+###########################################################
+###########################################################
+chkregs:
+	lea		IREGS(%a6),%a0
+	lea		SREGS(%a6),%a1
+	mov.l		&14,%d0
+chkregs_loop:
+	cmp.l		(%a0)+,(%a1)+
+	bne.l		chkregs_error
+	dbra.w		%d0,chkregs_loop
+
+	mov.w		ICCR(%a6),%d0
+	mov.w		SCCR(%a6),%d1
+	cmp.w		%d0,%d1
+	bne.l		chkregs_error
+
+	clr.l		%d0
+	rts
+
+chkregs_error:
+	movq.l		&0x1,%d0
+	rts
+
+error:
+	mov.l		TESTCTR(%a6),%d1
+	movq.l		&0x1,%d0
+	rts
+
+DEF_REGS:
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+
+############################################################
+
+_print_str:
+	mov.l		%d0,-(%sp)
+	mov.l		(TESTTOP-0x80+0x0,%pc),%d0
+	pea		(TESTTOP-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+_print_num:
+	mov.l		%d0,-(%sp)
+	mov.l		(TESTTOP-0x80+0x4,%pc),%d0
+	pea		(TESTTOP-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+############################################################
diff --git a/arch/m68k/ifpsp060/src/pfpsp.S b/arch/m68k/ifpsp060/src/pfpsp.S
new file mode 100644
index 0000000..0c997c4
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/pfpsp.S
@@ -0,0 +1,14745 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# freal.s:
+#	This file is appended to the top of the 060FPSP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located
+# after _060FPSP_TABLE.
+#	Also, subroutine stubs exist in this file (_fpsp_done for
+# example) that are referenced by the FPSP package itself in order
+# to call a given routine. The stub routine actually performs the
+# callout. The FPSP code does a "bsr" to the stub routine. This
+# extra layer of hierarchy adds a slight performance penalty but
+# it makes the FPSP code easier to read and more maintainable.
+#
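+#
+# [Editorial sketch] A C-style illustration (hypothetical names, comment
+# only, not part of this package) of the call-out convention the stubs
+# below implement: each longword in the 128 bytes in front of
+# _060FPSP_TABLE holds the distance from (_060FPSP_TABLE - 0x80) to the
+# corresponding OS routine, and the stub "rtd"s through it.
+#
+#	extern char _060FPSP_TABLE[];		/* base of the FPSP image  */
+#	extern void my_fpsp_done(void);		/* hypothetical OS routine */
+#
+#	static void install_callout(unsigned long off, void (*fn)(void))
+#	{
+#		unsigned long *entry =
+#			(unsigned long *)(_060FPSP_TABLE - 0x80 + off);
+#		/* stub computes: target = (_060FPSP_TABLE - 0x80) + *entry */
+#		*entry = (unsigned long)fn -
+#			(unsigned long)(_060FPSP_TABLE - 0x80);
+#	}
+#
+#	e.g.: install_callout(0x30, my_fpsp_done);	/* 0x30 == _off_done */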
+
+set	_off_bsun,	0x00
+set	_off_snan,	0x04
+set	_off_operr,	0x08
+set	_off_ovfl,	0x0c
+set	_off_unfl,	0x10
+set	_off_dz,	0x14
+set	_off_inex,	0x18
+set	_off_fline,	0x1c
+set	_off_fpu_dis,	0x20
+set	_off_trap,	0x24
+set	_off_trace,	0x28
+set	_off_access,	0x2c
+set	_off_done,	0x30
+
+set	_off_imr,	0x40
+set	_off_dmr,	0x44
+set	_off_dmw,	0x48
+set	_off_irw,	0x4c
+set	_off_irl,	0x50
+set	_off_drb,	0x54
+set	_off_drw,	0x58
+set	_off_drl,	0x5c
+set	_off_dwb,	0x60
+set	_off_dww,	0x64
+set	_off_dwl,	0x68
+
+_060FPSP_TABLE:
+
+###############################################################
+
+# Here's the table of ENTRY POINTS for those linking the package.
+	bra.l		_fpsp_snan
+	short		0x0000
+	bra.l		_fpsp_operr
+	short		0x0000
+	bra.l		_fpsp_ovfl
+	short		0x0000
+	bra.l		_fpsp_unfl
+	short		0x0000
+	bra.l		_fpsp_dz
+	short		0x0000
+	bra.l		_fpsp_inex
+	short		0x0000
+	bra.l		_fpsp_fline
+	short		0x0000
+	bra.l		_fpsp_unsupp
+	short		0x0000
+	bra.l		_fpsp_effadd
+	short		0x0000
+
+	space		56
+
+###############################################################
+	global		_fpsp_done
+_fpsp_done:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_done,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_ovfl
+_real_ovfl:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_ovfl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_unfl
+_real_unfl:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_unfl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_inex
+_real_inex:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_inex,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_bsun
+_real_bsun:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_bsun,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_operr
+_real_operr:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_operr,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_snan
+_real_snan:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_snan,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_dz
+_real_dz:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dz,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_fline
+_real_fline:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_fline,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_fpu_disabled
+_real_fpu_disabled:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_fpu_dis,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_trap
+_real_trap:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_trap,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_trace
+_real_trace:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_trace,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_access
+_real_access:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_access,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#######################################
+
+	global		_imem_read
+_imem_read:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_imr,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read
+_dmem_read:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dmr,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write
+_dmem_write:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dmw,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_imem_read_word
+_imem_read_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_irw,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_imem_read_long
+_imem_read_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_irl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_byte
+_dmem_read_byte:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_drb,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_word
+_dmem_read_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_drw,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_long
+_dmem_read_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_drl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_byte
+_dmem_write_byte:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dwb,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_word
+_dmem_write_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dww,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_long
+_dmem_write_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dwl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#
+# This file contains a set of define statements for constants
+# in order to promote readability within the corecode itself.
+#
+
+set LOCAL_SIZE,		192			# stack frame size(bytes)
+set LV,			-LOCAL_SIZE		# stack offset
+
+set EXC_SR,		0x4			# stack status register
+set EXC_PC,		0x6			# stack pc
+set EXC_VOFF,		0xa			# stacked vector offset
+set EXC_EA,		0xc			# stacked <ea>
+
+set EXC_FP,		0x0			# frame pointer
+
+set EXC_AREGS,		-68			# offset of all address regs
+set EXC_DREGS,		-100			# offset of all data regs
+set EXC_FPREGS,		-36			# offset of all fp regs
+
+set EXC_A7,		EXC_AREGS+(7*4)		# offset of saved a7
+set OLD_A7,		EXC_AREGS+(6*4)		# extra copy of saved a7
+set EXC_A6,		EXC_AREGS+(6*4)		# offset of saved a6
+set EXC_A5,		EXC_AREGS+(5*4)
+set EXC_A4,		EXC_AREGS+(4*4)
+set EXC_A3,		EXC_AREGS+(3*4)
+set EXC_A2,		EXC_AREGS+(2*4)
+set EXC_A1,		EXC_AREGS+(1*4)
+set EXC_A0,		EXC_AREGS+(0*4)
+set EXC_D7,		EXC_DREGS+(7*4)
+set EXC_D6,		EXC_DREGS+(6*4)
+set EXC_D5,		EXC_DREGS+(5*4)
+set EXC_D4,		EXC_DREGS+(4*4)
+set EXC_D3,		EXC_DREGS+(3*4)
+set EXC_D2,		EXC_DREGS+(2*4)
+set EXC_D1,		EXC_DREGS+(1*4)
+set EXC_D0,		EXC_DREGS+(0*4)
+
+set EXC_FP0,		EXC_FPREGS+(0*12)	# offset of saved fp0
+set EXC_FP1,		EXC_FPREGS+(1*12)	# offset of saved fp1
+set EXC_FP2,		EXC_FPREGS+(2*12)	# offset of saved fp2 (not used)
+
+set FP_SCR1,		LV+80			# fp scratch 1
+set FP_SCR1_EX,		FP_SCR1+0
+set FP_SCR1_SGN,	FP_SCR1+2
+set FP_SCR1_HI,		FP_SCR1+4
+set FP_SCR1_LO,		FP_SCR1+8
+
+set FP_SCR0,		LV+68			# fp scratch 0
+set FP_SCR0_EX,		FP_SCR0+0
+set FP_SCR0_SGN,	FP_SCR0+2
+set FP_SCR0_HI,		FP_SCR0+4
+set FP_SCR0_LO,		FP_SCR0+8
+
+set FP_DST,		LV+56			# fp destination operand
+set FP_DST_EX,		FP_DST+0
+set FP_DST_SGN,		FP_DST+2
+set FP_DST_HI,		FP_DST+4
+set FP_DST_LO,		FP_DST+8
+
+set FP_SRC,		LV+44			# fp source operand
+set FP_SRC_EX,		FP_SRC+0
+set FP_SRC_SGN,		FP_SRC+2
+set FP_SRC_HI,		FP_SRC+4
+set FP_SRC_LO,		FP_SRC+8
+
+set USER_FPIAR,		LV+40			# FP instr address register
+
+set USER_FPSR,		LV+36			# FP status register
+set FPSR_CC,		USER_FPSR+0		# FPSR condition codes
+set	FPSR_QBYTE,		USER_FPSR+1		# FPSR quotient byte
+set FPSR_EXCEPT,	USER_FPSR+2		# FPSR exception status byte
+set FPSR_AEXCEPT,	USER_FPSR+3		# FPSR accrued exception byte
+
+set USER_FPCR,		LV+32			# FP control register
+set FPCR_ENABLE,	USER_FPCR+2		# FPCR exception enable
+set FPCR_MODE,		USER_FPCR+3		# FPCR rounding mode control
+
+set L_SCR3,		LV+28			# integer scratch 3
+set L_SCR2,		LV+24			# integer scratch 2
+set L_SCR1,		LV+20			# integer scratch 1
+
+set STORE_FLG,		LV+19			# flag: operand store (ie. not fcmp/ftst)
+
+set EXC_TEMP2,		LV+24			# temporary space
+set EXC_TEMP,		LV+16			# temporary space
+
+set DTAG,		LV+15			# destination operand type
+set STAG,		LV+14			# source operand type
+
+set SPCOND_FLG,		LV+10			# flag: special case (see below)
+
+set EXC_CC,		LV+8			# saved condition codes
+set EXC_EXTWPTR,	LV+4			# saved current PC (active)
+set EXC_EXTWORD,	LV+2			# saved extension word
+set EXC_CMDREG,		LV+2			# saved extension word
+set EXC_OPWORD,		LV+0			# saved operation word
+
+################################
+
+# Helpful macros
+
+set FTEMP,		0			# offsets within an
+set FTEMP_EX,		0			# extended precision
+set FTEMP_SGN,		2			# value saved in memory.
+set FTEMP_HI,		4
+set FTEMP_LO,		8
+set FTEMP_GRS,		12
+
+set LOCAL,		0			# offsets within an
+set LOCAL_EX,		0			# extended precision
+set LOCAL_SGN,		2			# value saved in memory.
+set LOCAL_HI,		4
+set LOCAL_LO,		8
+set LOCAL_GRS,		12
+
+set DST,		0			# offsets within an
+set DST_EX,		0			# extended precision
+set DST_HI,		4			# value saved in memory.
+set DST_LO,		8
+
+set SRC,		0			# offsets within an
+set SRC_EX,		0			# extended precision
+set SRC_HI,		4			# value saved in memory.
+set SRC_LO,		8
+
+set SGL_LO,		0x3f81			# min sgl prec exponent
+set SGL_HI,		0x407e			# max sgl prec exponent
+set DBL_LO,		0x3c01			# min dbl prec exponent
+set DBL_HI,		0x43fe			# max dbl prec exponent
+set EXT_LO,		0x0			# min ext prec exponent
+set EXT_HI,		0x7ffe			# max ext prec exponent
+
+set EXT_BIAS,		0x3fff			# extended precision bias
+set SGL_BIAS,		0x007f			# single precision bias
+set DBL_BIAS,		0x03ff			# double precision bias
+
+set NORM,		0x00			# operand type for STAG/DTAG
+set ZERO,		0x01			# operand type for STAG/DTAG
+set INF,		0x02			# operand type for STAG/DTAG
+set QNAN,		0x03			# operand type for STAG/DTAG
+set DENORM,		0x04			# operand type for STAG/DTAG
+set SNAN,		0x05			# operand type for STAG/DTAG
+set UNNORM,		0x06			# operand type for STAG/DTAG
+
+##################
+# FPSR/FPCR bits #
+##################
+set neg_bit,		0x3			# negative result
+set z_bit,		0x2			# zero result
+set inf_bit,		0x1			# infinite result
+set nan_bit,		0x0			# NAN result
+
+set q_sn_bit,		0x7			# sign bit of quotient byte
+
+set bsun_bit,		7			# branch on unordered
+set snan_bit,		6			# signalling NAN
+set operr_bit,		5			# operand error
+set ovfl_bit,		4			# overflow
+set unfl_bit,		3			# underflow
+set dz_bit,		2			# divide by zero
+set inex2_bit,		1			# inexact result 2
+set inex1_bit,		0			# inexact result 1
+
+set aiop_bit,		7			# accrued inexact operation bit
+set aovfl_bit,		6			# accrued overflow bit
+set aunfl_bit,		5			# accrued underflow bit
+set adz_bit,		4			# accrued dz bit
+set ainex_bit,		3			# accrued inexact bit
+
+#############################
+# FPSR individual bit masks #
+#############################
+set neg_mask,		0x08000000		# negative bit mask (lw)
+set inf_mask,		0x02000000		# infinity bit mask (lw)
+set z_mask,		0x04000000		# zero bit mask (lw)
+set nan_mask,		0x01000000		# nan bit mask (lw)
+
+set neg_bmask,		0x08			# negative bit mask (byte)
+set inf_bmask,		0x02			# infinity bit mask (byte)
+set z_bmask,		0x04			# zero bit mask (byte)
+set nan_bmask,		0x01			# nan bit mask (byte)
+
+set bsun_mask,		0x00008000		# bsun exception mask
+set snan_mask,		0x00004000		# snan exception mask
+set operr_mask,		0x00002000		# operr exception mask
+set ovfl_mask,		0x00001000		# overflow exception mask
+set unfl_mask,		0x00000800		# underflow exception mask
+set dz_mask,		0x00000400		# dz exception mask
+set inex2_mask,		0x00000200		# inex2 exception mask
+set inex1_mask,		0x00000100		# inex1 exception mask
+
+set aiop_mask,		0x00000080		# accrued illegal operation
+set aovfl_mask,		0x00000040		# accrued overflow
+set aunfl_mask,		0x00000020		# accrued underflow
+set adz_mask,		0x00000010		# accrued divide by zero
+set ainex_mask,		0x00000008		# accrued inexact
+
+######################################
+# FPSR combinations used in the FPSP #
+######################################
+set dzinf_mask,		inf_mask+dz_mask+adz_mask
+set opnan_mask,		nan_mask+operr_mask+aiop_mask
+set	nzi_mask,		0x01ffffff		# clears N, Z, and I
+set unfinx_mask,	unfl_mask+inex2_mask+aunfl_mask+ainex_mask
+set unf2inx_mask,	unfl_mask+inex2_mask+ainex_mask
+set ovfinx_mask,	ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
+set inx1a_mask,		inex1_mask+ainex_mask
+set inx2a_mask,		inex2_mask+ainex_mask
+set snaniop_mask,	nan_mask+snan_mask+aiop_mask
+set snaniop2_mask,	snan_mask+aiop_mask
+set naniop_mask,	nan_mask+aiop_mask
+set neginf_mask,	neg_mask+inf_mask
+set infaiop_mask,	inf_mask+aiop_mask
+set negz_mask,		neg_mask+z_mask
+set opaop_mask,		operr_mask+aiop_mask
+set unfl_inx_mask,	unfl_mask+aunfl_mask+ainex_mask
+set ovfl_inx_mask,	ovfl_mask+aovfl_mask+ainex_mask
+
+#########
+# misc. #
+#########
+set rnd_stky_bit,	29			# stky bit pos in longword
+
+set sign_bit,		0x7			# sign bit
+set signan_bit,		0x6			# signalling nan bit
+
+set sgl_thresh,		0x3f81			# minimum sgl exponent
+set dbl_thresh,		0x3c01			# minimum dbl exponent
+
+set x_mode,		0x0			# extended precision
+set s_mode,		0x4			# single precision
+set d_mode,		0x8			# double precision
+
+set rn_mode,		0x0			# round-to-nearest
+set rz_mode,		0x1			# round-to-zero
+set	rm_mode,		0x2			# round-to-minus-infinity
+set rp_mode,		0x3			# round-to-plus-infinity
+
+set mantissalen,	64			# length of mantissa in bits
+
+set BYTE,		1			# len(byte) == 1 byte
+set WORD,		2			# len(word) == 2 bytes
+set	LONG,		4			# len(longword) == 4 bytes
+
+set BSUN_VEC,		0xc0			# bsun    vector offset
+set INEX_VEC,		0xc4			# inexact vector offset
+set DZ_VEC,		0xc8			# dz      vector offset
+set UNFL_VEC,		0xcc			# unfl    vector offset
+set OPERR_VEC,		0xd0			# operr   vector offset
+set OVFL_VEC,		0xd4			# ovfl    vector offset
+set SNAN_VEC,		0xd8			# snan    vector offset
+
+###########################
+# SPecial CONDition FLaGs #
+###########################
+set ftrapcc_flg,	0x01			# flag bit: ftrapcc exception
+set fbsun_flg,		0x02			# flag bit: bsun exception
+set mia7_flg,		0x04			# flag bit: (a7)+ <ea>
+set mda7_flg,		0x08			# flag bit: -(a7) <ea>
+set fmovm_flg,		0x40			# flag bit: fmovm instruction
+set immed_flg,		0x80			# flag bit: &<data> <ea>
+
+set ftrapcc_bit,	0x0
+set fbsun_bit,		0x1
+set mia7_bit,		0x2
+set mda7_bit,		0x3
+set immed_bit,		0x7
+
+##################################
+# TRANSCENDENTAL "LAST-OP" FLAGS #
+##################################
+set FMUL_OP,		0x0			# fmul instr performed last
+set FDIV_OP,		0x1			# fdiv performed last
+set FADD_OP,		0x2			# fadd performed last
+set FMOV_OP,		0x3			# fmov performed last
+
+#############
+# CONSTANTS #
+#############
+T1:	long		0x40C62D38,0xD3D64634	# 16381 LOG2 LEAD
+T2:	long		0x3D6F90AE,0xB1E75CC7	# 16381 LOG2 TRAIL
+
+PI:	long		0x40000000,0xC90FDAA2,0x2168C235,0x00000000
+PIBY2:	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+TWOBYPI:
+	long		0x3FE45F30,0x6DC9C883
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_ovfl(): 060FPSP entry point for FP Overflow exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Overflow exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_fpsp_done() - "callout" for 060FPSP exit (all work done!)	#
+#	_real_ovfl() - "callout" for Overflow exception enabled code	#
+#	_real_inex() - "callout" for Inexact exception enabled code	#
+#	_real_trace() - "callout" for Trace exception code		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Ovfl exception stack frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	Overflow Exception enabled:					#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#	Overflow Exception disabled:					#
+#	- The system stack is unchanged					#
+#	- The "exception present" flag in the fsave frame is cleared	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	On the 060, if an FP overflow is present as the result of any	#
+# instruction, the 060 will take an overflow exception whether the	#
+# exception is enabled or disabled in the FPCR. For the disabled case,	#
+# this handler emulates the instruction to determine what the correct	#
+# default result should be for the operation. This default result is	#
+# then stored in either the FP regfile, data regfile, or memory.	#
+# Finally, the handler exits through the "callout" _fpsp_done()		#
+# denoting that no exceptional conditions exist within the machine.	#
+#	If the exception is enabled, then this handler must create the	#
+# exceptional operand and place it in the fsave state frame, and store	#
+# the default result (only if the instruction is opclass 3). For	#
+# exceptions enabled, this handler must exit through the "callout"	#
+# _real_ovfl() so that the operating system enabled overflow handler	#
+# can handle this case.							#
+#	Two other conditions exist. First, if overflow was disabled	#
+# but the inexact exception was enabled, this handler must exit		#
+# through the "callout" _real_inex() regardless of whether the result	#
+# was inexact.								#
+#	Also, in the case of an opclass three instruction where		#
+# overflow was disabled and the trace exception was enabled, this	#
+# handler must exit through the "callout" _real_trace().		#
+#									#
+#########################################################################
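+#
+# [Editorial sketch] C-like pseudocode (hypothetical helper names) for the
+# dispatch described in the ALGORITHM section above; the assembly below is
+# the authoritative sequence:
+#
+#	emulate_instruction();			/* default result stored      */
+#	if (fpcr_ovfl_enabled())
+#		_real_ovfl();			/* EXOP placed in fsave frame */
+#	else if (fpcr_inex2_enabled())
+#		_real_inex();			/* even if result was exact   */
+#	else if (opclass3 && trace_pending())
+#		_real_trace();
+#	else
+#		_fpsp_done();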
+
+	global		_fpsp_ovfl
+_fpsp_ovfl:
+
+#$#	sub.l		&24,%sp			# make room for src/dst
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&0x5,EXC_CMDREG(%a6)	# is instr an fmove out?
+	bne.w		fovfl_out
+
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+# since, I believe, only NORMs and DENORMs can come through here,
+# maybe we can avoid the subroutine call.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# maybe NORM,DENORM
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# that can pass through fpsp_ovfl(). remember that fcmp, ftst, and fsincos
+# will never take this exception.
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		fovfl_extract		# monadic
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fovfl_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+fovfl_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+fovfl_extract:
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+#$#	mov.l		FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
+#$#	mov.l		FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
+#$#	mov.l		FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	andi.l		&0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+# maybe we can make these entry points ONLY the OVFL entry points of each routine.
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+# the operation has been emulated. the result is in fp0.
+# the EXOP, if an exception occurred, is in fp1.
+# we must save the default result regardless of whether
+# traps are enabled or disabled.
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		store_fpreg
+
+# the exceptional possibilities we have left ourselves with are ONLY overflow
+# and inexact. and, the inexact is such that overflow occurred and was disabled
+# but inexact was enabled.
+	btst		&ovfl_bit,FPCR_ENABLE(%a6)
+	bne.b		fovfl_ovfl_on
+
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.b		fovfl_inex_on
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+	bra.l		_fpsp_done
+
+# overflow is enabled AND overflow, of course, occurred. so, we have the EXOP
+# in fp1. now, simply jump to _real_ovfl()!
+fovfl_ovfl_on:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP (fp1) to stack
+
+	mov.w		&0xe005,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_ovfl
+
+# overflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
+# we must jump to real_inex().
+fovfl_inex_on:
+
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP (fp1) to stack
+
+	mov.b		&0xc4,1+EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_inex
+
+########################################################################
+fovfl_out:
+
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+
+# the src operand is definitely a NORM(!), so tag it as such
+	mov.b		&NORM,STAG(%a6)		# set src optype tag
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	bsr.l		fout
+
+	btst		&ovfl_bit,FPCR_ENABLE(%a6)
+	bne.w		fovfl_ovfl_on
+
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.w		fovfl_inex_on
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	beq.l		_fpsp_done		# no
+
+	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
+	bra.l		_real_trace
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_unfl(): 060FPSP entry point for FP Underflow exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Underflow exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_fpsp_done() - "callout" for 060FPSP exit (all work done!)	#
+#	_real_ovfl() - "callout" for Overflow exception enabled code	#
+#	_real_inex() - "callout" for Inexact exception enabled code	#
+#	_real_trace() - "callout" for Trace exception code		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Unfl exception stack frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	Underflow Exception enabled:					#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#	Underflow Exception disabled:					#
+#	- The system stack is unchanged					#
+#	- The "exception present" flag in the fsave frame is cleared	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	On the 060, if an FP underflow is present as the result of any	#
+# instruction, the 060 will take an underflow exception whether the	#
+# exception is enabled or disabled in the FPCR. For the disabled case,	#
+# this handler emulates the instruction to determine what the correct	#
+# default result should be for the operation. This default result is	#
+# then stored in either the FP regfile, data regfile, or memory.	#
+# Finally, the handler exits through the "callout" _fpsp_done()		#
+# denoting that no exceptional conditions exist within the machine.	#
+#	If the exception is enabled, then this handler must create the	#
+# exceptional operand and place it in the fsave state frame, and store	#
+# the default result (only if the instruction is opclass 3). For	#
+# exceptions enabled, this handler must exit through the "callout"	#
+# _real_unfl() so that the operating system enabled underflow handler	#
+# can handle this case.							#
+#	Two other conditions exist. First, if underflow was disabled	#
+# but the inexact exception was enabled and the result was inexact,	#
+# this handler must exit through the "callout" _real_inex().		#
+#	Also, in the case of an opclass three instruction where		#
+# underflow was disabled and the trace exception was enabled, this	#
+# handler must exit through the "callout" _real_trace().		#
+#									#
+#########################################################################
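+#
+# [Editorial sketch] C-like pseudocode (hypothetical helpers) for the
+# opclass 0/2 underflow dispatch below; note the FPSR re-check after
+# emulation, which filters out the spurious underflow trap the 060
+# multiplier can take for the smallest normalized result:
+#
+#	emulate_instruction();
+#	if (fpcr_unfl_enabled() && fpsr_unfl_set())
+#		_real_unfl();
+#	else if (fpcr_inex2_enabled() && fpsr_inex2_set())
+#		_real_inex();
+#	else
+#		_fpsp_done();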
+
+	global		_fpsp_unfl
+_fpsp_unfl:
+
+#$#	sub.l		&24,%sp			# make room for src/dst
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&0x5,EXC_CMDREG(%a6)	# is instr an fmove out?
+	bne.w		funfl_out
+
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# maybe NORM,DENORM
+
+# bit five of the fp ext word separates the monadic and dyadic operations
+# that can pass through fpsp_unfl(). remember that fcmp, and ftst
+# will never take this exception.
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is op monadic or dyadic?
+	beq.b		funfl_extract		# monadic
+
+# now, what's left that's not dyadic is fsincos. we can distinguish it
+# from all dyadics by the '0110xxx' bit pattern
+	btst		&0x4,1+EXC_CMDREG(%a6)	# is op an fsincos?
+	bne.b		funfl_extract		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		funfl_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+funfl_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+funfl_extract:
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+#$#	mov.l		FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
+#$#	mov.l		FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
+#$#	mov.l		FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	andi.l		&0x00ff01ff,USER_FPSR(%a6)
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+# maybe we can make these entry points ONLY the UNFL entry points of each routine.
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		store_fpreg
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception. Since this is incorrect, we need to check
+# if our emulation, after re-doing the operation, decided that
+# no underflow was called for. We do these checks only in
+# funfl_{unfl,inex}_on() because w/ both exceptions disabled, this
+# special case will simply exit gracefully with the correct result.
+
+# the exceptional possibilities we have left ourselves with are ONLY underflow
+# and inexact. and, the inexact is such that underflow occurred and was disabled
+# but inexact was enabled.
+	btst		&unfl_bit,FPCR_ENABLE(%a6)
+	bne.b		funfl_unfl_on
+
+funfl_chkinex:
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.b		funfl_inex_on
+
+funfl_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+	bra.l		_fpsp_done
+
+# underflow is enabled AND underflow, of course, occurred. so, we have the EXOP
+# in fp1 (don't forget to save fp0). what to do now?
+# well, we simply have to go to _real_unfl()!
+funfl_unfl_on:
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception. Since this is incorrect, we check here to see
+# if our emulation, after re-doing the operation, decided that
+# no underflow was called for.
+	btst		&unfl_bit,FPSR_EXCEPT(%a6)
+	beq.w		funfl_chkinex
+
+funfl_unfl_on2:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP (fp1) to stack
+
+	mov.w		&0xe003,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_unfl
+
+# underflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
+# we must jump to real_inex().
+funfl_inex_on:
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception.
+# But, whether bogus or not, if inexact is enabled AND it occurred,
+# then we have to branch to real_inex.
+
+	btst		&inex2_bit,FPSR_EXCEPT(%a6)
+	beq.w		funfl_exit
+
+funfl_inex_on2:
+
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to stack
+
+	mov.b		&0xc4,1+EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_inex
+
+#######################################################################
+funfl_out:
+
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+
+# the src operand is definitely a NORM(!), so tag it as such
+	mov.b		&NORM,STAG(%a6)		# set src optype tag
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero all but accured field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	bsr.l		fout
+
+	btst		&unfl_bit,FPCR_ENABLE(%a6)
+	bne.w		funfl_unfl_on2
+
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.w		funfl_inex_on2
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	beq.l		_fpsp_done		# no
+
+	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
+	bra.l		_real_trace
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_unsupp(): 060FPSP entry point for FP "Unimplemented	#
+#		        Data Type" exception.				#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Unimplemented Data Type exception in an operating system.	#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_{word,long}() - read instruction word/longword	#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	load_fpn1() - load src operand from FP regfile			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_real_inex() - "callout" to operating system inexact handler	#
+#	_fpsp_done() - "callout" for exit; work all done		#
+#	_real_trace() - "callout" for Trace enabled exception		#
+#	funimp_skew() - adjust fsave src ops to "incorrect" value	#
+#	_real_snan() - "callout" for SNAN exception			#
+#	_real_operr() - "callout" for OPERR exception			#
+#	_real_ovfl() - "callout" for OVFL exception			#
+#	_real_unfl() - "callout" for UNFL exception			#
+#	get_packed() - fetch packed operand from memory			#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the "Unimp Data Type" stk frame	#
+#	- The fsave frame contains the ssrc op (for UNNORM/DENORM)	#
+#									#
+# OUTPUT **************************************************************	#
+#	If Inexact exception (opclass 3):				#
+#	- The system stack is changed to an Inexact exception stk frame	#
+#	If SNAN exception (opclass 3):					#
+#	- The system stack is changed to an SNAN exception stk frame	#
+#	If OPERR exception (opclass 3):					#
+#	- The system stack is changed to an OPERR exception stk frame	#
+#	If OVFL exception (opclass 3):					#
+#	- The system stack is changed to an OVFL exception stk frame	#
+#	If UNFL exception (opclass 3):					#
+#	- The system stack is changed to an UNFL exception stack frame	#
+#	If Trace exception enabled:					#
+#	- The system stack is changed to a Trace exception stack frame	#
+#	Else: (normal case)						#
+#	- Correct result has been stored as appropriate			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Two main instruction types can enter here: (1) DENORM or UNNORM	#
+# unimplemented data types. These can be either opclass 0,2 or 3	#
+# instructions, and (2) PACKED unimplemented data format instructions	#
+# also of opclasses 0,2, or 3.						#
+#	For UNNORM/DENORM opclass 0 and 2, the handler fetches the src	#
+# operand from the fsave state frame and the dst operand (if dyadic)	#
+# from the FP register file. The instruction is then emulated by	#
+# choosing an emulation routine from a table of routines indexed by	#
+# instruction type. Once the instruction has been emulated and result	#
+# saved, then we check to see if any enabled exceptions resulted from	#
+# instruction emulation. If none, then we exit through the "callout"	#
+# _fpsp_done(). If there is an enabled FP exception, then we insert	#
+# this exception into the FPU in the fsave state frame and then exit	#
+# through _fpsp_done().							#
+#	PACKED opclass 0 and 2 is similar in how the instruction is	#
+# emulated and exceptions handled. The differences occur in how the	#
+# handler loads the packed op (by calling get_packed() routine) and	#
+# by the fact that a Trace exception could be pending for PACKED ops.	#
+# If a Trace exception is pending, then the current exception stack	#
+# frame is changed to a Trace exception stack frame and an exit is	#
+# made through _real_trace().						#
+#	For UNNORM/DENORM opclass 3, the actual move out to memory is	#
+# performed by calling the routine fout(). If no exception should occur	#
+# as the result of emulation, then an exit either occurs through	#
+# _fpsp_done() or through _real_trace() if a Trace exception is pending	#
+# (a Trace stack frame must be created here, too). If an FP exception	#
+# should occur, then we must create an exception stack frame of that	#
+# type and jump to either _real_snan(), _real_operr(), _real_inex(),	#
+# _real_unfl(), or _real_ovfl() as appropriate. PACKED opclass 3	#
+# emulation is performed in a similar manner.				#
+#									#
+#########################################################################
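+#
+# [Editorial sketch] Hypothetical C for the table dispatch used below for
+# opclass 0/2 emulation: tbl_unsupp holds longword offsets relative to its
+# own base, indexed by the instruction's extension bits; the selected
+# routine leaves its result in fp0 (and an EXOP in fp1 on an exception):
+#
+#	extern long tbl_unsupp[];		/* self-relative offsets   */
+#	int idx = extension_bits();		/* hypothetical extraction */
+#	void (*emul)(void) =
+#		(void (*)(void))((char *)tbl_unsupp + tbl_unsupp[idx]);
+#	emul();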
+
+#
+# (1) DENORM and UNNORM (unimplemented) data types:
+#
+#				post-instruction
+#				*****************
+#				*      EA	*
+#	 pre-instruction	*		*
+#	*****************	*****************
+#	* 0x0 *  0x0dc  *	* 0x3 *  0x0dc  *
+#	*****************	*****************
+#	*     Next	*	*     Next	*
+#	*      PC	*	*      PC	*
+#	*****************	*****************
+#	*      SR	*	*      SR	*
+#	*****************	*****************
+#
+# (2) PACKED format (unsupported) opclasses two and three:
+#	*****************
+#	*      EA	*
+#	*		*
+#	*****************
+#	* 0x2 *  0x0dc	*
+#	*****************
+#	*     Next	*
+#	*      PC	*
+#	*****************
+#	*      SR	*
+#	*****************
+#
+	global		_fpsp_unsupp
+_fpsp_unsupp:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# save fp state
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode?
+	bne.b		fu_s
+fu_u:
+	mov.l		%usp,%a0		# fetch user stack pointer
+	mov.l		%a0,EXC_A7(%a6)		# save on stack
+	bra.b		fu_cont
+# if the exception is an opclass zero or two unimplemented data type
+# exception, then the a7' calculated here is wrong since it doesn't
+# stack an ea. however, we don't need an a7' for this case anyways.
+fu_s:
+	lea		0x4+EXC_EA(%a6),%a0	# load old a7'
+	mov.l		%a0,EXC_A7(%a6)		# save on stack
+
+fu_cont:
+
+# the FPIAR holds the "current PC" of the faulting instruction
+# the FPIAR should be set correctly for ALL exceptions passing through
+# this point.
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)	# store OPWORD and EXTWORD
+
+############################
+
+	clr.b		SPCOND_FLG(%a6)		# clear special condition flag
+
+# Separate opclass three (fpn-to-mem) ops since they have a different
+# stack frame and protocol.
+	btst		&0x5,EXC_CMDREG(%a6)	# is it an fmove out?
+	bne.w		fu_out			# yes
+
+# Separate packed opclass two instructions.
+	bfextu		EXC_CMDREG(%a6){&0:&6},%d0
+	cmpi.b		%d0,&0x13
+	beq.w		fu_in_pack
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field
+	andi.l		&0x00ff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+# Opclass two w/ memory-to-fpn operation will have an incorrect extended
+# precision format if the src format was single or double and the
+# source data type was an INF, NAN, DENORM, or UNNORM
+	lea		FP_SRC(%a6),%a0		# pass ptr to input
+	bsr.l		fix_skewed_ops
+
+# we don't know whether the src operand or the dst operand (or both) is the
+# UNNORM or DENORM. call the function that tags the operand type. if the
+# input is an UNNORM, then convert it to a NORM, DENORM, or ZERO.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2			# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+
+fu_op2:
+	mov.b		%d0,STAG(%a6)		# save src optype tag
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# at this point
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		fu_extract		# monadic
+	cmpi.b		1+EXC_CMDREG(%a6),&0x3a	# is operation an ftst?
+	beq.b		fu_extract		# yes, so it's monadic, too
+
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+fu_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+fu_extract:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	bfextu		1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: all dyadic ops
+#	OPERR	: fsqrt(-NORM)
+#	OVFL	: all except ftst,fcmp
+#	UNFL	: all except ftst,fcmp
+#	DZ	: fdiv
+#	INEX2	: all except ftst,fcmp
+#	INEX1	: none (packed doesn't go through here)
+#
+
+# we determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions set
+	bne.b		fu_in_ena		# some are enabled
+
+fu_in_cont:
+# fcmp and ftst do not store any result.
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch extension
+	andi.b		&0x38,%d0		# extract bits 3-5
+	cmpi.b		%d0,&0x38		# is instr fcmp or ftst?
+	beq.b		fu_in_exit		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		store_fpreg		# store the result
+
+fu_in_exit:
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	bra.l		_fpsp_done
+
+fu_in_ena:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		fu_in_exc		# there is at least one set
+
+#
+# No exceptions occurred that were also enabled. Now:
+#
+#	if (OVFL && ovfl_disabled && inexact_enabled) {
+#	    branch to _real_inex() (even if the result was exact!);
+#	} else {
+#	    save the result in the proper fp reg (unless the op is fcmp or ftst);
+#	    return;
+#	}
+#
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+	beq.b		fu_in_cont		# no
+
+fu_in_ovflchk:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+	beq.b		fu_in_cont		# no
+	bra.w		fu_in_exc_ovfl		# go insert overflow frame
+
+#
+# An exception occurred and that exception was enabled:
+#
+#	shift enabled exception field into lo byte of d0;
+#	if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
+#	    ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
+#		/*
+#		 * this is the case where we must call _real_inex() now or else
+#		 * there will be no other way to pass it the exceptional operand
+#		 */
+#		call _real_inex();
+#	} else {
+#		restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
+#	}
+#
+fu_in_exc:
+	subi.l		&24,%d0			# fix offset to be 0-8
+	cmpi.b		%d0,&0x6		# is exception INEX? (6)
+	bne.b		fu_in_exc_exit		# no
+
+# the enabled exception was inexact
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
+	bne.w		fu_in_exc_unfl		# yes
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
+	bne.w		fu_in_exc_ovfl		# yes
+
+# here, we insert the correct fsave status value into the fsave frame for the
+# corresponding exception. the operand in the fsave frame should be the original
+# src operand.
+fu_in_exc_exit:
+	mov.l		%d0,-(%sp)		# save d0
+	bsr.l		funimp_skew		# skew sgl or dbl inputs
+	mov.l		(%sp)+,%d0		# restore d0
+
+	mov.w		(tbl_except.b,%pc,%d0.w*2),2+FP_SRC(%a6) # create exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore src op
+
+	unlk		%a6
+
+	bra.l		_fpsp_done
+
+tbl_except:
+	short		0xe000,0xe006,0xe004,0xe005
+	short		0xe003,0xe002,0xe001,0xe001
+
+fu_in_exc_unfl:
+	mov.w		&0x4,%d0
+	bra.b		fu_in_exc_exit
+fu_in_exc_ovfl:
+	mov.w		&0x03,%d0
+	bra.b		fu_in_exc_exit
+
+# If the input operand to this operation was opclass two and a single
+# or double precision denorm, inf, or nan, the operand needs to be
+# "corrected" in order to have the proper equivalent extended precision
+# number.
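+#
+# [Editorial sketch] The exponent values tested below come from folding
+# the single/double bias into the extended bias: a skewed single shows up
+# with |exp| == 0x3f80 (denorm/zero) or 0x407f (INF/NAN), a skewed double
+# with |exp| == 0x3c00 or 0x43ff.  Roughly (comment-only pseudocode):
+#
+#	if (exp == 0x3f80) {		/* sgl denorm or zero        */
+#		shift = normalize_mantissa();
+#		exp = 0x3f81 - shift;	/* or clear exp for a zero   */
+#	} else if (exp == 0x407f) {	/* sgl INF or NAN            */
+#		exp = 0x7fff;
+#	}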
+	global		fix_skewed_ops
+fix_skewed_ops:
+	bfextu		EXC_CMDREG(%a6){&0:&6},%d0 # extract opclass,src fmt
+	cmpi.b		%d0,&0x11		# is class = 2 & fmt = sgl?
+	beq.b		fso_sgl			# yes
+	cmpi.b		%d0,&0x15		# is class = 2 & fmt = dbl?
+	beq.b		fso_dbl			# yes
+	rts					# no
+
+fso_sgl:
+	mov.w		LOCAL_EX(%a0),%d0	# fetch src exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	cmpi.w		%d0,&0x3f80		# is |exp| == $3f80?
+	beq.b		fso_sgl_dnrm_zero	# yes
+	cmpi.w		%d0,&0x407f		# no; is |exp| == $407f?
+	beq.b		fso_infnan		# yes
+	rts					# no
+
+fso_sgl_dnrm_zero:
+	andi.l		&0x7fffffff,LOCAL_HI(%a0) # clear j-bit
+	beq.b		fso_zero		# it's a skewed zero
+fso_sgl_dnrm:
+# here, we count on norm not to alter a0...
+	bsr.l		norm			# normalize mantissa
+	neg.w		%d0			# -shft amt
+	addi.w		&0x3f81,%d0		# adjust new exponent
+	andi.w		&0x8000,LOCAL_EX(%a0)	# clear old exponent
+	or.w		%d0,LOCAL_EX(%a0)	# insert new exponent
+	rts
+
+fso_zero:
+	andi.w		&0x8000,LOCAL_EX(%a0)	# clear bogus exponent
+	rts
+
+fso_infnan:
+	andi.b		&0x7f,LOCAL_HI(%a0)	# clear j-bit
+	ori.w		&0x7fff,LOCAL_EX(%a0)	# make exponent = $7fff
+	rts
+
+fso_dbl:
+	mov.w		LOCAL_EX(%a0),%d0	# fetch src exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	cmpi.w		%d0,&0x3c00		# is |exp| == $3c00?
+	beq.b		fso_dbl_dnrm_zero	# yes
+	cmpi.w		%d0,&0x43ff		# no; is |exp| == $43ff?
+	beq.b		fso_infnan		# yes
+	rts					# no
+
+fso_dbl_dnrm_zero:
+	andi.l		&0x7fffffff,LOCAL_HI(%a0) # clear j-bit
+	bne.b		fso_dbl_dnrm		# it's a skewed denorm
+	tst.l		LOCAL_LO(%a0)		# is it a zero?
+	beq.b		fso_zero		# yes
+fso_dbl_dnrm:
+# here, we count on norm not to alter a0...
+	bsr.l		norm			# normalize mantissa
+	neg.w		%d0			# -shft amt
+	addi.w		&0x3c01,%d0		# adjust new exponent
+	andi.w		&0x8000,LOCAL_EX(%a0)	# clear old exponent
+	or.w		%d0,LOCAL_EX(%a0)	# insert new exponent
+	rts
+
+#################################################################
+
+# fmove out took an unimplemented data type exception.
+# the src operand is in FP_SRC. Call fout() to write out the result and
+# to determine which exceptions, if any, to take.
+fu_out:
+
+# Separate packed move outs from the UNNORM and DENORM move outs.
+	bfextu		EXC_CMDREG(%a6){&3:&3},%d0
+	cmpi.b		%d0,&0x3
+	beq.w		fu_out_pack
+	cmpi.b		%d0,&0x7
+	beq.w		fu_out_pack
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field.
+# fmove out doesn't affect ccodes.
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+# the src can ONLY be a DENORM or an UNNORM! so, don't make any big subroutine
+# call here. just figure out what it is...
+	mov.w		FP_SRC_EX(%a6),%d0	# get exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	beq.b		fu_out_denorm		# it's a DENORM
+
+	lea		FP_SRC(%a6),%a0
+	bsr.l		unnorm_fix		# yes; fix it
+
+	mov.b		%d0,STAG(%a6)
+
+	bra.b		fu_out_cont
+fu_out_denorm:
+	mov.b		&DENORM,STAG(%a6)
+fu_out_cont:
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	mov.l		(%a6),EXC_A6(%a6)	# in case a6 changes
+	bsr.l		fout			# call fmove out routine
+
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: none
+#	OPERR	: fmove.{b,w,l} out of large UNNORM
+#	OVFL	: fmove.{s,d}
+#	UNFL	: fmove.{s,d,x}
+#	DZ	: none
+#	INEX2	: all
+#	INEX1	: none (packed doesn't travel through here)
+
+# determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		fu_out_ena		# some are enabled
+
+fu_out_done:
+
+	mov.l		EXC_A6(%a6),(%a6)	# in case a6 changed
+
+# on extended precision opclass three instructions using pre-decrement or
+# post-increment addressing mode, the address register is not updated. is the
+# address register was the stack pointer used from user mode, then let's update
+# it here. if it was used from supervisor mode, then we have to handle this
+# as a special case.
+	btst		&0x5,EXC_SR(%a6)
+	bne.b		fu_out_done_s
+
+	mov.l		EXC_A7(%a6),%a0		# restore a7
+	mov.l		%a0,%usp
+
+fu_out_done_cont:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.b		fu_out_trace		# yes
+
+	bra.l		_fpsp_done
+
+# is the ea mode pre-decrement of the stack pointer from supervisor mode?
+# ("fmov.x fpm,-(a7)") if so,
+fu_out_done_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.b		fu_out_done_cont
+
+# the extended precision result is still in fp0. but, we need to save it
+# somewhere on the stack until we can copy it to its final resting place.
+# here, we're counting on the top of the stack to be the old place-holders
+# for fp0/fp1 which have already been restored. that way, we can write
+# over those destinations with the shifted stack frame.
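+# roughly (offsets are illustrative -- the real constants come from the
+# include file): the 8-byte exception frame is copied 0xc bytes toward lower
+# addresses, the 12-byte extended result is written where the frame used to
+# sit -- which is effectively the -(a7) destination -- and the final add to
+# %sp leaves it pointing at the relocated SR.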
+	fmovm.x		&0x80,FP_SRC(%a6)	# put answer on stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+	mov.l		LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	btst		&0x7,(%sp)
+	bne.b		fu_out_trace
+
+	bra.l		_fpsp_done
+
+fu_out_ena:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		fu_out_exc		# there is at least one set
+
+# no exceptions were set.
+# if a disabled overflow occurred and inexact is enabled, then we branch to
+# _real_inex() (even if the result was exact).
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+	beq.w		fu_out_done		# no
+
+fu_out_ovflchk:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+	beq.w		fu_out_done		# no
+	bra.w		fu_inex			# yes
+
+#
+# The fp move out that took the "Unimplemented Data Type" exception was
+# being traced. Since the stack frames are similar, get the "current" PC
+# from FPIAR and put it in the trace stack frame then jump to _real_trace().
+#
+#		  UNSUPP FRAME		   TRACE FRAME
+#		*****************	*****************
+#		*      EA	*	*    Current	*
+#		*		*	*      PC	*
+#		*****************	*****************
+#		* 0x3 *  0x0dc	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*     Next	*	*     Next	*
+#		*      PC	*	*      PC	*
+#		*****************	*****************
+#		*      SR	*	*      SR	*
+#		*****************	*****************
+#
+fu_out_trace:
+	mov.w		&0x2024,0x6(%sp)
+	fmov.l		%fpiar,0x8(%sp)
+	bra.l		_real_trace
+
+# an exception occurred and that exception was enabled.
+fu_out_exc:
+	subi.l		&24,%d0			# fix offset to be 0-8
+
+# we don't mess with the existing fsave frame. just re-insert it and
+# jump to the "_real_{}()" handler...
+	mov.w		(tbl_fu_out.b,%pc,%d0.w*2),%d0
+	jmp		(tbl_fu_out.b,%pc,%d0.w*1)
+
+	swbeg		&0x8
+tbl_fu_out:
+	short		tbl_fu_out	- tbl_fu_out	# BSUN can't happen
+	short		tbl_fu_out	- tbl_fu_out	# SNAN can't happen
+	short		fu_operr	- tbl_fu_out	# OPERR
+	short		fu_ovfl		- tbl_fu_out	# OVFL
+	short		fu_unfl		- tbl_fu_out	# UNFL
+	short		tbl_fu_out	- tbl_fu_out	# DZ can't happen
+	short		fu_inex		- tbl_fu_out	# INEX2
+	short		tbl_fu_out	- tbl_fu_out	# INEX1 won't make it here
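+
+# a sketch of the dispatch above: d0 arrives as the bfffo result minus 24,
+# i.e. an index of 0-7 in priority order (BSUN first). the mov.w fetches the
+# signed word offset "handler - tbl_fu_out" for that index and the jmp adds
+# it back to the table base. e.g. an enabled OVFL (index 3) fetches
+# "fu_ovfl - tbl_fu_out" and so lands on fu_ovfl; the zero-offset entries
+# belong to exceptions that can't occur on an fmove out.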
+
+# for snan,operr,ovfl,unfl, src op is still in FP_SRC so just
+# frestore it.
+fu_snan:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d8,EXC_VOFF(%a6)	# vector offset = 0xd8
+	mov.w		&0xe006,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+
+
+	bra.l		_real_snan
+
+fu_operr:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d0,EXC_VOFF(%a6)	# vector offset = 0xd0
+	mov.w		&0xe004,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+
+
+	bra.l		_real_operr
+
+fu_ovfl:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to the stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d4,EXC_VOFF(%a6)	# vector offset = 0xd4
+	mov.w		&0xe005,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)		# restore EXOP
+
+	unlk		%a6
+
+	bra.l		_real_ovfl
+
+# underflow can happen for extended precision. extended precision opclass
+# three instruction exceptions don't update the stack pointer. so, if the
+# exception occurred from user mode, then simply update a7 and exit normally.
+# if the exception occurred from supervisor mode, check if the <ea> was -(a7);
+# if so, the exception frame must be shifted and the result stored where the
+# old frame was.
+fu_unfl:
+	mov.l		EXC_A6(%a6),(%a6)	# restore a6
+
+	btst		&0x5,EXC_SR(%a6)
+	bne.w		fu_unfl_s
+
+	mov.l		EXC_A7(%a6),%a0		# restore a7 whether we need
+	mov.l		%a0,%usp		# to or not...
+
+fu_unfl_cont:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to the stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30cc,EXC_VOFF(%a6)	# vector offset = 0xcc
+	mov.w		&0xe003,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)		# restore EXOP
+
+	unlk		%a6
+
+	bra.l		_real_unfl
+
+fu_unfl_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # was the <ea> mode -(sp)?
+	bne.b		fu_unfl_cont
+
+# the extended precision result is still in fp0. but, we need to save it
+# somewhere on the stack until we can copy it to its final resting place
+# (where the exc frame is currently). make sure it's not at the top of the
+# frame or it will get overwritten when the exc stack frame is shifted "down".
+	fmovm.x		&0x80,FP_SRC(%a6)	# put answer on stack
+	fmovm.x		&0x40,FP_DST(%a6)	# put EXOP on stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30cc,EXC_VOFF(%a6)	# vector offset = 0xcc
+	mov.w		&0xe003,2+FP_DST(%a6)
+
+	frestore	FP_DST(%a6)		# restore EXOP
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+	mov.l		LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	bra.l		_real_unfl
+
+# fmove in and out enter here.
+fu_inex:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to the stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30c4,EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)		# restore EXOP
+
+	unlk		%a6
+
+
+	bra.l		_real_inex
+
+#########################################################################
+#########################################################################
+fu_in_pack:
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field
+	andi.l		&0x0ff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	bsr.l		get_packed		# fetch packed src operand
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	bsr.l		set_tag_x		# set src optype tag
+
+	mov.b		%d0,STAG(%a6)		# save src optype tag
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# at this point
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		fu_extract_p		# monadic
+	cmpi.b		1+EXC_CMDREG(%a6),&0x3a	# is operation an ftst?
+	beq.b		fu_extract_p		# yes, so it's monadic, too
+
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2_done_p		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+fu_op2_done_p:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+fu_extract_p:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	bfextu		1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
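+
+# a note on the dispatch just above: tbl_unsupp (defined elsewhere in this
+# file) appears to hold one longword offset per opclass 0/2 extension code,
+# so the mov.l fetches "emulation routine - tbl_unsupp" for this instruction
+# and the scaled-index jsr turns that back into an absolute call.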
+
+#
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: all dyadic ops
+#	OPERR	: fsqrt(-NORM)
+#	OVFL	: all except ftst,fcmp
+#	UNFL	: all except ftst,fcmp
+#	DZ	: fdiv
+#	INEX2	: all except ftst,fcmp
+#	INEX1	: all
+#
+
+# we determine the highest priority exception(if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		fu_in_ena_p		# some are enabled
+
+fu_in_cont_p:
+# fcmp and ftst do not store any result.
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch extension
+	andi.b		&0x38,%d0		# extract bits 3-5
+	cmpi.b		%d0,&0x38		# is instr fcmp or ftst?
+	beq.b		fu_in_exit_p		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		store_fpreg		# store the result
+
+fu_in_exit_p:
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.w		fu_in_exit_s_p		# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# update user a7
+	mov.l		%a0,%usp
+
+fu_in_exit_cont_p:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel stack frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+# the exception occurred in supervisor mode. check to see if the
+# addressing mode was (a7)+. if so, we'll need to shift the
+# stack frame "up".
+fu_in_exit_s_p:
+	btst		&mia7_bit,SPCOND_FLG(%a6) # was ea mode (a7)+
+	beq.b		fu_in_exit_cont_p	# no
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel stack frame
+
+# shift the stack frame "up". we don't really care about the <ea> field.
+	mov.l		0x4(%sp),0x10(%sp)
+	mov.l		0x0(%sp),0xc(%sp)
+	add.l		&0xc,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+fu_in_ena_p:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled & set
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		fu_in_exc_p		# at least one was set
+
+#
+# No exceptions occurred that were also enabled. Now:
+#
+#	if (OVFL && ovfl_disabled && inexact_enabled) {
+#	    branch to _real_inex() (even if the result was exact!);
+#	} else {
+#	    save the result in the proper fp reg (unless the op is fcmp or ftst);
+#	    return;
+#	}
+#
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+	beq.w		fu_in_cont_p		# no
+
+fu_in_ovflchk_p:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+	beq.w		fu_in_cont_p		# no
+	bra.w		fu_in_exc_ovfl_p	# do _real_inex() now
+
+#
+# An exception occurred and that exception was enabled:
+#
+#	shift enabled exception field into lo byte of d0;
+#	if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
+#	    ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
+#		/*
+#		 * this is the case where we must call _real_inex() now or else
+#		 * there will be no other way to pass it the exceptional operand
+#		 */
+#		call _real_inex();
+#	} else {
+#		restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
+#	}
+#
+fu_in_exc_p:
+	subi.l		&24,%d0			# fix offset to be 0-8
+	cmpi.b		%d0,&0x6		# is exception INEX? (6 or 7)
+	blt.b		fu_in_exc_exit_p	# no
+
+# the enabled exception was inexact
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
+	bne.w		fu_in_exc_unfl_p	# yes
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
+	bne.w		fu_in_exc_ovfl_p	# yes
+
+# here, we insert the correct fsave status value into the fsave frame for the
+# corresponding exception. the operand in the fsave frame should be the original
+# src operand.
+# as a reminder for future predicted pain and agony, we are passing in fsave the
+# "non-skewed" operand for cases of sgl and dbl src INFs,NANs, and DENORMs.
+# this is INCORRECT for an enabled SNAN, which would give the user the skewed SNAN!!!
+fu_in_exc_exit_p:
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.w		fu_in_exc_exit_s_p	# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# update user a7
+	mov.l		%a0,%usp
+
+fu_in_exc_exit_cont_p:
+	mov.w		(tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore src op
+
+	unlk		%a6
+
+	btst		&0x7,(%sp)		# is trace enabled?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done
+
+tbl_except_p:
+	short		0xe000,0xe006,0xe004,0xe005
+	short		0xe003,0xe002,0xe001,0xe001
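+
+# the table above maps the priority index in d0 (0-7) to the fsave status
+# word stuffed at 2+FP_SRC before the frestore, in the usual priority order:
+# BSUN, SNAN, OPERR, OVFL, UNFL, DZ, INEX2, INEX1. e.g. an enabled OPERR
+# (index 2) stuffs 0xe004, the same value used by the dedicated fu_operr
+# path earlier.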
+
+fu_in_exc_ovfl_p:
+	mov.w		&0x3,%d0
+	bra.w		fu_in_exc_exit_p
+
+fu_in_exc_unfl_p:
+	mov.w		&0x4,%d0
+	bra.w		fu_in_exc_exit_p
+
+fu_in_exc_exit_s_p:
+	btst		&mia7_bit,SPCOND_FLG(%a6)
+	beq.b		fu_in_exc_exit_cont_p
+
+	mov.w		(tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore src op
+
+	unlk		%a6			# unravel stack frame
+
+# shift stack frame "up". who cares about <ea> field.
+	mov.l		0x4(%sp),0x10(%sp)
+	mov.l		0x0(%sp),0xc(%sp)
+	add.l		&0xc,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.b		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+#
+# The opclass two PACKED instruction that took an "Unimplemented Data Type"
+# exception was being traced. Make the "current" PC the FPIAR and put it in the
+# trace stack frame then jump to _real_trace().
+#
+#		  UNSUPP FRAME		   TRACE FRAME
+#		*****************	*****************
+#		*      EA	*	*    Current	*
+#		*		*	*      PC	*
+#		*****************	*****************
+#		* 0x2 *	0x0dc	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*     Next	*	*     Next	*
+#		*      PC	*	*      PC	*
+#		*****************	*****************
+#		*      SR	*	*      SR	*
+#		*****************	*****************
+fu_trace_p:
+	mov.w		&0x2024,0x6(%sp)
+	fmov.l		%fpiar,0x8(%sp)
+
+	bra.l		_real_trace
+
+#########################################################
+#########################################################
+fu_out_pack:
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field.
+# fmove out doesn't affect ccodes.
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		load_fpn1
+
+# unlike the other opclass 3 unimplemented data type exceptions, packed must be
+# able to detect all operand types.
+	lea		FP_SRC(%a6),%a0
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2_p		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+
+fu_op2_p:
+	mov.b		%d0,STAG(%a6)		# save src optype tag
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	mov.l		(%a6),EXC_A6(%a6)	# in case a6 changes
+	bsr.l		fout			# call fmove out routine
+
+# Exceptions in order of precedence:
+#	BSUN	: no
+#	SNAN	: yes
+#	OPERR	: if ((k_factor > +17) || (dec. exp exceeds 3 digits))
+#	OVFL	: no
+#	UNFL	: no
+#	DZ	: no
+#	INEX2	: yes
+#	INEX1	: no
+
+# determine the highest priority exception(if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		fu_out_ena_p		# some are enabled
+
+fu_out_exit_p:
+	mov.l		EXC_A6(%a6),(%a6)	# restore a6
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.b		fu_out_exit_s_p		# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# update user a7
+	mov.l		%a0,%usp
+
+fu_out_exit_cont_p:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel stack frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+# the exception occurred in supervisor mode. check to see if the
+# addressing mode was -(a7). if so, we'll need to shift the
+# stack frame "down".
+fu_out_exit_s_p:
+	btst		&mda7_bit,SPCOND_FLG(%a6) # was ea mode -(a7)
+	beq.b		fu_out_exit_cont_p	# no
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	btst		&0x7,(%sp)
+	bne.w		fu_trace_p
+
+	bra.l		_fpsp_done
+
+fu_out_ena_p:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	beq.w		fu_out_exit_p
+
+	mov.l		EXC_A6(%a6),(%a6)	# restore a6
+
+# an exception occurred and that exception was enabled.
+# the only exception possible on packed move out are INEX, OPERR, and SNAN.
+fu_out_exc_p:
+	cmpi.b		%d0,&0x1a
+	bgt.w		fu_inex_p2
+	beq.w		fu_operr_p
+
+fu_snan_p:
+	btst		&0x5,EXC_SR(%a6)
+	bne.b		fu_snan_s_p
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	bra.w		fu_snan
+
+fu_snan_s_p:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.w		fu_snan
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d8,EXC_VOFF(%a6)	# vector offset = 0xd8
+	mov.w		&0xe006,2+FP_SRC(%a6)	# set fsave status
+
+	frestore	FP_SRC(%a6)		# restore src operand
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+
+	bra.l		_real_snan
+
+fu_operr_p:
+	btst		&0x5,EXC_SR(%a6)
+	bne.w		fu_operr_p_s
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	bra.w		fu_operr
+
+fu_operr_p_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.w		fu_operr
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d0,EXC_VOFF(%a6)	# vector offset = 0xd0
+	mov.w		&0xe004,2+FP_SRC(%a6)	# set fsave status
+
+	frestore	FP_SRC(%a6)		# restore src operand
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+
+	bra.l		_real_operr
+
+fu_inex_p2:
+	btst		&0x5,EXC_SR(%a6)
+	bne.w		fu_inex_s_p2
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	bra.w		fu_inex
+
+fu_inex_s_p2:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.w		fu_inex
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30c4,EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)	# set fsave status
+
+	frestore	FP_SRC(%a6)		# restore src operand
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+
+	bra.l		_real_inex
+
+#########################################################################
+
+#
+# if we're stuffing a source operand back into an fsave frame then we
+# have to make sure that for single or double source operands that the
+# format stuffed is as weird as the hardware usually makes it.
+#
+	global		funimp_skew
+funimp_skew:
+	bfextu		EXC_EXTWORD(%a6){&3:&3},%d0 # extract src specifier
+	cmpi.b		%d0,&0x1		# was src sgl?
+	beq.b		funimp_skew_sgl		# yes
+	cmpi.b		%d0,&0x5		# was src dbl?
+	beq.b		funimp_skew_dbl		# yes
+	rts
+
+funimp_skew_sgl:
+	mov.w		FP_SRC_EX(%a6),%d0	# fetch DENORM exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	beq.b		funimp_skew_sgl_not
+	cmpi.w		%d0,&0x3f80
+	bgt.b		funimp_skew_sgl_not
+	neg.w		%d0			# make exponent negative
+	addi.w		&0x3f81,%d0		# find amt to shift
+	mov.l		FP_SRC_HI(%a6),%d1	# fetch DENORM hi(man)
+	lsr.l		%d0,%d1			# shift it
+	bset		&31,%d1			# set j-bit
+	mov.l		%d1,FP_SRC_HI(%a6)	# insert new hi(man)
+	andi.w		&0x8000,FP_SRC_EX(%a6)	# clear old exponent
+	ori.w		&0x3f80,FP_SRC_EX(%a6)	# insert new "skewed" exponent
+funimp_skew_sgl_not:
+	rts
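+
+# sketch of the single-precision skew above: a sgl denorm arrives with a
+# nonzero extended exponent no larger than 0x3f80, so the mantissa is
+# shifted right by 0x3f81 - exp bits, the j-bit is forced on, and the
+# exponent field is rewritten as the "skewed" constant 0x3f80. e.g.
+# (illustrative value only) exp = 0x3f7f shifts the mantissa right by 2.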
+
+funimp_skew_dbl:
+	mov.w		FP_SRC_EX(%a6),%d0	# fetch DENORM exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	beq.b		funimp_skew_dbl_not
+	cmpi.w		%d0,&0x3c00
+	bgt.b		funimp_skew_dbl_not
+
+	tst.b		FP_SRC_EX(%a6)		# make "internal format"
+	smi.b		0x2+FP_SRC(%a6)
+	mov.w		%d0,FP_SRC_EX(%a6)	# insert exponent with cleared sign
+	clr.l		%d0			# clear g,r,s
+	lea		FP_SRC(%a6),%a0		# pass ptr to src op
+	mov.w		&0x3c01,%d1		# pass denorm threshold
+	bsr.l		dnrm_lp			# denorm it
+	mov.w		&0x3c00,%d0		# new exponent
+	tst.b		0x2+FP_SRC(%a6)		# is sign set?
+	beq.b		fss_dbl_denorm_done	# no
+	bset		&15,%d0			# set sign
+fss_dbl_denorm_done:
+	bset		&0x7,FP_SRC_HI(%a6)	# set j-bit
+	mov.w		%d0,FP_SRC_EX(%a6)	# insert new exponent
+funimp_skew_dbl_not:
+	rts
+
+#########################################################################
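+# _mem_write2: from user mode the 12-byte store goes out through the normal
+# _dmem_write callout; from supervisor mode the three longwords are parked
+# in FP_DST instead (the -(a7)-from-supervisor exit paths above copy FP_DST
+# out to the relocated frame) and d1 is cleared so the caller sees a
+# "no fault" status.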
+	global		_mem_write2
+_mem_write2:
+	btst		&0x5,EXC_SR(%a6)
+	beq.l		_dmem_write
+	mov.l		0x0(%a0),FP_DST_EX(%a6)
+	mov.l		0x4(%a0),FP_DST_HI(%a6)
+	mov.l		0x8(%a0),FP_DST_LO(%a6)
+	clr.l		%d1
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_effadd(): 060FPSP entry point for FP "Unimplemented	#
+#			effective address" exception.			#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Unimplemented Effective Address exception in an operating	#
+#	system.								#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	decbin() - convert packed data to FP binary data		#
+#	_real_fpu_disabled() - "callout" for "FPU disabled" exception	#
+#	_real_access() - "callout" for access error exception		#
+#	_mem_read() - read extended immediate operand from memory	#
+#	_fpsp_done() - "callout" for exit; work all done		#
+#	_real_trace() - "callout" for Trace enabled exception		#
+#	fmovm_dynamic() - emulate dynamic fmovm instruction		#
+#	fmovm_ctrl() - emulate fmovm control instruction		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the "Unimplemented <ea>" stk frame	#
+#									#
+# OUTPUT **************************************************************	#
+#	If access error:						#
+#	- The system stack is changed to an access error stack frame	#
+#	If FPU disabled:						#
+#	- The system stack is changed to an FPU disabled stack frame	#
+#	If Trace exception enabled:					#
+#	- The system stack is changed to a Trace exception stack frame	#
+#	Else: (normal case)						#
+#	- None (correct result has been stored as appropriate)		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This exception handles 3 types of operations:			#
+# (1) FP Instructions using extended precision or packed immediate	#
+#     addressing mode.							#
+# (2) The "fmovm.x" instruction w/ dynamic register specification.	#
+# (3) The "fmovm.l" instruction w/ 2 or 3 control registers.		#
+#									#
+#	For immediate data operations, the data is read in w/ a		#
+# _mem_read() "callout", converted to FP binary (if packed), and used	#
+# as the source operand to the instruction specified by the instruction	#
+# word. If no FP exception should be reported as a result of the	#
+# emulation, then the result is stored to the destination register and	#
+# the handler exits through _fpsp_done(). If an enabled exc has been	#
+# signalled as a result of emulation, then an fsave state frame		#
+# corresponding to the FP exception type must be entered into the 060	#
+# FPU before exiting. In either the enabled or disabled cases, we	#
+# must also check if a Trace exception is pending, in which case, we	#
+# must create a Trace exception stack frame from the current exception	#
+# stack frame. If no Trace is pending, we simply exit through		#
+# _fpsp_done().								#
+#	For "fmovm.x", call the routine fmovm_dynamic() which will	#
+# decode and emulate the instruction. No FP exceptions can be pending	#
+# as a result of this operation emulation. A Trace exception can be	#
+# pending, though, which means the current stack frame must be changed	#
+# to a Trace stack frame and an exit made through _real_trace().	#
+# For the case of "fmovm.x Dn,-(a7)", where the offending instruction	#
+# was executed from supervisor mode, this handler must store the FP	#
+# register file values to the system stack by itself since		#
+# fmovm_dynamic() can't handle this. A normal exit is made through	#
+# _fpsp_done().								#
+#	For "fmovm.l", fmovm_ctrl() is used to emulate the instruction.	#
+# Again, a Trace exception may be pending and an exit made through	#
+# _real_trace(). Else, a normal exit is made through _fpsp_done().	#
+#									#
+#	Before any of the above is attempted, it must be checked to	#
+# see if the FPU is disabled. Since the "Unimp <ea>" exception is taken	#
+# before the "FPU disabled" exception, but the "FPU disabled" exception	#
+# has higher priority, we check the disabled bit in the PCR. If set,	#
+# then we must create an 8 word "FPU disabled" exception stack frame	#
+# from the current 4 word exception stack frame. This includes		#
+# reproducing the effective address of the instruction to put on the	#
+# new stack frame.							#
+#									#
+#	In the process of all emulation work, if a _mem_read()		#
+# "callout" returns a failing result indicating an access error, then	#
+# we must create an access error stack frame from the current stack	#
+# frame. This information includes a faulting address and a fault-	#
+# status-longword. These are created within this handler.		#
+#									#
+#########################################################################
+
+	global		_fpsp_effadd
+_fpsp_effadd:
+
+# This exception type takes priority over the "Line F Emulator"
+# exception. Therefore, the FPU could be disabled when entering here.
+# So, we must check to see if it's disabled and handle that case separately.
+	mov.l		%d0,-(%sp)		# save d0
+	movc		%pcr,%d0		# load proc cr
+	btst		&0x1,%d0		# is FPU disabled?
+	bne.w		iea_disabled		# yes
+	mov.l		(%sp)+,%d0		# restore d0
+
+	link		%a6,&-LOCAL_SIZE	# init stack frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# PC of instruction that took the exception is the PC in the frame
+	mov.l		EXC_PC(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)	# store OPWORD and EXTWORD
+
+#########################################################################
+
+	tst.w		%d0			# is operation fmovem?
+	bmi.w		iea_fmovm		# yes
+
+#
+# here, we will have:
+#	fabs	fdabs	fsabs		facos		fmod
+#	fadd	fdadd	fsadd		fasin		frem
+#	fcmp				fatan		fscale
+#	fdiv	fddiv	fsdiv		fatanh		fsin
+#	fint				fcos		fsincos
+#	fintrz				fcosh		fsinh
+#	fmove	fdmove	fsmove		fetox		ftan
+#	fmul	fdmul	fsmul		fetoxm1		ftanh
+#	fneg	fdneg	fsneg		fgetexp		ftentox
+#	fsgldiv				fgetman		ftwotox
+#	fsglmul				flog10
+#	fsqrt				flog2
+#	fsub	fdsub	fssub		flogn
+#	ftst				flognp1
+# which can all use f<op>.{x,p}
+# so, now it's immediate data extended precision AND PACKED FORMAT!
+#
+iea_op:
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	btst		&0xa,%d0		# is src fmt x or p?
+	bne.b		iea_op_pack		# packed
+
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# pass: ptr to #<data>
+	lea		FP_SRC(%a6),%a1		# pass: ptr to super addr
+	mov.l		&0xc,%d0		# pass: 12 bytes
+	bsr.l		_imem_read		# read extended immediate
+
+	tst.l		%d1			# did ifetch fail?
+	bne.w		iea_iacc		# yes
+
+	bra.b		iea_op_setsrc
+
+iea_op_pack:
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# pass: ptr to #<data>
+	lea		FP_SRC(%a6),%a1		# pass: ptr to super dst
+	mov.l		&0xc,%d0		# pass: 12 bytes
+	bsr.l		_imem_read		# read packed operand
+
+	tst.l		%d1			# did ifetch fail?
+	bne.w		iea_iacc		# yes
+
+# The packed operand is an INF or a NAN if the exponent field is all ones.
+	bfextu		FP_SRC(%a6){&1:&15},%d0	# get exp
+	cmpi.w		%d0,&0x7fff		# INF or NAN?
+	beq.b		iea_op_setsrc		# operand is an INF or NAN
+
+# The packed operand is a zero if the mantissa is all zero, else it's
+# a normal packed op.
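+# (the packed-decimal mantissa is 17 BCD digits: the low nybble of byte 3
+# is the explicit integer digit and the next eight bytes hold the other 16,
+# which is why the zero test below looks at that nybble plus the two
+# mantissa longwords.)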
+	mov.b		3+FP_SRC(%a6),%d0	# get byte 4
+	andi.b		&0x0f,%d0		# clear all but last nybble
+	bne.b		iea_op_gp_not_spec	# not a zero
+	tst.l		FP_SRC_HI(%a6)		# is lw 2 zero?
+	bne.b		iea_op_gp_not_spec	# not a zero
+	tst.l		FP_SRC_LO(%a6)		# is lw 3 zero?
+	beq.b		iea_op_setsrc		# operand is a ZERO
+iea_op_gp_not_spec:
+	lea		FP_SRC(%a6),%a0		# pass: ptr to packed op
+	bsr.l		decbin			# convert to extended
+	fmovm.x		&0x80,FP_SRC(%a6)	# make this the srcop
+
+iea_op_setsrc:
+	addi.l		&0xc,EXC_EXTWPTR(%a6)	# update extension word pointer
+
+# FP_SRC now holds the src operand.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# could be ANYTHING!!!
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		iea_op_getdst		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM/DENORM/ZERO
+	mov.b		%d0,STAG(%a6)		# set new optype tag
+iea_op_getdst:
+	clr.b		STORE_FLG(%a6)		# clear "store result" boolean
+
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		iea_op_extract		# monadic
+	btst		&0x4,1+EXC_CMDREG(%a6)	# is operation fsincos,ftst,fcmp?
+	bne.b		iea_op_spec		# yes
+
+iea_op_loaddst:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
+	bsr.l		load_fpn2		# load dst operand
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,DTAG(%a6)		# could be ANYTHING!!!
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		iea_op_extract		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM/DENORM/ZERO
+	mov.b		%d0,DTAG(%a6)		# set new optype tag
+	bra.b		iea_op_extract
+
+# the operation is fsincos, ftst, or fcmp. only fcmp is dyadic
+iea_op_spec:
+	btst		&0x3,1+EXC_CMDREG(%a6)	# is operation fsincos?
+	beq.b		iea_op_extract		# yes
+# now, we're left with ftst and fcmp. so, first let's tag them so that they don't
+# store a result. then, only fcmp will branch back and pick up a dst operand.
+	st		STORE_FLG(%a6)		# don't store a final result
+	btst		&0x1,1+EXC_CMDREG(%a6)	# is operation fcmp?
+	beq.b		iea_op_loaddst		# yes
+
+iea_op_extract:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass: rnd mode,prec
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	fmov.l		&0x0,%fpcr
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: all operations
+#	OPERR	: all reg-reg or mem-reg operations that can normally operr
+#	OVFL	: same as OPERR
+#	UNFL	: same as OPERR
+#	DZ	: same as OPERR
+#	INEX2	: same as OPERR
+#	INEX1	: all packed immediate operations
+#
+
+# we determine the highest priority exception(if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.b		iea_op_ena		# some are enabled
+
+# now, we save the result, unless, of course, the operation was ftst or fcmp.
+# these don't save results.
+iea_op_save:
+	tst.b		STORE_FLG(%a6)		# does this op store a result?
+	bne.b		iea_op_exit1		# exit with no frestore
+
+iea_op_store:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
+	bsr.l		store_fpreg		# store the result
+
+iea_op_exit1:
+	mov.l		EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
+	mov.l		EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel the frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		iea_op_trace		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+iea_op_ena:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled and set
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		iea_op_exc		# at least one was set
+
+# no exception occurred. now, did a disabled, exact overflow occur with inexact
+# enabled? if so, then we have to stuff an overflow frame into the FPU.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+	beq.b		iea_op_save
+
+iea_op_ovfl:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
+	beq.b		iea_op_store		# no
+	bra.b		iea_op_exc_ovfl		# yes
+
+# an enabled exception occurred. we have to insert the exception type back into
+# the machine.
+iea_op_exc:
+	subi.l		&24,%d0			# fix offset to be 0-8
+	cmpi.b		%d0,&0x6		# is exception INEX?
+	bne.b		iea_op_exc_force	# no
+
+# the enabled exception was inexact. so, if it occurs with an overflow
+# or underflow that was disabled, then we have to force an overflow or
+# underflow frame.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+	bne.b		iea_op_exc_ovfl		# yes
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
+	bne.b		iea_op_exc_unfl		# yes
+
+iea_op_exc_force:
+	mov.w		(tbl_iea_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+	bra.b		iea_op_exit2		# exit with frestore
+
+tbl_iea_except:
+	short		0xe002, 0xe006, 0xe004, 0xe005
+	short		0xe003, 0xe002, 0xe001, 0xe001
+
+iea_op_exc_ovfl:
+	mov.w		&0xe005,2+FP_SRC(%a6)
+	bra.b		iea_op_exit2
+
+iea_op_exc_unfl:
+	mov.w		&0xe003,2+FP_SRC(%a6)
+
+iea_op_exit2:
+	mov.l		EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
+	mov.l		EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore exceptional state
+
+	unlk		%a6			# unravel the frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.b		iea_op_trace		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+#
+# The opclass two instruction that took an "Unimplemented Effective Address"
+# exception was being traced. Make the "current" PC the FPIAR and put it in
+# the trace stack frame then jump to _real_trace().
+#
+#		 UNIMP EA FRAME		   TRACE FRAME
+#		*****************	*****************
+#		* 0x0 *  0x0f0	*	*    Current	*
+#		*****************	*      PC	*
+#		*    Current	*	*****************
+#		*      PC	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*      SR	*	*     Next	*
+#		*****************	*      PC	*
+#					*****************
+#					*      SR	*
+#					*****************
+iea_op_trace:
+	mov.l		(%sp),-(%sp)		# shift stack frame "down"
+	mov.w		0x8(%sp),0x4(%sp)
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
+	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
+
+	bra.l		_real_trace
+
+#########################################################################
+iea_fmovm:
+	btst		&14,%d0			# ctrl or data reg
+	beq.w		iea_fmovm_ctrl
+
+iea_fmovm_data:
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode
+	bne.b		iea_fmovm_data_s
+
+iea_fmovm_data_u:
+	mov.l		%usp,%a0
+	mov.l		%a0,EXC_A7(%a6)		# store current a7
+	bsr.l		fmovm_dynamic		# do dynamic fmovm
+	mov.l		EXC_A7(%a6),%a0		# load possibly new a7
+	mov.l		%a0,%usp		# update usp
+	bra.w		iea_fmovm_exit
+
+iea_fmovm_data_s:
+	clr.b		SPCOND_FLG(%a6)
+	lea		0x2+EXC_VOFF(%a6),%a0
+	mov.l		%a0,EXC_A7(%a6)
+	bsr.l		fmovm_dynamic		# do dynamic fmovm
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	beq.w		iea_fmovm_data_predec
+	cmpi.b		SPCOND_FLG(%a6),&mia7_flg
+	bne.w		iea_fmovm_exit
+
+# right now, d0 = the size.
+# the data has been fetched from the supervisor stack, but we have not
+# incremented the stack pointer by the appropriate number of bytes.
+# do it here.
+iea_fmovm_data_postinc:
+	btst		&0x7,EXC_SR(%a6)
+	bne.b		iea_fmovm_data_pi_trace
+
+	mov.w		EXC_SR(%a6),(EXC_SR,%a6,%d0)
+	mov.l		EXC_EXTWPTR(%a6),(EXC_PC,%a6,%d0)
+	mov.w		&0x00f0,(EXC_VOFF,%a6,%d0)
+
+	lea		(EXC_SR,%a6,%d0),%a0
+	mov.l		%a0,EXC_SR(%a6)
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+	mov.l		(%sp)+,%sp
+	bra.l		_fpsp_done
+
+iea_fmovm_data_pi_trace:
+	mov.w		EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
+	mov.l		EXC_EXTWPTR(%a6),(EXC_PC-0x4,%a6,%d0)
+	mov.w		&0x2024,(EXC_VOFF-0x4,%a6,%d0)
+	mov.l		EXC_PC(%a6),(EXC_VOFF+0x2-0x4,%a6,%d0)
+
+	lea		(EXC_SR-0x4,%a6,%d0),%a0
+	mov.l		%a0,EXC_SR(%a6)
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+	mov.l		(%sp)+,%sp
+	bra.l		_real_trace
+
+# right now, d0 = the size and d1 = the strg.
+iea_fmovm_data_predec:
+	mov.b		%d1,EXC_VOFF(%a6)	# store strg
+	mov.b		%d0,0x1+EXC_VOFF(%a6)	# store size
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.l		(%a6),-(%sp)		# make a copy of a6
+	mov.l		%d0,-(%sp)		# save d0
+	mov.l		%d1,-(%sp)		# save d1
+	mov.l		EXC_EXTWPTR(%a6),-(%sp)	# make a copy of Next PC
+
+	clr.l		%d0
+	mov.b		0x1+EXC_VOFF(%a6),%d0	# fetch size
+	neg.l		%d0			# get negative of size
+
+	btst		&0x7,EXC_SR(%a6)	# is trace enabled?
+	beq.b		iea_fmovm_data_p2
+
+	mov.w		EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
+	mov.l		EXC_PC(%a6),(EXC_VOFF-0x2,%a6,%d0)
+	mov.l		(%sp)+,(EXC_PC-0x4,%a6,%d0)
+	mov.w		&0x2024,(EXC_VOFF-0x4,%a6,%d0)
+
+	pea		(%a6,%d0)		# create final sp
+	bra.b		iea_fmovm_data_p3
+
+iea_fmovm_data_p2:
+	mov.w		EXC_SR(%a6),(EXC_SR,%a6,%d0)
+	mov.l		(%sp)+,(EXC_PC,%a6,%d0)
+	mov.w		&0x00f0,(EXC_VOFF,%a6,%d0)
+
+	pea		(0x4,%a6,%d0)		# create final sp
+
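+# strg is a bitmask of the fp regs selected by the dynamic fmovm, msb first:
+# bit 7 selects fp0, bit 6 fp1, ... bit 0 fp7. the unrolled loop below tests
+# one bit per shift and, for each set bit, stores that register's 12-byte
+# extended image and advances d0 by 0xc. e.g. (made-up mask) strg = 0x81
+# would store fp0 and fp7, 24 bytes in all.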
+iea_fmovm_data_p3:
+	clr.l		%d1
+	mov.b		EXC_VOFF(%a6),%d1	# fetch strg
+
+	tst.b		%d1
+	bpl.b		fm_1
+	fmovm.x		&0x80,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_1:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_2
+	fmovm.x		&0x40,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_2:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_3
+	fmovm.x		&0x20,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_3:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_4
+	fmovm.x		&0x10,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_4:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_5
+	fmovm.x		&0x08,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_5:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_6
+	fmovm.x		&0x04,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_6:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_7
+	fmovm.x		&0x02,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_7:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_end
+	fmovm.x		&0x01,(0x4+0x8,%a6,%d0)
+fm_end:
+	mov.l		0x4(%sp),%d1
+	mov.l		0x8(%sp),%d0
+	mov.l		0xc(%sp),%a6
+	mov.l		(%sp)+,%sp
+
+	btst		&0x7,(%sp)		# is trace enabled?
+	beq.l		_fpsp_done
+	bra.l		_real_trace
+
+#########################################################################
+iea_fmovm_ctrl:
+
+	bsr.l		fmovm_ctrl		# load ctrl regs
+
+iea_fmovm_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	btst		&0x7,EXC_SR(%a6)	# is trace on?
+	bne.b		iea_fmovm_trace		# yes
+
+	mov.l		EXC_EXTWPTR(%a6),EXC_PC(%a6) # set Next PC
+
+	unlk		%a6			# unravel the frame
+
+	bra.l		_fpsp_done		# exit to os
+
+#
+# The control reg instruction that took an "Unimplemented Effective Address"
+# exception was being traced. The "Current PC" for the trace frame is the
+# PC stacked for Unimp EA. The "Next PC" is in EXC_EXTWPTR.
+# After fixing the stack frame, jump to _real_trace().
+#
+#		 UNIMP EA FRAME		   TRACE FRAME
+#		*****************	*****************
+#		* 0x0 *  0x0f0	*	*    Current	*
+#		*****************	*      PC	*
+#		*    Current	*	*****************
+#		*      PC	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*      SR	*	*     Next	*
+#		*****************	*      PC	*
+#					*****************
+#					*      SR	*
+#					*****************
+# this ain't a pretty solution, but it works:
+# -restore a6 (not with unlk)
+# -shift stack frame down over where old a6 used to be
+# -add LOCAL_SIZE to stack pointer
+iea_fmovm_trace:
+	mov.l		(%a6),%a6		# restore frame pointer
+	mov.w		EXC_SR+LOCAL_SIZE(%sp),0x0+LOCAL_SIZE(%sp)
+	mov.l		EXC_PC+LOCAL_SIZE(%sp),0x8+LOCAL_SIZE(%sp)
+	mov.l		EXC_EXTWPTR+LOCAL_SIZE(%sp),0x2+LOCAL_SIZE(%sp)
+	mov.w		&0x2024,0x6+LOCAL_SIZE(%sp) # stk fmt = 0x2; voff = 0x024
+	add.l		&LOCAL_SIZE,%sp		# clear stack frame
+
+	bra.l		_real_trace
+
+#########################################################################
+# The FPU is disabled and so we should really have taken the "Line
+# F Emulator" exception. So, here we create an 8-word stack frame
+# from our 4-word stack frame. This means we must calculate the length of
+# the faulting instruction to get the "next PC". This is trivial for
+# immediate operands but requires some extra work for fmovm dynamic
+# which can use most addressing modes.
+iea_disabled:
+	mov.l		(%sp)+,%d0		# restore d0
+
+	link		%a6,&-LOCAL_SIZE	# init stack frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+
+# PC of instruction that took the exception is the PC in the frame
+	mov.l		EXC_PC(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)	# store OPWORD and EXTWORD
+
+	tst.w		%d0			# is instr fmovm?
+	bmi.b		iea_dis_fmovm		# yes
+# instruction is using an extended precision immediate operand. therefore,
+# the total instruction length is 16 bytes.
+iea_dis_immed:
+	mov.l		&0x10,%d0		# 16 bytes of instruction
+	bra.b		iea_dis_cont
+iea_dis_fmovm:
+	btst		&0xe,%d0		# is instr fmovm ctrl
+	bne.b		iea_dis_fmovm_data	# no
+# the instruction is a fmovm.l with 2 or 3 registers.
+	bfextu		%d0{&19:&3},%d1
+	mov.l		&0xc,%d0
+	cmpi.b		%d1,&0x7		# move all regs?
+	bne.b		iea_dis_cont
+	addq.l		&0x4,%d0
+	bra.b		iea_dis_cont
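+# length arithmetic for the two cases above: an extended or packed immediate
+# operand makes the instruction 2 (opword) + 2 (extword) + 12 (operand) =
+# 0x10 bytes; an fmovm.l of 2 control regs is 4 + 8 = 0xc bytes, and the
+# "all 3 regs" case adds one more longword for 0x10. (per the header above,
+# only the 2 and 3 register forms of fmovm.l show up here.)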
+# the instruction is an fmovm.x dynamic which can use many addressing
+# modes and thus can have several different total instruction lengths.
+# call fmovm_calc_ea which will go through the ea calc process and,
+# as a by-product, will tell us how long the instruction is.
+iea_dis_fmovm_data:
+	clr.l		%d0
+	bsr.l		fmovm_calc_ea
+	mov.l		EXC_EXTWPTR(%a6),%d0
+	sub.l		EXC_PC(%a6),%d0
+iea_dis_cont:
+	mov.w		%d0,EXC_VOFF(%a6)	# store stack shift value
+
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+# here, we actually create the 8-word frame from the 4-word frame,
+# with the "next PC" as additional info.
+# the <ea> field is left undefined.
+	subq.l		&0x8,%sp		# make room for new stack
+	mov.l		%d0,-(%sp)		# save d0
+	mov.w		0xc(%sp),0x4(%sp)	# move SR
+	mov.l		0xe(%sp),0x6(%sp)	# move Current PC
+	clr.l		%d0
+	mov.w		0x12(%sp),%d0
+	mov.l		0x6(%sp),0x10(%sp)	# move Current PC
+	add.l		%d0,0x6(%sp)		# make Next PC
+	mov.w		&0x402c,0xa(%sp)	# insert offset,frame format
+	mov.l		(%sp)+,%d0		# restore d0
+
+	bra.l		_real_fpu_disabled
+
+##########
+
+iea_iacc:
+	movc		%pcr,%d0
+	btst		&0x1,%d0
+	bne.b		iea_iacc_cont
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1 on stack
+iea_iacc_cont:
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	subq.w		&0x8,%sp		# make stack frame bigger
+	mov.l		0x8(%sp),(%sp)		# store SR,hi(PC)
+	mov.w		0xc(%sp),0x4(%sp)	# store lo(PC)
+	mov.w		&0x4008,0x6(%sp)	# store voff
+	mov.l		0x2(%sp),0x8(%sp)	# store ea
+	mov.l		&0x09428001,0xc(%sp)	# store fslw
+
+iea_acc_done:
+	btst		&0x5,(%sp)		# user or supervisor mode?
+	beq.b		iea_acc_done2		# user
+	bset		&0x2,0xd(%sp)		# set supervisor TM bit
+
+iea_acc_done2:
+	bra.l		_real_access
+
+iea_dacc:
+	lea		-LOCAL_SIZE(%a6),%sp
+
+	movc		%pcr,%d1
+	btst		&0x1,%d1
+	bne.b		iea_dacc_cont
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1 on stack
+	fmovm.l		LOCAL_SIZE+USER_FPCR(%sp),%fpcr,%fpsr,%fpiar # restore ctrl regs
+iea_dacc_cont:
+	mov.l		(%a6),%a6
+
+	mov.l		0x4+LOCAL_SIZE(%sp),-0x8+0x4+LOCAL_SIZE(%sp)
+	mov.w		0x8+LOCAL_SIZE(%sp),-0x8+0x8+LOCAL_SIZE(%sp)
+	mov.w		&0x4008,-0x8+0xa+LOCAL_SIZE(%sp)
+	mov.l		%a0,-0x8+0xc+LOCAL_SIZE(%sp)
+	mov.w		%d0,-0x8+0x10+LOCAL_SIZE(%sp)
+	mov.w		&0x0001,-0x8+0x12+LOCAL_SIZE(%sp)
+
+	movm.l		LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1
+	add.w		&LOCAL_SIZE-0x4,%sp
+
+	bra.b		iea_acc_done
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_operr(): 060FPSP entry point for FP Operr exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Operand Error exception in an operating system.		#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	_real_operr() - "callout" to operating system operr handler	#
+#	_dmem_write_{byte,word,long}() - store data to mem (opclass 3)	#
+#	store_dreg_{b,w,l}() - store data to data regfile (opclass 3)	#
+#	facc_out_{b,w,l}() - store to memory took access error (opcl 3)	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Operr exception frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	No access error:						#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the FP Operr exception is enabled, the goal	#
+# is to get to the handler specified at _real_operr(). But, on the 060,	#
+# for opclass zero and two instructions taking this exception, the	#
+# input operand in the fsave frame may be incorrect for some cases	#
+# and needs to be corrected. This handler calls fix_skewed_ops() to	#
+# do just this and then exits through _real_operr().			#
+#	For opclass 3 instructions, the 060 doesn't store the default	#
+# operr result out to memory or data register file as it should.	#
+# This code must emulate the move out before finally exiting through	#
+# _real_operr(). The move out, if to memory, is performed using		#
+# _mem_write() "callout" routines that may return a failing result.	#
+# In this special case, the handler must exit through facc_out()	#
+# which creates an access error stack frame from the current operr	#
+# stack frame.								#
+#									#
+#########################################################################
+
+	global		_fpsp_operr
+_fpsp_operr:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&13,%d0			# is instr an fmove out?
+	bne.b		foperr_out		# fmove out
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source infinity or
+# denorm operand in the sgl or dbl format. NANs also become skewed, but can't
+# cause an operr so we don't need to check for them here.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+foperr_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_operr
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# operand error exceptions. we do this here before passing control to
+# the user operand error handler.
+#
+# byte, word, and long destination format operations can pass
+# through here. we simply need to test the sign of the src
+# operand and save the appropriate minimum or maximum integer value
+# to the destination pointed to by the stacked effective address.
+#
+# although packed opclass three operations can take operand error
+# exceptions, they won't pass through here since they are caught
+# first by the unsupported data format exception handler. that handler
+# sends them directly to _real_operr() if necessary.
+#
+foperr_out:
+
+	mov.w		FP_SRC_EX(%a6),%d1	# fetch exponent
+	andi.w		&0x7fff,%d1
+	cmpi.w		%d1,&0x7fff
+	bne.b		foperr_out_not_qnan
+# the operand is either an infinity or a QNAN.
+	tst.l		FP_SRC_LO(%a6)
+	bne.b		foperr_out_qnan
+	mov.l		FP_SRC_HI(%a6),%d1
+	andi.l		&0x7fffffff,%d1
+	beq.b		foperr_out_not_qnan
+foperr_out_qnan:
+	mov.l		FP_SRC_HI(%a6),L_SCR1(%a6)
+	bra.b		foperr_out_jmp
+
+foperr_out_not_qnan:
+	mov.l		&0x7fffffff,%d1
+	tst.b		FP_SRC_EX(%a6)
+	bpl.b		foperr_out_not_qnan2
+	addq.l		&0x1,%d1
+foperr_out_not_qnan2:
+	mov.l		%d1,L_SCR1(%a6)
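+
+# to make the default concrete: for a non-NAN source, L_SCR1 now holds
+# 0x7fffffff for a positive src and 0x80000000 for a negative one; since
+# the 68k is big-endian, the byte and word stores below pick up 0x7f/0x80
+# and 0x7fff/0x8000 from the top of that longword. only a true QNAN
+# (nonzero mantissa) stores its upper mantissa longword instead; an INF
+# falls through to the integer default.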
+
+foperr_out_jmp:
+	bfextu		%d0{&19:&3},%d0		# extract dst format field
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract <ea> mode,reg
+	mov.w		(tbl_operr.b,%pc,%d0.w*2),%a0
+	jmp		(tbl_operr.b,%pc,%a0)
+
+tbl_operr:
+	short		foperr_out_l - tbl_operr # long word integer
+	short		tbl_operr    - tbl_operr # sgl prec shouldn't happen
+	short		tbl_operr    - tbl_operr # ext prec shouldn't happen
+	short		foperr_exit  - tbl_operr # packed won't enter here
+	short		foperr_out_w - tbl_operr # word integer
+	short		tbl_operr    - tbl_operr # dbl prec shouldn't happen
+	short		foperr_out_b - tbl_operr # byte integer
+	short		tbl_operr    - tbl_operr # packed won't enter here
+
+foperr_out_b:
+	mov.b		L_SCR1(%a6),%d0		# load positive default result
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		foperr_out_b_save_dn	# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_byte	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_b		# yes
+
+	bra.w		foperr_exit
+foperr_out_b_save_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_b		# store result to regfile
+	bra.w		foperr_exit
+
+foperr_out_w:
+	mov.w		L_SCR1(%a6),%d0		# load positive default result
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		foperr_out_w_save_dn	# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_word	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_w		# yes
+
+	bra.w		foperr_exit
+foperr_out_w_save_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_w		# store result to regfile
+	bra.w		foperr_exit
+
+foperr_out_l:
+	mov.l		L_SCR1(%a6),%d0		# load positive default result
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		foperr_out_l_save_dn	# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_long	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.w		foperr_exit
+foperr_out_l_save_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_l		# store result to regfile
+	bra.w		foperr_exit
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_snan(): 060FPSP entry point for FP SNAN exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Signalling NAN exception in an operating system.		#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	_real_snan() - "callout" to operating system SNAN handler	#
+#	_dmem_write_{byte,word,long}() - store data to mem (opclass 3)	#
+#	store_dreg_{b,w,l}() - store data to data regfile (opclass 3)	#
+#	facc_out_{b,w,l,d,x}() - store to mem took acc error (opcl 3)	#
+#	_calc_ea_fout() - fix An if <ea> is -() or ()+; also get <ea>	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP SNAN exception frame		#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	No access error:						#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the FP SNAN exception is enabled, the goal	#
+# is to get to the handler specified at _real_snan(). But, on the 060,	#
+# for opclass zero and two instructions taking this exception, the	#
+# input operand in the fsave frame may be incorrect for some cases	#
+# and needs to be corrected. This handler calls fix_skewed_ops() to	#
+# do just this and then exits through _real_snan().			#
+#	For opclass 3 instructions, the 060 doesn't store the default	#
+# SNAN result out to memory or data register file as it should.		#
+# This code must emulate the move out before finally exiting through	#
+# _real_snan(). The move out, if to memory, is performed using		#
+# _mem_write() "callout" routines that may return a failing result.	#
+# In this special case, the handler must exit through facc_out()	#
+# which creates an access error stack frame from the current SNAN	#
+# stack frame.								#
+#	For the case of an extended precision opclass 3 instruction,	#
+# if the effective addressing mode was -() or ()+, then the address	#
+# register must get updated by calling _calc_ea_fout(). If the <ea>	#
+# was -(a7) from supervisor mode, then the exception frame currently	#
+# on the system stack must be carefully moved "down" to make room	#
+# for the operand being moved.						#
+#									#
+#########################################################################
+
+	global		_fpsp_snan
+_fpsp_snan:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&13,%d0			# is instr an fmove out?
+	bne.w		fsnan_out		# fmove out
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source infinity or
+# denorm operand in the sgl or dbl format. NANs also become skewed and must be
+# fixed here.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+fsnan_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_snan
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# snan exceptions. we do this here before passing control to
+# the user snan handler.
+#
+# byte, word, long, and packed destination format operations can pass
+# through here. since packed format operations were already handled by
+# fpsp_unsupp(), we need to do nothing else for them here.
+# for byte, word, and long, we simply take the upper bits of the src
+# SNAN, set the SNAN bit, and save the result to the destination pointed
+# to by the stacked effective address (or to a data register).
+#
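+# (for instance, a dst format field of 6 selects the byte path below,
+# fsnan_out_b, via the tbl_snan jump table.)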
+fsnan_out:
+
+	bfextu		%d0{&19:&3},%d0		# extract dst format field
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract <ea> mode,reg
+	mov.w		(tbl_snan.b,%pc,%d0.w*2),%a0
+	jmp		(tbl_snan.b,%pc,%a0)
+
+tbl_snan:
+	short		fsnan_out_l - tbl_snan # long word integer
+	short		fsnan_out_s - tbl_snan # sgl prec shouldn't happen
+	short		fsnan_out_x - tbl_snan # ext prec shouldn't happen
+	short		tbl_snan    - tbl_snan # packed needs no help
+	short		fsnan_out_w - tbl_snan # word integer
+	short		fsnan_out_d - tbl_snan # dbl prec shouldn't happen
+	short		fsnan_out_b - tbl_snan # byte integer
+	short		tbl_snan    - tbl_snan # packed needs no help
+
+fsnan_out_b:
+	mov.b		FP_SRC_HI(%a6),%d0	# load upper byte of SNAN
+	bset		&6,%d0			# set SNAN bit
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_b_dn		# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_byte	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_b		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_b_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_b		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_w:
+	mov.w		FP_SRC_HI(%a6),%d0	# load upper word of SNAN
+	bset		&14,%d0			# set SNAN bit
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_w_dn		# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_word	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_w		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_w_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_w		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_l:
+	mov.l		FP_SRC_HI(%a6),%d0	# load upper longword of SNAN
+	bset		&30,%d0			# set SNAN bit
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_l_dn		# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_long	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_l_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_l		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_s:
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_d_dn		# yes
+	mov.l		FP_SRC_EX(%a6),%d0	# fetch SNAN sign
+	andi.l		&0x80000000,%d0		# keep sign
+	ori.l		&0x7fc00000,%d0		# insert new exponent,SNAN bit
+	mov.l		FP_SRC_HI(%a6),%d1	# load mantissa
+	lsr.l		&0x8,%d1		# shift mantissa for sgl
+	or.l		%d1,%d0			# create sgl SNAN
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_long	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_d_dn:
+	mov.l		FP_SRC_EX(%a6),%d0	# fetch SNAN sign
+	andi.l		&0x80000000,%d0		# keep sign
+	ori.l		&0x7fc00000,%d0		# insert new exponent,SNAN bit
+	mov.l		%d1,-(%sp)
+	mov.l		FP_SRC_HI(%a6),%d1	# load mantissa
+	lsr.l		&0x8,%d1		# shift mantissa for sgl
+	or.l		%d1,%d0			# create sgl SNAN
+	mov.l		(%sp)+,%d1
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_l		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_d:
+	mov.l		FP_SRC_EX(%a6),%d0	# fetch SNAN sign
+	andi.l		&0x80000000,%d0		# keep sign
+	ori.l		&0x7ff80000,%d0		# insert new exponent,SNAN bit
+	mov.l		FP_SRC_HI(%a6),%d1	# load hi mantissa
+	mov.l		%d0,FP_SCR0_EX(%a6)	# store to temp space
+	mov.l		&11,%d0			# load shift amt
+	lsr.l		%d0,%d1
+	or.l		%d1,FP_SCR0_EX(%a6)	# create dbl hi
+	mov.l		FP_SRC_HI(%a6),%d1	# load hi mantissa
+	andi.l		&0x000007ff,%d1
+	ror.l		%d0,%d1
+	mov.l		%d1,FP_SCR0_HI(%a6)	# store to temp space
+	mov.l		FP_SRC_LO(%a6),%d1	# load lo mantissa
+	lsr.l		%d0,%d1
+	or.l		%d1,FP_SCR0_HI(%a6)	# create dbl lo
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	movq.l		&0x8,%d0		# pass: size of 8 bytes
+	bsr.l		_dmem_write		# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	bra.w		fsnan_exit
+
+# for extended precision, if the addressing mode is pre-decrement or
+# post-increment, then the address register did not get updated.
+# in addition, for pre-decrement, the stacked <ea> is incorrect.
+fsnan_out_x:
+	clr.b		SPCOND_FLG(%a6)		# clear special case flag
+
+	mov.w		FP_SRC_EX(%a6),FP_SCR0_EX(%a6)
+	clr.w		2+FP_SCR0(%a6)
+	mov.l		FP_SRC_HI(%a6),%d0
+	bset		&30,%d0
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		FP_SRC_LO(%a6),FP_SCR0_LO(%a6)
+
+	btst		&0x5,EXC_SR(%a6)	# supervisor mode exception?
+	bne.b		fsnan_out_x_s		# yes
+
+	mov.l		%usp,%a0		# fetch user stack pointer
+	mov.l		%a0,EXC_A7(%a6)		# save on stack for calc_ea()
+	mov.l		(%a6),EXC_A6(%a6)
+
+	bsr.l		_calc_ea_fout		# find the correct ea,update An
+	mov.l		%a0,%a1
+	mov.l		%a0,EXC_EA(%a6)		# stack correct <ea>
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp		# restore user stack pointer
+	mov.l		EXC_A6(%a6),(%a6)
+
+fsnan_out_x_save:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	movq.l		&0xc,%d0		# pass: size of extended
+	bsr.l		_dmem_write		# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_x		# yes
+
+	bra.w		fsnan_exit
+
+fsnan_out_x_s:
+	mov.l		(%a6),EXC_A6(%a6)
+
+	bsr.l		_calc_ea_fout		# find the correct ea,update An
+	mov.l		%a0,%a1
+	mov.l		%a0,EXC_EA(%a6)		# stack correct <ea>
+
+	mov.l		EXC_A6(%a6),(%a6)
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
+	bne.b		fsnan_out_x_save	# no
+
+# the operation was "fmove.x SNAN,-(a7)" from supervisor mode.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	mov.l		EXC_A6(%a6),%a6		# restore frame pointer
+
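+# the moves below first copy the 12-byte exception frame 0xc bytes lower
+# on the stack, then write the 12-byte operand into the space the frame
+# vacated (i.e., at what would have been the -(a7) destination).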
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_PC+0x2(%sp),LOCAL_SIZE+EXC_PC+0x2-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+	mov.l		LOCAL_SIZE+FP_SCR0_EX(%sp),LOCAL_SIZE+EXC_SR(%sp)
+	mov.l		LOCAL_SIZE+FP_SCR0_HI(%sp),LOCAL_SIZE+EXC_PC+0x2(%sp)
+	mov.l		LOCAL_SIZE+FP_SCR0_LO(%sp),LOCAL_SIZE+EXC_EA(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	bra.l		_real_snan
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_inex(): 060FPSP entry point for FP Inexact exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Inexact exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	smovcr() - emulate an "fmovcr" instruction			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_real_inex() - "callout" to operating system inexact handler	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Inexact exception frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the FP Inexact exception is enabled, the goal	#
+# is to get to the handler specified at _real_inex(). But, on the 060,	#
+# for opclass zero and two instructions taking this exception, the	#
+# hardware doesn't store the correct result to the destination FP	#
+# register as did the '040 and '881/2. This handler must emulate the	#
+# instruction in order to get this value and then store it to the	#
+# correct register before calling _real_inex().				#
+#	For opclass 3 instructions, the 060 doesn't store the default	#
+# inexact result out to memory or data register file as it should.	#
+# This code must emulate the move out by calling fout() before finally	#
+# exiting through _real_inex().						#
+#									#
+#########################################################################
+
+	global		_fpsp_inex
+_fpsp_inex:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&13,%d0			# is instr an fmove out?
+	bne.w		finex_out		# fmove out
+
+
+# the hardware, for "fabs" and "fneg" w/ a long source format, puts the
+# longword integer directly into the upper longword of the mantissa along
+# w/ an exponent value of 0x401e. we convert this to extended precision here.
+	bfextu		%d0{&19:&3},%d0		# fetch instr size
+	bne.b		finex_cont		# instr size is not long
+	cmpi.w		FP_SRC_EX(%a6),&0x401e	# is exponent 0x401e?
+	bne.b		finex_cont		# no
+	fmov.l		&0x0,%fpcr
+	fmov.l		FP_SRC_HI(%a6),%fp0	# load integer src
+	fmov.x		%fp0,FP_SRC(%a6)	# store integer as extended precision
+	mov.w		&0xe001,0x2+FP_SRC(%a6)
+
+finex_cont:
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+# Here, we zero the ccode and exception byte field since we're going to
+# emulate the whole instruction. Notice, though, that we don't kill the
+# INEX1 bit. This is because a packed op has long since been converted
+# to extended before arriving here. Therefore, we need to retain the
+# INEX1 bit from when the operand was first converted.
+	andi.l		&0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	bfextu		EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg
+	cmpi.b		%d1,&0x17		# is op an fmovecr?
+	beq.w		finex_fmovcr		# yes
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# maybe NORM,DENORM
+
+# bits four and five of the fp extension word separate the monadic and dyadic
+# operations that can pass through fpsp_inex(). remember that fcmp and ftst
+# will never take this exception, but fsincos will.
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		finex_extract		# monadic
+
+	btst		&0x4,1+EXC_CMDREG(%a6)	# is operation an fsincos?
+	bne.b		finex_extract		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		finex_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+finex_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+finex_extract:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+# the operation has been emulated. the result is in fp0.
+finex_save:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		store_fpreg
+
+finex_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_inex
+
+finex_fmovcr:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec,mode
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.l		&0x0000007f,%d1		# pass rom offset
+	bsr.l		smovcr
+	bra.b		finex_save
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# inexact exceptions. we do this here before passing control to
+# the user inexact handler.
+#
+# byte, word, and long destination format operations can pass
+# through here. so can double and single precision.
+# although packed opclass three operations can take inexact
+# exceptions, they won't pass through here since they are caught
+# first by the unsupported data format exception handler. that handler
+# sends them directly to _real_inex() if necessary.
+#
+finex_out:
+
+	mov.b		&NORM,STAG(%a6)		# src is a NORM
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec,mode
+
+	andi.l		&0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	bsr.l		fout			# store the default result
+
+	bra.b		finex_exit
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_dz(): 060FPSP entry point for FP DZ exception.		#
+#									#
+#	This handler should be the first code executed upon taking	#
+#	the FP DZ exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword from memory	#
+#	fix_skewed_ops() - adjust fsave operand				#
+#	_real_dz() - "callout" exit point from FP DZ handler		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP DZ exception stack.		#
+#	- The fsave frame contains the source operand.			#
+#									#
+# OUTPUT **************************************************************	#
+#	- The system stack contains the FP DZ exception stack.		#
+#	- The fsave frame contains the adjusted source operand.		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the DZ exception is enabled, the goal is to	#
+# get to the handler specified at _real_dz(). But, on the 060, when the	#
+# exception is taken, the input operand in the fsave state frame may	#
+# be incorrect for some cases and need to be adjusted. So, this package	#
+# adjusts the operand using fix_skewed_ops() and then branches to	#
+# _real_dz().								#
+#									#
+#########################################################################
+
+	global		_fpsp_dz
+_fpsp_dz:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source zero
+# in the sgl or dbl format.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+fdz_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_dz
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_fline(): 060FPSP entry point for "Line F emulator"	#
+#		       exception when the "reduced" version of the	#
+#		       FPSP is implemented that does not emulate	#
+#		       FP unimplemented instructions.			#
+#									#
+#	This handler should be the first code executed upon taking a	#
+#	"Line F Emulator" exception in an operating system integrating	#
+#	the reduced version of 060FPSP.					#
+#									#
+# XREF ****************************************************************	#
+#	_real_fpu_disabled() - Handle "FPU disabled" exceptions		#
+#	_real_fline() - Handle all other cases (treated equally)	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains a "Line F Emulator" exception	#
+#	  stack frame.							#
+#									#
+# OUTPUT **************************************************************	#
+#	- The system stack is unchanged.				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	When a "Line F Emulator" exception occurs in a system where	#
+# "FPU Unimplemented" instructions will not be emulated, the exception	#
+# can occur because the FPU is disabled or the instruction is to be	#
+# classified as "Line F". This module determines which case exists and	#
+# calls the appropriate "callout".					#
+#									#
+#########################################################################
+
+	global		_fpsp_fline
+_fpsp_fline:
+
+# check to see if the FPU is disabled. if so, jump to the OS entry
+# point for that condition.
+	cmpi.w		0x6(%sp),&0x402c
+	beq.l		_real_fpu_disabled
+
+	bra.l		_real_fline
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_dcalc_ea(): calc correct <ea> from <ea> stacked on exception	#
+#									#
+# XREF ****************************************************************	#
+#	inc_areg() - increment an address register			#
+#	dec_areg() - decrement an address register			#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = number of bytes to adjust <ea> by				#
+#									#
+# OUTPUT **************************************************************	#
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+# "Dummy" CALCulate Effective Address:					#
+#	The stacked <ea> for FP unimplemented instructions and opclass	#
+#	two packed instructions is correct with the exception of...	#
+#									#
+#	1) -(An)   : The register is not updated regardless of size.	#
+#		     Also, for extended precision and packed, the	#
+#		     stacked <ea> value is 8 bytes too big		#
+#	2) (An)+   : The register is not updated.			#
+#	3) #<data> : The upper longword of the immediate operand is	#
+#		     stacked. b,w,l and s sizes are completely stacked;	#
+#		     d,x, and p are not.				#
+#									#
+#########################################################################
+
+	global		_dcalc_ea
+_dcalc_ea:
+	mov.l		%d0, %a0		# move # bytes to %a0
+
+	mov.b		1+EXC_OPWORD(%a6), %d0	# fetch opcode word
+	mov.l		%d0, %d1		# make a copy
+
+	andi.w		&0x38, %d0		# extract mode field
+	andi.l		&0x7, %d1		# extract reg  field
+
+	cmpi.b		%d0,&0x18		# is mode (An)+ ?
+	beq.b		dcea_pi			# yes
+
+	cmpi.b		%d0,&0x20		# is mode -(An) ?
+	beq.b		dcea_pd			# yes
+
+	or.w		%d1,%d0			# concat mode,reg
+	cmpi.b		%d0,&0x3c		# is mode #<data>?
+
+	beq.b		dcea_imm		# yes
+
+	mov.l		EXC_EA(%a6),%a0		# return <ea>
+	rts
+
+# need to set immediate data flag here since we'll need to do
+# an imem_read to fetch this later.
+dcea_imm:
+	mov.b		&immed_flg,SPCOND_FLG(%a6)
+	lea		([USER_FPIAR,%a6],0x4),%a0 # no; return <ea>
+	rts
+
+# here, the <ea> is stacked correctly. however, we must update the
+# address register...
+dcea_pi:
+	mov.l		%a0,%d0			# pass amt to inc by
+	bsr.l		inc_areg		# inc addr register
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	rts
+
+# the <ea> is stacked correctly for all but extended and packed, for
+# which the <ea> is 8 bytes too large.
+# it would make no sense to have a pre-decrement to a7 in supervisor
+# mode so we don't even worry about this tricky case here : )
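+# (e.g., for an extended-precision -(An) operand, An is decremented by
+# 0xc here and the stacked <ea>, which is 8 bytes too high, is then
+# corrected below by subtracting 8.)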
+dcea_pd:
+	mov.l		%a0,%d0			# pass amt to dec by
+	bsr.l		dec_areg		# dec addr register
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+
+	cmpi.b		%d0,&0xc		# is opsize ext or packed?
+	beq.b		dcea_pd2		# yes
+	rts
+dcea_pd2:
+	sub.l		&0x8,%a0		# correct <ea>
+	mov.l		%a0,EXC_EA(%a6)		# put correct <ea> on stack
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_calc_ea_fout(): calculate correct stacked <ea> for extended	#
+#			 and packed data opclass 3 operations.		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = return correct effective address				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	For opclass 3 extended and packed data operations, the <ea>	#
+# stacked for the exception is incorrect for -(an) and (an)+ addressing	#
+# modes. Also, while we're at it, the address register itself must get	#
+# updated.								#
+#	So, for -(an), we must subtract 8 off of the stacked <ea> value	#
+# and return that value as the correct <ea> and store that value in An.	#
+# For (an)+, the stacked <ea> is correct but we must adjust An by +12.	#
+#									#
+#########################################################################
+
+# This calc_ea is currently used to retrieve the correct <ea>
+# for fmove outs of type extended and packed.
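+# (e.g., for "fmove.x fp0,(an)+", the stacked <ea> is already correct
+# and only the proper An image gets 0xc added to it below.)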
+	global		_calc_ea_fout
+_calc_ea_fout:
+	mov.b		1+EXC_OPWORD(%a6),%d0	# fetch opcode word
+	mov.l		%d0,%d1			# make a copy
+
+	andi.w		&0x38,%d0		# extract mode field
+	andi.l		&0x7,%d1		# extract reg  field
+
+	cmpi.b		%d0,&0x18		# is mode (An)+ ?
+	beq.b		ceaf_pi			# yes
+
+	cmpi.b		%d0,&0x20		# is mode -(An) ?
+	beq.w		ceaf_pd			# yes
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	rts
+
+# (An)+ : extended and packed fmove out
+#	: stacked <ea> is correct
+#	: "An" not updated
+ceaf_pi:
+	mov.w		(tbl_ceaf_pi.b,%pc,%d1.w*2),%d1
+	mov.l		EXC_EA(%a6),%a0
+	jmp		(tbl_ceaf_pi.b,%pc,%d1.w*1)
+
+	swbeg		&0x8
+tbl_ceaf_pi:
+	short		ceaf_pi0 - tbl_ceaf_pi
+	short		ceaf_pi1 - tbl_ceaf_pi
+	short		ceaf_pi2 - tbl_ceaf_pi
+	short		ceaf_pi3 - tbl_ceaf_pi
+	short		ceaf_pi4 - tbl_ceaf_pi
+	short		ceaf_pi5 - tbl_ceaf_pi
+	short		ceaf_pi6 - tbl_ceaf_pi
+	short		ceaf_pi7 - tbl_ceaf_pi
+
+ceaf_pi0:
+	addi.l		&0xc,EXC_DREGS+0x8(%a6)
+	rts
+ceaf_pi1:
+	addi.l		&0xc,EXC_DREGS+0xc(%a6)
+	rts
+ceaf_pi2:
+	add.l		&0xc,%a2
+	rts
+ceaf_pi3:
+	add.l		&0xc,%a3
+	rts
+ceaf_pi4:
+	add.l		&0xc,%a4
+	rts
+ceaf_pi5:
+	add.l		&0xc,%a5
+	rts
+ceaf_pi6:
+	addi.l		&0xc,EXC_A6(%a6)
+	rts
+ceaf_pi7:
+	mov.b		&mia7_flg,SPCOND_FLG(%a6)
+	addi.l		&0xc,EXC_A7(%a6)
+	rts
+
+# -(An) : extended and packed fmove out
+#	: stacked <ea> = actual <ea> + 8
+#	: "An" not updated
+ceaf_pd:
+	mov.w		(tbl_ceaf_pd.b,%pc,%d1.w*2),%d1
+	mov.l		EXC_EA(%a6),%a0
+	sub.l		&0x8,%a0
+	sub.l		&0x8,EXC_EA(%a6)
+	jmp		(tbl_ceaf_pd.b,%pc,%d1.w*1)
+
+	swbeg		&0x8
+tbl_ceaf_pd:
+	short		ceaf_pd0 - tbl_ceaf_pd
+	short		ceaf_pd1 - tbl_ceaf_pd
+	short		ceaf_pd2 - tbl_ceaf_pd
+	short		ceaf_pd3 - tbl_ceaf_pd
+	short		ceaf_pd4 - tbl_ceaf_pd
+	short		ceaf_pd5 - tbl_ceaf_pd
+	short		ceaf_pd6 - tbl_ceaf_pd
+	short		ceaf_pd7 - tbl_ceaf_pd
+
+ceaf_pd0:
+	mov.l		%a0,EXC_DREGS+0x8(%a6)
+	rts
+ceaf_pd1:
+	mov.l		%a0,EXC_DREGS+0xc(%a6)
+	rts
+ceaf_pd2:
+	mov.l		%a0,%a2
+	rts
+ceaf_pd3:
+	mov.l		%a0,%a3
+	rts
+ceaf_pd4:
+	mov.l		%a0,%a4
+	rts
+ceaf_pd5:
+	mov.l		%a0,%a5
+	rts
+ceaf_pd6:
+	mov.l		%a0,EXC_A6(%a6)
+	rts
+ceaf_pd7:
+	mov.l		%a0,EXC_A7(%a6)
+	mov.b		&mda7_flg,SPCOND_FLG(%a6)
+	rts
+
+#
+# This table holds the offsets of the emulation routines for each individual
+# math operation relative to the address of this table. Included are
+# routines like fadd/fmul/fabs. The transcendentals ARE NOT. This is because
+# this table is for the version of the 060FPSP without transcendentals.
+# The location within the table is determined by the extension bits of the
+# operation longword.
+#
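+# for example, the extension bits for "fadd" are 0x22, so entry 0x22
+# below holds the offset of the fadd emulation routine.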
+
+	swbeg		&109
+tbl_unsupp:
+	long		fin		- tbl_unsupp	# 00: fmove
+	long		fint		- tbl_unsupp	# 01: fint
+	long		tbl_unsupp	- tbl_unsupp	# 02: fsinh
+	long		fintrz		- tbl_unsupp	# 03: fintrz
+	long		fsqrt		- tbl_unsupp	# 04: fsqrt
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp	# 06: flognp1
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp	# 08: fetoxm1
+	long		tbl_unsupp	- tbl_unsupp	# 09: ftanh
+	long		tbl_unsupp	- tbl_unsupp	# 0a: fatan
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp	# 0c: fasin
+	long		tbl_unsupp	- tbl_unsupp	# 0d: fatanh
+	long		tbl_unsupp	- tbl_unsupp	# 0e: fsin
+	long		tbl_unsupp	- tbl_unsupp	# 0f: ftan
+	long		tbl_unsupp	- tbl_unsupp	# 10: fetox
+	long		tbl_unsupp	- tbl_unsupp	# 11: ftwotox
+	long		tbl_unsupp	- tbl_unsupp	# 12: ftentox
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp	# 14: flogn
+	long		tbl_unsupp	- tbl_unsupp	# 15: flog10
+	long		tbl_unsupp	- tbl_unsupp	# 16: flog2
+	long		tbl_unsupp	- tbl_unsupp
+	long		fabs		- tbl_unsupp	# 18: fabs
+	long		tbl_unsupp	- tbl_unsupp	# 19: fcosh
+	long		fneg		- tbl_unsupp	# 1a: fneg
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp	# 1c: facos
+	long		tbl_unsupp	- tbl_unsupp	# 1d: fcos
+	long		tbl_unsupp	- tbl_unsupp	# 1e: fgetexp
+	long		tbl_unsupp	- tbl_unsupp	# 1f: fgetman
+	long		fdiv		- tbl_unsupp	# 20: fdiv
+	long		tbl_unsupp	- tbl_unsupp	# 21: fmod
+	long		fadd		- tbl_unsupp	# 22: fadd
+	long		fmul		- tbl_unsupp	# 23: fmul
+	long		fsgldiv		- tbl_unsupp	# 24: fsgldiv
+	long		tbl_unsupp	- tbl_unsupp	# 25: frem
+	long		tbl_unsupp	- tbl_unsupp	# 26: fscale
+	long		fsglmul		- tbl_unsupp	# 27: fsglmul
+	long		fsub		- tbl_unsupp	# 28: fsub
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp	# 30: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 31: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 32: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 33: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 34: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 35: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 36: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 37: fsincos
+	long		fcmp		- tbl_unsupp	# 38: fcmp
+	long		tbl_unsupp	- tbl_unsupp
+	long		ftst		- tbl_unsupp	# 3a: ftst
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsin		- tbl_unsupp	# 40: fsmove
+	long		fssqrt		- tbl_unsupp	# 41: fssqrt
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdin		- tbl_unsupp	# 44: fdmove
+	long		fdsqrt		- tbl_unsupp	# 45: fdsqrt
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsabs		- tbl_unsupp	# 58: fsabs
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsneg		- tbl_unsupp	# 5a: fsneg
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdabs		- tbl_unsupp	# 5c: fdabs
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdneg		- tbl_unsupp	# 5e: fdneg
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsdiv		- tbl_unsupp	# 60: fsdiv
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsadd		- tbl_unsupp	# 62: fsadd
+	long		fsmul		- tbl_unsupp	# 63: fsmul
+	long		fddiv		- tbl_unsupp	# 64: fddiv
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdadd		- tbl_unsupp	# 66: fdadd
+	long		fdmul		- tbl_unsupp	# 67: fdmul
+	long		fssub		- tbl_unsupp	# 68: fssub
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdsub		- tbl_unsupp	# 6c: fdsub
+
+#################################################
+# Add this here so non-fp modules can compile.
+# (smovcr is called from fpsp_inex.)
+	global		smovcr
+smovcr:
+	bra.b		smovcr
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fmovm_dynamic(): emulate "fmovm" dynamic instruction		#
+#									#
+# XREF ****************************************************************	#
+#	fetch_dreg() - fetch data register				#
+#	{i,d,}mem_read() - fetch data from memory			#
+#	_mem_write() - write data to memory				#
+#	iea_iacc() - instruction memory access error occurred		#
+#	iea_dacc() - data memory access error occurred			#
+#	restore() - restore An index regs if access error occurred	#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If instr is "fmovm Dn,-(A7)" from supervisor mode,		#
+#		d0 = size of dump					#
+#		d1 = Dn							#
+#	Else if instruction access error,				#
+#		d0 = FSLW						#
+#	Else if data access error,					#
+#		d0 = FSLW						#
+#		a0 = address of fault					#
+#	Else								#
+#		none.							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The effective address must be calculated since this is entered	#
+# from an "Unimplemented Effective Address" exception handler. So, we	#
+# have our own fcalc_ea() routine here. If an access error is flagged	#
+# by a _{i,d,}mem_read() call, we must exit through the special		#
+# handler.								#
+#	The data register is determined and its value loaded to get the	#
+# string of FP registers affected. This value is used as an index into	#
+# a lookup table such that we can determine the number of bytes		#
+# involved.								#
+#	If the instruction is "fmovm.x <ea>,Dn", a _mem_read() is used	#
+# to read in all FP values. Again, _mem_read() may fail and require a	#
+# special exit.								#
+#	If the instruction is "fmovm.x DN,<ea>", a _mem_write() is used	#
+# to write all FP values. _mem_write() may also fail.			#
+#	If the instruction is "fmovm.x DN,-(a7)" from supervisor mode,	#
+# then we return the size of the dump and the string to the caller	#
+# so that the move can occur outside of this routine. This special	#
+# case is required so that moves to the system stack are handled	#
+# correctly.								#
+#									#
+# DYNAMIC:								#
+#	fmovm.x	dn, <ea>						#
+#	fmovm.x	<ea>, dn						#
+#									#
+#	      <WORD 1>		      <WORD2>				#
+#	1111 0010 00 |<ea>|	11@& 1000 0$$$ 0000			#
+#									#
+#	& = (0): predecrement addressing mode				#
+#	    (1): postincrement or control addressing mode		#
+#	@ = (0): move listed regs from memory to the FPU		#
+#	    (1): move listed regs from the FPU to memory		#
+#	$$$    : index of data register holding reg select mask		#
+#									#
+# NOTES:								#
+#	If the data register holds a zero, then the			#
+#	instruction is a nop.						#
+#									#
+#########################################################################
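+# example: per the diagram above, "fmovm.x d0,-(a1)" should encode as
+# 0xf221,0xe800 (@=1: regs to memory, &=0: predecrement, $$$=000: d0).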
+
+	global		fmovm_dynamic
+fmovm_dynamic:
+
+# extract the data register in which the bit string resides...
+	mov.b		1+EXC_EXTWORD(%a6),%d1	# fetch extword
+	andi.w		&0x70,%d1		# extract reg bits
+	lsr.b		&0x4,%d1		# shift into lo bits
+
+# fetch the bit string into d0...
+	bsr.l		fetch_dreg		# fetch reg string
+
+	andi.l		&0x000000ff,%d0		# keep only lo byte
+
+	mov.l		%d0,-(%sp)		# save strg
+	mov.b		(tbl_fmovm_size.w,%pc,%d0),%d0
+	mov.l		%d0,-(%sp)		# save size
+	bsr.l		fmovm_calc_ea		# calculate <ea>
+	mov.l		(%sp)+,%d0		# restore size
+	mov.l		(%sp)+,%d1		# restore strg
+
+# if the bit string is a zero, then the operation is a no-op
+# but, make sure that we've calculated ea and advanced the opword pointer
+	beq.w		fmovm_data_done
+
+# separate move ins from move outs...
+	btst		&0x5,EXC_EXTWORD(%a6)	# is it a move in or out?
+	beq.w		fmovm_data_in		# it's a move in
+
+#############
+# MOVE OUT: #
+#############
+fmovm_data_out:
+	btst		&0x4,EXC_EXTWORD(%a6)	# control or predecrement?
+	bne.w		fmovm_out_ctrl		# control
+
+############################
+fmovm_out_predec:
+# for predecrement mode, the bit string is the opposite of both control
+# operations and postincrement mode. (bit7 = FP7 ... bit0 = FP0)
+# here, we convert it to be just like the others...
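+# (e.g., a predecrement string of 0x01, meaning FP0 only, converts to
+# 0x80.)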
+	mov.b		(tbl_fmovm_convert.w,%pc,%d1.w*1),%d1
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode?
+	beq.b		fmovm_out_ctrl		# user
+
+fmovm_out_predec_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
+	bne.b		fmovm_out_ctrl
+
+# the operation was, unfortunately, an "fmovm.x dn,-(sp)"
+# called from supervisor mode.
+# we're also passing "size" and "strg" back to the calling routine
+	rts
+
+############################
+fmovm_out_ctrl:
+	mov.l		%a0,%a1			# move <ea> to a1
+
+	sub.l		%d0,%sp			# subtract size of dump
+	lea		(%sp),%a0
+
+	tst.b		%d1			# should FP0 be moved?
+	bpl.b		fmovm_out_ctrl_fp1	# no
+
+	mov.l		0x0+EXC_FP0(%a6),(%a0)+	# yes
+	mov.l		0x4+EXC_FP0(%a6),(%a0)+
+	mov.l		0x8+EXC_FP0(%a6),(%a0)+
+
+fmovm_out_ctrl_fp1:
+	lsl.b		&0x1,%d1		# should FP1 be moved?
+	bpl.b		fmovm_out_ctrl_fp2	# no
+
+	mov.l		0x0+EXC_FP1(%a6),(%a0)+	# yes
+	mov.l		0x4+EXC_FP1(%a6),(%a0)+
+	mov.l		0x8+EXC_FP1(%a6),(%a0)+
+
+fmovm_out_ctrl_fp2:
+	lsl.b		&0x1,%d1		# should FP2 be moved?
+	bpl.b		fmovm_out_ctrl_fp3	# no
+
+	fmovm.x		&0x20,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp3:
+	lsl.b		&0x1,%d1		# should FP3 be moved?
+	bpl.b		fmovm_out_ctrl_fp4	# no
+
+	fmovm.x		&0x10,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp4:
+	lsl.b		&0x1,%d1		# should FP4 be moved?
+	bpl.b		fmovm_out_ctrl_fp5	# no
+
+	fmovm.x		&0x08,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp5:
+	lsl.b		&0x1,%d1		# should FP5 be moved?
+	bpl.b		fmovm_out_ctrl_fp6	# no
+
+	fmovm.x		&0x04,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp6:
+	lsl.b		&0x1,%d1		# should FP6 be moved?
+	bpl.b		fmovm_out_ctrl_fp7	# no
+
+	fmovm.x		&0x02,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp7:
+	lsl.b		&0x1,%d1		# should FP7 be moved?
+	bpl.b		fmovm_out_ctrl_done	# no
+
+	fmovm.x		&0x01,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_done:
+	mov.l		%a1,L_SCR1(%a6)
+
+	lea		(%sp),%a0		# pass: supervisor src
+	mov.l		%d0,-(%sp)		# save size
+	bsr.l		_dmem_write		# copy data to user mem
+
+	mov.l		(%sp)+,%d0
+	add.l		%d0,%sp			# clear fpreg data from stack
+
+	tst.l		%d1			# did dstore err?
+	bne.w		fmovm_out_err		# yes
+
+	rts
+
+############
+# MOVE IN: #
+############
+fmovm_data_in:
+	mov.l		%a0,L_SCR1(%a6)
+
+	sub.l		%d0,%sp			# make room for fpregs
+	lea		(%sp),%a1
+
+	mov.l		%d1,-(%sp)		# save bit string for later
+	mov.l		%d0,-(%sp)		# save # of bytes
+
+	bsr.l		_dmem_read		# copy data from user mem
+
+	mov.l		(%sp)+,%d0		# retrieve # of bytes
+
+	tst.l		%d1			# did dfetch fail?
+	bne.w		fmovm_in_err		# yes
+
+	mov.l		(%sp)+,%d1		# load bit string
+
+	lea		(%sp),%a0		# addr of stack
+
+	tst.b		%d1			# should FP0 be moved?
+	bpl.b		fmovm_data_in_fp1	# no
+
+	mov.l		(%a0)+,0x0+EXC_FP0(%a6)	# yes
+	mov.l		(%a0)+,0x4+EXC_FP0(%a6)
+	mov.l		(%a0)+,0x8+EXC_FP0(%a6)
+
+fmovm_data_in_fp1:
+	lsl.b		&0x1,%d1		# should FP1 be moved?
+	bpl.b		fmovm_data_in_fp2	# no
+
+	mov.l		(%a0)+,0x0+EXC_FP1(%a6)	# yes
+	mov.l		(%a0)+,0x4+EXC_FP1(%a6)
+	mov.l		(%a0)+,0x8+EXC_FP1(%a6)
+
+fmovm_data_in_fp2:
+	lsl.b		&0x1,%d1		# should FP2 be moved?
+	bpl.b		fmovm_data_in_fp3	# no
+
+	fmovm.x		(%a0)+,&0x20		# yes
+
+fmovm_data_in_fp3:
+	lsl.b		&0x1,%d1		# should FP3 be moved?
+	bpl.b		fmovm_data_in_fp4	# no
+
+	fmovm.x		(%a0)+,&0x10		# yes
+
+fmovm_data_in_fp4:
+	lsl.b		&0x1,%d1		# should FP4 be moved?
+	bpl.b		fmovm_data_in_fp5	# no
+
+	fmovm.x		(%a0)+,&0x08		# yes
+
+fmovm_data_in_fp5:
+	lsl.b		&0x1,%d1		# should FP5 be moved?
+	bpl.b		fmovm_data_in_fp6	# no
+
+	fmovm.x		(%a0)+,&0x04		# yes
+
+fmovm_data_in_fp6:
+	lsl.b		&0x1,%d1		# should FP6 be moved?
+	bpl.b		fmovm_data_in_fp7	# no
+
+	fmovm.x		(%a0)+,&0x02		# yes
+
+fmovm_data_in_fp7:
+	lsl.b		&0x1,%d1		# should FP7 be moved?
+	bpl.b		fmovm_data_in_done	# no
+
+	fmovm.x		(%a0)+,&0x01		# yes
+
+fmovm_data_in_done:
+	add.l		%d0,%sp			# remove fpregs from stack
+	rts
+
+#####################################
+
+fmovm_data_done:
+	rts
+
+##############################################################################
+
+#
+# table indexed by the operation's bit string that gives the number
+# of bytes that will be moved.
+#
+# number of bytes = (# of 1's in bit string) * 12(bytes/fpreg)
+#
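+# e.g., a bit string of 0x15 has three bits set, so its entry below is
+# 3 * 12 = 0x24 bytes.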
+tbl_fmovm_size:
+	byte	0x00,0x0c,0x0c,0x18,0x0c,0x18,0x18,0x24
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x3c,0x48,0x48,0x54,0x48,0x54,0x54,0x60
+
+#
+# table to convert a pre-decrement bit string into a post-increment
+# or control bit string.
+# ex:	0x00	==>	0x00
+#	0x01	==>	0x80
+#	0x02	==>	0x40
+#		.
+#		.
+#	0xfd	==>	0xbf
+#	0xfe	==>	0x7f
+#	0xff	==>	0xff
+#
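+# (each entry is simply its index with the bit order reversed.)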
+tbl_fmovm_convert:
+	byte	0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0
+	byte	0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0
+	byte	0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8
+	byte	0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8
+	byte	0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4
+	byte	0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4
+	byte	0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec
+	byte	0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc
+	byte	0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2
+	byte	0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2
+	byte	0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea
+	byte	0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa
+	byte	0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6
+	byte	0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6
+	byte	0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee
+	byte	0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe
+	byte	0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1
+	byte	0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1
+	byte	0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9
+	byte	0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9
+	byte	0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5
+	byte	0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5
+	byte	0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed
+	byte	0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd
+	byte	0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3
+	byte	0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3
+	byte	0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb
+	byte	0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb
+	byte	0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7
+	byte	0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7
+	byte	0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef
+	byte	0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
+
+	global		fmovm_calc_ea
+###############################################
+# fmovm_calc_ea: calculate effective address  #
+###############################################
+fmovm_calc_ea:
+	mov.l		%d0,%a0			# move # bytes to a0
+
+# currently, MODE and REG are taken from the EXC_OPWORD. this could be
+# easily changed if they were inputs passed in registers.
+	mov.w		EXC_OPWORD(%a6),%d0	# fetch opcode word
+	mov.w		%d0,%d1			# make a copy
+
+	andi.w		&0x3f,%d0		# extract mode,reg fields
+	andi.l		&0x7,%d1		# extract reg  field
+
+# jump to the corresponding function for each {MODE,REG} pair.
+	mov.w		(tbl_fea_mode.b,%pc,%d0.w*2),%d0 # fetch jmp distance
+	jmp		(tbl_fea_mode.b,%pc,%d0.w*1) # jmp to correct ea mode
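+# (e.g., mode field 010 with reg 3 forms index 0x13, which dispatches
+# to faddr_ind_a3 below.)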
+
+	swbeg		&64
+tbl_fea_mode:
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+
+	short		faddr_ind_a0	-	tbl_fea_mode
+	short		faddr_ind_a1	-	tbl_fea_mode
+	short		faddr_ind_a2	-	tbl_fea_mode
+	short		faddr_ind_a3	-	tbl_fea_mode
+	short		faddr_ind_a4	-	tbl_fea_mode
+	short		faddr_ind_a5	-	tbl_fea_mode
+	short		faddr_ind_a6	-	tbl_fea_mode
+	short		faddr_ind_a7	-	tbl_fea_mode
+
+	short		faddr_ind_p_a0	-	tbl_fea_mode
+	short		faddr_ind_p_a1	-	tbl_fea_mode
+	short		faddr_ind_p_a2	-	tbl_fea_mode
+	short		faddr_ind_p_a3	-	tbl_fea_mode
+	short		faddr_ind_p_a4	-	tbl_fea_mode
+	short		faddr_ind_p_a5	-	tbl_fea_mode
+	short		faddr_ind_p_a6	-	tbl_fea_mode
+	short		faddr_ind_p_a7	-	tbl_fea_mode
+
+	short		faddr_ind_m_a0	-	tbl_fea_mode
+	short		faddr_ind_m_a1	-	tbl_fea_mode
+	short		faddr_ind_m_a2	-	tbl_fea_mode
+	short		faddr_ind_m_a3	-	tbl_fea_mode
+	short		faddr_ind_m_a4	-	tbl_fea_mode
+	short		faddr_ind_m_a5	-	tbl_fea_mode
+	short		faddr_ind_m_a6	-	tbl_fea_mode
+	short		faddr_ind_m_a7	-	tbl_fea_mode
+
+	short		faddr_ind_disp_a0	-	tbl_fea_mode
+	short		faddr_ind_disp_a1	-	tbl_fea_mode
+	short		faddr_ind_disp_a2	-	tbl_fea_mode
+	short		faddr_ind_disp_a3	-	tbl_fea_mode
+	short		faddr_ind_disp_a4	-	tbl_fea_mode
+	short		faddr_ind_disp_a5	-	tbl_fea_mode
+	short		faddr_ind_disp_a6	-	tbl_fea_mode
+	short		faddr_ind_disp_a7	-	tbl_fea_mode
+
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+
+	short		fabs_short	-	tbl_fea_mode
+	short		fabs_long	-	tbl_fea_mode
+	short		fpc_ind		-	tbl_fea_mode
+	short		fpc_ind_ext	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+
+###################################
+# Address register indirect: (An) #
+###################################
+faddr_ind_a0:
+	mov.l		EXC_DREGS+0x8(%a6),%a0	# Get current a0
+	rts
+
+faddr_ind_a1:
+	mov.l		EXC_DREGS+0xc(%a6),%a0	# Get current a1
+	rts
+
+faddr_ind_a2:
+	mov.l		%a2,%a0			# Get current a2
+	rts
+
+faddr_ind_a3:
+	mov.l		%a3,%a0			# Get current a3
+	rts
+
+faddr_ind_a4:
+	mov.l		%a4,%a0			# Get current a4
+	rts
+
+faddr_ind_a5:
+	mov.l		%a5,%a0			# Get current a5
+	rts
+
+faddr_ind_a6:
+	mov.l		(%a6),%a0		# Get current a6
+	rts
+
+faddr_ind_a7:
+	mov.l		EXC_A7(%a6),%a0		# Get current a7
+	rts
+
+#####################################################
+# Address register indirect w/ postincrement: (An)+ #
+#####################################################
+faddr_ind_p_a0:
+	mov.l		EXC_DREGS+0x8(%a6),%d0	# Get current a0
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,EXC_DREGS+0x8(%a6)	# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a1:
+	mov.l		EXC_DREGS+0xc(%a6),%d0	# Get current a1
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,EXC_DREGS+0xc(%a6)	# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a2:
+	mov.l		%a2,%d0			# Get current a2
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a2			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a3:
+	mov.l		%a3,%d0			# Get current a3
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a3			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a4:
+	mov.l		%a4,%d0			# Get current a4
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a4			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a5:
+	mov.l		%a5,%d0			# Get current a5
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a5			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a6:
+	mov.l		(%a6),%d0		# Get current a6
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,(%a6)		# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a7:
+	mov.b		&mia7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+	mov.l		EXC_A7(%a6),%d0		# Get current a7
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,EXC_A7(%a6)		# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+####################################################
+# Address register indirect w/ predecrement: -(An) #
+####################################################
+faddr_ind_m_a0:
+	mov.l		EXC_DREGS+0x8(%a6),%d0	# Get current a0
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_DREGS+0x8(%a6)	# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a1:
+	mov.l		EXC_DREGS+0xc(%a6),%d0	# Get current a1
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_DREGS+0xc(%a6)	# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a2:
+	mov.l		%a2,%d0			# Get current a2
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a2			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a3:
+	mov.l		%a3,%d0			# Get current a3
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a3			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a4:
+	mov.l		%a4,%d0			# Get current a4
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a4			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a5:
+	mov.l		%a5,%d0			# Get current a5
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a5			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a6:
+	mov.l		(%a6),%d0		# Get current a6
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,(%a6)		# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a7:
+	mov.b		&mda7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+	mov.l		EXC_A7(%a6),%d0		# Get current a7
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A7(%a6)		# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+########################################################
+# Address register indirect w/ displacement: (d16, An) #
+########################################################
+faddr_ind_disp_a0:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_DREGS+0x8(%a6),%a0	# a0 + d16
+	rts
+
+faddr_ind_disp_a1:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_DREGS+0xc(%a6),%a0	# a1 + d16
+	rts
+
+faddr_ind_disp_a2:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a2,%a0			# a2 + d16
+	rts
+
+faddr_ind_disp_a3:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a3,%a0			# a3 + d16
+	rts
+
+faddr_ind_disp_a4:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a4,%a0			# a4 + d16
+	rts
+
+faddr_ind_disp_a5:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a5,%a0			# a5 + d16
+	rts
+
+faddr_ind_disp_a6:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		(%a6),%a0		# a6 + d16
+	rts
+
+faddr_ind_disp_a7:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_A7(%a6),%a0		# a7 + d16
+	rts
+
+########################################################################
+# Address register indirect w/ index(8-bit displacement): (d8, An, Xn) #
+#    "       "         "    w/   "  (base displacement): (bd, An, Xn)  #
+# Memory indirect postindexed: ([bd, An], Xn, od)		       #
+# Memory indirect preindexed: ([bd, An, Xn], od)		       #
+########################################################################
+faddr_ind_ext:
+	addq.l		&0x8,%d1
+	bsr.l		fetch_dreg		# fetch base areg
+	mov.l		%d0,-(%sp)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch extword in d0
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		(%sp)+,%a0
+
+	btst		&0x8,%d0
+	bne.w		fcalc_mem_ind
+
+	mov.l		%d0,L_SCR1(%a6)		# hold opword
+
+	mov.l		%d0,%d1
+	rol.w		&0x4,%d1
+	andi.w		&0xf,%d1		# extract index regno
+
+# count on fetch_dreg() not to alter a0...
+	bsr.l		fetch_dreg		# fetch index
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		L_SCR1(%a6),%d2		# fetch opword
+
+	btst		&0xb,%d2		# is it word or long?
+	bne.b		faii8_long
+	ext.l		%d0			# sign extend word index
+faii8_long:
+	mov.l		%d2,%d1
+	rol.w		&0x7,%d1
+	andi.l		&0x3,%d1		# extract scale value
+
+	lsl.l		%d1,%d0			# shift index by scale
+
+	extb.l		%d2			# sign extend displacement
+	add.l		%d2,%d0			# index + disp
+	add.l		%d0,%a0			# An + (index + disp)
+
+	mov.l		(%sp)+,%d2		# restore old d2
+	rts
+
+###########################
+# Absolute short: (XXX).W #
+###########################
+fabs_short:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch short address
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# return <ea> in a0
+	rts
+
+##########################
+# Absolute long: (XXX).L #
+##########################
+fabs_long:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch long address
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,%a0			# return <ea> in a0
+	rts
+
+#######################################################
+# Program counter indirect w/ displacement: (d16, PC) #
+#######################################################
+fpc_ind:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch word displacement
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_EXTWPTR(%a6),%a0	# pc + d16
+
+# _imem_read_word() increased the extwptr by 2. need to adjust here.
+	subq.l		&0x2,%a0		# adjust <ea>
+	rts
+
+##########################################################
+# PC indirect w/ index(8-bit displacement): (d8, PC, An) #
+# "     "     w/   "  (base displacement): (bd, PC, An)  #
+# PC memory indirect postindexed: ([bd, PC], Xn, od)     #
+# PC memory indirect preindexed: ([bd, PC, Xn], od)      #
+##########################################################
+fpc_ind_ext:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch ext word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# put base in a0
+	subq.l		&0x2,%a0		# adjust base
+
+	btst		&0x8,%d0		# is disp only 8 bits?
+	bne.w		fcalc_mem_ind		# calc memory indirect
+
+	mov.l		%d0,L_SCR1(%a6)		# store opword
+
+	mov.l		%d0,%d1			# make extword copy
+	rol.w		&0x4,%d1		# rotate reg num into place
+	andi.w		&0xf,%d1		# extract register number
+
+# count on fetch_dreg() not to alter a0...
+	bsr.l		fetch_dreg		# fetch index
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		L_SCR1(%a6),%d2		# fetch opword
+
+	btst		&0xb,%d2		# is index word or long?
+	bne.b		fpii8_long		# long
+	ext.l		%d0			# sign extend word index
+fpii8_long:
+	mov.l		%d2,%d1
+	rol.w		&0x7,%d1		# rotate scale value into place
+	andi.l		&0x3,%d1		# extract scale value
+
+	lsl.l		%d1,%d0			# shift index by scale
+
+	extb.l		%d2			# sign extend displacement
+	add.l		%d2,%d0			# disp + index
+	add.l		%d0,%a0			# An + (index + disp)
+
+	mov.l		(%sp)+,%d2		# restore temp register
+	rts
+
+# d2 = index
+# d3 = base
+# d4 = od
+# d5 = extword
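+#
+# rough flow of the calculation below: the index may be suppressed (then
+# index = 0) and the base may be suppressed (then base = 0); bd and od may
+# each be null, word, or long. for a postindexed mode the long at
+# (base + bd) is fetched first and index + od are added afterwards; for a
+# preindexed mode the index is added before the memory fetch and od after.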
+fcalc_mem_ind:
+	btst		&0x6,%d0		# is the index suppressed?
+	beq.b		fcalc_index
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+
+	mov.l		%d0,%d5			# put extword in d5
+	mov.l		%a0,%d3			# put base in d3
+
+	clr.l		%d2			# yes, so index = 0
+	bra.b		fbase_supp_ck
+
+# index:
+fcalc_index:
+	mov.l		%d0,L_SCR1(%a6)		# save d0 (opword)
+	bfextu		%d0{&16:&4},%d1		# fetch dreg index
+	bsr.l		fetch_dreg
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+	mov.l		%d0,%d2			# put index in d2
+	mov.l		L_SCR1(%a6),%d5
+	mov.l		%a0,%d3
+
+	btst		&0xb,%d5		# is index word or long?
+	bne.b		fno_ext
+	ext.l		%d2
+
+fno_ext:
+	bfextu		%d5{&21:&2},%d0
+	lsl.l		%d0,%d2
+
+# base address (passed as parameter in d3):
+# we clear the value here if it should actually be suppressed.
+fbase_supp_ck:
+	btst		&0x7,%d5		# is the bd suppressed?
+	beq.b		fno_base_sup
+	clr.l		%d3
+
+# base displacement:
+fno_base_sup:
+	bfextu		%d5{&26:&2},%d0		# get bd size
+#	beq.l		fmovm_error		# if (size == 0) it's reserved
+
+	cmpi.b		%d0,&0x2
+	blt.b		fno_bd
+	beq.b		fget_word_bd
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	bra.b		fchk_ind
+
+fget_word_bd:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	ext.l		%d0			# sign extend bd
+
+fchk_ind:
+	add.l		%d0,%d3			# base += bd
+
+# outer displacement:
+fno_bd:
+	bfextu		%d5{&30:&2},%d0		# is od suppressed?
+	beq.w		faii_bd
+
+	cmpi.b		%d0,&0x2
+	blt.b		fnull_od
+	beq.b		fword_od
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	bra.b		fadd_them
+
+fword_od:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	ext.l		%d0			# sign extend od
+	bra.b		fadd_them
+
+fnull_od:
+	clr.l		%d0
+
+fadd_them:
+	mov.l		%d0,%d4
+
+	btst		&0x2,%d5		# pre or post indexing?
+	beq.b		fpre_indexed
+
+	mov.l		%d3,%a0
+	bsr.l		_dmem_read_long
+
+	tst.l		%d1			# did dfetch fail?
+	bne.w		fcea_err		# yes
+
+	add.l		%d2,%d0			# <ea> += index
+	add.l		%d4,%d0			# <ea> += od
+	bra.b		fdone_ea
+
+fpre_indexed:
+	add.l		%d2,%d3			# preindexing
+	mov.l		%d3,%a0
+	bsr.l		_dmem_read_long
+
+	tst.l		%d1			# did dfetch fail?
+	bne.w		fcea_err		# yes
+
+	add.l		%d4,%d0			# ea += od
+	bra.b		fdone_ea
+
+faii_bd:
+	add.l		%d2,%d3			# ea = (base + bd) + index
+	mov.l		%d3,%d0
+fdone_ea:
+	mov.l		%d0,%a0
+
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	rts
+
+#########################################################
+fcea_err:
+	mov.l		%d3,%a0
+
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	mov.w		&0x0101,%d0
+	bra.l		iea_dacc
+
+fcea_iacc:
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	bra.l		iea_iacc
+
+fmovm_out_err:
+	bsr.l		restore
+	mov.w		&0x00e1,%d0
+	bra.b		fmovm_err
+
+fmovm_in_err:
+	bsr.l		restore
+	mov.w		&0x0161,%d0
+
+fmovm_err:
+	mov.l		L_SCR1(%a6),%a0
+	bra.l		iea_dacc
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fmovm_ctrl(): emulate fmovm.l of control registers instr	#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read longword from memory			#
+#	iea_iacc() - _imem_read_long() failed; error recovery		#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If _imem_read_long() doesn't fail:				#
+#		USER_FPCR(a6)  = new FPCR value				#
+#		USER_FPSR(a6)  = new FPSR value				#
+#		USER_FPIAR(a6) = new FPIAR value			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Decode the instruction type by looking at the extension word	#
+# in order to see how many control registers to fetch from memory.	#
+# Fetch them using _imem_read_long(). If this fetch fails, exit through	#
+# the special access error exit handler iea_iacc().			#
+#									#
+# Instruction word decoding:						#
+#									#
+#	fmovem.l #<data>, {FPIAR&|FPCR&|FPSR}				#
+#									#
+#		WORD1			WORD2				#
+#	1111 0010 00 111100	100$ $$00 0000 0000			#
+#									#
+#	$$$ (100): FPCR							#
+#	    (010): FPSR							#
+#	    (001): FPIAR						#
+#	    (000): FPIAR						#
+#									#
+#########################################################################
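+# example (from the decode table above): an extension word of $9800
+# ($$$ = 110) selects FPCR and FPSR, so the fctrl_in_6 entry point below
+# fetches two longwords from the instruction stream.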
+
+	global		fmovm_ctrl
+fmovm_ctrl:
+	mov.b		EXC_EXTWORD(%a6),%d0	# fetch reg select bits
+	cmpi.b		%d0,&0x9c		# fpcr & fpsr & fpiar ?
+	beq.w		fctrl_in_7		# yes
+	cmpi.b		%d0,&0x98		# fpcr & fpsr ?
+	beq.w		fctrl_in_6		# yes
+	cmpi.b		%d0,&0x94		# fpcr & fpiar ?
+	beq.b		fctrl_in_5		# yes
+
+# fmovem.l #<data>, fpsr/fpiar
+fctrl_in_3:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPSR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to stack
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPIAR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to stack
+	rts
+
+# fmovem.l #<data>, fpcr/fpiar
+fctrl_in_5:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPCR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to stack
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPIAR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to stack
+	rts
+
+# fmovem.l #<data>, fpcr/fpsr
+fctrl_in_6:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPCR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to mem
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPSR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to mem
+	rts
+
+# fmovem.l #<data>, fpcr/fpsr/fpiar
+fctrl_in_7:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPCR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to mem
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPSR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to mem
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPIAR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to mem
+	rts
+
+##########################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	addsub_scaler2(): scale inputs to fadd/fsub such that no	#
+#			  OVFL/UNFL exceptions will result		#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize mantissa after adjusting exponent		#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SRC(a6) = fp op1(src)					#
+#	FP_DST(a6) = fp op2(dst)					#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SRC(a6) = fp op1 scaled(src)					#
+#	FP_DST(a6) = fp op2 scaled(dst)					#
+#	d0         = scale amount					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If the DST exponent is > the SRC exponent, set the DST exponent	#
+# equal to 0x3fff and scale the SRC exponent by the value that the	#
+# DST exponent was scaled by. If the SRC exponent is greater or equal,	#
+# do the opposite. Return this scale factor in d0.			#
+#	If the two exponents differ by > the number of mantissa bits	#
+# plus two, then set the smallest exponent to a very small value as a	#
+# quick shortcut.							#
+#									#
+#########################################################################
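+# worked example (assuming neither operand is a denorm and the quick-scale
+# shortcut does not apply): for dst exp = 0x4005 and src exp = 0x4001, the
+# dst exp becomes 0x3fff, the src exp becomes 0x3ffb, and the returned
+# scale factor is 0x3fff - 0x4005 = -6.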
+
+	global		addsub_scaler2
+addsub_scaler2:
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	mov.w		DST_EX(%a1),%d1
+	mov.w		%d0,FP_SCR0_EX(%a6)
+	mov.w		%d1,FP_SCR1_EX(%a6)
+
+	andi.w		&0x7fff,%d0
+	andi.w		&0x7fff,%d1
+	mov.w		%d0,L_SCR1(%a6)		# store src exponent
+	mov.w		%d1,2+L_SCR1(%a6)	# store dst exponent
+
+	cmp.w		%d0, %d1		# is src exp >= dst exp?
+	bge.l		src_exp_ge2
+
+# dst exp is >  src exp; scale dst to exp = 0x3fff
+dst_exp_gt2:
+	bsr.l		scale_to_zero_dst
+	mov.l		%d0,-(%sp)		# save scale factor
+
+	cmpi.b		STAG(%a6),&DENORM	# is src denormalized?
+	bne.b		cmpexp12
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the denorm; result is new exp
+	neg.w		%d0			# new exp = -(shft val)
+	mov.w		%d0,L_SCR1(%a6)		# insert new exp
+
+cmpexp12:
+	mov.w		2+L_SCR1(%a6),%d0
+	subi.w		&mantissalen+2,%d0	# subtract mantissalen+2 from larger exp
+
+	cmp.w		%d0,L_SCR1(%a6)		# is difference >= len(mantissa)+2?
+	bge.b		quick_scale12
+
+	mov.w		L_SCR1(%a6),%d0
+	add.w		0x2(%sp),%d0		# scale src exponent by scale factor
+	mov.w		FP_SCR0_EX(%a6),%d1
+	and.w		&0x8000,%d1
+	or.w		%d1,%d0			# concat {sgn,new exp}
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new src exponent
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+quick_scale12:
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# zero src exponent
+	bset		&0x0,1+FP_SCR0_EX(%a6)	# set exp = 1
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+# src exp is >= dst exp; scale src to exp = 0x3fff
+src_exp_ge2:
+	bsr.l		scale_to_zero_src
+	mov.l		%d0,-(%sp)		# save scale factor
+
+	cmpi.b		DTAG(%a6),&DENORM	# is dst denormalized?
+	bne.b		cmpexp22
+	lea		FP_SCR1(%a6),%a0
+	bsr.l		norm			# normalize the denorm; result is new exp
+	neg.w		%d0			# new exp = -(shft val)
+	mov.w		%d0,2+L_SCR1(%a6)	# insert new exp
+
+cmpexp22:
+	mov.w		L_SCR1(%a6),%d0
+	subi.w		&mantissalen+2,%d0	# subtract mantissalen+2 from larger exp
+
+	cmp.w		%d0,2+L_SCR1(%a6)	# is difference >= len(mantissa)+2?
+	bge.b		quick_scale22
+
+	mov.w		2+L_SCR1(%a6),%d0
+	add.w		0x2(%sp),%d0		# scale dst exponent by scale factor
+	mov.w		FP_SCR1_EX(%a6),%d1
+	andi.w		&0x8000,%d1
+	or.w		%d1,%d0			# concat {sgn,new exp}
+	mov.w		%d0,FP_SCR1_EX(%a6)	# insert new dst exponent
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+quick_scale22:
+	andi.w		&0x8000,FP_SCR1_EX(%a6)	# zero dst exponent
+	bset		&0x0,1+FP_SCR1_EX(%a6)	# set exp = 1
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+##########################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	scale_to_zero_src(): scale the exponent of extended precision	#
+#			     value at FP_SCR0(a6).			#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize the mantissa if the operand was a DENORM	#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SCR0(a6) = extended precision operand to be scaled		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR0(a6) = scaled extended precision operand			#
+#	d0	    = scale value					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Set the exponent of the input operand to 0x3fff. Save the value	#
+# of the difference between the original and new exponent. Then,	#
+# normalize the operand if it was a DENORM. Add this normalization	#
+# value to the previous value. Return the result.			#
+#									#
+#########################################################################
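+# e.g. for a normalized operand with biased exponent 0x4002, the exponent
+# field becomes {sgn,0x3fff} and the routine returns d0 = 0x3fff - 0x4002 = -3.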
+
+	global		scale_to_zero_src
+scale_to_zero_src:
+	mov.w		FP_SCR0_EX(%a6),%d1	# extract operand's {sgn,exp}
+	mov.w		%d1,%d0			# make a copy
+
+	andi.l		&0x7fff,%d1		# extract operand's exponent
+
+	andi.w		&0x8000,%d0		# extract operand's sgn
+	or.w		&0x3fff,%d0		# insert new operand's exponent(=0)
+
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert biased exponent
+
+	cmpi.b		STAG(%a6),&DENORM	# is operand a DENORM?
+	beq.b		stzs_denorm		# normalize the DENORM
+
+stzs_norm:
+	mov.l		&0x3fff,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+
+	rts
+
+stzs_denorm:
+	lea		FP_SCR0(%a6),%a0	# pass ptr to src op
+	bsr.l		norm			# normalize denorm
+	neg.l		%d0			# new exponent = -(shft val)
+	mov.l		%d0,%d1			# prepare for op_norm call
+	bra.b		stzs_norm		# finish scaling
+
+###
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	scale_sqrt(): scale the input operand exponent so a subsequent	#
+#		      fsqrt operation won't take an exception.		#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize the mantissa if the operand was a DENORM	#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SCR0(a6) = extended precision operand to be scaled		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR0(a6) = scaled extended precision operand			#
+#	d0	    = scale value					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If the input operand is a DENORM, normalize it.			#
+#	If the exponent of the input operand is even, set the exponent	#
+# to 0x3ffe and return a scale factor of "(exp-0x3ffe)/2". If the	#
+# exponent of the input operand is odd, set the exponent to 0x3fff and	#
+# return a scale factor of "(exp-0x3fff)/2".				#
+#									#
+#########################################################################
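+# e.g. for a normalized operand with biased exponent 0x4004 (even), the
+# exponent field becomes {sgn,0x3ffe} and d0 = (0x3ffe - 0x4004) >> 1 = -3;
+# for exponent 0x4005 (odd) the field becomes {sgn,0x3fff} and d0 is also -3.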
+
+	global		scale_sqrt
+scale_sqrt:
+	cmpi.b		STAG(%a6),&DENORM	# is operand a DENORM?
+	beq.b		ss_denorm		# normalize the DENORM
+
+	mov.w		FP_SCR0_EX(%a6),%d1	# extract operand's {sgn,exp}
+	andi.l		&0x7fff,%d1		# extract operand's exponent
+
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# extract operand's sgn
+
+	btst		&0x0,%d1		# is exp even or odd?
+	beq.b		ss_norm_even
+
+	ori.w		&0x3fff,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	mov.l		&0x3fff,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+ss_norm_even:
+	ori.w		&0x3ffe,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	mov.l		&0x3ffe,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+ss_denorm:
+	lea		FP_SCR0(%a6),%a0	# pass ptr to src op
+	bsr.l		norm			# normalize denorm
+
+	btst		&0x0,%d0		# is exp even or odd?
+	beq.b		ss_denorm_even
+
+	ori.w		&0x3fff,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	add.l		&0x3fff,%d0
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+ss_denorm_even:
+	ori.w		&0x3ffe,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	add.l		&0x3ffe,%d0
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+###
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	scale_to_zero_dst(): scale the exponent of extended precision	#
+#			     value at FP_SCR1(a6).			#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize the mantissa if the operand was a DENORM	#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SCR1(a6) = extended precision operand to be scaled		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR1(a6) = scaled extended precision operand			#
+#	d0	    = scale value					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Set the exponent of the input operand to 0x3fff. Save the value	#
+# of the difference between the original and new exponent. Then,	#
+# normalize the operand if it was a DENORM. Add this normalization	#
+# value to the previous value. Return the result.			#
+#									#
+#########################################################################
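+# (this routine mirrors scale_to_zero_src above but operates on the
+# destination operand: FP_SCR1 and DTAG.)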
+
+	global		scale_to_zero_dst
+scale_to_zero_dst:
+	mov.w		FP_SCR1_EX(%a6),%d1	# extract operand's {sgn,exp}
+	mov.w		%d1,%d0			# make a copy
+
+	andi.l		&0x7fff,%d1		# extract operand's exponent
+
+	andi.w		&0x8000,%d0		# extract operand's sgn
+	or.w		&0x3fff,%d0		# insert new operand's exponent(=0)
+
+	mov.w		%d0,FP_SCR1_EX(%a6)	# insert biased exponent
+
+	cmpi.b		DTAG(%a6),&DENORM	# is operand a DENORM?
+	beq.b		stzd_denorm		# normalize the DENORM
+
+stzd_norm:
+	mov.l		&0x3fff,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+	rts
+
+stzd_denorm:
+	lea		FP_SCR1(%a6),%a0	# pass ptr to dst op
+	bsr.l		norm			# normalize denorm
+	neg.l		%d0			# new exponent = -(shft val)
+	mov.l		%d0,%d1			# prepare for op_norm call
+	bra.b		stzd_norm		# finish scaling
+
+##########################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	res_qnan(): return default result w/ QNAN operand for dyadic	#
+#	res_snan(): return default result w/ SNAN operand for dyadic	#
+#	res_qnan_1op(): return dflt result w/ QNAN operand for monadic	#
+#	res_snan_1op(): return dflt result w/ SNAN operand for monadic	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SRC(a6) = pointer to extended precision src operand		#
+#	FP_DST(a6) = pointer to extended precision dst operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If either operand (but not both operands) of an operation is a	#
+# nonsignalling NAN, then that NAN is returned as the result. If both	#
+# operands are nonsignalling NANs, then the destination operand		#
+# nonsignalling NAN is returned as the result.				#
+#	If either operand to an operation is a signalling NAN (SNAN),	#
+# then, the SNAN bit is set in the FPSR EXC byte. If the SNAN trap	#
+# enable bit is set in the FPCR, then the trap is taken and the		#
+# destination is not modified. If the SNAN trap enable bit is not set,	#
+# then the SNAN is converted to a nonsignalling NAN (by setting the	#
+# SNAN bit in the operand to one), and the operation continues as	#
+# described in the preceding paragraph, for nonsignalling NANs.		#
+#	Make sure the appropriate FPSR bits are set before exiting.	#
+#									#
+#########################################################################
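+# note: the chosen NAN is loaded into fp0 by the "fmovm.x (%a0),&0x80" at
+# nan_comp below; the N condition code is also set if that NAN is negative.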
+
+	global		res_qnan
+	global		res_snan
+res_qnan:
+res_snan:
+	cmp.b		DTAG(%a6), &SNAN	# is the dst an SNAN?
+	beq.b		dst_snan2
+	cmp.b		DTAG(%a6), &QNAN	# is the dst a  QNAN?
+	beq.b		dst_qnan2
+src_nan:
+	cmp.b		STAG(%a6), &QNAN
+	beq.b		src_qnan2
+	global		res_snan_1op
+res_snan_1op:
+src_snan2:
+	bset		&0x6, FP_SRC_HI(%a6)	# set SNAN bit
+	or.l		&nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
+	lea		FP_SRC(%a6), %a0
+	bra.b		nan_comp
+	global		res_qnan_1op
+res_qnan_1op:
+src_qnan2:
+	or.l		&nan_mask, USER_FPSR(%a6)
+	lea		FP_SRC(%a6), %a0
+	bra.b		nan_comp
+dst_snan2:
+	or.l		&nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
+	bset		&0x6, FP_DST_HI(%a6)	# set SNAN bit
+	lea		FP_DST(%a6), %a0
+	bra.b		nan_comp
+dst_qnan2:
+	lea		FP_DST(%a6), %a0
+	cmp.b		STAG(%a6), &SNAN
+	bne		nan_done
+	or.l		&aiop_mask+snan_mask, USER_FPSR(%a6)
+nan_done:
+	or.l		&nan_mask, USER_FPSR(%a6)
+nan_comp:
+	btst		&0x7, FTEMP_EX(%a0)	# is NAN neg?
+	beq.b		nan_not_neg
+	or.l		&neg_mask, USER_FPSR(%a6)
+nan_not_neg:
+	fmovm.x		(%a0), &0x80
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	res_operr(): return default result during operand error		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default operand error result				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	A nonsignalling NAN is returned as the default result when	#
+# an operand error occurs for the following cases:			#
+#									#
+#	Multiply: (Infinity x Zero)					#
+#	Divide  : (Zero / Zero) || (Infinity / Infinity)		#
+#									#
+#########################################################################
+
+	global		res_operr
+res_operr:
+	or.l		&nan_mask+operr_mask+aiop_mask, USER_FPSR(%a6)
+	fmovm.x		nan_return(%pc), &0x80
+	rts
+
+nan_return:
+	long		0x7fff0000, 0xffffffff, 0xffffffff
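+# (the default result above is the extended precision nonsignalling NAN
+#  with an all-ones mantissa.)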
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_denorm(): denormalize an intermediate result			#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT *************************************************************** #
+#	a0 = points to the operand to be denormalized			#
+#		(in the internal extended format)			#
+#									#
+#	d0 = rounding precision						#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = pointer to the denormalized result				#
+#		(in the internal extended format)			#
+#									#
+#	d0 = guard,round,sticky						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the exponent underflow threshold for the given	#
+# precision, shift the mantissa bits to the right in order to raise the	#
+# exponent of the operand to the threshold value. While shifting the	#
+# mantissa bits right, maintain the value of the guard, round, and	#
+# sticky bits.								#
+# other notes:								#
+#	(1) _denorm() is called by the underflow routines		#
+#	(2) _denorm() does NOT affect the status register		#
+#									#
+#########################################################################
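+# e.g. if the selected precision's threshold exceeds the operand's exponent
+# by 3, dnrm_lp below shifts the 64-bit mantissa right by 3 places and the
+# three bits shifted out are folded into the guard, round, and sticky bits.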
+
+#
+# table of exponent threshold values for each precision
+#
+tbl_thresh:
+	short		0x0
+	short		sgl_thresh
+	short		dbl_thresh
+
+	global		_denorm
+_denorm:
+#
+# Load the exponent threshold for the precision selected and check
+# to see if (threshold - exponent) is > 65 in which case we can
+# simply calculate the sticky bit and zero the mantissa. otherwise
+# we have to call the denormalization routine.
+#
+	lsr.b		&0x2, %d0		# shift prec to lo bits
+	mov.w		(tbl_thresh.b,%pc,%d0.w*2), %d1 # load prec threshold
+	mov.w		%d1, %d0		# copy d1 into d0
+	sub.w		FTEMP_EX(%a0), %d0	# diff = threshold - exp
+	cmpi.w		%d0, &66		# is diff > 65? (mant + g,r bits)
+	bpl.b		denorm_set_stky		# yes; just calc sticky
+
+	clr.l		%d0			# clear g,r,s
+	btst		&inex2_bit, FPSR_EXCEPT(%a6) # was INEX2 set?
+	beq.b		denorm_call		# no; don't change anything
+	bset		&29, %d0		# yes; set sticky bit
+
+denorm_call:
+	bsr.l		dnrm_lp			# denormalize the number
+	rts
+
+#
+# all bits would have been shifted off during the denorm so simply
+# calculate if the sticky should be set and clear the entire mantissa.
+#
+denorm_set_stky:
+	mov.l		&0x20000000, %d0	# set sticky bit in return value
+	mov.w		%d1, FTEMP_EX(%a0)	# load exp with threshold
+	clr.l		FTEMP_HI(%a0)		# clear hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	rts
+
+#									#
+# dnrm_lp(): denormalize exponent/mantissa to the specified threshold	#
+#									#
+# INPUT:								#
+#	%a0	   : points to the operand to be denormalized		#
+#	%d0{31:29} : initial guard,round,sticky				#
+#	%d1{15:0}  : denormalization threshold				#
+# OUTPUT:								#
+#	%a0	   : points to the denormalized operand			#
+#	%d0{31:29} : final guard,round,sticky				#
+#									#
+
+# *** Local Equates *** #
+set	GRS,		L_SCR2			# g,r,s temp storage
+set	FTEMP_LO2,	L_SCR1			# FTEMP_LO copy
+
+	global		dnrm_lp
+dnrm_lp:
+
+#
+# make a copy of FTEMP_LO and place the g,r,s bits directly after it
+# in memory so as to make the bitfield extraction for denormalization easier.
+#
+	mov.l		FTEMP_LO(%a0), FTEMP_LO2(%a6) # make FTEMP_LO copy
+	mov.l		%d0, GRS(%a6)		# place g,r,s after it
+
+#
+# check to see how much less than the underflow threshold the operand
+# exponent is.
+#
+	mov.l		%d1, %d0		# copy the denorm threshold
+	sub.w		FTEMP_EX(%a0), %d1	# d1 = threshold - uns exponent
+	ble.b		dnrm_no_lp		# d1 <= 0
+	cmpi.w		%d1, &0x20		# is ( 0 <= d1 < 32) ?
+	blt.b		case_1			# yes
+	cmpi.w		%d1, &0x40		# is (32 <= d1 < 64) ?
+	blt.b		case_2			# yes
+	bra.w		case_3			# (d1 >= 64)
+
+#
+# No denormalization necessary
+#
+dnrm_no_lp:
+	mov.l		GRS(%a6), %d0		# restore original g,r,s
+	rts
+
+#
+# case (0<d1<32)
+#
+# %d0 = denorm threshold
+# %d1 = "n" = amt to shift
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
+#	\	   \		      \			 \
+#	 \	    \		       \		  \
+#	  \	     \			\		   \
+#	   \	      \			 \		    \
+#	    \	       \		  \		     \
+#	     \		\		   \		      \
+#	      \		 \		    \		       \
+#	       \	  \		     \			\
+#	<-(n)-><-(32 - n)-><------(32)-------><------(32)------->
+#	---------------------------------------------------------
+#	|0.....0| NEW_HI  |  NEW_FTEMP_LO     |grs		|
+#	---------------------------------------------------------
+#
+case_1:
+	mov.l		%d2, -(%sp)		# create temp storage
+
+	mov.w		%d0, FTEMP_EX(%a0)	# exponent = denorm threshold
+	mov.l		&32, %d0
+	sub.w		%d1, %d0		# %d0 = 32 - %d1
+
+	cmpi.w		%d1, &29		# is shft amt >= 29
+	blt.b		case1_extract		# no; no fix needed
+	mov.b		GRS(%a6), %d2
+	or.b		%d2, 3+FTEMP_LO2(%a6)
+
+case1_extract:
+	bfextu		FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_HI
+	bfextu		FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new FTEMP_LO
+	bfextu		FTEMP_LO2(%a6){%d0:&32}, %d0 # %d0 = new G,R,S
+
+	mov.l		%d2, FTEMP_HI(%a0)	# store new FTEMP_HI
+	mov.l		%d1, FTEMP_LO(%a0)	# store new FTEMP_LO
+
+	bftst		%d0{&2:&30}		# were bits shifted off?
+	beq.b		case1_sticky_clear	# no; go finish
+	bset		&rnd_stky_bit, %d0	# yes; set sticky bit
+
+case1_sticky_clear:
+	and.l		&0xe0000000, %d0	# clear all but G,R,S
+	mov.l		(%sp)+, %d2		# restore temp register
+	rts
+
+#
+# case (32<=d1<64)
+#
+# %d0 = denorm threshold
+# %d1 = "n" = amt to shift
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
+#	\	   \		      \
+#	 \	    \		       \
+#	  \	     \			-------------------
+#	   \	      --------------------		   \
+#	    -------------------		  \		    \
+#			       \	   \		     \
+#				\	    \		      \
+#				 \	     \		       \
+#	<-------(32)------><-(n)-><-(32 - n)-><------(32)------->
+#	---------------------------------------------------------
+#	|0...............0|0....0| NEW_LO     |grs		|
+#	---------------------------------------------------------
+#
+case_2:
+	mov.l		%d2, -(%sp)		# create temp storage
+
+	mov.w		%d0, FTEMP_EX(%a0)	# exponent = denorm threshold
+	subi.w		&0x20, %d1		# %d1 now between 0 and 32
+	mov.l		&0x20, %d0
+	sub.w		%d1, %d0		# %d0 = 32 - %d1
+
+# subtle step here; or in the g,r,s at the bottom of FTEMP_LO to minimize
+# the number of bits to check for the sticky detect.
+# it only plays a role in shift amounts of 61-63.
+	mov.b		GRS(%a6), %d2
+	or.b		%d2, 3+FTEMP_LO2(%a6)
+
+	bfextu		FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_LO
+	bfextu		FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new G,R,S
+
+	bftst		%d1{&2:&30}		# were any bits shifted off?
+	bne.b		case2_set_sticky	# yes; set sticky bit
+	bftst		FTEMP_LO2(%a6){%d0:&31}	# were any bits shifted off?
+	bne.b		case2_set_sticky	# yes; set sticky bit
+
+	mov.l		%d1, %d0		# move new G,R,S to %d0
+	bra.b		case2_end
+
+case2_set_sticky:
+	mov.l		%d1, %d0		# move new G,R,S to %d0
+	bset		&rnd_stky_bit, %d0	# set sticky bit
+
+case2_end:
+	clr.l		FTEMP_HI(%a0)		# store FTEMP_HI = 0
+	mov.l		%d2, FTEMP_LO(%a0)	# store FTEMP_LO
+	and.l		&0xe0000000, %d0	# clear all but G,R,S
+
+	mov.l		(%sp)+,%d2		# restore temp register
+	rts
+
+#
+# case (d1>=64)
+#
+# %d0 = denorm threshold
+# %d1 = amt to shift
+#
+case_3:
+	mov.w		%d0, FTEMP_EX(%a0)	# insert denorm threshold
+
+	cmpi.w		%d1, &65		# is shift amt > 65?
+	blt.b		case3_64		# no; it's == 64
+	beq.b		case3_65		# no; it's == 65
+
+#
+# case (d1>65)
+#
+# Shift value is > 65 and out of range. All bits are shifted off.
+# Return a zero mantissa with the sticky bit set
+#
+	clr.l		FTEMP_HI(%a0)		# clear hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	mov.l		&0x20000000, %d0	# set sticky bit
+	rts
+
+#
+# case (d1 == 64)
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-------(32)------>
+#	\		   \
+#	 \		    \
+#	  \		     \
+#	   \		      ------------------------------
+#	    -------------------------------		    \
+#					   \		     \
+#					    \		      \
+#					     \		       \
+#					      <-------(32)------>
+#	---------------------------------------------------------
+#	|0...............0|0................0|grs		|
+#	---------------------------------------------------------
+#
+case3_64:
+	mov.l		FTEMP_HI(%a0), %d0	# fetch hi(mantissa)
+	mov.l		%d0, %d1		# make a copy
+	and.l		&0xc0000000, %d0	# extract G,R
+	and.l		&0x3fffffff, %d1	# extract other bits
+
+	bra.b		case3_complete
+
+#
+# case (d1 == 65)
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-------(32)------>
+#	\		   \
+#	 \		    \
+#	  \		     \
+#	   \		      ------------------------------
+#	    --------------------------------		    \
+#					    \		     \
+#					     \		      \
+#					      \		       \
+#					       <-------(31)----->
+#	---------------------------------------------------------
+#	|0...............0|0................0|0rs		|
+#	---------------------------------------------------------
+#
+case3_65:
+	mov.l		FTEMP_HI(%a0), %d0	# fetch hi(mantissa)
+	and.l		&0x80000000, %d0	# extract R bit
+	lsr.l		&0x1, %d0		# shift high bit into R bit
+	and.l		&0x7fffffff, %d1	# extract other bits
+
+case3_complete:
+# the last operation was an "and" of the bits shifted off, so the condition
+# codes are already set; branch accordingly.
+	bne.b		case3_set_sticky	# yes; go set new sticky
+	tst.l		FTEMP_LO(%a0)		# were any bits shifted off?
+	bne.b		case3_set_sticky	# yes; go set new sticky
+	tst.b		GRS(%a6)		# were any bits shifted off?
+	bne.b		case3_set_sticky	# yes; go set new sticky
+
+#
+# no bits were shifted off, so don't set the sticky bit; only the guard
+# and round bits in %d0 may be nonzero.
+# the entire mantissa is zero.
+#
+	clr.l		FTEMP_HI(%a0)		# clear hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	rts
+
+#
+# some bits were shifted off so set the sticky bit.
+# the entire mantissa is zero.
+#
+case3_set_sticky:
+	bset		&rnd_stky_bit,%d0	# set new sticky bit
+	clr.l		FTEMP_HI(%a0)		# clear hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_round(): round result according to precision/mode		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0	  = ptr to input operand in internal extended format	#
+#	d1(hi)    = contains rounding precision:			#
+#			ext = $0000xxxx					#
+#			sgl = $0004xxxx					#
+#			dbl = $0008xxxx					#
+#	d1(lo)	  = contains rounding mode:				#
+#			RN  = $xxxx0000					#
+#			RZ  = $xxxx0001					#
+#			RM  = $xxxx0002					#
+#			RP  = $xxxx0003					#
+#	d0{31:29} = contains the g,r,s bits (extended)			#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = pointer to rounded result					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	On return the value pointed to by a0 is correctly rounded,	#
+#	a0 is preserved and the g-r-s bits in d0 are cleared.		#
+#	The result is not typed - the tag field is invalid.  The	#
+#	result is still in the internal extended format.		#
+#									#
+#	The INEX bit of USER_FPSR will be set if the rounded result was	#
+#	inexact (i.e. if any of the g-r-s bits were set).		#
+#									#
+#########################################################################
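+# e.g. d1 = $00040001 requests single rounding precision with round-to-zero;
+# d1 = $00000000 requests extended precision with round-to-nearest.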
+
+	global		_round
+_round:
+#
+# ext_grs() looks at the rounding precision and sets the appropriate
+# G,R,S bits.
+# If (G,R,S == 0) then result is exact and round is done, else set
+# the inex flag in status reg and continue.
+#
+	bsr.l		ext_grs			# extract G,R,S
+
+	tst.l		%d0			# are G,R,S zero?
+	beq.w		truncate		# yes; round is complete
+
+	or.w		&inx2a_mask, 2+USER_FPSR(%a6) # set inex2/ainex
+
+#
+# Use rounding mode as an index into a jump table for these modes.
+# All of the following assumes grs != 0.
+#
+	mov.w		(tbl_mode.b,%pc,%d1.w*2), %a1 # load jump offset
+	jmp		(tbl_mode.b,%pc,%a1)	# jmp to rnd mode handler
+
+tbl_mode:
+	short		rnd_near - tbl_mode
+	short		truncate - tbl_mode	# RZ always truncates
+	short		rnd_mnus - tbl_mode
+	short		rnd_plus - tbl_mode
+
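+# note: the directed-rounding handlers below force g,r,s to all ones before
+# jumping to the add_* routines; that way the "rs = 0" test there never
+# clears the l-bit, which is wanted only for round-to-nearest ties.
+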
+#################################################################
+#	ROUND PLUS INFINITY					#
+#								#
+#	If sign of fp number = 0 (positive), then add 1 to l.	#
+#################################################################
+rnd_plus:
+	tst.b		FTEMP_SGN(%a0)		# check for sign
+	bmi.w		truncate		# if negative then truncate
+
+	mov.l		&0xffffffff, %d0	# force g,r,s to be all f's
+	swap		%d1			# set up d1 for round prec.
+
+	cmpi.b		%d1, &s_mode		# is prec = sgl?
+	beq.w		add_sgl			# yes
+	bgt.w		add_dbl			# no; it's dbl
+	bra.w		add_ext			# no; it's ext
+
+#################################################################
+#	ROUND MINUS INFINITY					#
+#								#
+#	If sign of fp number = 1 (negative), then add 1 to l.	#
+#################################################################
+rnd_mnus:
+	tst.b		FTEMP_SGN(%a0)		# check for sign
+	bpl.w		truncate		# if positive then truncate
+
+	mov.l		&0xffffffff, %d0	# force g,r,s to be all f's
+	swap		%d1			# set up d1 for round prec.
+
+	cmpi.b		%d1, &s_mode		# is prec = sgl?
+	beq.w		add_sgl			# yes
+	bgt.w		add_dbl			# no; it's dbl
+	bra.w		add_ext			# no; it's ext
+
+#################################################################
+#	ROUND NEAREST						#
+#								#
+#	If (g=1), then add 1 to l and if (r=s=0), then clear l	#
+#	Note that this will round to even in case of a tie.	#
+#################################################################
+rnd_near:
+	asl.l		&0x1, %d0		# shift g-bit to c-bit
+	bcc.w		truncate		# if (g=0) then truncate
+
+	swap		%d1			# set up d1 for round prec.
+
+	cmpi.b		%d1, &s_mode		# is prec = sgl?
+	beq.w		add_sgl			# yes
+	bgt.w		add_dbl			# no; it's dbl
+	bra.w		add_ext			# no; it's ext
+
+# *** LOCAL EQUATES ***
+set	ad_1_sgl,	0x00000100	# constant to add 1 to l-bit in sgl prec
+set	ad_1_dbl,	0x00000800	# constant to add 1 to l-bit in dbl prec
+
+#########################
+#	ADD SINGLE	#
+#########################
+add_sgl:
+	add.l		&ad_1_sgl, FTEMP_HI(%a0)
+	bcc.b		scc_clr			# no mantissa overflow
+	roxr.w		FTEMP_HI(%a0)		# shift v-bit back in
+	roxr.w		FTEMP_HI+2(%a0)		# shift v-bit back in
+	add.w		&0x1, FTEMP_EX(%a0)	# and incr exponent
+scc_clr:
+	tst.l		%d0			# test for rs = 0
+	bne.b		sgl_done
+	and.w		&0xfe00, FTEMP_HI+2(%a0) # clear the l-bit
+sgl_done:
+	and.l		&0xffffff00, FTEMP_HI(%a0) # truncate bits beyond sgl limit
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	rts
+
+#########################
+#	ADD EXTENDED	#
+#########################
+add_ext:
+	addq.l		&1,FTEMP_LO(%a0)	# add 1 to l-bit
+	bcc.b		xcc_clr			# test for carry out
+	addq.l		&1,FTEMP_HI(%a0)	# propagate carry
+	bcc.b		xcc_clr
+	roxr.w		FTEMP_HI(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_HI+2(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_LO(%a0)
+	roxr.w		FTEMP_LO+2(%a0)
+	add.w		&0x1,FTEMP_EX(%a0)	# and inc exp
+xcc_clr:
+	tst.l		%d0			# test rs = 0
+	bne.b		add_ext_done
+	and.b		&0xfe,FTEMP_LO+3(%a0)	# clear the l bit
+add_ext_done:
+	rts
+
+#########################
+#	ADD DOUBLE	#
+#########################
+add_dbl:
+	add.l		&ad_1_dbl, FTEMP_LO(%a0) # add 1 to lsb
+	bcc.b		dcc_clr			# no carry
+	addq.l		&0x1, FTEMP_HI(%a0)	# propagate carry
+	bcc.b		dcc_clr			# no carry
+
+	roxr.w		FTEMP_HI(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_HI+2(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_LO(%a0)
+	roxr.w		FTEMP_LO+2(%a0)
+	addq.w		&0x1, FTEMP_EX(%a0)	# incr exponent
+dcc_clr:
+	tst.l		%d0			# test for rs = 0
+	bne.b		dbl_done
+	and.w		&0xf000, FTEMP_LO+2(%a0) # clear the l-bit
+
+dbl_done:
+	and.l		&0xfffff800,FTEMP_LO(%a0) # truncate bits beyond dbl limit
+	rts
+
+###########################
+# Truncate all other bits #
+###########################
+truncate:
+	swap		%d1			# select rnd prec
+
+	cmpi.b		%d1, &s_mode		# is prec sgl?
+	beq.w		sgl_done		# yes
+	bgt.b		dbl_done		# no; it's dbl
+	rts					# no; it's ext
+
+
+#
+# ext_grs(): extract guard, round and sticky bits according to
+#	     rounding precision.
+#
+# INPUT
+#	d0	   = extended precision g,r,s (in d0{31:29})
+#	d1	   = {PREC,ROUND}
+# OUTPUT
+#	d0{31:29}  = guard, round, sticky
+#
+# ext_grs() extracts the guard/round/sticky bits according to the
+# selected rounding precision. It is called only by the _round()
+# subroutine.  All registers except d0 are kept intact; d0 returns the
+# updated guard, round, and sticky bits in d0{31:29}.
+#
+# Notes: ext_grs() uses the rounding PREC in the upper word of d1, so it
+#	 swaps d1 before use and restores it afterwards. This routine is
+#	 tightly tied to _round() and is not meant to uphold standard
+#	 subroutine calling practices.
+#
+
+ext_grs:
+	swap		%d1			# have d1.w point to round precision
+	tst.b		%d1			# is rnd prec = extended?
+	bne.b		ext_grs_not_ext		# no; go handle sgl or dbl
+
+#
+# %d0 actually already holds g,r,s since _round() had it before calling
+# this function. so, as long as we don't disturb it, we are "returning" it.
+#
+ext_grs_ext:
+	swap		%d1			# yes; return to correct positions
+	rts
+
+ext_grs_not_ext:
+	movm.l		&0x3000, -(%sp)		# make some temp registers {d2/d3}
+
+	cmpi.b		%d1, &s_mode		# is rnd prec = sgl?
+	bne.b		ext_grs_dbl		# no; go handle dbl
+
+#
+# sgl:
+#	96		64	  40	32		0
+#	-----------------------------------------------------
+#	| EXP	|XXXXXXX|	  |xx	|		|grs|
+#	-----------------------------------------------------
+#			<--(24)--->nn\			   /
+#				   ee ---------------------
+#				   ww		|
+#						v
+#				   gr	   new sticky
+#
+ext_grs_sgl:
+	bfextu		FTEMP_HI(%a0){&24:&2}, %d3 # sgl prec. g-r are 2 bits right
+	mov.l		&30, %d2		# of the sgl prec. limits
+	lsl.l		%d2, %d3		# shift g-r bits to MSB of d3
+	mov.l		FTEMP_HI(%a0), %d2	# get word 2 for s-bit test
+	and.l		&0x0000003f, %d2	# s bit is the or of all other
+	bne.b		ext_grs_st_stky		# bits to the right of g-r
+	tst.l		FTEMP_LO(%a0)		# test lower mantissa
+	bne.b		ext_grs_st_stky		# if any are set, set sticky
+	tst.l		%d0			# test original g,r,s
+	bne.b		ext_grs_st_stky		# if any are set, set sticky
+	bra.b		ext_grs_end_sd		# if words 3 and 4 are clr, exit
+
+#
+# dbl:
+#	96		64		32	 11	0
+#	-----------------------------------------------------
+#	| EXP	|XXXXXXX|		|	 |xx	|grs|
+#	-----------------------------------------------------
+#						  nn\	    /
+#						  ee -------
+#						  ww	|
+#							v
+#						  gr	new sticky
+#
+ext_grs_dbl:
+	bfextu		FTEMP_LO(%a0){&21:&2}, %d3 # dbl-prec. g-r are 2 bits right
+	mov.l		&30, %d2		# of the dbl prec. limits
+	lsl.l		%d2, %d3		# shift g-r bits to the MSB of d3
+	mov.l		FTEMP_LO(%a0), %d2	# get lower mantissa  for s-bit test
+	and.l		&0x000001ff, %d2	# s bit is the or-ing of all
+	bne.b		ext_grs_st_stky		# other bits to the right of g-r
+	tst.l		%d0			# test original g,r,s
+	bne.b		ext_grs_st_stky		# if any are set, set sticky
+	bra.b		ext_grs_end_sd		# if clear, exit
+
+ext_grs_st_stky:
+	bset		&rnd_stky_bit, %d3	# set sticky bit
+ext_grs_end_sd:
+	mov.l		%d3, %d0		# return grs to d0
+
+	movm.l		(%sp)+, &0xc		# restore scratch registers {d2/d3}
+
+	swap		%d1			# restore d1 to original
+	rts
+
+#########################################################################
+# norm(): normalize the mantissa of an extended precision input. the	#
+#	  input operand should not be normalized already.		#
+#									#
+# XDEF ****************************************************************	#
+#	norm()								#
+#									#
+# XREF **************************************************************** #
+#	none								#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer fp extended precision operand to normalize		#
+#									#
+# OUTPUT ************************************************************** #
+#	d0 = number of bit positions the mantissa was shifted		#
+#	a0 = the input operand's mantissa is normalized; the exponent	#
+#	     is unchanged.						#
+#									#
+#########################################################################
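+# e.g. if hi(man) = 0x00012345, bfffo finds the first set bit at offset 15,
+# so the 64-bit mantissa is shifted left by 15 and d0 = 15 is returned.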
+	global		norm
+norm:
+	mov.l		%d2, -(%sp)		# create some temp regs
+	mov.l		%d3, -(%sp)
+
+	mov.l		FTEMP_HI(%a0), %d0	# load hi(mantissa)
+	mov.l		FTEMP_LO(%a0), %d1	# load lo(mantissa)
+
+	bfffo		%d0{&0:&32}, %d2	# how many places to shift?
+	beq.b		norm_lo			# hi(man) is all zeroes!
+
+norm_hi:
+	lsl.l		%d2, %d0		# left shift hi(man)
+	bfextu		%d1{&0:%d2}, %d3	# extract lo bits
+
+	or.l		%d3, %d0		# create hi(man)
+	lsl.l		%d2, %d1		# create lo(man)
+
+	mov.l		%d0, FTEMP_HI(%a0)	# store new hi(man)
+	mov.l		%d1, FTEMP_LO(%a0)	# store new lo(man)
+
+	mov.l		%d2, %d0		# return shift amount
+
+	mov.l		(%sp)+, %d3		# restore temp regs
+	mov.l		(%sp)+, %d2
+
+	rts
+
+norm_lo:
+	bfffo		%d1{&0:&32}, %d2	# how many places to shift?
+	lsl.l		%d2, %d1		# shift lo(man)
+	add.l		&32, %d2		# add 32 to shft amount
+
+	mov.l		%d1, FTEMP_HI(%a0)	# store hi(man)
+	clr.l		FTEMP_LO(%a0)		# lo(man) is now zero
+
+	mov.l		%d2, %d0		# return shift amount
+
+	mov.l		(%sp)+, %d3		# restore temp regs
+	mov.l		(%sp)+, %d2
+
+	rts
+
+#########################################################################
+# unnorm_fix(): - changes an UNNORM to one of NORM, DENORM, or ZERO	#
+#		- returns corresponding optype tag			#
+#									#
+# XDEF ****************************************************************	#
+#	unnorm_fix()							#
+#									#
+# XREF **************************************************************** #
+#	norm() - normalize the mantissa					#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to unnormalized extended precision number		#
+#									#
+# OUTPUT ************************************************************** #
+#	d0 = optype tag - is corrected to one of NORM, DENORM, or ZERO	#
+#	a0 = input operand has been converted to a norm, denorm, or	#
+#	     zero; both the exponent and mantissa are changed.		#
+#									#
+#########################################################################
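+# e.g. an UNNORM whose mantissa needs a 12 place shift but whose exponent is
+# only 5 can be shifted just 5 places: its exponent becomes 0 and the DENORM
+# tag is returned.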
+
+	global		unnorm_fix
+unnorm_fix:
+	bfffo		FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
+	bne.b		unnorm_shift		# hi(man) is not all zeroes
+
+#
+# hi(man) is all zeroes so see if any bits in lo(man) are set
+#
+unnorm_chk_lo:
+	bfffo		FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
+	beq.w		unnorm_zero		# yes
+
+	add.w		&32, %d0		# no; fix shift distance
+
+#
+# d0 = # shifts needed for complete normalization
+#
+unnorm_shift:
+	clr.l		%d1			# clear top word
+	mov.w		FTEMP_EX(%a0), %d1	# extract exponent
+	and.w		&0x7fff, %d1		# strip off sgn
+
+	cmp.w		%d0, %d1		# will denorm push exp < 0?
+	bgt.b		unnorm_nrm_zero		# yes; denorm only until exp = 0
+
+#
+# exponent would not go < 0. therefore, number stays normalized
+#
+	sub.w		%d0, %d1		# shift exponent value
+	mov.w		FTEMP_EX(%a0), %d0	# load old exponent
+	and.w		&0x8000, %d0		# save old sign
+	or.w		%d0, %d1		# {sgn,new exp}
+	mov.w		%d1, FTEMP_EX(%a0)	# insert new exponent
+
+	bsr.l		norm			# normalize UNNORM
+
+	mov.b		&NORM, %d0		# return new optype tag
+	rts
+
+#
+# exponent would go < 0, so only denormalize until exp = 0
+#
+unnorm_nrm_zero:
+	cmp.b		%d1, &32		# is exp <= 32?
+	bgt.b		unnorm_nrm_zero_lrg	# no; go handle large exponent
+
+	bfextu		FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
+	mov.l		%d0, FTEMP_HI(%a0)	# save new hi(man)
+
+	mov.l		FTEMP_LO(%a0), %d0	# fetch old lo(man)
+	lsl.l		%d1, %d0		# extract new lo(man)
+	mov.l		%d0, FTEMP_LO(%a0)	# save new lo(man)
+
+	and.w		&0x8000, FTEMP_EX(%a0)	# set exp = 0
+
+	mov.b		&DENORM, %d0		# return new optype tag
+	rts
+
+#
+# only mantissa bits set are in lo(man)
+#
+unnorm_nrm_zero_lrg:
+	sub.w		&32, %d1		# adjust shft amt by 32
+
+	mov.l		FTEMP_LO(%a0), %d0	# fetch old lo(man)
+	lsl.l		%d1, %d0		# left shift lo(man)
+
+	mov.l		%d0, FTEMP_HI(%a0)	# store new hi(man)
+	clr.l		FTEMP_LO(%a0)		# lo(man) = 0
+
+	and.w		&0x8000, FTEMP_EX(%a0)	# set exp = 0
+
+	mov.b		&DENORM, %d0		# return new optype tag
+	rts
+
+#
+# whole mantissa is zero so this UNNORM is actually a zero
+#
+unnorm_zero:
+	and.w		&0x8000, FTEMP_EX(%a0)	# force exponent to zero
+
+	mov.b		&ZERO, %d0		# fix optype tag
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	set_tag_x(): return the optype of the input ext fp number	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, UNNORM, ZERO	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#	If it's an unnormalized zero, alter the operand and force it	#
+# to be a normal zero.							#
+#									#
+#########################################################################
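+# e.g. an operand with a zero exponent, a clear j-bit (bit 63), and a nonzero
+# mantissa is tagged DENORM; with a nonzero exponent and a clear j-bit it is
+# tagged UNNORM (unless the mantissa is zero, in which case it is forced to
+# ZERO).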
+
+	global		set_tag_x
+set_tag_x:
+	mov.w		FTEMP_EX(%a0), %d0	# extract exponent
+	andi.w		&0x7fff, %d0		# strip off sign
+	cmpi.w		%d0, &0x7fff		# is (EXP == MAX)?
+	beq.b		inf_or_nan_x
+not_inf_or_nan_x:
+	btst		&0x7,FTEMP_HI(%a0)
+	beq.b		not_norm_x
+is_norm_x:
+	mov.b		&NORM, %d0
+	rts
+not_norm_x:
+	tst.w		%d0			# is exponent = 0?
+	bne.b		is_unnorm_x
+not_unnorm_x:
+	tst.l		FTEMP_HI(%a0)
+	bne.b		is_denorm_x
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_denorm_x
+is_zero_x:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_x:
+	mov.b		&DENORM, %d0
+	rts
+# we must now distinguish "unnormalized zeroes", which we
+# convert to actual zeroes.
+is_unnorm_x:
+	tst.l		FTEMP_HI(%a0)
+	bne.b		is_unnorm_reg_x
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_unnorm_reg_x
+# it's an "unnormalized zero". let's convert it to an actual zero...
+	andi.w		&0x8000,FTEMP_EX(%a0)	# clear exponent
+	mov.b		&ZERO, %d0
+	rts
+is_unnorm_reg_x:
+	mov.b		&UNNORM, %d0
+	rts
+inf_or_nan_x:
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_nan_x
+	mov.l		FTEMP_HI(%a0), %d0
+	and.l		&0x7fffffff, %d0	# msb is a don't care!
+	bne.b		is_nan_x
+is_inf_x:
+	mov.b		&INF, %d0
+	rts
+is_nan_x:
+	btst		&0x6, FTEMP_HI(%a0)
+	beq.b		is_snan_x
+	mov.b		&QNAN, %d0
+	rts
+is_snan_x:
+	mov.b		&SNAN, %d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	set_tag_d(): return the optype of the input dbl fp number	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = points to double precision operand				#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, ZERO		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#									#
+#########################################################################
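+# e.g. 0x7ff00000_00000000 is tagged INF, 0x7ff80000_00000000 is tagged QNAN
+# (fraction bit 19 of the upper longword set), and 0x00080000_00000000 is
+# tagged DENORM.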
+
+	global		set_tag_d
+set_tag_d:
+	mov.l		FTEMP(%a0), %d0
+	mov.l		%d0, %d1
+
+	andi.l		&0x7ff00000, %d0
+	beq.b		zero_or_denorm_d
+
+	cmpi.l		%d0, &0x7ff00000
+	beq.b		inf_or_nan_d
+
+is_norm_d:
+	mov.b		&NORM, %d0
+	rts
+zero_or_denorm_d:
+	and.l		&0x000fffff, %d1
+	bne		is_denorm_d
+	tst.l		4+FTEMP(%a0)
+	bne		is_denorm_d
+is_zero_d:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_d:
+	mov.b		&DENORM, %d0
+	rts
+inf_or_nan_d:
+	and.l		&0x000fffff, %d1
+	bne		is_nan_d
+	tst.l		4+FTEMP(%a0)
+	bne		is_nan_d
+is_inf_d:
+	mov.b		&INF, %d0
+	rts
+is_nan_d:
+	btst		&19, %d1
+	bne		is_qnan_d
+is_snan_d:
+	mov.b		&SNAN, %d0
+	rts
+is_qnan_d:
+	mov.b		&QNAN, %d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	set_tag_s(): return the optype of the input sgl fp number	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to single precision operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, ZERO		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#									#
+#########################################################################
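+# e.g. 0x7f800000 is tagged INF, 0x7fc00000 is tagged QNAN (fraction bit 22
+# set), 0x7f800001 is tagged SNAN, and 0x00400000 is tagged DENORM.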
+
+	global		set_tag_s
+set_tag_s:
+	mov.l		FTEMP(%a0), %d0
+	mov.l		%d0, %d1
+
+	andi.l		&0x7f800000, %d0
+	beq.b		zero_or_denorm_s
+
+	cmpi.l		%d0, &0x7f800000
+	beq.b		inf_or_nan_s
+
+is_norm_s:
+	mov.b		&NORM, %d0
+	rts
+zero_or_denorm_s:
+	and.l		&0x007fffff, %d1
+	bne		is_denorm_s
+is_zero_s:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_s:
+	mov.b		&DENORM, %d0
+	rts
+inf_or_nan_s:
+	and.l		&0x007fffff, %d1
+	bne		is_nan_s
+is_inf_s:
+	mov.b		&INF, %d0
+	rts
+is_nan_s:
+	btst		&22, %d1
+	bne		is_qnan_s
+is_snan_s:
+	mov.b		&SNAN, %d0
+	rts
+is_qnan_s:
+	mov.b		&QNAN, %d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	unf_res(): routine to produce default underflow result of a	#
+#		   scaled extended precision number; this is used by	#
+#		   fadd/fdiv/fmul/etc. emulation routines.		#
+#	unf_res4(): same as above but for fsglmul/fsgldiv which use	#
+#		    single round prec and extended prec mode.		#
+#									#
+# XREF ****************************************************************	#
+#	_denorm() - denormalize according to scale factor		#
+#	_round() - round denormalized number according to rnd prec	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision operand			#
+#	d0 = scale factor						#
+#	d1 = rounding precision/mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = pointer to default underflow result in extended precision	#
+#	d0.b = result FPSR_cc which caller may or may not want to save	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Convert the input operand to "internal format" which means the	#
+# exponent is extended to 16 bits and the sign is stored in the unused	#
+# portion of the extended precision operand. Denormalize the number	#
+# according to the scale factor passed in d0. Then, round the		#
+# denormalized result.							#
+#	Set the FPSR_exc bits as appropriate but return the cc bits in	#
+# d0 in case the caller doesn't want to save them (as is the case for	#
+# fmove out).								#
+#	unf_res4() for fsglmul/fsgldiv forces the denorm to extended	#
+# precision and the rounding mode to single.				#
+#									#
+#########################################################################
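+# note (inferred from the bit manipulation below): d1 is expected to hold
+# FPCR-style rounding control bits, i.e. precision in bits 7:6 and mode in
+# bits 5:4; they are shifted down into the {prec,mode} format that _denorm()
+# and _round() expect.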
+	global		unf_res
+unf_res:
+	mov.l		%d1, -(%sp)		# save rnd prec,mode on stack
+
+	btst		&0x7, FTEMP_EX(%a0)	# make "internal" format
+	sne		FTEMP_SGN(%a0)
+
+	mov.w		FTEMP_EX(%a0), %d1	# extract exponent
+	and.w		&0x7fff, %d1
+	sub.w		%d0, %d1
+	mov.w		%d1, FTEMP_EX(%a0)	# insert 16 bit exponent
+
+	mov.l		%a0, -(%sp)		# save operand ptr during calls
+
+	mov.l		0x4(%sp),%d0		# pass rnd prec.
+	andi.w		&0x00c0,%d0
+	lsr.w		&0x4,%d0
+	bsr.l		_denorm			# denorm result
+
+	mov.l		(%sp),%a0
+	mov.w		0x6(%sp),%d1		# load prec:mode into %d1
+	andi.w		&0xc0,%d1		# extract rnd prec
+	lsr.w		&0x4,%d1
+	swap		%d1
+	mov.w		0x6(%sp),%d1
+	andi.w		&0x30,%d1
+	lsr.w		&0x4,%d1
+	bsr.l		_round			# round the denorm
+
+	mov.l		(%sp)+, %a0
+
+# result is now rounded properly. convert back to normal format
+	bclr		&0x7, FTEMP_EX(%a0)	# clear sgn first; may have residue
+	tst.b		FTEMP_SGN(%a0)		# is "internal result" sign set?
+	beq.b		unf_res_chkifzero	# no; result is positive
+	bset		&0x7, FTEMP_EX(%a0)	# set result sgn
+	clr.b		FTEMP_SGN(%a0)		# clear temp sign
+
+# the number may have become zero after rounding. set ccodes accordingly.
+unf_res_chkifzero:
+	clr.l		%d0
+	tst.l		FTEMP_HI(%a0)		# is value now a zero?
+	bne.b		unf_res_cont		# no
+	tst.l		FTEMP_LO(%a0)
+	bne.b		unf_res_cont		# no
+#	bset		&z_bit, FPSR_CC(%a6)	# yes; set zero ccode bit
+	bset		&z_bit, %d0		# yes; set zero ccode bit
+
+unf_res_cont:
+
+#
+# can inex1 also be set along with unfl and inex2???
+#
+# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
+#
+	btst		&inex2_bit, FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.b		unf_res_end		# no
+	bset		&aunfl_bit, FPSR_AEXCEPT(%a6) # yes; set aunfl
+
+unf_res_end:
+	add.l		&0x4, %sp		# clear stack
+	rts
+
+# unf_res() for fsglmul() and fsgldiv().
+	global		unf_res4
+unf_res4:
+	mov.l		%d1,-(%sp)		# save rnd prec,mode on stack
+
+	btst		&0x7,FTEMP_EX(%a0)	# make "internal" format
+	sne		FTEMP_SGN(%a0)
+
+	mov.w		FTEMP_EX(%a0),%d1	# extract exponent
+	and.w		&0x7fff,%d1
+	sub.w		%d0,%d1
+	mov.w		%d1,FTEMP_EX(%a0)	# insert 16 bit exponent
+
+	mov.l		%a0,-(%sp)		# save operand ptr during calls
+
+	clr.l		%d0			# force rnd prec = ext
+	bsr.l		_denorm			# denorm result
+
+	mov.l		(%sp),%a0
+	mov.w		&s_mode,%d1		# force rnd prec = sgl
+	swap		%d1
+	mov.w		0x6(%sp),%d1		# load rnd mode
+	andi.w		&0x30,%d1		# extract rnd prec
+	lsr.w		&0x4,%d1
+	bsr.l		_round			# round the denorm
+
+	mov.l		(%sp)+,%a0
+
+# result is now rounded properly. convert back to normal format
+	bclr		&0x7,FTEMP_EX(%a0)	# clear sgn first; may have residue
+	tst.b		FTEMP_SGN(%a0)		# is "internal result" sign set?
+	beq.b		unf_res4_chkifzero	# no; result is positive
+	bset		&0x7,FTEMP_EX(%a0)	# set result sgn
+	clr.b		FTEMP_SGN(%a0)		# clear temp sign
+
+# the number may have become zero after rounding. set ccodes accordingly.
+unf_res4_chkifzero:
+	clr.l		%d0
+	tst.l		FTEMP_HI(%a0)		# is value now a zero?
+	bne.b		unf_res4_cont		# no
+	tst.l		FTEMP_LO(%a0)
+	bne.b		unf_res4_cont		# no
+#	bset		&z_bit,FPSR_CC(%a6)	# yes; set zero ccode bit
+	bset		&z_bit,%d0		# yes; set zero ccode bit
+
+unf_res4_cont:
+
+#
+# can inex1 also be set along with unfl and inex2???
+#
+# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
+#
+	btst		&inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.b		unf_res4_end		# no
+	bset		&aunfl_bit,FPSR_AEXCEPT(%a6) # yes; set aunfl
+
+unf_res4_end:
+	add.l		&0x4,%sp		# clear stack
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	ovf_res(): routine to produce the default overflow result of	#
+#		   an overflowing number.				#
+#	ovf_res2(): same as above but the rnd mode/prec are passed	#
+#		    differently.					#
+#									#
+# XREF ****************************************************************	#
+#	none								#
+#									#
+# INPUT ***************************************************************	#
+#	d1.b	= '-1' => (-); '0' => (+)				#
+#   ovf_res():								#
+#	d0	= rnd mode/prec						#
+#   ovf_res2():								#
+#	hi(d0)	= rnd prec						#
+#	lo(d0)	= rnd mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	a0	= points to extended precision result			#
+#	d0.b	= condition code bits					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The default overflow result can be determined by the sign of	#
+# the result and the rounding mode/prec in effect. These bits are	#
+# concatenated together to create an index into the default result	#
+# table. A pointer to the correct result is returned in a0. The		#
+# resulting condition codes are returned in d0 in case the caller	#
+# doesn't want FPSR_cc altered (as is the case for fmove out).		#
+#									#
+#########################################################################
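+
+# as a rough sketch of the table lookup below (assuming the rnd prec/mode
+# byte uses the FPCR layout: prec in bits 7:6, mode in bits 5:4):
+#
+#	index  = (sign ? 0x10 : 0x0) | (prec << 2) | mode;
+#	ccodes = tbl_ovfl_cc[index];		/* byte entries		*/
+#	result = &tbl_ovfl_result[index];	/* 16 byte entries	*/
+#
+# e.g. a negative overflow w/ dbl prec and RM picks the -INF entry while
+# the same case w/ RZ picks the largest magnitude -DBL norm.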
+
+	global		ovf_res
+ovf_res:
+	andi.w		&0x10,%d1		# keep result sign
+	lsr.b		&0x4,%d0		# shift prec/mode
+	or.b		%d0,%d1			# concat the two
+	mov.w		%d1,%d0			# make a copy
+	lsl.b		&0x1,%d1		# multiply d1 by 2
+	bra.b		ovf_res_load
+
+	global		ovf_res2
+ovf_res2:
+	and.w		&0x10, %d1		# keep result sign
+	or.b		%d0, %d1		# insert rnd mode
+	swap		%d0
+	or.b		%d0, %d1		# insert rnd prec
+	mov.w		%d1, %d0		# make a copy
+	lsl.b		&0x1, %d1		# shift left by 1
+
+#
+# use the rounding mode, precision, and result sign as an index into the
+# two tables below to fetch the default result and the result ccodes.
+#
+ovf_res_load:
+	mov.b		(tbl_ovfl_cc.b,%pc,%d0.w*1), %d0 # fetch result ccodes
+	lea		(tbl_ovfl_result.b,%pc,%d1.w*8), %a0 # return result ptr
+
+	rts
+
+tbl_ovfl_cc:
+	byte		0x2, 0x0, 0x0, 0x2
+	byte		0x2, 0x0, 0x0, 0x2
+	byte		0x2, 0x0, 0x0, 0x2
+	byte		0x0, 0x0, 0x0, 0x0
+	byte		0x2+0x8, 0x8, 0x2+0x8, 0x8
+	byte		0x2+0x8, 0x8, 0x2+0x8, 0x8
+	byte		0x2+0x8, 0x8, 0x2+0x8, 0x8
+
+tbl_ovfl_result:
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+	long		0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RZ
+	long		0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RM
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+	long		0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RZ
+	long		0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RM
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+	long		0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RZ
+	long		0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RM
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+	long		0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RZ
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+	long		0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RP
+
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+	long		0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RZ
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+	long		0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RP
+
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+	long		0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RZ
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+	long		0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RP
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fout(): move from fp register to memory or data register	#
+#									#
+# XREF ****************************************************************	#
+#	_round() - needed to create EXOP for sgl/dbl precision		#
+#	norm() - needed to create EXOP for extended precision		#
+#	ovf_res() - create default overflow result for sgl/dbl precision#
+#	unf_res() - create default underflow result for sgl/dbl prec.	#
+#	dst_dbl() - create rounded dbl precision result.		#
+#	dst_sgl() - create rounded sgl precision result.		#
+#	fetch_dreg() - fetch dynamic k-factor reg for packed.		#
+#	bindec() - convert FP binary number to packed number.		#
+#	_mem_write() - write data to memory.				#
+#	_mem_write2() - write data to memory unless supv mode -(a7) exc.#
+#	_dmem_write_{byte,word,long}() - write data to memory.		#
+#	store_dreg_{b,w,l}() - store data to data register file.	#
+#	facc_out_{b,w,l,d,x}() - data access error occurred.		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 : intermediate underflow or overflow result if		#
+#	      OVFL/UNFL occurred for a sgl or dbl operand		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This routine is accessed by many handlers that need to do an	#
+# opclass three move of an operand out to memory.			#
+#	Decode an fmove out (opclass 3) instruction to determine if	#
+# it's b,w,l,s,d,x, or p in size. b,w,l can be stored to either a data	#
+# register or memory. The algorithm uses a standard "fmove" to create	#
+# the rounded result. Also, since exceptions are disabled, this also	#
+# creates the correct OPERR default result if appropriate.		#
+#	For sgl or dbl precision, overflow or underflow can occur. If	#
+# either occurs and is enabled, the EXOP must be created.		#
+#	For extended precision, the stacked <ea> must be fixed along	#
+# w/ the address index register as appropriate w/ _calc_ea_fout(). If	#
+# the source is a denorm and if underflow is enabled, an EXOP must be	#
+# created.								#
+#	For packed, the k-factor must be fetched from the instruction	#
+# word or a data register. The <ea> must be fixed as w/ extended	#
+# precision. Then, bindec() is called to create the appropriate		#
+# packed result.							#
+#	If at any time an access error is flagged by one of the move-	#
+# to-memory routines, then a special exit must be made so that the	#
+# access error can be handled properly.					#
+#									#
+#########################################################################
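+
+# the 3-bit destination format field selects one of the handlers below;
+# as a quick reference (per the jump table that follows):
+#
+#	000 -> fmove.l		100 -> fmove.w
+#	001 -> fmove.s		101 -> fmove.d
+#	010 -> fmove.x		110 -> fmove.b
+#	011 -> fmove.p (static k-factor)
+#	111 -> fmove.p (dynamic k-factor)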
+
+	global		fout
+fout:
+	bfextu		EXC_CMDREG(%a6){&3:&3},%d1 # extract dst fmt
+	mov.w		(tbl_fout.b,%pc,%d1.w*2),%a1 # use as index
+	jmp		(tbl_fout.b,%pc,%a1)	# jump to routine
+
+	swbeg		&0x8
+tbl_fout:
+	short		fout_long	-	tbl_fout
+	short		fout_sgl	-	tbl_fout
+	short		fout_ext	-	tbl_fout
+	short		fout_pack	-	tbl_fout
+	short		fout_word	-	tbl_fout
+	short		fout_dbl	-	tbl_fout
+	short		fout_byte	-	tbl_fout
+	short		fout_pack	-	tbl_fout
+
+#################################################################
+# fmove.b out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_byte:
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_byte_denorm	# no
+
+	fmovm.x		SRC(%a0),&0x80		# load value
+
+fout_byte_norm:
+	fmov.l		%d0,%fpcr		# insert rnd prec,mode
+
+	fmov.b		%fp0,%d0		# exec move out w/ correct rnd mode
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch FPSR
+	or.w		%d1,2+USER_FPSR(%a6)	# save new exc,accrued bits
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_byte_dn		# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_byte	# write byte
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_b		# yes
+
+	rts
+
+fout_byte_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_b
+	rts
+
+fout_byte_denorm:
+	mov.l		SRC_EX(%a0),%d1
+	andi.l		&0x80000000,%d1		# keep DENORM sign
+	ori.l		&0x00800000,%d1		# make smallest sgl
+	fmov.s		%d1,%fp0
+	bra.b		fout_byte_norm
+
+#################################################################
+# fmove.w out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_word:
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_word_denorm	# no
+
+	fmovm.x		SRC(%a0),&0x80		# load value
+
+fout_word_norm:
+	fmov.l		%d0,%fpcr		# insert rnd prec:mode
+
+	fmov.w		%fp0,%d0		# exec move out w/ correct rnd mode
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch FPSR
+	or.w		%d1,2+USER_FPSR(%a6)	# save new exc,accrued bits
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_word_dn		# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_word	# write word
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_w		# yes
+
+	rts
+
+fout_word_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_w
+	rts
+
+fout_word_denorm:
+	mov.l		SRC_EX(%a0),%d1
+	andi.l		&0x80000000,%d1		# keep DENORM sign
+	ori.l		&0x00800000,%d1		# make smallest sgl
+	fmov.s		%d1,%fp0
+	bra.b		fout_word_norm
+
+#################################################################
+# fmove.l out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_long:
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_long_denorm	# no
+
+	fmovm.x		SRC(%a0),&0x80		# load value
+
+fout_long_norm:
+	fmov.l		%d0,%fpcr		# insert rnd prec:mode
+
+	fmov.l		%fp0,%d0		# exec move out w/ correct rnd mode
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch FPSR
+	or.w		%d1,2+USER_FPSR(%a6)	# save new exc,accrued bits
+
+fout_long_write:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_long_dn		# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	rts
+
+fout_long_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+	rts
+
+fout_long_denorm:
+	mov.l		SRC_EX(%a0),%d1
+	andi.l		&0x80000000,%d1		# keep DENORM sign
+	ori.l		&0x00800000,%d1		# make smallest sgl
+	fmov.s		%d1,%fp0
+	bra.b		fout_long_norm
+
+#################################################################
+# fmove.x out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+# The DENORM causes an Underflow exception.
+fout_ext:
+
+# we copy the extended precision result to FP_SCR0 so that the reserved
+# 16-bit field gets zeroed. we do this since we promise not to disturb
+# what's at SRC(a0).
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	clr.w		2+FP_SCR0_EX(%a6)	# clear reserved field
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	fmovm.x		SRC(%a0),&0x80		# return result
+
+	bsr.l		_calc_ea_fout		# fix stacked <ea>
+
+	mov.l		%a0,%a1			# pass: dst addr
+	lea		FP_SCR0(%a6),%a0	# pass: src addr
+	mov.l		&0xc,%d0		# pass: opsize is 12 bytes
+
+# we must not yet write the extended precision data to the stack
+# in the pre-decrement case from supervisor mode or else we'll corrupt
+# the stack frame. so, leave it in FP_SRC for now and deal with it later...
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	beq.b		fout_ext_a7
+
+	bsr.l		_dmem_write		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_ext_denorm		# no
+	rts
+
+# the number is a DENORM. must set the underflow exception bit
+fout_ext_denorm:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set underflow exc bit
+
+	mov.b		FPCR_ENABLE(%a6),%d0
+	andi.b		&0x0a,%d0		# is UNFL or INEX enabled?
+	bne.b		fout_ext_exc		# yes
+	rts
+
+# we don't want to do the write if the exception occurred in supervisor mode
+# so _mem_write2() handles this for us.
+fout_ext_a7:
+	bsr.l		_mem_write2		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_ext_denorm		# no
+	rts
+
+fout_ext_exc:
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the mantissa
+	neg.w		%d0			# new exp = -(shft amt)
+	andi.w		&0x7fff,%d0
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# keep only old sign
+	or.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+fout_ext_err:
+	mov.l		EXC_A6(%a6),(%a6)	# fix stacked a6
+	bra.l		facc_out_x
+
+#########################################################################
+# fmove.s out ###########################################################
+#########################################################################
+fout_sgl:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	mov.l		%d0,L_SCR3(%a6)		# save rnd prec,mode on stack
+
+#
+# operand is a normalized number. first, we check to see if the move out
+# would cause either an underflow or overflow. these cases are handled
+# separately. otherwise, set the FPCR to the proper rounding mode and
+# execute the move.
+#
+	mov.w		SRC_EX(%a0),%d0		# extract exponent
+	andi.w		&0x7fff,%d0		# strip sign
+
+	cmpi.w		%d0,&SGL_HI		# will operand overflow?
+	bgt.w		fout_sgl_ovfl		# yes; go handle OVFL
+	beq.w		fout_sgl_may_ovfl	# maybe; go handle possible OVFL
+	cmpi.w		%d0,&SGL_LO		# will operand underflow?
+	blt.w		fout_sgl_unfl		# yes; go handle underflow
+
+#
+# NORMs(in range) can be stored out by a simple "fmov.s"
+# Unnormalized inputs can come through this point.
+#
+fout_sgl_exg:
+	fmovm.x		SRC(%a0),&0x80		# fetch fop from stack
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmov.s		%fp0,%d0		# store does convert and round
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.w		%d1,2+USER_FPSR(%a6)	# set possible inex2/ainex
+
+fout_sgl_exg_write:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_sgl_exg_write_dn	# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	rts
+
+fout_sgl_exg_write_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+	rts
+
+#
+# here, we know that the operand would UNFL if moved out to single prec,
+# so, denorm and round and then use generic store single routine to
+# write the value to memory.
+#
+fout_sgl_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.l		%a0,-(%sp)
+
+	clr.l		%d0			# pass: S.F. = 0
+
+	cmpi.b		STAG(%a6),&DENORM	# fetch src optype tag
+	bne.b		fout_sgl_unfl_cont	# let DENORMs fall through
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the DENORM
+
+fout_sgl_unfl_cont:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calc default underflow result
+
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to fop
+	bsr.l		dst_sgl			# convert to single prec
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_sgl_unfl_dn	# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.b		fout_sgl_unfl_chkexc
+
+fout_sgl_unfl_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+
+fout_sgl_unfl_chkexc:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_unfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# it's definitely an overflow so call ovf_res to get the correct answer
+#
+fout_sgl_ovfl:
+	tst.b		3+SRC_HI(%a0)		# is result inexact?
+	bne.b		fout_sgl_ovfl_inex2
+	tst.l		SRC_LO(%a0)		# is result inexact?
+	bne.b		fout_sgl_ovfl_inex2
+	ori.w		&ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+	bra.b		fout_sgl_ovfl_cont
+fout_sgl_ovfl_inex2:
+	ori.w		&ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
+
+fout_sgl_ovfl_cont:
+	mov.l		%a0,-(%sp)
+
+# call ovf_res() w/ sgl prec and the correct rnd mode to create the default
+# overflow result. DON'T save the returned ccodes from ovf_res() since
+# fmove out doesn't alter them.
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	smi		%d1			# set if so
+	mov.l		L_SCR3(%a6),%d0		# pass: sgl prec,rnd mode
+	bsr.l		ovf_res			# calc OVFL result
+	fmovm.x		(%a0),&0x80		# load default overflow result
+	fmov.s		%fp0,%d0		# store to single
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_sgl_ovfl_dn	# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.b		fout_sgl_ovfl_chkexc
+
+fout_sgl_ovfl_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+
+fout_sgl_ovfl_chkexc:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_ovfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# move out MAY overflow:
+# (1) force the exp to 0x3fff
+# (2) do a move w/ appropriate rnd mode
+# (3) if exp still equals zero, then insert original exponent
+#	for the correct result.
+#     if exp now equals one, then it overflowed so call ovf_res.
+#
+fout_sgl_may_ovfl:
+	mov.w		SRC_EX(%a0),%d1		# fetch current sign
+	andi.w		&0x8000,%d1		# keep it,clear exp
+	ori.w		&0x3fff,%d1		# insert exp = 0
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert scaled exp
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# force fop to be rounded
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# need absolute value
+	fcmp.b		%fp0,&0x2		# did exponent increase?
+	fblt.w		fout_sgl_exg		# no; go finish NORM
+	bra.w		fout_sgl_ovfl		# yes; go handle overflow
+
+################
+
+fout_sd_exc_unfl:
+	mov.l		(%sp)+,%a0
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	cmpi.b		STAG(%a6),&DENORM	# was src a DENORM?
+	bne.b		fout_sd_exc_cont	# no
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm
+	neg.l		%d0
+	andi.w		&0x7fff,%d0
+	bfins		%d0,FP_SCR0_EX(%a6){&1:&15}
+	bra.b		fout_sd_exc_cont
+
+fout_sd_exc:
+fout_sd_exc_ovfl:
+	mov.l		(%sp)+,%a0		# restore a0
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+fout_sd_exc_cont:
+	bclr		&0x7,FP_SCR0_EX(%a6)	# clear sign bit
+	sne.b		2+FP_SCR0_EX(%a6)	# set internal sign bit
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to DENORM
+
+	mov.b		3+L_SCR3(%a6),%d1
+	lsr.b		&0x4,%d1
+	andi.w		&0x0c,%d1
+	swap		%d1
+	mov.b		3+L_SCR3(%a6),%d1
+	lsr.b		&0x4,%d1
+	andi.w		&0x03,%d1
+	clr.l		%d0			# pass: zero g,r,s
+	bsr.l		_round			# round the DENORM
+
+	tst.b		2+FP_SCR0_EX(%a6)	# is EXOP negative?
+	beq.b		fout_sd_exc_done	# no
+	bset		&0x7,FP_SCR0_EX(%a6)	# yes
+
+fout_sd_exc_done:
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#################################################################
+# fmove.d out ###################################################
+#################################################################
+fout_dbl:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+	mov.l		%d0,L_SCR3(%a6)		# save rnd prec,mode on stack
+
+#
+# operand is a normalized number. first, we check to see if the move out
+# would cause either an underflow or overflow. these cases are handled
+# separately. otherwise, set the FPCR to the proper rounding mode and
+# execute the move.
+#
+	mov.w		SRC_EX(%a0),%d0		# extract exponent
+	andi.w		&0x7fff,%d0		# strip sign
+
+	cmpi.w		%d0,&DBL_HI		# will operand overflow?
+	bgt.w		fout_dbl_ovfl		# yes; go handle OVFL
+	beq.w		fout_dbl_may_ovfl	# maybe; go handle possible OVFL
+	cmpi.w		%d0,&DBL_LO		# will operand underflow?
+	blt.w		fout_dbl_unfl		# yes; go handle underflow
+
+#
+# NORMs(in range) can be stored out by a simple "fmov.d"
+# Unnormalized inputs can come through this point.
+#
+fout_dbl_exg:
+	fmovm.x		SRC(%a0),&0x80		# fetch fop from stack
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmov.d		%fp0,L_SCR1(%a6)	# store does convert and round
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d0		# save FPSR
+
+	or.w		%d0,2+USER_FPSR(%a6)	# set possible inex2/ainex
+
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	lea		L_SCR1(%a6),%a0		# pass: src addr
+	movq.l		&0x8,%d0		# pass: opsize is 8 bytes
+	bsr.l		_dmem_write		# store dbl fop to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	rts					# no; so we're finished
+
+#
+# here, we know that the operand would UNFL if moved out to double prec,
+# so, denorm and round and then use generic store double routine to
+# write the value to memory.
+#
+fout_dbl_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.l		%a0,-(%sp)
+
+	clr.l		%d0			# pass: S.F. = 0
+
+	cmpi.b		STAG(%a6),&DENORM	# fetch src optype tag
+	bne.b		fout_dbl_unfl_cont	# let DENORMs fall through
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the DENORM
+
+fout_dbl_unfl_cont:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calc default underflow result
+
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to fop
+	bsr.l		dst_dbl			# convert to double prec
+	mov.l		%d0,L_SCR1(%a6)
+	mov.l		%d1,L_SCR2(%a6)
+
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	lea		L_SCR1(%a6),%a0		# pass: src addr
+	movq.l		&0x8,%d0		# pass: opsize is 8 bytes
+	bsr.l		_dmem_write		# store dbl fop to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_unfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# it's definitely an overflow so call ovf_res to get the correct answer
+#
+fout_dbl_ovfl:
+	mov.w		2+SRC_LO(%a0),%d0
+	andi.w		&0x7ff,%d0
+	bne.b		fout_dbl_ovfl_inex2
+
+	ori.w		&ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+	bra.b		fout_dbl_ovfl_cont
+fout_dbl_ovfl_inex2:
+	ori.w		&ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
+
+fout_dbl_ovfl_cont:
+	mov.l		%a0,-(%sp)
+
+# call ovf_res() w/ dbl prec and the correct rnd mode to create the default
+# overflow result. DON'T save the returned ccodes from ovf_res() since
+# fmove out doesn't alter them.
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	smi		%d1			# set if so
+	mov.l		L_SCR3(%a6),%d0		# pass: dbl prec,rnd mode
+	bsr.l		ovf_res			# calc OVFL result
+	fmovm.x		(%a0),&0x80		# load default overflow result
+	fmov.d		%fp0,L_SCR1(%a6)	# store to double
+
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	lea		L_SCR1(%a6),%a0		# pass: src addr
+	movq.l		&0x8,%d0		# pass: opsize is 8 bytes
+	bsr.l		_dmem_write		# store dbl fop to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_ovfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# move out MAY overflow:
+# (1) force the exp to 0x3fff
+# (2) do a move w/ appropriate rnd mode
+# (3) if exp still equals zero, then insert original exponent
+#	for the correct result.
+#     if exp now equals one, then it overflowed so call ovf_res.
+#
+fout_dbl_may_ovfl:
+	mov.w		SRC_EX(%a0),%d1		# fetch current sign
+	andi.w		&0x8000,%d1		# keep it,clear exp
+	ori.w		&0x3fff,%d1		# insert exp = 0
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert scaled exp
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# force fop to be rounded
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# need absolute value
+	fcmp.b		%fp0,&0x2		# did exponent increase?
+	fblt.w		fout_dbl_exg		# no; go finish NORM
+	bra.w		fout_dbl_ovfl		# yes; go handle overflow
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	dst_dbl(): create double precision value from extended prec.	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to source operand in extended precision		#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = hi(double precision result)				#
+#	d1 = lo(double precision result)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#  Changes extended precision to double precision.			#
+#  Note: no attempt is made to round the extended value to double.	#
+#	dbl_sign = ext_sign						#
+#	dbl_exp = ext_exp - $3fff(ext bias) + $3ff(dbl bias)		#
+#	get rid of ext integer bit					#
+#	dbl_mant = ext_mant{62:11}					#
+#									#
+#		---------------   ---------------    ---------------	#
+#  extended ->  |s|    exp    |   |1| ms mant   |    | ls mant     |	#
+#		---------------   ---------------    ---------------	#
+#		 95	    64    63 62	      32      31     11	  0	#
+#				     |			     |		#
+#				     |			     |		#
+#				     |			     |		#
+#			             v			     v		#
+#			      ---------------   ---------------		#
+#  double   ->		      |s|exp| mant  |   |  mant       |		#
+#			      ---------------   ---------------		#
+#			      63     51   32   31	       0	#
+#									#
+#########################################################################
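+
+# a rough C-like sketch of the repacking below (assuming EXT_BIAS = 0x3fff
+# and DBL_BIAS = 0x3ff, and ignoring the denorm bias adjustment):
+#
+#	exp = (ext.ex & 0x7fff) - 0x3fff + 0x3ff;
+#	hi  = (sign << 31) | (exp << 20) | ((ext.hi >> 11) & 0xfffff);
+#	lo  = (ext.hi << 21) | (ext.lo >> 11);
+#
+# i.e. the 52-bit double fraction is ext mantissa bits 62:11, truncated,
+# w/ the explicit integer bit (bit 63) dropped.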
+
+dst_dbl:
+	clr.l		%d0			# clear d0
+	mov.w		FTEMP_EX(%a0),%d0	# get exponent
+	subi.w		&EXT_BIAS,%d0		# subtract extended precision bias
+	addi.w		&DBL_BIAS,%d0		# add double precision bias
+	tst.b		FTEMP_HI(%a0)		# is number a denorm?
+	bmi.b		dst_get_dupper		# no
+	subq.w		&0x1,%d0		# yes; denorm bias = DBL_BIAS - 1
+dst_get_dupper:
+	swap		%d0			# d0 now in upper word
+	lsl.l		&0x4,%d0		# d0 in proper place for dbl prec exp
+	tst.b		FTEMP_EX(%a0)		# test sign
+	bpl.b		dst_get_dman		# if positive, go process mantissa
+	bset		&0x1f,%d0		# if negative, set sign
+dst_get_dman:
+	mov.l		FTEMP_HI(%a0),%d1	# get ms mantissa
+	bfextu		%d1{&1:&20},%d1		# get upper 20 bits of ms
+	or.l		%d1,%d0			# put these bits in ms word of double
+	mov.l		%d0,L_SCR1(%a6)		# put the new exp back on the stack
+	mov.l		FTEMP_HI(%a0),%d1	# get ms mantissa
+	mov.l		&21,%d0			# load shift count
+	lsl.l		%d0,%d1			# put lower 11 bits in upper bits
+	mov.l		%d1,L_SCR2(%a6)		# build lower lword in memory
+	mov.l		FTEMP_LO(%a0),%d1	# get ls mantissa
+	bfextu		%d1{&0:&21},%d0		# get ls 21 bits of double
+	mov.l		L_SCR2(%a6),%d1
+	or.l		%d0,%d1			# put them in double result
+	mov.l		L_SCR1(%a6),%d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	dst_sgl(): create single precision value from extended prec	#
+#									#
+# XREF ****************************************************************	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to source operand in extended precision		#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = single precision result					#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+# Changes extended precision to single precision.			#
+#	sgl_sign = ext_sign						#
+#	sgl_exp = ext_exp - $3fff(ext bias) + $7f(sgl bias)		#
+#	get rid of ext integer bit					#
+#	sgl_mant = ext_mant{62:40}					#
+#									#
+#		---------------   ---------------    ---------------	#
+#  extended ->  |s|    exp    |   |1| ms mant   |    | ls mant     |	#
+#		---------------   ---------------    ---------------	#
+#		 95	    64    63 62	   40 32      31     12	  0	#
+#				     |	   |				#
+#				     |	   |				#
+#				     |	   |				#
+#			             v     v				#
+#			      ---------------				#
+#  single   ->		      |s|exp| mant  |				#
+#			      ---------------				#
+#			      31     22     0				#
+#									#
+#########################################################################
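+
+# a rough C-like sketch (assuming EXT_BIAS = 0x3fff and SGL_BIAS = 0x7f,
+# and ignoring the denorm bias adjustment):
+#
+#	exp = (ext.ex & 0x7fff) - 0x3fff + 0x7f;
+#	sgl = (sign << 31) | (exp << 23) | ((ext.hi >> 8) & 0x7fffff);
+#
+# i.e. the 23-bit single fraction is ext mantissa bits 62:40, truncated.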
+
+dst_sgl:
+	clr.l		%d0
+	mov.w		FTEMP_EX(%a0),%d0	# get exponent
+	subi.w		&EXT_BIAS,%d0		# subtract extended precision bias
+	addi.w		&SGL_BIAS,%d0		# add single precision bias
+	tst.b		FTEMP_HI(%a0)		# is number a denorm?
+	bmi.b		dst_get_supper		# no
+	subq.w		&0x1,%d0		# yes; denorm bias = SGL_BIAS - 1
+dst_get_supper:
+	swap		%d0			# put exp in upper word of d0
+	lsl.l		&0x7,%d0		# shift it into single exp bits
+	tst.b		FTEMP_EX(%a0)		# test sign
+	bpl.b		dst_get_sman		# if positive, continue
+	bset		&0x1f,%d0		# if negative, put in sign first
+dst_get_sman:
+	mov.l		FTEMP_HI(%a0),%d1	# get ms mantissa
+	andi.l		&0x7fffff00,%d1		# get upper 23 bits of ms
+	lsr.l		&0x8,%d1		# and put them flush right
+	or.l		%d1,%d0			# put these bits in ms word of single
+	rts
+
+##############################################################################
+fout_pack:
+	bsr.l		_calc_ea_fout		# fetch the <ea>
+	mov.l		%a0,-(%sp)
+
+	mov.b		STAG(%a6),%d0		# fetch input type
+	bne.w		fout_pack_not_norm	# input is not NORM
+
+fout_pack_norm:
+	btst		&0x4,EXC_CMDREG(%a6)	# static or dynamic?
+	beq.b		fout_pack_s		# static
+
+fout_pack_d:
+	mov.b		1+EXC_CMDREG(%a6),%d1	# fetch dynamic reg
+	lsr.b		&0x4,%d1
+	andi.w		&0x7,%d1
+
+	bsr.l		fetch_dreg		# fetch Dn w/ k-factor
+
+	bra.b		fout_pack_type
+fout_pack_s:
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch static field
+
+fout_pack_type:
+	bfexts		%d0{&25:&7},%d0		# extract k-factor
+	mov.l	%d0,-(%sp)
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to input
+
+# bindec is currently scrambling FP_SRC for denorm inputs.
+# we'll have to change this, but for now, tough luck!!!
+	bsr.l		bindec			# convert xprec to packed
+
+#	andi.l		&0xcfff000f,FP_SCR0(%a6) # clear unused fields
+	andi.l		&0xcffff00f,FP_SCR0(%a6) # clear unused fields
+
+	mov.l	(%sp)+,%d0
+
+	tst.b		3+FP_SCR0_EX(%a6)
+	bne.b		fout_pack_set
+	tst.l		FP_SCR0_HI(%a6)
+	bne.b		fout_pack_set
+	tst.l		FP_SCR0_LO(%a6)
+	bne.b		fout_pack_set
+
+# add the extra condition that only if the k-factor was zero, too, should
+# we zero the exponent
+	tst.l		%d0
+	bne.b		fout_pack_set
+# "mantissa" is all zero which means that the answer is zero. but, the '040
+# algorithm allows the exponent to be non-zero. the 881/2 do not. therefore,
+# if the mantissa is zero, I will zero the exponent, too.
+# the question now is whether the exponent's sign bit is allowed to be non-zero
+# for a zero, also...
+	andi.w		&0xf000,FP_SCR0(%a6)
+
+fout_pack_set:
+
+	lea		FP_SCR0(%a6),%a0	# pass: src addr
+
+fout_pack_write:
+	mov.l		(%sp)+,%a1		# pass: dst addr
+	mov.l		&0xc,%d0		# pass: opsize is 12 bytes
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	beq.b		fout_pack_a7
+
+	bsr.l		_dmem_write		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	rts
+
+# we don't want to do the write if the exception occurred in supervisor mode
+# so _mem_write2() handles this for us.
+fout_pack_a7:
+	bsr.l		_mem_write2		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	rts
+
+fout_pack_not_norm:
+	cmpi.b		%d0,&DENORM		# is it a DENORM?
+	beq.w		fout_pack_norm		# yes
+	lea		FP_SRC(%a6),%a0
+	clr.w		2+FP_SRC_EX(%a6)
+	cmpi.b		%d0,&SNAN		# is it an SNAN?
+	beq.b		fout_pack_snan		# yes
+	bra.b		fout_pack_write		# no
+
+fout_pack_snan:
+	ori.w		&snaniop2_mask,FPSR_EXCEPT(%a6) # set SNAN/AIOP
+	bset		&0x6,FP_SRC_HI(%a6)	# set snan bit
+	bra.b		fout_pack_write
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fmul(): emulates the fmul instruction				#
+#	fsmul(): emulates the fsmul instruction				#
+#	fdmul(): emulates the fdmul instruction				#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res() - return default underflow result			#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0  rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a multiply	#
+# instruction won't cause an exception. Use the regular fmul to		#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
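+
+# note on the threshold tables below: once both operands have been scaled
+# so their exponents are 0x3fff (unbiased zero), the exponent of the true
+# result is roughly 0x3fff - SCALE_FACTOR. the compares that follow amount
+# to (sketch only):
+#
+#	if (scale < tbl_fmul_ovfl[prec])	/* result exp too big	*/
+#		overflow;
+#	else if (scale > tbl_fmul_unfl[prec])	/* result exp too small	*/
+#		underflow;
+#
+# equality w/ either threshold means the rounded result may or may not
+# cross the boundary, so those paths do the multiply first and re-check.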
+
+	align		0x10
+tbl_fmul_ovfl:
+	long		0x3fff - 0x7ffe		# ext_max
+	long		0x3fff - 0x407e		# sgl_max
+	long		0x3fff - 0x43fe		# dbl_max
+tbl_fmul_unfl:
+	long		0x3fff + 0x0001		# ext_unfl
+	long		0x3fff - 0x3f80		# sgl_unfl
+	long		0x3fff - 0x3c00		# dbl_unfl
+
+	global		fsmul
+fsmul:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fmul
+
+	global		fdmul
+fdmul:
+	andi.b		&0x30,%d0
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fmul
+fmul:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+	bne.w		fmul_not_norm		# optimize on non-norm input
+
+fmul_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# scale src exponent
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# scale dst exponent
+
+	add.l		%d0,(%sp)		# SCALE_FACTOR = scale1 + scale2
+
+	mov.w		2+L_SCR3(%a6),%d1	# fetch precision
+	lsr.b		&0x6,%d1		# shift to lo bits
+	mov.l		(%sp)+,%d0		# load S.F.
+	cmp.l		%d0,(tbl_fmul_ovfl.w,%pc,%d1.w*4) # would result ovfl?
+	beq.w		fmul_may_ovfl		# result may rnd to overflow
+	blt.w		fmul_ovfl		# result will overflow
+
+	cmp.l		%d0,(tbl_fmul_unfl.w,%pc,%d1.w*4) # would result unfl?
+	beq.w		fmul_may_unfl		# result may rnd to no unfl
+	bgt.w		fmul_unfl		# result will underflow
+
+#
+# NORMAL:
+# - the result of the multiply operation will neither overflow nor underflow.
+# - do the multiply to the proper precision and rounding mode.
+# - scale the result exponent using the scale factor. if both operands were
+# normalized then we really don't need to go through this scaling. but for now,
+# this will do.
+#
+fmul_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fmul_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# OVERFLOW:
+# - the result of the multiply operation is an overflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+# - if overflow or inexact is enabled, we need a multiply result rounded to
+# extended precision. if the original operation was extended, then we have this
+# result. if the original operation was single or double, we have to do another
+# multiply using extended precision and the correct rounding mode. the result
+# of this operation then has its exponent scaled by -0x6000 to create the
+# exceptional operand.
+#
+fmul_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+# save setting this until now because this is where fmul_may_ovfl may jump in
+fmul_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fmul_ovfl_ena		# yes
+
+# calculate the default result
+fmul_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass rnd prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled; Create EXOP:
+# - if precision is extended, then we have the EXOP. simply bias the exponent
+# with an extra -0x6000. if the precision is single or double, we need to
+# calculate a result rounded to extended precision.
+#
+fmul_ovfl_ena:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# test the rnd prec
+	bne.b		fmul_ovfl_ena_sd	# it's sgl or dbl
+
+fmul_ovfl_ena_cont:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1		# clear sign bit
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fmul_ovfl_dis
+
+fmul_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# keep rnd mode only
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	bra.b		fmul_ovfl_ena_cont
+
+#
+# may OVERFLOW:
+# - the result of the multiply operation MAY overflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+#
+fmul_may_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fmul_ovfl_tst		# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fmul_normal_exit
+
+#
+# UNDERFLOW:
+# - the result of the multiply operation is an underflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+# - if overflow or inexact is enabled, we need a multiply result rounded to
+# extended precision. if the original operation was extended, then we have this
+# result. if the original operation was single or double, we have to do another
+# multiply using extended precision and the correct rounding mode. the result
+# of this operation then has its exponent scaled by -0x6000 to create the
+# exceptional operand.
+#
+fmul_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+# for fun, let's use only extended precision, round to zero. then, let
+# the unf_res() routine figure out all the rest.
+# will we get the correct answer?
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fmul_unfl_ena		# yes
+
+fmul_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# unf_res may have set 'Z'
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fmul_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fmul_unfl_ena_sd	# no, sgl or dbl
+
+# if the rnd mode is anything but RZ, then we have to re-do the above
+# multiplication because we used RZ for all.
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fmul_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp1	# execute multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fmul_unfl_dis
+
+fmul_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# use only rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fmul_unfl_ena_cont
+
+# MAY UNDERFLOW:
+# -use the correct rounding mode and precision. this code favors operations
+# that do not underflow.
+fmul_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| > 2.b?
+	fbgt.w		fmul_normal_exit	# no; no underflow occurred
+	fblt.w		fmul_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 2. but,
+# we don't know if the result was an underflow that rounded up to a 2 or
+# a normalized number that rounded down to a 2. so, redo the entire operation
+# using RZ as the rounding mode to see what the pre-rounded result is.
+# this case should be relatively rare.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst operand
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert RZ
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp1	# execute multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x2		# is |result| < 2.b?
+	fbge.w		fmul_normal_exit	# no; no underflow occurred
+	bra.w		fmul_unfl		# yes, underflow occurred
+
+################################################################################
+
+#
+# Multiply: inputs are not both normalized; what are they?
+#
+fmul_not_norm:
+	mov.w		(tbl_fmul_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fmul_op.b,%pc,%d1.w)
+
+	swbeg		&48
+tbl_fmul_op:
+	short		fmul_norm	- tbl_fmul_op # NORM x NORM
+	short		fmul_zero	- tbl_fmul_op # NORM x ZERO
+	short		fmul_inf_src	- tbl_fmul_op # NORM x INF
+	short		fmul_res_qnan	- tbl_fmul_op # NORM x QNAN
+	short		fmul_norm	- tbl_fmul_op # NORM x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # NORM x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_zero	- tbl_fmul_op # ZERO x NORM
+	short		fmul_zero	- tbl_fmul_op # ZERO x ZERO
+	short		fmul_res_operr	- tbl_fmul_op # ZERO x INF
+	short		fmul_res_qnan	- tbl_fmul_op # ZERO x QNAN
+	short		fmul_zero	- tbl_fmul_op # ZERO x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # ZERO x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_inf_dst	- tbl_fmul_op # INF x NORM
+	short		fmul_res_operr	- tbl_fmul_op # INF x ZERO
+	short		fmul_inf_dst	- tbl_fmul_op # INF x INF
+	short		fmul_res_qnan	- tbl_fmul_op # INF x QNAN
+	short		fmul_inf_dst	- tbl_fmul_op # INF x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # INF x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x NORM
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x ZERO
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x INF
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x QNAN
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # QNAN x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_norm	- tbl_fmul_op # NORM x NORM
+	short		fmul_zero	- tbl_fmul_op # NORM x ZERO
+	short		fmul_inf_src	- tbl_fmul_op # NORM x INF
+	short		fmul_res_qnan	- tbl_fmul_op # NORM x QNAN
+	short		fmul_norm	- tbl_fmul_op # NORM x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # NORM x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x NORM
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x ZERO
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x INF
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x QNAN
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+fmul_res_operr:
+	bra.l		res_operr
+fmul_res_snan:
+	bra.l		res_snan
+fmul_res_qnan:
+	bra.l		res_qnan
+
+#
+# Multiply: (Zero x Zero) || (Zero x norm) || (Zero x denorm)
+#
+	global		fmul_zero		# global for fsglmul
+fmul_zero:
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fmul_zero_p		# result ZERO is pos.
+fmul_zero_n:
+	fmov.s		&0x80000000,%fp0	# load -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
+	rts
+fmul_zero_p:
+	fmov.s		&0x00000000,%fp0	# load +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# Multiply: (inf x inf) || (inf x norm) || (inf x denorm)
+#
+# Note: The j-bit for an infinity is a don't-care. However, to be
+# strictly compatible w/ the 68881/882, we make sure to return an
+# INF w/ the j-bit set if the input INF j-bit was set. Destination
+# INFs take priority.
+#
+	global		fmul_inf_dst		# global for fsglmul
+fmul_inf_dst:
+	fmovm.x		DST(%a1),&0x80		# return INF result in fp0
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fmul_inf_dst_p		# result INF is pos.
+fmul_inf_dst_n:
+	fabs.x		%fp0			# clear result sign
+	fneg.x		%fp0			# set result sign
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
+	rts
+fmul_inf_dst_p:
+	fabs.x		%fp0			# clear result sign
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set INF
+	rts
+
+	global		fmul_inf_src		# global for fsglmul
+fmul_inf_src:
+	fmovm.x		SRC(%a0),&0x80		# return INF result in fp0
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fmul_inf_dst_p		# result INF is pos.
+	bra.b		fmul_inf_dst_n
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fin(): emulates the fmove instruction				#
+#	fsin(): emulates the fsmove instruction				#
+#	fdin(): emulates the fdmove instruction				#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize mantissa for EXOP on denorm			#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan_1op() - return QNAN result				#
+#	res_snan_1op() - return SNAN result				#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round prec/mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Norms can be emulated w/ a regular fmove instruction. For	#
+# sgl/dbl, must scale exponent and perform an "fmove". Check to see	#
+# if the result would have overflowed/underflowed. If so, use unf_res()	#
+# or ovf_res() to return the default result. Also return EXOP if	#
+# exception is enabled. If no exception, return the default result.	#
+#	Unnorms don't pass through here.				#
+#									#
+#########################################################################
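+
+# as w/ the other scaled operations, the sgl/dbl paths below compare the
+# scale factor returned by scale_to_zero_src() against fixed thresholds
+# (sketch only; the constants are the ones used in the compares below):
+#
+#	scale >= 0x3fff-0x3f80 (sgl) or 0x3fff-0x3c00 (dbl) -> underflow
+#	scale <  0x3fff-0x407e (sgl) or 0x3fff-0x43fe (dbl) -> overflow
+#	scale == the overflow threshold -> may overflow; re-check after
+#	the rounded move.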
+
+	global		fsin
+fsin:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fin
+
+	global		fdin
+fdin:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl precision
+
+	global		fin
+fin:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	mov.b		STAG(%a6),%d1		# fetch src optype tag
+	bne.w		fin_not_norm		# optimize on non-norm input
+
+#
+# FP MOVE IN: NORMs and DENORMs ONLY!
+#
+fin_norm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.w		fin_not_ext		# no, so go handle dbl or sgl
+
+#
+# precision selected is extended. so...we cannot get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+	tst.b		SRC_EX(%a0)		# is the operand negative?
+	bpl.b		fin_norm_done		# no
+	bset		&neg_bit,FPSR_CC(%a6)	# yes, so set 'N' ccode bit
+fin_norm_done:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance(no inexactness!)
+#
+fin_denorm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.w		fin_not_ext		# no, so go handle dbl or sgl
+
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+	tst.b		SRC_EX(%a0)		# is the operand negative?
+	bpl.b		fin_denorm_done		# no
+	bset		&neg_bit,FPSR_CC(%a6)	# yes, so set 'N' ccode bit
+fin_denorm_done:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	btst		&unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+	bne.b		fin_denorm_unfl_ena	# yes
+	rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
+fin_denorm_unfl_ena:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	bsr.l		norm			# normalize result
+	neg.w		%d0			# new exponent = -(shft val)
+	addi.w		&0x6000,%d0		# add new bias to exponent
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch old sign,exp
+	andi.w		&0x8000,%d1		# keep old sign
+	andi.w		&0x7fff,%d0		# clear sign position
+	or.w		%d1,%d0			# concat new exp,old sign
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#
+# operand is to be rounded to single or double precision
+#
+fin_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.b		fin_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fin_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f80	# will move in underflow?
+	bge.w		fin_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407e	# will move in overflow?
+	beq.w		fin_sd_may_ovfl		# maybe; go check
+	blt.w		fin_sd_ovfl		# yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved into the fp reg file
+#
+fin_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# perform move
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fin_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exponent
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fin_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c00	# will move in underflow?
+	bge.w		fin_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43fe	# will move in overflow?
+	beq.w		fin_sd_may_ovfl		# maybe; go check
+	blt.w		fin_sd_ovfl		# yes; go handle overflow
+	bra.w		fin_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fin_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	tst.b		FP_SCR0_EX(%a6)		# is operand negative?
+	bpl.b		fin_sd_unfl_tst
+	bset		&neg_bit,FPSR_CC(%a6)	# set 'N' ccode bit
+
+# if underflow or inexact is enabled, then go calculate the EXOP first.
+fin_sd_unfl_tst:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fin_sd_unfl_ena		# yes
+
+fin_sd_unfl_dis:
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# unf_res may have set 'Z'
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow or inexact is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fin_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# subtract scale factor
+	andi.w		&0x8000,%d2		# extract old sign
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR1_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fin_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fin_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# perform move
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fin_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fin_sd_ovfl_ena		# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fin_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
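+# (this is the mirror image of the underflow case: the EXOP delivered for
+#  an enabled overflow has 0x6000 subtracted from its exponent, just as
+#  0x6000 is added for an enabled underflow.)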
+fin_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	sub.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fin_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fin_sd_may_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# perform the move
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fin_sd_ovfl_tst		# yes; overflow has occurred
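+# (the scaled operand's magnitude lies in [1.0,2.0); only if rounding to
+#  the target precision pushed it up to exactly 2.0 does the true exponent
+#  move one past the maximum, which is what the compare against 2.0 detects.)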
+
+# no, it didn't overflow; we have correct result
+	bra.w		fin_sd_normal_exit
+
+##########################################################################
+
+#
+# operand is not a NORM: check its optype and branch accordingly
+#
+fin_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fin_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNANs
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNANs
+	beq.l		res_qnan_1op
+
+#
+# do the fmove in; at this point, only possible ops are ZERO and INF.
+# use fmov to determine ccodes.
+# prec:mode should be zero at this point but it won't affect answer anyways.
+#
+	fmov.x		SRC(%a0),%fp0		# do fmove in
+	fmov.l		%fpsr,%d0		# no exceptions possible
+	rol.l		&0x8,%d0		# put ccodes in lo byte
+	mov.b		%d0,FPSR_CC(%a6)	# insert correct ccodes
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fdiv(): emulates the fdiv instruction				#
+#	fsdiv(): emulates the fsdiv instruction				#
+#	fddiv(): emulates the fddiv instruction				#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res() - return default underflow result			#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0  rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a divide	#
+# instruction won't cause an exception. Use the regular fdiv to		#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
+
+	align		0x10
+tbl_fdiv_unfl:
+	long		0x3fff - 0x0000		# ext_unfl
+	long		0x3fff - 0x3f81		# sgl_unfl
+	long		0x3fff - 0x3c01		# dbl_unfl
+
+tbl_fdiv_ovfl:
+	long		0x3fff - 0x7ffe		# ext overflow exponent
+	long		0x3fff - 0x407e		# sgl overflow exponent
+	long		0x3fff - 0x43fe		# dbl overflow exponent
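+
+# note: scale_to_zero_{src,dst}() leave each operand with a biased exponent
+# of 0x3fff, so the combined scale factor computed below comes out as
+# roughly 0x3fff minus the biased exponent the quotient would have had.
+# the tables above (indexed by rounding precision: ext, sgl, dbl) hold
+# 0x3fff minus the largest/smallest usable exponent for each precision,
+# so a signed compare of the scale factor against a table entry decides
+# up front whether the quotient can overflow or underflow.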
+
+	global		fsdiv
+fsdiv:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fdiv
+
+	global		fddiv
+fddiv:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fdiv
+fdiv:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fdiv_not_norm		# optimize on non-norm input
+
+#
+# DIVIDE: NORMs and DENORMs ONLY!
+#
+fdiv_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# scale src exponent
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# scale dst exponent
+
+	neg.l		(%sp)			# SCALE FACTOR = scale1 - scale2
+	add.l		%d0,(%sp)
+
+	mov.w		2+L_SCR3(%a6),%d1	# fetch precision
+	lsr.b		&0x6,%d1		# shift to lo bits
+	mov.l		(%sp)+,%d0		# load S.F.
+	cmp.l		%d0,(tbl_fdiv_ovfl.b,%pc,%d1.w*4) # will result overflow?
+	ble.w		fdiv_may_ovfl		# result will overflow
+
+	cmp.l		%d0,(tbl_fdiv_unfl.w,%pc,%d1.w*4) # will result underflow?
+	beq.w		fdiv_may_unfl		# maybe
+	bgt.w		fdiv_unfl		# yes; go handle underflow
+
+fdiv_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# save FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# perform divide
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fdiv_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store result on stack
+	mov.l		%d2,-(%sp)		# store d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+tbl_fdiv_ovfl2:
+	long		0x7fff
+	long		0x407f
+	long		0x43ff
+
+fdiv_no_ovfl:
+	mov.l		(%sp)+,%d0		# restore scale factor
+	bra.b		fdiv_normal_exit
+
+fdiv_may_ovfl:
+	mov.l		%d0,-(%sp)		# save scale factor
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# set FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d0
+	fmov.l		&0x0,%fpcr
+
+	or.l		%d0,USER_FPSR(%a6)	# save INEX,N
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+	mov.w		(%sp),%d0		# fetch new exponent
+	add.l		&0xc,%sp		# clear result from stack
+	andi.l		&0x7fff,%d0		# strip sign
+	sub.l		(%sp),%d0		# add scale factor
+	cmp.l		%d0,(tbl_fdiv_ovfl2.b,%pc,%d1.w*4)
+	blt.b		fdiv_no_ovfl
+	mov.l		(%sp)+,%d0
+
+fdiv_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fdiv_ovfl_ena		# yes
+
+fdiv_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+fdiv_ovfl_ena:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fdiv_ovfl_ena_sd	# no, do sgl or dbl
+
+fdiv_ovfl_ena_cont:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1		# clear sign bit
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fdiv_ovfl_dis
+
+fdiv_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# keep rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	bra.b		fdiv_ovfl_ena_cont
+
+fdiv_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fdiv_unfl_ena		# yes
+
+fdiv_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fdiv_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fdiv_unfl_ena_sd	# no, sgl or dbl
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fdiv_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp1	# execute divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exp
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fdiv_unfl_dis
+
+fdiv_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# use only rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fdiv_unfl_ena_cont
+
+#
+# the divide operation MAY underflow:
+#
+fdiv_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x1		# is |result| < 1.b?
+	fbgt.w		fdiv_normal_exit	# no; no underflow occurred
+	fblt.w		fdiv_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 1. but,
+# we don't know if the result was an underflow that rounded up to a 1
+# or a normalized number that rounded down to a 1. so, redo the entire
+# operation using RZ as the rounding mode to see what the pre-rounded
+# result is. this case should be relatively rare.
+#
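+# (round-to-zero never rounds a magnitude up across 1.0, so if the RZ
+#  quotient below still comes out >= 1.0, the infinitely precise quotient
+#  was >= 1.0 as well and no underflow occurred.)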
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert RZ
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp1	# execute divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x1		# is |result| < 1.b?
+	fbge.w		fdiv_normal_exit	# no; no underflow occurred
+	bra.w		fdiv_unfl		# yes; underflow occurred
+
+############################################################################
+
+#
+# Divide: inputs are not both normalized; what are they?
+#
+fdiv_not_norm:
+	mov.w		(tbl_fdiv_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fdiv_op.b,%pc,%d1.w*1)
+
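+# the dispatch index built at the fdiv entry point is (DTAG << 3) | STAG,
+# so each group of eight entries below corresponds to one destination
+# operand type and the position within a group to the source type, in the
+# order shown by the comments (NORM, ZERO, INF, QNAN, DENORM, SNAN); the
+# stored words are offsets from tbl_fdiv_op.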
+	swbeg		&48
+tbl_fdiv_op:
+	short		fdiv_norm	- tbl_fdiv_op # NORM / NORM
+	short		fdiv_inf_load	- tbl_fdiv_op # NORM / ZERO
+	short		fdiv_zero_load	- tbl_fdiv_op # NORM / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # NORM / QNAN
+	short		fdiv_norm	- tbl_fdiv_op # NORM / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # NORM / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_zero_load	- tbl_fdiv_op # ZERO / NORM
+	short		fdiv_res_operr	- tbl_fdiv_op # ZERO / ZERO
+	short		fdiv_zero_load	- tbl_fdiv_op # ZERO / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # ZERO / QNAN
+	short		fdiv_zero_load	- tbl_fdiv_op # ZERO / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # ZERO / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_inf_dst	- tbl_fdiv_op # INF / NORM
+	short		fdiv_inf_dst	- tbl_fdiv_op # INF / ZERO
+	short		fdiv_res_operr	- tbl_fdiv_op # INF / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # INF / QNAN
+	short		fdiv_inf_dst	- tbl_fdiv_op # INF / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # INF / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / NORM
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / ZERO
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / QNAN
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # QNAN / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_norm	- tbl_fdiv_op # DENORM / NORM
+	short		fdiv_inf_load	- tbl_fdiv_op # DENORM / ZERO
+	short		fdiv_zero_load	- tbl_fdiv_op # DENORM / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # DENORM / QNAN
+	short		fdiv_norm	- tbl_fdiv_op # DENORM / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # DENORM / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / NORM
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / ZERO
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / INF
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / QNAN
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+fdiv_res_qnan:
+	bra.l		res_qnan
+fdiv_res_snan:
+	bra.l		res_snan
+fdiv_res_operr:
+	bra.l		res_operr
+
+	global		fdiv_zero_load		# global for fsgldiv
+fdiv_zero_load:
+	mov.b		SRC_EX(%a0),%d0		# result sign is exclusive
+	mov.b		DST_EX(%a1),%d1		# or of input signs.
+	eor.b		%d0,%d1
+	bpl.b		fdiv_zero_load_p	# result is positive
+	fmov.s		&0x80000000,%fp0	# load a -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set Z/N
+	rts
+fdiv_zero_load_p:
+	fmov.s		&0x00000000,%fp0	# load a +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# The destination was In Range and the source was a ZERO. The result,
+# therefore, is an INF w/ the proper sign.
+# So, determine the sign and return a new INF (w/ the j-bit cleared).
+#
+	global		fdiv_inf_load		# global for fsgldiv
+fdiv_inf_load:
+	ori.w		&dz_mask+adz_mask,2+USER_FPSR(%a6) # set DZ/ADZ
+	mov.b		SRC_EX(%a0),%d0		# load both signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fdiv_inf_load_p		# result is positive
+	fmov.s		&0xff800000,%fp0	# make result -INF
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
+	rts
+fdiv_inf_load_p:
+	fmov.s		&0x7f800000,%fp0	# make result +INF
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set INF
+	rts
+
+#
+# The destination was an INF w/ an In Range or ZERO source, the result is
+# an INF w/ the proper sign.
+# The 68881/882 returns the destination INF w/ the new sign(if the j-bit of the
+# dst INF is set, then the j-bit of the result INF is also set).
+#
+	global		fdiv_inf_dst		# global for fsgldiv
+fdiv_inf_dst:
+	mov.b		DST_EX(%a1),%d0		# load both signs
+	mov.b		SRC_EX(%a0),%d1
+	eor.b		%d0,%d1
+	bpl.b		fdiv_inf_dst_p		# result is positive
+
+	fmovm.x		DST(%a1),&0x80		# return result in fp0
+	fabs.x		%fp0			# clear sign bit
+	fneg.x		%fp0			# set sign bit
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fdiv_inf_dst_p:
+	fmovm.x		DST(%a1),&0x80		# return result in fp0
+	fabs.x		%fp0			# return positive INF
+	mov.b		&inf_bmask,FPSR_CC(%a6) # set INF
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fneg(): emulates the fneg instruction				#
+#	fsneg(): emulates the fsneg instruction				#
+#	fdneg(): emulates the fdneg instruction				#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize a denorm to provide EXOP			#
+#	scale_to_zero_src() - scale sgl/dbl source exponent		#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan_1op() - return QNAN result				#
+#	res_snan_1op() - return SNAN result				#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, zeroes, and infinities as special cases. Separate	#
+# norms/denorms into ext/sgl/dbl precisions. Extended precision can be	#
+# emulated by simply flipping sign bit. Sgl/dbl operands must be scaled	#
+# and an actual fneg performed to see if overflow/underflow would have	#
+# occurred. If so, return default underflow/overflow result. Else,	#
+# scale the result exponent and return result. FPSR gets set based on	#
+# the result value.							#
+#									#
+#########################################################################
+
+	global		fsneg
+fsneg:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fneg
+
+	global		fdneg
+fdneg:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fneg
+fneg:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+	mov.b		STAG(%a6),%d1
+	bne.w		fneg_not_norm		# optimize on non-norm input
+
+#
+# NEGATE SIGN : norms and denorms ONLY!
+#
+fneg_norm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.w		fneg_not_ext		# no; go handle sgl or dbl
+
+#
+# precision selected is extended. so...we can not get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	eori.w		&0x8000,%d0		# negate sign
+	bpl.b		fneg_norm_load		# sign is positive
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+fneg_norm_load:
+	mov.w		%d0,FP_SCR0_EX(%a6)
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance(no inexactness!)
+#
+fneg_denorm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fneg_not_ext		# no; go handle sgl or dbl
+
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	eori.w		&0x8000,%d0		# negate sign
+	bpl.b		fneg_denorm_done	# sign is positive
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# yes, set 'N' ccode bit
+fneg_denorm_done:
+	mov.w		%d0,FP_SCR0_EX(%a6)
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+
+	btst		&unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+	bne.b		fneg_ext_unfl_ena	# yes
+	rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
+fneg_ext_unfl_ena:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	bsr.l		norm			# normalize result
+	neg.w		%d0			# new exponent = -(shft val)
+	addi.w		&0x6000,%d0		# add new bias to exponent
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch old sign,exp
+	andi.w		&0x8000,%d1		# keep old sign
+	andi.w		&0x7fff,%d0		# clear sign position
+	or.w		%d1,%d0			# concat old sign, new exponent
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#
+# operand is either single or double
+#
+fneg_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.b		fneg_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fneg_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f80	# will move in underflow?
+	bge.w		fneg_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407e	# will move in overflow?
+	beq.w		fneg_sd_may_ovfl	# maybe; go check
+	blt.w		fneg_sd_ovfl		# yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fneg_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fneg.x		FP_SCR0(%a6),%fp0	# perform negation
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fneg_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load sgn,exp
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fneg_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c00	# will move in underflow?
+	bge.b		fneg_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43fe	# will move in overflow?
+	beq.w		fneg_sd_may_ovfl	# maybe; go check
+	blt.w		fneg_sd_ovfl		# yes; go handle overflow
+	bra.w		fneg_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fneg_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	eori.b		&0x80,FP_SCR0_EX(%a6)	# negate sign
+	bpl.b		fneg_sd_unfl_tst
+	bset		&neg_bit,FPSR_CC(%a6)	# set 'N' ccode bit
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+fneg_sd_unfl_tst:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fneg_sd_unfl_ena	# yes
+
+fneg_sd_unfl_dis:
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# unf_res may have set 'Z'
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow or inexact is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fneg_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat new sign,new exp
+	mov.w		%d1,FP_SCR1_EX(%a6)	# insert new exp
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fneg_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fneg_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fneg.x		FP_SCR0(%a6),%fp0	# perform negation
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fneg_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fneg_sd_ovfl_ena	# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fneg_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fneg_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fneg_sd_ovfl_dis
+
+#
+# the result MAY overflow. so...
+#
+fneg_sd_may_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fneg.x		FP_SCR0(%a6),%fp0	# perform negation
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fneg_sd_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fneg_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fneg_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fneg_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNAN
+	beq.l		res_qnan_1op
+
+#
+# do the fneg; at this point, only possible ops are ZERO and INF.
+# use fneg to determine ccodes.
+# prec:mode should be zero at this point but it won't affect answer anyways.
+#
+	fneg.x		SRC_EX(%a0),%fp0	# do fneg
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0		# put ccodes in lo byte
+	mov.b		%d0,FPSR_CC(%a6)	# insert correct ccodes
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	ftst(): emulates the ftst instruction				#
+#									#
+# XREF ****************************************************************	#
+#	res{s,q}nan_1op() - set NAN result for monadic instruction	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	none								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Check the source operand tag (STAG) and set the FPSR according	#
+# to the operand type and sign.						#
+#									#
+#########################################################################
+
+	global		ftst
+ftst:
+	mov.b		STAG(%a6),%d1
+	bne.b		ftst_not_norm		# optimize on non-norm input
+
+#
+# Norm:
+#
+ftst_norm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_norm_m		# yes
+	rts
+ftst_norm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#
+# input is not normalized; what is it?
+#
+ftst_not_norm:
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		ftst_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		ftst_inf
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNAN
+	beq.l		res_qnan_1op
+
+#
+# Denorm:
+#
+ftst_denorm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_denorm_m		# yes
+	rts
+ftst_denorm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#
+# Infinity:
+#
+ftst_inf:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_inf_m		# yes
+ftst_inf_p:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+ftst_inf_m:
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'I','N' ccode bits
+	rts
+
+#
+# Zero:
+#
+ftst_zero:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_zero_m		# yes
+ftst_zero_p:
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+ftst_zero_m:
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set 'Z','N' ccode bits
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fint(): emulates the fint instruction				#
+#									#
+# XREF ****************************************************************	#
+#	res_{s,q}nan_1op() - set NAN result for monadic operation	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round precision/mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Separate according to operand type. Unnorms don't pass through	#
+# here. For norms, load the rounding mode/prec, execute a "fint", then	#
+# store the resulting FPSR bits.					#
+#	For denorms, force the j-bit to a one and do the same as for	#
+# norms. Denorms are so low that the answer will either be a zero or a	#
+# one.									#
+#	For zeroes/infs/NANs, return the same while setting the FPSR	#
+# as appropriate.							#
+#									#
+#########################################################################
+
+	global		fint
+fint:
+	mov.b		STAG(%a6),%d1
+	bne.b		fint_not_norm		# optimize on non-norm input
+
+#
+# Norm:
+#
+fint_norm:
+	andi.b		&0x30,%d0		# set prec = ext
+
+	fmov.l		%d0,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fint.x		SRC(%a0),%fp0		# execute fint
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d0		# save FPSR
+	or.l		%d0,USER_FPSR(%a6)	# set exception bits
+
+	rts
+
+#
+# input is not normalized; what is it?
+#
+fint_not_norm:
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		fint_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fint_inf
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.b		fint_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	bra.l		res_qnan_1op		# weed out QNAN
+
+#
+# Denorm:
+#
+# for DENORMs, the result will be either (+/-)ZERO or (+/-)1.
+# also, the INEX2 and AINEX exception bits will be set.
+# so, we could either set these manually or force the DENORM
+# to a very small NORM and ship it to the NORM routine.
+# I do the latter.
+#
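+# (a tiny positive operand, for instance, rounds to +1 under RP and to +0
+#  under RN/RZ/RM, exactly what the original denorm would produce, and the
+#  inexact bits get set by the real fint either way.)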
+fint_denorm:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
+	mov.b		&0x80,FP_SCR0_HI(%a6)	# force DENORM ==> small NORM
+	lea		FP_SCR0(%a6),%a0
+	bra.b		fint_norm
+
+#
+# Zero:
+#
+fint_zero:
+	tst.b		SRC_EX(%a0)		# is ZERO negative?
+	bmi.b		fint_zero_m		# yes
+fint_zero_p:
+	fmov.s		&0x00000000,%fp0	# return +ZERO in fp0
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fint_zero_m:
+	fmov.s		&0x80000000,%fp0	# return -ZERO in fp0
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+	rts
+
+#
+# Infinity:
+#
+fint_inf:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	tst.b		SRC_EX(%a0)		# is INF negative?
+	bmi.b		fint_inf_m		# yes
+fint_inf_p:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+fint_inf_m:
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fintrz(): emulates the fintrz instruction			#
+#									#
+# XREF ****************************************************************	#
+#	res_{s,q}nan_1op() - set NAN result for monadic operation	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round precision/mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Separate according to operand type. Unnorms don't pass through	#
+# here. For norms, load the rounding mode/prec, execute a "fintrz",	#
+# then store the resulting FPSR bits.					#
+#	For denorms, force the j-bit to a one and do the same as for	#
+# norms. Denorms are so low that the answer will always be a zero.	#
+#									#
+#	For zeroes/infs/NANs, return the same while setting the FPSR	#
+# as appropriate.							#
+#									#
+#########################################################################
+
+	global		fintrz
+fintrz:
+	mov.b		STAG(%a6),%d1
+	bne.b		fintrz_not_norm		# optimize on non-norm input
+
+#
+# Norm:
+#
+fintrz_norm:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fintrz.x	SRC(%a0),%fp0		# execute fintrz
+
+	fmov.l		%fpsr,%d0		# save FPSR
+	or.l		%d0,USER_FPSR(%a6)	# set exception bits
+
+	rts
+
+#
+# input is not normalized; what is it?
+#
+fintrz_not_norm:
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		fintrz_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fintrz_inf
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.b		fintrz_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	bra.l		res_qnan_1op		# weed out QNAN
+
+#
+# Denorm:
+#
+# for DENORMs, the result will be (+/-)ZERO.
+# also, the INEX2 and AINEX exception bits will be set.
+# so, we could either set these manually or force the DENORM
+# to a very small NORM and ship it to the NORM routine.
+# I do the latter.
+#
+fintrz_denorm:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
+	mov.b		&0x80,FP_SCR0_HI(%a6)	# force DENORM ==> small NORM
+	lea		FP_SCR0(%a6),%a0
+	bra.b		fintrz_norm
+
+#
+# Zero:
+#
+fintrz_zero:
+	tst.b		SRC_EX(%a0)		# is ZERO negative?
+	bmi.b		fintrz_zero_m		# yes
+fintrz_zero_p:
+	fmov.s		&0x00000000,%fp0	# return +ZERO in fp0
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fintrz_zero_m:
+	fmov.s		&0x80000000,%fp0	# return -ZERO in fp0
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+	rts
+
+#
+# Infinity:
+#
+fintrz_inf:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	tst.b		SRC_EX(%a0)		# is INF negative?
+	bmi.b		fintrz_inf_m		# yes
+fintrz_inf_p:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+fintrz_inf_m:
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fabs():  emulates the fabs instruction				#
+#	fsabs(): emulates the fsabs instruction				#
+#	fdabs(): emulates the fdabs instruction				#
+#									#
+# XREF **************************************************************** #
+#	norm() - normalize denorm mantissa to provide EXOP		#
+#	scale_to_zero_src() - make exponent = 0; get scale factor	#
+#	unf_res() - calculate underflow result				#
+#	ovf_res() - calculate overflow result				#
+#	res_{s,q}nan_1op() - set NAN result for monadic operation	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision source operand		#
+#	d0 = rnd precision/mode						#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Simply clear sign for extended precision norm. Ext prec denorm	#
+# gets an EXOP created for it since it's an underflow.			#
+#	Double and single precision can overflow and underflow. First,	#
+# scale the operand such that the exponent is zero. Perform an "fabs"	#
+# using the correct rnd mode/prec. Check to see if the original		#
+# exponent would take an exception. If so, use unf_res() or ovf_res()	#
+# to calculate the default result. Also, create the EXOP for the	#
+# exceptional case. If no exception should occur, insert the correct	#
+# result exponent and return.						#
+#	Unnorms don't pass through here.				#
+#									#
+#########################################################################
+
+	global		fsabs
+fsabs:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fabs
+
+	global		fdabs
+fdabs:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl precision
+
+	global		fabs
+fabs:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+	mov.b		STAG(%a6),%d1
+	bne.w		fabs_not_norm		# optimize on non-norm input
+
+#
+# ABSOLUTE VALUE: norms and denorms ONLY!
+#
+fabs_norm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fabs_not_ext		# no; go handle sgl or dbl
+
+#
+# precision selected is extended. so...we can not get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d1
+	bclr		&15,%d1			# force absolute value
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert exponent
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance(no inexactness!)
+#
+fabs_denorm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fabs_not_ext		# no
+
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	bclr		&15,%d0			# clear sign
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert exponent
+
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+
+	btst		&unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+	bne.b		fabs_ext_unfl_ena
+	rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
+fabs_ext_unfl_ena:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	bsr.l		norm			# normalize result
+	neg.w		%d0			# new exponent = -(shft val)
+	addi.w		&0x6000,%d0		# add new bias to exponent
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch old sign,exp
+	andi.w		&0x8000,%d1		# keep old sign
+	andi.w		&0x7fff,%d0		# clear sign position
+	or.w		%d1,%d0			# concat old sign, new exponent
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#
+# operand is either single or double
+#
+fabs_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.b		fabs_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fabs_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f80	# will move in underflow?
+	bge.w		fabs_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407e	# will move in overflow?
+	beq.w		fabs_sd_may_ovfl	# maybe; go check
+	blt.w		fabs_sd_ovfl		# yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fabs_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fabs_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load sgn,exp
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fabs_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c00	# will move in underflow?
+	bge.b		fabs_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43fe	# will move in overflow?
+	beq.w		fabs_sd_may_ovfl	# maybe; go check
+	blt.w		fabs_sd_ovfl		# yes; go handle overflow
+	bra.w		fabs_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fabs_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	bclr		&0x7,FP_SCR0_EX(%a6)	# force absolute value
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fabs_sd_unfl_ena	# yes
+
+fabs_sd_unfl_dis:
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set possible 'Z' ccode
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow or inexact is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fabs_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat new sign,new exp
+	mov.w		%d1,FP_SCR1_EX(%a6)	# insert new exp
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fabs_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fabs_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fabs_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fabs_sd_ovfl_ena	# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fabs_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fabs_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fabs_sd_ovfl_dis
+
+#
+# the result MAY overflow. so...
+#
+fabs_sd_may_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fabs_sd_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fabs_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fabs_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fabs_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNAN
+	beq.l		res_qnan_1op
+
+	fabs.x		SRC(%a0),%fp0		# force absolute value
+
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fabs_inf
+fabs_zero:
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fabs_inf:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fcmp(): fp compare op routine					#
+#									#
+# XREF ****************************************************************	#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = round prec/mode						#
+#									#
+# OUTPUT ************************************************************** #
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs and denorms as special cases. For everything else,	#
+# just use the actual fcmp instruction to produce the correct condition	#
+# codes.								#
+#									#
+#########################################################################
+
+	global		fcmp
+fcmp:
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1
+	bne.b		fcmp_not_norm		# optimize on non-norm input
+
+#
+# COMPARE FP OPs : NORMs, ZEROs, INFs, and "corrected" DENORMs
+#
+fcmp_norm:
+	fmovm.x		DST(%a1),&0x80		# load dst op
+
+	fcmp.x		%fp0,SRC(%a0)		# do compare
+
+	fmov.l		%fpsr,%d0		# save FPSR
+	rol.l		&0x8,%d0		# extract ccode bits
+	mov.b		%d0,FPSR_CC(%a6)	# set ccode bits(no exc bits are set)
+
+	rts
+
+#
+# fcmp: inputs are not both normalized; what are they?
+#
+fcmp_not_norm:
+	mov.w		(tbl_fcmp_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fcmp_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fcmp_op:
+	short		fcmp_norm	- tbl_fcmp_op # NORM - NORM
+	short		fcmp_norm	- tbl_fcmp_op # NORM - ZERO
+	short		fcmp_norm	- tbl_fcmp_op # NORM - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # NORM - QNAN
+	short		fcmp_nrm_dnrm	- tbl_fcmp_op # NORM - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # NORM - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_norm	- tbl_fcmp_op # ZERO - NORM
+	short		fcmp_norm	- tbl_fcmp_op # ZERO - ZERO
+	short		fcmp_norm	- tbl_fcmp_op # ZERO - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # ZERO - QNAN
+	short		fcmp_dnrm_s	- tbl_fcmp_op # ZERO - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # ZERO - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_norm	- tbl_fcmp_op # INF - NORM
+	short		fcmp_norm	- tbl_fcmp_op # INF - ZERO
+	short		fcmp_norm	- tbl_fcmp_op # INF - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # INF - QNAN
+	short		fcmp_dnrm_s	- tbl_fcmp_op # INF - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # INF - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - NORM
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - ZERO
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - QNAN
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # QNAN - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_dnrm_nrm	- tbl_fcmp_op # DENORM - NORM
+	short		fcmp_dnrm_d	- tbl_fcmp_op # DENORM - ZERO
+	short		fcmp_dnrm_d	- tbl_fcmp_op # DENORM - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # DENORM - QNAN
+	short		fcmp_dnrm_sd	- tbl_fcmp_op # DENORM - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # DENORM - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - NORM
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - ZERO
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - INF
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - QNAN
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+# unlike all other functions for QNAN and SNAN, fcmp does NOT set the
+# 'N' bit for a negative QNAN or SNAN input so we must squelch it here.
+fcmp_res_qnan:
+	bsr.l		res_qnan
+	andi.b		&0xf7,FPSR_CC(%a6)
+	rts
+fcmp_res_snan:
+	bsr.l		res_snan
+	andi.b		&0xf7,FPSR_CC(%a6)
+	rts
+
+#
+# DENORMs are a little more difficult.
+# If you have 2 DENORMs, then you can just force the j-bit to a one
+# and use the fcmp_norm routine.
+# If you have a DENORM and an INF or ZERO, just force the DENORM's j-bit to a one
+# and use the fcmp_norm routine.
+# If you have a DENORM and a NORM with opposite signs, then use fcmp_norm, also.
+# But with a DENORM and a NORM of the same sign, the neg bit is set if the
+# (1) signs are (+) and the DENORM is the dst or
+# (2) signs are (-) and the DENORM is the src
+#
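+# for example, with both operands positive and the DENORM as the dst, the
+# dst has the smaller magnitude, so dst - src is negative and 'N' must be
+# set; swap the operands (DENORM as the src) and the difference is positive,
+# so 'N' stays clear. the two same-sign routines below do exactly this.
+#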
+
+fcmp_dnrm_s:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),%d0
+	bset		&31,%d0			# DENORM src; make into small norm
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a0
+	bra.w		fcmp_norm
+
+fcmp_dnrm_d:
+	mov.l		DST_EX(%a1),FP_SCR0_EX(%a6)
+	mov.l		DST_HI(%a1),%d0
+	bset		&31,%d0			# DENORM dst; make into small norm
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a1
+	bra.w		fcmp_norm
+
+fcmp_dnrm_sd:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		DST_HI(%a1),%d0
+	bset		&31,%d0			# DENORM dst; make into small norm
+	mov.l		%d0,FP_SCR1_HI(%a6)
+	mov.l		SRC_HI(%a0),%d0
+	bset		&31,%d0			# DENORM src; make into small norm
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR1(%a6),%a1
+	lea		FP_SCR0(%a6),%a0
+	bra.w		fcmp_norm
+
+fcmp_nrm_dnrm:
+	mov.b		SRC_EX(%a0),%d0		# determine if like signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bmi.w		fcmp_dnrm_s
+
+# signs are the same, so must determine the answer ourselves.
+	tst.b		%d0			# is src op negative?
+	bmi.b		fcmp_nrm_dnrm_m		# yes
+	rts
+fcmp_nrm_dnrm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+fcmp_dnrm_nrm:
+	mov.b		SRC_EX(%a0),%d0		# determine if like signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bmi.w		fcmp_dnrm_d
+
+# signs are the same, so must determine the answer ourselves.
+	tst.b		%d0			# is src op negative?
+	bpl.b		fcmp_dnrm_nrm_m		# no
+	rts
+fcmp_dnrm_nrm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsglmul(): emulates the fsglmul instruction			#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res4() - return default underflow result for sglop		#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0  rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a multiply	#
+# instruction won't cause an exception. Use the regular fsglmul to	#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
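+
+# note: a product's exponent is roughly the sum of the two operands'
+# exponents. with both operands scaled to a biased exponent of 0x3fff,
+# the combined scale factor (scale1 + scale2) comes out as 0x3fff minus
+# the approximate biased exponent of the product, so the compares against
+# 0x3fff-0x7ffe and 0x3fff+0x0001 below bracket the extended exponent
+# range (fsglmul rounds the mantissa to single precision but keeps the
+# full extended exponent range).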
+
+	global		fsglmul
+fsglmul:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1
+
+	bne.w		fsglmul_not_norm	# optimize on non-norm input
+
+fsglmul_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# scale exponent
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# scale dst exponent
+
+	add.l		(%sp)+,%d0		# SCALE_FACTOR = scale1 + scale2
+
+	cmpi.l		%d0,&0x3fff-0x7ffe	# would result ovfl?
+	beq.w		fsglmul_may_ovfl	# result may rnd to overflow
+	blt.w		fsglmul_ovfl		# result will overflow
+
+	cmpi.l		%d0,&0x3fff+0x0001	# would result unfl?
+	beq.w		fsglmul_may_unfl	# result may rnd to no unfl
+	bgt.w		fsglmul_unfl		# result will underflow
+
+fsglmul_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsglmul_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+fsglmul_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsglmul_ovfl_tst:
+
+# save setting this until now because this is where fsglmul_may_ovfl may jump in
+	or.l		&ovfl_inx_mask, USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsglmul_ovfl_ena	# yes
+
+fsglmul_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	andi.b		&0x30,%d0		# force prec = ext
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
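+#
+# OVFL is enabled, so an exception operand (EXOP) is also returned in fp1:
+# same mantissa as the computed result, but with the exponent pulled back
+# into range by the extra 0x6000 adjustment below.
+#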
+fsglmul_ovfl_ena:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fsglmul_ovfl_dis
+
+fsglmul_may_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fsglmul_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fsglmul_normal_exit
+
+fsglmul_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsglmul_unfl_ena	# yes
+
+fsglmul_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res4		# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' bit may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fsglmul_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp1	# execute sgl multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fsglmul_unfl_dis
+
+fsglmul_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| > 2.b?
+	fbgt.w		fsglmul_normal_exit	# no; no underflow occurred
+	fblt.w		fsglmul_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 2. but,
+# we don't know if the result was an underflow that rounded up to a 2 or
+# a normalized number that rounded down to a 2. so, redo the entire operation
+# using RZ as the rounding mode to see what the pre-rounded result is.
+# this case should be relatively rare.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert RZ
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp1	# execute sgl multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x2		# is |result| < 2.b?
+	fbge.w		fsglmul_normal_exit	# no; no underflow occurred
+	bra.w		fsglmul_unfl		# yes, underflow occurred
+
+##############################################################################
+
+#
+# Single Precision Multiply: inputs are not both normalized; what are they?
+#
+fsglmul_not_norm:
+	mov.w		(tbl_fsglmul_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fsglmul_op.b,%pc,%d1.w*1)
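+# the dispatch index built at fsglmul (dst tag * 8 + src tag) picks one
+# of the 16-bit, table-relative offsets below; the entries that point
+# back at the table itself just pad out unused tag encodings.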
+
+	swbeg		&48
+tbl_fsglmul_op:
+	short		fsglmul_norm		- tbl_fsglmul_op # NORM x NORM
+	short		fsglmul_zero		- tbl_fsglmul_op # NORM x ZERO
+	short		fsglmul_inf_src		- tbl_fsglmul_op # NORM x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # NORM x QNAN
+	short		fsglmul_norm		- tbl_fsglmul_op # NORM x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # NORM x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_zero		- tbl_fsglmul_op # ZERO x NORM
+	short		fsglmul_zero		- tbl_fsglmul_op # ZERO x ZERO
+	short		fsglmul_res_operr	- tbl_fsglmul_op # ZERO x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # ZERO x QNAN
+	short		fsglmul_zero		- tbl_fsglmul_op # ZERO x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # ZERO x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_inf_dst		- tbl_fsglmul_op # INF x NORM
+	short		fsglmul_res_operr	- tbl_fsglmul_op # INF x ZERO
+	short		fsglmul_inf_dst		- tbl_fsglmul_op # INF x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # INF x QNAN
+	short		fsglmul_inf_dst		- tbl_fsglmul_op # INF x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # INF x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x NORM
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x ZERO
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x QNAN
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # QNAN x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_norm		- tbl_fsglmul_op # DENORM x NORM
+	short		fsglmul_zero		- tbl_fsglmul_op # DENORM x ZERO
+	short		fsglmul_inf_src		- tbl_fsglmul_op # DENORM x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # DENORM x QNAN
+	short		fsglmul_norm		- tbl_fsglmul_op # DENORM x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # DENORM x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x NORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x ZERO
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x INF
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x QNAN
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+fsglmul_res_operr:
+	bra.l		res_operr
+fsglmul_res_snan:
+	bra.l		res_snan
+fsglmul_res_qnan:
+	bra.l		res_qnan
+fsglmul_zero:
+	bra.l		fmul_zero
+fsglmul_inf_src:
+	bra.l		fmul_inf_src
+fsglmul_inf_dst:
+	bra.l		fmul_inf_dst
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsgldiv(): emulates the fsgldiv instruction			#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res4() - return default underflow result for sglop		#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a divide	#
+# instruction won't cause an exception. Use the regular fsgldiv to	#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
+
+	global		fsgldiv
+fsgldiv:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fsgldiv_not_norm	# optimize on non-norm input
+
+#
+# DIVIDE: NORMs and DENORMs ONLY!
+#
+fsgldiv_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# calculate scale factor 1
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# calculate scale factor 2
+
+	neg.l		(%sp)			# S.F. = scale2 - scale1
+	add.l		%d0,(%sp)
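+# (that is, the dst scale factor minus the src scale factor; it is the
+# amount subtracted from the scaled quotient's exponent at
+# fsgldiv_normal_exit to recover the true result exponent.)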
+
+	mov.w		2+L_SCR3(%a6),%d1	# fetch precision,mode
+	lsr.b		&0x6,%d1
+	mov.l		(%sp)+,%d0
+	cmpi.l		%d0,&0x3fff-0x7ffe
+	ble.w		fsgldiv_may_ovfl
+
+	cmpi.l		%d0,&0x3fff-0x0000	# will result underflow?
+	beq.w		fsgldiv_may_unfl	# maybe
+	bgt.w		fsgldiv_unfl		# yes; go handle underflow
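+# note that the overflow test above is only a coarse filter: the scaled
+# quotient's exponent may sit one below the bias, so the exact overflow
+# decision is re-made after the divide along the fsgldiv_may_ovfl path.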
+
+fsgldiv_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# perform sgl divide
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsgldiv_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store result on stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+fsgldiv_may_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d1
+	fmov.l		&0x0,%fpcr
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX,N
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+	mov.w		(%sp),%d1		# fetch new exponent
+	add.l		&0xc,%sp		# pop result off stack
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	cmp.l		%d1,&0x7fff		# did divide overflow?
+	blt.b		fsgldiv_normal_exit
+
+fsgldiv_ovfl_tst:
+	or.w		&ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsgldiv_ovfl_ena	# yes
+
+fsgldiv_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	andi.b		&0x30,%d0		# kill precision
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+fsgldiv_ovfl_ena:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract new bias
+	andi.w		&0x7fff,%d1		# clear ms bit
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fsgldiv_ovfl_dis
+
+fsgldiv_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# execute sgl divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsgldiv_unfl_ena	# yes
+
+fsgldiv_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res4		# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' bit may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fsgldiv_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp1	# execute sgl divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1		# clear top bit
+	or.w		%d2,%d1			# concat old sign, new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fsgldiv_unfl_dis
+
+#
+# the divide operation MAY underflow:
+#
+fsgldiv_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# execute sgl divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x1		# is |result| > 1.b?
+	fbgt.w		fsgldiv_normal_exit	# no; no underflow occurred
+	fblt.w		fsgldiv_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 1. but,
+# we don't know if the result was an underflow that rounded up to a 1
+# or a normalized number that rounded down to a 1. so, redo the entire
+# operation using RZ as the rounding mode to see what the pre-rounded
+# result is. this case should be relatively rare.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into %fp1
+
+	clr.l		%d1			# clear scratch register
+	ori.b		&rz_mode*0x10,%d1	# force RZ rnd mode
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp1	# execute sgl divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x1		# is |result| < 1.b?
+	fbge.w		fsgldiv_normal_exit	# no; no underflow occurred
+	bra.w		fsgldiv_unfl		# yes; underflow occurred
+
+############################################################################
+
+#
+# Divide: inputs are not both normalized; what are they?
+#
+fsgldiv_not_norm:
+	mov.w		(tbl_fsgldiv_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fsgldiv_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fsgldiv_op:
+	short		fsgldiv_norm		- tbl_fsgldiv_op # NORM / NORM
+	short		fsgldiv_inf_load	- tbl_fsgldiv_op # NORM / ZERO
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # NORM / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # NORM / QNAN
+	short		fsgldiv_norm		- tbl_fsgldiv_op # NORM / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # NORM / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # ZERO / NORM
+	short		fsgldiv_res_operr	- tbl_fsgldiv_op # ZERO / ZERO
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # ZERO / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # ZERO / QNAN
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # ZERO / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # ZERO / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_inf_dst		- tbl_fsgldiv_op # INF / NORM
+	short		fsgldiv_inf_dst		- tbl_fsgldiv_op # INF / ZERO
+	short		fsgldiv_res_operr	- tbl_fsgldiv_op # INF / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # INF / QNAN
+	short		fsgldiv_inf_dst		- tbl_fsgldiv_op # INF / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # INF / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / NORM
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / ZERO
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / QNAN
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # QNAN / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_norm		- tbl_fsgldiv_op # DENORM / NORM
+	short		fsgldiv_inf_load	- tbl_fsgldiv_op # DENORM / ZERO
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # DENORM / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # DENORM / QNAN
+	short		fsgldiv_norm		- tbl_fsgldiv_op # DENORM / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # DENORM / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / NORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / ZERO
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / INF
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / QNAN
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+fsgldiv_res_qnan:
+	bra.l		res_qnan
+fsgldiv_res_snan:
+	bra.l		res_snan
+fsgldiv_res_operr:
+	bra.l		res_operr
+fsgldiv_inf_load:
+	bra.l		fdiv_inf_load
+fsgldiv_zero_load:
+	bra.l		fdiv_zero_load
+fsgldiv_inf_dst:
+	bra.l		fdiv_inf_dst
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fadd(): emulates the fadd instruction				#
+#	fsadd(): emulates the fsadd instruction				#
+#	fdadd(): emulates the fdadd instruction				#
+#									#
+# XREF ****************************************************************	#
+#	addsub_scaler2() - scale the operands so they won't take exc	#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan() - set QNAN result					#
+#	res_snan() - set SNAN result					#
+#	res_operr() - set OPERR result					#
+#	scale_to_zero_src() - set src operand exponent equal to zero	#
+#	scale_to_zero_dst() - set dst operand exponent equal to zero	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Do addition after scaling exponents such that exception won't	#
+# occur. Then, check result exponent to see if exception would have	#
+# occurred. If so, return default result and maybe EXOP. Else, insert	#
+# the correct result exponent and return. Set FPSR bits as appropriate.	#
+#									#
+#########################################################################
+
+	global		fsadd
+fsadd:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fadd
+
+	global		fdadd
+fdadd:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fadd
+fadd:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fadd_not_norm		# optimize on non-norm input
+
+#
+# ADD: norms and denorms
+#
+fadd_norm:
+	bsr.l		addsub_scaler2		# scale exponents
+
+fadd_zero_entry:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fadd.x		FP_SCR0(%a6),%fp0	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch INEX2,N,Z
+
+	or.l		%d1,USER_FPSR(%a6)	# save exc and ccode bits
+
+	fbeq.w		fadd_zero_exit		# if result is zero, end now
+
+	mov.l		%d2,-(%sp)		# save d2
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+
+	mov.w		2+L_SCR3(%a6),%d1
+	lsr.b		&0x6,%d1
+
+	mov.w		(%sp),%d2		# fetch new sign, exp
+	andi.l		&0x7fff,%d2		# strip sign
+	sub.l		%d0,%d2			# add scale factor
+
+	cmp.l		%d2,(tbl_fadd_ovfl.b,%pc,%d1.w*4) # is it an overflow?
+	bge.b		fadd_ovfl		# yes
+
+	cmp.l		%d2,(tbl_fadd_unfl.b,%pc,%d1.w*4) # is it an underflow?
+	blt.w		fadd_unfl		# yes
+	beq.w		fadd_may_unfl		# maybe; go find out
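+# (tbl_fadd_ovfl/tbl_fadd_unfl below hold the per-precision exponent
+# thresholds -- one entry each for ext, sgl and dbl rounding precision --
+# and d1, the precision code pulled out of L_SCR3 above, selects the
+# pair that the result exponent in d2 is checked against.)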
+
+fadd_normal:
+	mov.w		(%sp),%d1
+	andi.w		&0x8000,%d1		# keep sign
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x80		# return result in fp0
+
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fadd_zero_exit:
+#	fmov.s		&0x00000000,%fp0	# return zero in fp0
+	rts
+
+tbl_fadd_ovfl:
+	long		0x7fff			# ext ovfl
+	long		0x407f			# sgl ovfl
+	long		0x43ff			# dbl ovfl
+
+tbl_fadd_unfl:
+	long	        0x0000			# ext unfl
+	long		0x3f81			# sgl unfl
+	long		0x3c01			# dbl unfl
+
+fadd_ovfl:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fadd_ovfl_ena		# yes
+
+	add.l		&0xc,%sp
+fadd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fadd_ovfl_ena:
+	mov.b		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fadd_ovfl_ena_sd	# no; prec = sgl or dbl
+
+fadd_ovfl_ena_cont:
+	mov.w		(%sp),%d1
+	andi.w		&0x8000,%d1		# keep sign
+	subi.l		&0x6000,%d2		# subtract new bias
+	andi.w		&0x7fff,%d2
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x40		# return EXOP in fp1
+	bra.b		fadd_ovfl_dis
+
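+#
+# the result in fp0 was rounded to sgl/dbl; the EXOP, though, is built
+# from the result rounded to extended, so the add is redone below with
+# only the rounding mode in the FPCR before rejoining the code above.
+#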
+fadd_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# keep rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fadd.x		FP_SCR0(%a6),%fp0	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	add.l		&0xc,%sp
+	fmovm.x		&0x01,-(%sp)
+	bra.b		fadd_ovfl_ena_cont
+
+fadd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	add.l		&0xc,%sp
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fadd.x		FP_SCR0(%a6),%fp0	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save status
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fadd_unfl_ena		# yes
+
+fadd_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' bit may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fadd_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fadd_unfl_ena_sd	# no; sgl or dbl
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fadd_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fadd.x		FP_SCR0(%a6),%fp1	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1		# clear top bit
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fadd_unfl_dis
+
+fadd_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# use only rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fadd_unfl_ena_cont
+
+#
+# result is equal to the smallest normalized number in the selected precision
+# if the precision is extended, this result could not have come from an
+# underflow that rounded up.
+#
+fadd_may_unfl:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	beq.w		fadd_normal		# yes; no underflow occurred
+
+	mov.l		0x4(%sp),%d1		# extract hi(man)
+	cmpi.l		%d1,&0x80000000		# is hi(man) = 0x80000000?
+	bne.w		fadd_normal		# no; no underflow occurred
+
+	tst.l		0x8(%sp)		# is lo(man) = 0x0?
+	bne.w		fadd_normal		# no; no underflow occurred
+
+	btst		&inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.w		fadd_normal		# no; no underflow occurred
+
+#
+# ok, so now the result has an exponent equal to the smallest normalized
+# exponent for the selected precision. also, the mantissa is equal to
+# 0x8000000000000000 and this mantissa is the result of rounding non-zero
+# g,r,s.
+# now, we must determine whether the pre-rounded result was an underflow
+# rounded "up" or a normalized number rounded "down".
+# so, we do this by re-executing the add using RZ as the rounding mode and
+# seeing if the new result is smaller or equal to the current result.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fadd.x		FP_SCR0(%a6),%fp1	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# compare absolute values
+	fabs.x		%fp1
+	fcmp.x		%fp0,%fp1		# is first result > second?
+
+	fbgt.w		fadd_unfl		# yes; it's an underflow
+	bra.w		fadd_normal		# no; it's not an underflow
+
+##########################################################################
+
+#
+# Add: inputs are not both normalized; what are they?
+#
+fadd_not_norm:
+	mov.w		(tbl_fadd_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fadd_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fadd_op:
+	short		fadd_norm	- tbl_fadd_op # NORM + NORM
+	short		fadd_zero_src	- tbl_fadd_op # NORM + ZERO
+	short		fadd_inf_src	- tbl_fadd_op # NORM + INF
+	short		fadd_res_qnan	- tbl_fadd_op # NORM + QNAN
+	short		fadd_norm	- tbl_fadd_op # NORM + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # NORM + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_zero_dst	- tbl_fadd_op # ZERO + NORM
+	short		fadd_zero_2	- tbl_fadd_op # ZERO + ZERO
+	short		fadd_inf_src	- tbl_fadd_op # ZERO + INF
+	short		fadd_res_qnan	- tbl_fadd_op # ZERO + QNAN
+	short		fadd_zero_dst	- tbl_fadd_op # ZERO + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # ZERO + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_inf_dst	- tbl_fadd_op # INF + NORM
+	short		fadd_inf_dst	- tbl_fadd_op # INF + ZERO
+	short		fadd_inf_2	- tbl_fadd_op # INF + INF
+	short		fadd_res_qnan	- tbl_fadd_op # INF + QNAN
+	short		fadd_inf_dst	- tbl_fadd_op # INF + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # INF + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + NORM
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + ZERO
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + INF
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + QNAN
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # QNAN + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_norm	- tbl_fadd_op # DENORM + NORM
+	short		fadd_zero_src	- tbl_fadd_op # DENORM + ZERO
+	short		fadd_inf_src	- tbl_fadd_op # DENORM + INF
+	short		fadd_res_qnan	- tbl_fadd_op # DENORM + QNAN
+	short		fadd_norm	- tbl_fadd_op # DENORM + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # DENORM + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + NORM
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + ZERO
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + INF
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + QNAN
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+fadd_res_qnan:
+	bra.l		res_qnan
+fadd_res_snan:
+	bra.l		res_snan
+
+#
+# both operands are ZEROes
+#
+fadd_zero_2:
+	mov.b		SRC_EX(%a0),%d0		# are the signs opposite
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bmi.w		fadd_zero_2_chk_rm	# weed out (-ZERO)+(+ZERO)
+
+# the signs are the same. so determine whether they are positive or negative
+# and return the appropriately signed zero.
+	tst.b		%d0			# are ZEROes positive or negative?
+	bmi.b		fadd_zero_rm		# negative
+	fmov.s		&0x00000000,%fp0	# return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# the ZEROes have opposite signs:
+# - therefore, we return +ZERO if the rounding modes are RN,RZ, or RP.
+# - -ZERO is returned in the case of RM.
+#
+fadd_zero_2_chk_rm:
+	mov.b		3+L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# extract rnd mode
+	cmpi.b		%d1,&rm_mode*0x10	# is rnd mode == RM?
+	beq.b		fadd_zero_rm		# yes
+	fmov.s		&0x00000000,%fp0	# return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+fadd_zero_rm:
+	fmov.s		&0x80000000,%fp0	# return -ZERO
+	mov.b		&neg_bmask+z_bmask,FPSR_CC(%a6) # set NEG/Z
+	rts
+
+#
+# one operand is a ZERO and the other is a DENORM or NORM. scale
+# the DENORM or NORM and jump to the regular fadd routine.
+#
+fadd_zero_dst:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# scale the operand
+	clr.w		FP_SCR1_EX(%a6)
+	clr.l		FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+	bra.w		fadd_zero_entry		# go execute fadd
+
+fadd_zero_src:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	bsr.l		scale_to_zero_dst	# scale the operand
+	clr.w		FP_SCR0_EX(%a6)
+	clr.l		FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+	bra.w		fadd_zero_entry		# go execute fadd
+
+#
+# both operands are INFs. an OPERR will result if the INFs have
+# different signs. else, an INF of the same sign is returned
+#
+fadd_inf_2:
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d1,%d0
+	bmi.l		res_operr		# weed out (-INF)+(+INF)
+
+# ok, so it's not an OPERR. but, we do have to remember to return the
+# src INF since that's where the 881/882 gets the j-bit from...
+
+#
+# operands are INF and one of {ZERO, INF, DENORM, NORM}
+#
+fadd_inf_src:
+	fmovm.x		SRC(%a0),&0x80		# return src INF
+	tst.b		SRC_EX(%a0)		# is INF positive?
+	bpl.b		fadd_inf_done		# yes; we're done
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+#
+# operands are INF and one of {ZERO, INF, DENORM, NORM}
+#
+fadd_inf_dst:
+	fmovm.x		DST(%a1),&0x80		# return dst INF
+	tst.b		DST_EX(%a1)		# is INF positive?
+	bpl.b		fadd_inf_done		# yes; we're done
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fadd_inf_done:
+	mov.b		&inf_bmask,FPSR_CC(%a6) # set INF
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsub(): emulates the fsub instruction				#
+#	fssub(): emulates the fssub instruction				#
+#	fdsub(): emulates the fdsub instruction				#
+#									#
+# XREF ****************************************************************	#
+#	addsub_scaler2() - scale the operands so they won't take exc	#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan() - set QNAN result					#
+#	res_snan() - set SNAN result					#
+#	res_operr() - set OPERR result					#
+#	scale_to_zero_src() - set src operand exponent equal to zero	#
+#	scale_to_zero_dst() - set dst operand exponent equal to zero	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Do subtraction after scaling exponents such that exception won't#
+# occur. Then, check result exponent to see if exception would have	#
+# occurred. If so, return default result and maybe EXOP. Else, insert	#
+# the correct result exponent and return. Set FPSR bits as appropriate.	#
+#									#
+#########################################################################
+
+	global		fssub
+fssub:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fsub
+
+	global		fdsub
+fdsub:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fsub
+fsub:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fsub_not_norm		# optimize on non-norm input
+
+#
+# SUB: norms and denorms
+#
+fsub_norm:
+	bsr.l		addsub_scaler2		# scale exponents
+
+fsub_zero_entry:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsub.x		FP_SCR0(%a6),%fp0	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch INEX2, N, Z
+
+	or.l		%d1,USER_FPSR(%a6)	# save exc and ccode bits
+
+	fbeq.w		fsub_zero_exit		# if result zero, end now
+
+	mov.l		%d2,-(%sp)		# save d2
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+
+	mov.w		2+L_SCR3(%a6),%d1
+	lsr.b		&0x6,%d1
+
+	mov.w		(%sp),%d2		# fetch new exponent
+	andi.l		&0x7fff,%d2		# strip sign
+	sub.l		%d0,%d2			# add scale factor
+
+	cmp.l		%d2,(tbl_fsub_ovfl.b,%pc,%d1.w*4) # is it an overflow?
+	bge.b		fsub_ovfl		# yes
+
+	cmp.l		%d2,(tbl_fsub_unfl.b,%pc,%d1.w*4) # is it an underflow?
+	blt.w		fsub_unfl		# yes
+	beq.w		fsub_may_unfl		# maybe; go find out
+
+fsub_normal:
+	mov.w		(%sp),%d1
+	andi.w		&0x8000,%d1		# keep sign
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x80		# return result in fp0
+
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fsub_zero_exit:
+#	fmov.s		&0x00000000,%fp0	# return zero in fp0
+	rts
+
+tbl_fsub_ovfl:
+	long		0x7fff			# ext ovfl
+	long		0x407f			# sgl ovfl
+	long		0x43ff			# dbl ovfl
+
+tbl_fsub_unfl:
+	long	        0x0000			# ext unfl
+	long		0x3f81			# sgl unfl
+	long		0x3c01			# dbl unfl
+
+fsub_ovfl:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsub_ovfl_ena		# yes
+
+	add.l		&0xc,%sp
+fsub_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fsub_ovfl_ena:
+	mov.b		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fsub_ovfl_ena_sd	# no
+
+fsub_ovfl_ena_cont:
+	mov.w		(%sp),%d1		# fetch {sgn,exp}
+	andi.w		&0x8000,%d1		# keep sign
+	subi.l		&0x6000,%d2		# subtract new bias
+	andi.w		&0x7fff,%d2		# clear top bit
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x40		# return EXOP in fp1
+	bra.b		fsub_ovfl_dis
+
+fsub_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# clear rnd prec
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fsub.x		FP_SCR0(%a6),%fp0	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	add.l		&0xc,%sp
+	fmovm.x		&0x01,-(%sp)
+	bra.b		fsub_ovfl_ena_cont
+
+fsub_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	add.l		&0xc,%sp
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsub.x		FP_SCR0(%a6),%fp0	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save status
+
+	or.l		%d1,USER_FPSR(%a6)
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsub_unfl_ena		# yes
+
+fsub_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fsub_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fsub_unfl_ena_sd	# no
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fsub_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsub.x		FP_SCR0(%a6),%fp1	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# store result to stack
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1		# clear top bit
+	or.w		%d2,%d1			# concat sgn,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fsub_unfl_dis
+
+fsub_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# clear rnd prec
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fsub_unfl_ena_cont
+
+#
+# result is equal to the smallest normalized number in the selected precision
+# if the precision is extended, this result could not have come from an
+# underflow that rounded up.
+#
+fsub_may_unfl:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	beq.w		fsub_normal		# yes; no underflow occurred
+
+	mov.l		0x4(%sp),%d1
+	cmpi.l		%d1,&0x80000000		# is hi(man) = 0x80000000?
+	bne.w		fsub_normal		# no; no underflow occurred
+
+	tst.l		0x8(%sp)		# is lo(man) = 0x0?
+	bne.w		fsub_normal		# no; no underflow occurred
+
+	btst		&inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.w		fsub_normal		# no; no underflow occurred
+
+#
+# ok, so now the result has an exponent equal to the smallest normalized
+# exponent for the selected precision. also, the mantissa is equal to
+# 0x8000000000000000 and this mantissa is the result of rounding non-zero
+# g,r,s.
+# now, we must determine whether the pre-rounded result was an underflow
+# rounded "up" or a normalized number rounded "down".
+# so, we do this by re-executing the subtract using RZ as the rounding mode and
+# seeing if the new result is smaller or equal to the current result.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsub.x		FP_SCR0(%a6),%fp1	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# compare absolute values
+	fabs.x		%fp1
+	fcmp.x		%fp0,%fp1		# is first result > second?
+
+	fbgt.w		fsub_unfl		# yes; it's an underflow
+	bra.w		fsub_normal		# no; it's not an underflow
+
+##########################################################################
+
+#
+# Sub: inputs are not both normalized; what are they?
+#
+fsub_not_norm:
+	mov.w		(tbl_fsub_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fsub_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fsub_op:
+	short		fsub_norm	- tbl_fsub_op # NORM - NORM
+	short		fsub_zero_src	- tbl_fsub_op # NORM - ZERO
+	short		fsub_inf_src	- tbl_fsub_op # NORM - INF
+	short		fsub_res_qnan	- tbl_fsub_op # NORM - QNAN
+	short		fsub_norm	- tbl_fsub_op # NORM - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # NORM - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_zero_dst	- tbl_fsub_op # ZERO - NORM
+	short		fsub_zero_2	- tbl_fsub_op # ZERO - ZERO
+	short		fsub_inf_src	- tbl_fsub_op # ZERO - INF
+	short		fsub_res_qnan	- tbl_fsub_op # ZERO - QNAN
+	short		fsub_zero_dst	- tbl_fsub_op # ZERO - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # ZERO - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_inf_dst	- tbl_fsub_op # INF - NORM
+	short		fsub_inf_dst	- tbl_fsub_op # INF - ZERO
+	short		fsub_inf_2	- tbl_fsub_op # INF - INF
+	short		fsub_res_qnan	- tbl_fsub_op # INF - QNAN
+	short		fsub_inf_dst	- tbl_fsub_op # INF - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # INF - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - NORM
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - ZERO
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - INF
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - QNAN
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # QNAN - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_norm	- tbl_fsub_op # DENORM - NORM
+	short		fsub_zero_src	- tbl_fsub_op # DENORM - ZERO
+	short		fsub_inf_src	- tbl_fsub_op # DENORM - INF
+	short		fsub_res_qnan	- tbl_fsub_op # DENORM - QNAN
+	short		fsub_norm	- tbl_fsub_op # DENORM - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # DENORM - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - NORM
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - ZERO
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - INF
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - QNAN
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+fsub_res_qnan:
+	bra.l		res_qnan
+fsub_res_snan:
+	bra.l		res_snan
+
+#
+# both operands are ZEROes
+#
+fsub_zero_2:
+	mov.b		SRC_EX(%a0),%d0
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d1,%d0
+	bpl.b		fsub_zero_2_chk_rm
+
+# the signs are opposite, so, return a ZERO w/ the sign of the dst ZERO
+	tst.b		%d1			# is dst negative?
+	bmi.b		fsub_zero_2_rm		# yes
+	fmov.s		&0x00000000,%fp0	# no; return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# the ZEROes have the same signs:
+# - therefore, we return +ZERO if the rounding mode is RN,RZ, or RP
+# - -ZERO is returned in the case of RM.
+#
+fsub_zero_2_chk_rm:
+	mov.b		3+L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# extract rnd mode
+	cmpi.b		%d1,&rm_mode*0x10	# is rnd mode = RM?
+	beq.b		fsub_zero_2_rm		# yes
+	fmov.s		&0x00000000,%fp0	# no; return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+fsub_zero_2_rm:
+	fmov.s		&0x80000000,%fp0	# return -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set Z/NEG
+	rts
+
+#
+# one operand is a ZERO and the other is a DENORM or a NORM.
+# scale the DENORM or NORM and jump to the regular fsub routine.
+#
+fsub_zero_dst:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# scale the operand
+	clr.w		FP_SCR1_EX(%a6)
+	clr.l		FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+	bra.w		fsub_zero_entry		# go execute fsub
+
+fsub_zero_src:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	bsr.l		scale_to_zero_dst	# scale the operand
+	clr.w		FP_SCR0_EX(%a6)
+	clr.l		FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+	bra.w		fsub_zero_entry		# go execute fsub
+
+#
+# both operands are INFs. an OPERR will result if the INFs have the
+# same signs. else, an INF w/ the sign of the dst INF is returned.
+#
+fsub_inf_2:
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d1,%d0
+	bpl.l		res_operr		# weed out (-INF)+(+INF)
+
+# ok, so it's not an OPERR. but we do have to remember to return
+# the src INF since that's where the 881/882 gets the j-bit.
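+# for a subtract, though, the sign of that INF has to be flipped first
+# (the result is dst - src), which is what the fneg below takes care of.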
+
+fsub_inf_src:
+	fmovm.x		SRC(%a0),&0x80		# return src INF
+	fneg.x		%fp0			# invert sign
+	fbge.w		fsub_inf_done		# sign is now positive
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fsub_inf_dst:
+	fmovm.x		DST(%a1),&0x80		# return dst INF
+	tst.b		DST_EX(%a1)		# is INF negative?
+	bpl.b		fsub_inf_done		# no
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fsub_inf_done:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set INF
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsqrt(): emulates the fsqrt instruction				#
+#	fssqrt(): emulates the fssqrt instruction			#
+#	fdsqrt(): emulates the fdsqrt instruction			#
+#									#
+# XREF ****************************************************************	#
+#	scale_sqrt() - scale the source operand				#
+#	unf_res() - return default underflow result			#
+#	ovf_res() - return default overflow result			#
+#	res_qnan_1op() - return QNAN result				#
+#	res_snan_1op() - return SNAN result				#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a sqrt		#
+# instruction won't cause an exception. Use the regular fsqrt to	#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
+
+	global		fssqrt
+fssqrt:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fsqrt
+
+	global		fdsqrt
+fdsqrt:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl precision
+
+	global		fsqrt
+fsqrt:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+	clr.w		%d1
+	mov.b		STAG(%a6),%d1
+	bne.w		fsqrt_not_norm		# optimize on non-norm input
+
+#
+# SQUARE ROOT: norms and denorms ONLY!
+#
+fsqrt_norm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.l		res_operr		# yes
+
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fsqrt_not_ext		# no; go handle sgl or dbl
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsqrt.x		(%a0),%fp0		# execute square root
+
+	fmov.l		%fpsr,%d1
+	or.l		%d1,USER_FPSR(%a6)	# set N,INEX
+
+	rts
+
+fsqrt_denorm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.l		res_operr		# yes
+
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fsqrt_not_ext		# no; go handle sgl or dbl
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_sqrt		# calculate scale factor
+
+	bra.w		fsqrt_sd_normal
+
+#
+# operand is either single or double
+#
+fsqrt_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.w		fsqrt_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fsqrt_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_sqrt		# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f81	# will move in underflow?
+	beq.w		fsqrt_sd_may_unfl
+	bgt.w		fsqrt_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407f	# will move in overflow?
+	beq.w		fsqrt_sd_may_ovfl	# maybe; go check
+	blt.w		fsqrt_sd_ovfl		# yes; go handle overflow
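+# (0x3f81 is the smallest single-precision normalized exponent and 0x407f
+# the first exponent that overflows single precision, expressed with the
+# extended-precision bias -- the same values used by tbl_fadd_unfl and
+# tbl_fadd_ovfl earlier.)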
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fsqrt_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# perform square root
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsqrt_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load sgn,exp
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fsqrt_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_sqrt		# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c01	# will move in underflow?
+	beq.w		fsqrt_sd_may_unfl
+	bgt.b		fsqrt_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43ff	# will move in overflow?
+	beq.w		fsqrt_sd_may_ovfl	# maybe; go check
+	blt.w		fsqrt_sd_ovfl		# yes; go handle overflow
+	bra.w		fsqrt_sd_normal		# no; go handle normalized op
+
+# we're on the line here and the distinguishing characteristic is whether
+# the exponent is 3fff or 3ffe. if it's 3ffe, then it's a safe number;
+# otherwise fall through to underflow.
+fsqrt_sd_may_unfl:
+	btst		&0x0,1+FP_SCR0_EX(%a6)	# is exponent 0x3fff?
+	bne.w		fsqrt_sd_normal		# yes, so no underflow
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fsqrt_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# execute square root
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsqrt_sd_unfl_ena	# yes
+
+fsqrt_sd_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set possible 'Z' ccode
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fsqrt_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat new sign,new exp
+	mov.w		%d1,FP_SCR1_EX(%a6)	# insert new exp
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fsqrt_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fsqrt_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# perform square root
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsqrt_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsqrt_sd_ovfl_ena	# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fsqrt_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fsqrt_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fsqrt_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fsqrt_sd_may_ovfl:
+	btst		&0x0,1+FP_SCR0_EX(%a6)	# is exponent 0x3fff?
+	bne.w		fsqrt_sd_ovfl		# yes, so overflow
+
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# perform square root
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fmov.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x1		# is |result| >= 1.b?
+	fbge.w		fsqrt_sd_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fsqrt_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fsqrt_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fsqrt_denorm
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		fsqrt_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fsqrt_inf
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	bra.l		res_qnan_1op
+
+#
+#	fsqrt(+0) = +0
+#	fsqrt(-0) = -0
+#	fsqrt(+INF) = +INF
+#	fsqrt(-INF) = OPERR
+#
+fsqrt_zero:
+	tst.b		SRC_EX(%a0)		# is ZERO positive or negative?
+	bmi.b		fsqrt_zero_m		# negative
+fsqrt_zero_p:
+	fmov.s		&0x00000000,%fp0	# return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fsqrt_zero_m:
+	fmov.s		&0x80000000,%fp0	# return -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set 'Z','N' ccode bits
+	rts
+
+fsqrt_inf:
+	tst.b		SRC_EX(%a0)		# is INF positive or negative?
+	bmi.l		res_operr		# negative
+fsqrt_inf_p:
+	fmovm.x		SRC(%a0),&0x80		# return +INF in fp0
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fetch_dreg(): fetch register according to index in d1		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d1 = index of register to fetch from				#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of register fetched					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1 which can range from zero	#
+# to fifteen, load the corresponding register file value (where		#
+# address register indexes start at 8). D0/D1/A0/A1/A6/A7 are on the	#
+# stack. The rest should still be in their original places.		#
+#									#
+#########################################################################
+
+# this routine leaves d1 intact for subsequent store_dreg calls.
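+# For example, an index of 2 simply returns the live value of %d2,
+# while an index of 8 (address register a0) is read from its saved
+# copy at EXC_DREGS+0x8 on the exception stack frame.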
+	global		fetch_dreg
+fetch_dreg:
+	mov.w		(tbl_fdreg.b,%pc,%d1.w*2),%d0
+	jmp		(tbl_fdreg.b,%pc,%d0.w*1)
+
+tbl_fdreg:
+	short		fdreg0 - tbl_fdreg
+	short		fdreg1 - tbl_fdreg
+	short		fdreg2 - tbl_fdreg
+	short		fdreg3 - tbl_fdreg
+	short		fdreg4 - tbl_fdreg
+	short		fdreg5 - tbl_fdreg
+	short		fdreg6 - tbl_fdreg
+	short		fdreg7 - tbl_fdreg
+	short		fdreg8 - tbl_fdreg
+	short		fdreg9 - tbl_fdreg
+	short		fdrega - tbl_fdreg
+	short		fdregb - tbl_fdreg
+	short		fdregc - tbl_fdreg
+	short		fdregd - tbl_fdreg
+	short		fdrege - tbl_fdreg
+	short		fdregf - tbl_fdreg
+
+fdreg0:
+	mov.l		EXC_DREGS+0x0(%a6),%d0
+	rts
+fdreg1:
+	mov.l		EXC_DREGS+0x4(%a6),%d0
+	rts
+fdreg2:
+	mov.l		%d2,%d0
+	rts
+fdreg3:
+	mov.l		%d3,%d0
+	rts
+fdreg4:
+	mov.l		%d4,%d0
+	rts
+fdreg5:
+	mov.l		%d5,%d0
+	rts
+fdreg6:
+	mov.l		%d6,%d0
+	rts
+fdreg7:
+	mov.l		%d7,%d0
+	rts
+fdreg8:
+	mov.l		EXC_DREGS+0x8(%a6),%d0
+	rts
+fdreg9:
+	mov.l		EXC_DREGS+0xc(%a6),%d0
+	rts
+fdrega:
+	mov.l		%a2,%d0
+	rts
+fdregb:
+	mov.l		%a3,%d0
+	rts
+fdregc:
+	mov.l		%a4,%d0
+	rts
+fdregd:
+	mov.l		%a5,%d0
+	rts
+fdrege:
+	mov.l		(%a6),%d0
+	rts
+fdregf:
+	mov.l		EXC_A7(%a6),%d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_dreg_l(): store longword to data register specified by d1	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = longword value to store					#
+#	d1 = index of register to store to				#
+#									#
+# OUTPUT **************************************************************	#
+#	(data register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1, store the longword value	#
+# in d0 to the corresponding data register. D0/D1 are on the stack	#
+# while the rest are in their initial places.				#
+#									#
+#########################################################################
+
+	global		store_dreg_l
+store_dreg_l:
+	mov.w		(tbl_sdregl.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_sdregl.b,%pc,%d1.w*1)
+
+tbl_sdregl:
+	short		sdregl0 - tbl_sdregl
+	short		sdregl1 - tbl_sdregl
+	short		sdregl2 - tbl_sdregl
+	short		sdregl3 - tbl_sdregl
+	short		sdregl4 - tbl_sdregl
+	short		sdregl5 - tbl_sdregl
+	short		sdregl6 - tbl_sdregl
+	short		sdregl7 - tbl_sdregl
+
+sdregl0:
+	mov.l		%d0,EXC_DREGS+0x0(%a6)
+	rts
+sdregl1:
+	mov.l		%d0,EXC_DREGS+0x4(%a6)
+	rts
+sdregl2:
+	mov.l		%d0,%d2
+	rts
+sdregl3:
+	mov.l		%d0,%d3
+	rts
+sdregl4:
+	mov.l		%d0,%d4
+	rts
+sdregl5:
+	mov.l		%d0,%d5
+	rts
+sdregl6:
+	mov.l		%d0,%d6
+	rts
+sdregl7:
+	mov.l		%d0,%d7
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_dreg_w(): store word to data register specified by d1	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = word value to store					#
+#	d1 = index of register to store to				#
+#									#
+# OUTPUT **************************************************************	#
+#	(data register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1, store the word value	#
+# in d0 to the corresponding data register. D0/D1 are on the stack	#
+# while the rest are in their initial places.				#
+#									#
+#########################################################################
+
+	global		store_dreg_w
+store_dreg_w:
+	mov.w		(tbl_sdregw.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_sdregw.b,%pc,%d1.w*1)
+
+tbl_sdregw:
+	short		sdregw0 - tbl_sdregw
+	short		sdregw1 - tbl_sdregw
+	short		sdregw2 - tbl_sdregw
+	short		sdregw3 - tbl_sdregw
+	short		sdregw4 - tbl_sdregw
+	short		sdregw5 - tbl_sdregw
+	short		sdregw6 - tbl_sdregw
+	short		sdregw7 - tbl_sdregw
+
+sdregw0:
+	mov.w		%d0,2+EXC_DREGS+0x0(%a6)
+	rts
+sdregw1:
+	mov.w		%d0,2+EXC_DREGS+0x4(%a6)
+	rts
+sdregw2:
+	mov.w		%d0,%d2
+	rts
+sdregw3:
+	mov.w		%d0,%d3
+	rts
+sdregw4:
+	mov.w		%d0,%d4
+	rts
+sdregw5:
+	mov.w		%d0,%d5
+	rts
+sdregw6:
+	mov.w		%d0,%d6
+	rts
+sdregw7:
+	mov.w		%d0,%d7
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_dreg_b(): store byte to data register specified by d1	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = byte value to store					#
+#	d1 = index of register to store to				#
+#									#
+# OUTPUT **************************************************************	#
+#	(data register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1, store the byte value	#
+# in d0 to the corresponding data register. D0/D1 are on the stack	#
+# while the rest are in their initial places.				#
+#									#
+#########################################################################
+
+	global		store_dreg_b
+store_dreg_b:
+	mov.w		(tbl_sdregb.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_sdregb.b,%pc,%d1.w*1)
+
+tbl_sdregb:
+	short		sdregb0 - tbl_sdregb
+	short		sdregb1 - tbl_sdregb
+	short		sdregb2 - tbl_sdregb
+	short		sdregb3 - tbl_sdregb
+	short		sdregb4 - tbl_sdregb
+	short		sdregb5 - tbl_sdregb
+	short		sdregb6 - tbl_sdregb
+	short		sdregb7 - tbl_sdregb
+
+sdregb0:
+	mov.b		%d0,3+EXC_DREGS+0x0(%a6)
+	rts
+sdregb1:
+	mov.b		%d0,3+EXC_DREGS+0x4(%a6)
+	rts
+sdregb2:
+	mov.b		%d0,%d2
+	rts
+sdregb3:
+	mov.b		%d0,%d3
+	rts
+sdregb4:
+	mov.b		%d0,%d4
+	rts
+sdregb5:
+	mov.b		%d0,%d5
+	rts
+sdregb6:
+	mov.b		%d0,%d6
+	rts
+sdregb7:
+	mov.b		%d0,%d7
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	inc_areg(): increment an address register by the value in d0	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = amount to increment by					#
+#	d1 = index of address register to increment			#
+#									#
+# OUTPUT **************************************************************	#
+#	(address register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Typically used for an instruction w/ a post-increment <ea>,	#
+# this routine adds the increment value in d0 to the address register	#
+# specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside	#
+# in their original places.						#
+#	For a7, if the increment amount is one, then we have to		#
+# increment by two. For any a7 update, set the mia7_flag so that if	#
+# an access error exception occurs later in emulation, this address	#
+# register update can be undone.					#
+#									#
+#########################################################################
+
+	global		inc_areg
+inc_areg:
+	mov.w		(tbl_iareg.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_iareg.b,%pc,%d1.w*1)
+
+tbl_iareg:
+	short		iareg0 - tbl_iareg
+	short		iareg1 - tbl_iareg
+	short		iareg2 - tbl_iareg
+	short		iareg3 - tbl_iareg
+	short		iareg4 - tbl_iareg
+	short		iareg5 - tbl_iareg
+	short		iareg6 - tbl_iareg
+	short		iareg7 - tbl_iareg
+
+iareg0:	add.l		%d0,EXC_DREGS+0x8(%a6)
+	rts
+iareg1:	add.l		%d0,EXC_DREGS+0xc(%a6)
+	rts
+iareg2:	add.l		%d0,%a2
+	rts
+iareg3:	add.l		%d0,%a3
+	rts
+iareg4:	add.l		%d0,%a4
+	rts
+iareg5:	add.l		%d0,%a5
+	rts
+iareg6:	add.l		%d0,(%a6)
+	rts
+iareg7:	mov.b		&mia7_flg,SPCOND_FLG(%a6)
+	cmpi.b		%d0,&0x1
+	beq.b		iareg7b
+	add.l		%d0,EXC_A7(%a6)
+	rts
+iareg7b:
+	addq.l		&0x2,EXC_A7(%a6)
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	dec_areg(): decrement an address register by the value in d0	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = amount to decrement by					#
+#	d1 = index of address register to decrement			#
+#									#
+# OUTPUT **************************************************************	#
+#	(address register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Typically used for an instruction w/ a pre-decrement <ea>,	#
+# this routine subtracts the decrement value in d0 from the address	#
+# register specified by d1. A0/A1/A6/A7 reside on the stack. The rest	#
+# reside in their original places.					#
+#	For a7, if the decrement amount is one, then we have to		#
+# decrement by two. For any a7 update, set the mda7_flag so that if	#
+# an access error exception occurs later in emulation, this address	#
+# register update can be undone.					#
+#									#
+#########################################################################
+
+	global		dec_areg
+dec_areg:
+	mov.w		(tbl_dareg.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_dareg.b,%pc,%d1.w*1)
+
+tbl_dareg:
+	short		dareg0 - tbl_dareg
+	short		dareg1 - tbl_dareg
+	short		dareg2 - tbl_dareg
+	short		dareg3 - tbl_dareg
+	short		dareg4 - tbl_dareg
+	short		dareg5 - tbl_dareg
+	short		dareg6 - tbl_dareg
+	short		dareg7 - tbl_dareg
+
+dareg0:	sub.l		%d0,EXC_DREGS+0x8(%a6)
+	rts
+dareg1:	sub.l		%d0,EXC_DREGS+0xc(%a6)
+	rts
+dareg2:	sub.l		%d0,%a2
+	rts
+dareg3:	sub.l		%d0,%a3
+	rts
+dareg4:	sub.l		%d0,%a4
+	rts
+dareg5:	sub.l		%d0,%a5
+	rts
+dareg6:	sub.l		%d0,(%a6)
+	rts
+dareg7:	mov.b		&mda7_flg,SPCOND_FLG(%a6)
+	cmpi.b		%d0,&0x1
+	beq.b		dareg7b
+	sub.l		%d0,EXC_A7(%a6)
+	rts
+dareg7b:
+	subq.l		&0x2,EXC_A7(%a6)
+	rts
+
+##############################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	load_fpn1(): load FP register value into FP_SRC(a6).		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = index of FP register to load				#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SRC(a6) = value loaded from FP register file			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Using the index in d0, load FP_SRC(a6) with a number from the	#
+# FP register file.							#
+#									#
+#########################################################################
+
+	global		load_fpn1
+load_fpn1:
+	mov.w		(tbl_load_fpn1.b,%pc,%d0.w*2), %d0
+	jmp		(tbl_load_fpn1.b,%pc,%d0.w*1)
+
+tbl_load_fpn1:
+	short		load_fpn1_0 - tbl_load_fpn1
+	short		load_fpn1_1 - tbl_load_fpn1
+	short		load_fpn1_2 - tbl_load_fpn1
+	short		load_fpn1_3 - tbl_load_fpn1
+	short		load_fpn1_4 - tbl_load_fpn1
+	short		load_fpn1_5 - tbl_load_fpn1
+	short		load_fpn1_6 - tbl_load_fpn1
+	short		load_fpn1_7 - tbl_load_fpn1
+
+load_fpn1_0:
+	mov.l		0+EXC_FP0(%a6), 0+FP_SRC(%a6)
+	mov.l		4+EXC_FP0(%a6), 4+FP_SRC(%a6)
+	mov.l		8+EXC_FP0(%a6), 8+FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_1:
+	mov.l		0+EXC_FP1(%a6), 0+FP_SRC(%a6)
+	mov.l		4+EXC_FP1(%a6), 4+FP_SRC(%a6)
+	mov.l		8+EXC_FP1(%a6), 8+FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_2:
+	fmovm.x		&0x20, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_3:
+	fmovm.x		&0x10, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_4:
+	fmovm.x		&0x08, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_5:
+	fmovm.x		&0x04, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_6:
+	fmovm.x		&0x02, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_7:
+	fmovm.x		&0x01, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+
+#############################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	load_fpn2(): load FP register value into FP_DST(a6).		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = index of FP register to load				#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_DST(a6) = value loaded from FP register file			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Using the index in d0, load FP_DST(a6) with a number from the	#
+# FP register file.							#
+#									#
+#########################################################################
+
+	global		load_fpn2
+load_fpn2:
+	mov.w		(tbl_load_fpn2.b,%pc,%d0.w*2), %d0
+	jmp		(tbl_load_fpn2.b,%pc,%d0.w*1)
+
+tbl_load_fpn2:
+	short		load_fpn2_0 - tbl_load_fpn2
+	short		load_fpn2_1 - tbl_load_fpn2
+	short		load_fpn2_2 - tbl_load_fpn2
+	short		load_fpn2_3 - tbl_load_fpn2
+	short		load_fpn2_4 - tbl_load_fpn2
+	short		load_fpn2_5 - tbl_load_fpn2
+	short		load_fpn2_6 - tbl_load_fpn2
+	short		load_fpn2_7 - tbl_load_fpn2
+
+load_fpn2_0:
+	mov.l		0+EXC_FP0(%a6), 0+FP_DST(%a6)
+	mov.l		4+EXC_FP0(%a6), 4+FP_DST(%a6)
+	mov.l		8+EXC_FP0(%a6), 8+FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_1:
+	mov.l		0+EXC_FP1(%a6), 0+FP_DST(%a6)
+	mov.l		4+EXC_FP1(%a6), 4+FP_DST(%a6)
+	mov.l		8+EXC_FP1(%a6), 8+FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_2:
+	fmovm.x		&0x20, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_3:
+	fmovm.x		&0x10, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_4:
+	fmovm.x		&0x08, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_5:
+	fmovm.x		&0x04, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_6:
+	fmovm.x		&0x02, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_7:
+	fmovm.x		&0x01, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+
+#############################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_fpreg(): store an fp value to the fpreg designated d0.	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	fp0 = extended precision value to store				#
+#	d0  = index of floating-point register				#
+#									#
+# OUTPUT **************************************************************	#
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Store the value in fp0 to the FP register designated by the	#
+# value in d0. The FP number can be DENORM or SNAN so we have to be	#
+# careful that we don't take an exception here.				#
+#									#
+#########################################################################
+
+	global		store_fpreg
+store_fpreg:
+	mov.w		(tbl_store_fpreg.b,%pc,%d0.w*2), %d0
+	jmp		(tbl_store_fpreg.b,%pc,%d0.w*1)
+
+tbl_store_fpreg:
+	short		store_fpreg_0 - tbl_store_fpreg
+	short		store_fpreg_1 - tbl_store_fpreg
+	short		store_fpreg_2 - tbl_store_fpreg
+	short		store_fpreg_3 - tbl_store_fpreg
+	short		store_fpreg_4 - tbl_store_fpreg
+	short		store_fpreg_5 - tbl_store_fpreg
+	short		store_fpreg_6 - tbl_store_fpreg
+	short		store_fpreg_7 - tbl_store_fpreg
+
+store_fpreg_0:
+	fmovm.x		&0x80, EXC_FP0(%a6)
+	rts
+store_fpreg_1:
+	fmovm.x		&0x80, EXC_FP1(%a6)
+	rts
+store_fpreg_2:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x20
+	rts
+store_fpreg_3:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x10
+	rts
+store_fpreg_4:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x08
+	rts
+store_fpreg_5:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x04
+	rts
+store_fpreg_6:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x02
+	rts
+store_fpreg_7:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x01
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	get_packed(): fetch a packed operand from memory and then	#
+#		      convert it to a floating-point binary number.	#
+#									#
+# XREF ****************************************************************	#
+#	_dcalc_ea() - calculate the correct <ea>			#
+#	_mem_read() - fetch the packed operand from memory		#
+#	facc_in_x() - the fetch failed so jump to special exit code	#
+#	decbin()    - convert packed to binary extended precision	#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If no failure on _mem_read():					#
+#	FP_SRC(a6) = packed operand now as a binary FP number		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Get the correct <ea> which is the value on the exception stack	#
+# frame w/ maybe a correction factor if the <ea> is -(an) or (an)+.	#
+# Then, fetch the operand from memory. If the fetch fails, exit		#
+# through facc_in_x().							#
+#	If the packed operand is a ZERO,NAN, or INF, convert it to	#
+# its binary representation here. Else, call decbin() which will	#
+# convert the packed value to an extended precision binary value.	#
+#									#
+#########################################################################
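+
+# Layout of the first lword of the 12-byte packed operand as the code
+# below interprets it: bit 31 = SM (mantissa sign), bit 30 = SE
+# (exponent sign), bits 30-16 = $7fff for an INF or NAN, bits 27-16 =
+# the three bcd exponent digits, bits 3-0 = the single bcd integer
+# digit. The remaining two lwords hold the 16 bcd fraction digits.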
+
+# the stacked <ea> for packed is correct except for -(An).
+# the base reg must be updated for both -(An) and (An)+.
+	global		get_packed
+get_packed:
+	mov.l		&0xc,%d0		# packed is 12 bytes
+	bsr.l		_dcalc_ea		# fetch <ea>; correct An
+
+	lea		FP_SRC(%a6),%a1		# pass: ptr to super dst
+	mov.l		&0xc,%d0		# pass: 12 bytes
+	bsr.l		_dmem_read		# read packed operand
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_x		# yes
+
+# The packed operand is an INF or a NAN if the exponent field is all ones.
+	bfextu		FP_SRC(%a6){&1:&15},%d0	# get exp
+	cmpi.w		%d0,&0x7fff		# INF or NAN?
+	bne.b		gp_try_zero		# no
+	rts					# operand is an INF or NAN
+
+# The packed operand is a zero if the mantissa is all zero, else it's
+# a normal packed op.
+gp_try_zero:
+	mov.b		3+FP_SRC(%a6),%d0	# get byte 4
+	andi.b		&0x0f,%d0		# clear all but last nybble
+	bne.b		gp_not_spec		# not a zero
+	tst.l		FP_SRC_HI(%a6)		# is lw 2 zero?
+	bne.b		gp_not_spec		# not a zero
+	tst.l		FP_SRC_LO(%a6)		# is lw 3 zero?
+	bne.b		gp_not_spec		# not a zero
+	rts					# operand is a ZERO
+gp_not_spec:
+	lea		FP_SRC(%a6),%a0		# pass: ptr to packed op
+	bsr.l		decbin			# convert to extended
+	fmovm.x		&0x80,FP_SRC(%a6)	# make this the srcop
+	rts
+
+#########################################################################
+# decbin(): Converts normalized packed bcd value pointed to by register	#
+#	    a0 to extended-precision value in fp0.			#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to normalized packed bcd value			#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = exact fp representation of the packed bcd value.		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Expected is a normal bcd (i.e. non-exceptional; all inf, zero,	#
+#	and NaN operands are dispatched without entering this routine)	#
+#	value in 68881/882 format at location (a0).			#
+#									#
+#	A1. Convert the bcd exponent to binary by successive adds and	#
+#	muls. Set the sign according to SE. Subtract 16 to compensate	#
+#	for the mantissa which is to be interpreted as 17 integer	#
+#	digits, rather than 1 integer and 16 fraction digits.		#
+#	Note: this operation can never overflow.			#
+#									#
+#	A2. Convert the bcd mantissa to binary by successive		#
+#	adds and muls in FP0. Set the sign according to SM.		#
+#	The mantissa digits will be converted with the decimal point	#
+#	assumed following the least-significant digit.			#
+#	Note: this operation can never overflow.			#
+#									#
+#	A3. Count the number of leading/trailing zeros in the		#
+#	bcd string.  If SE is positive, count the leading zeros;	#
+#	if negative, count the trailing zeros.  Set the adjusted	#
+#	exponent equal to the exponent from A1 and the zero count	#
+#	added if SM = 1 and subtracted if SM = 0.  Scale the		#
+#	mantissa the equivalent of forcing in the bcd value:		#
+#									#
+#	SM = 0	a non-zero digit in the integer position		#
+#	SM = 1	a non-zero digit in Mant0, lsd of the fraction		#
+#									#
+#	this will insure that any value, regardless of its		#
+#	representation (ex. 0.1E2, 1E1, 10E0, 100E-1), is converted	#
+#	consistently.							#
+#									#
+#	A4. Calculate the factor 10^exp in FP1 using a table of		#
+#	10^(2^n) values.  To reduce the error in forming factors	#
+#	greater than 10^27, a directed rounding scheme is used with	#
+#	tables rounded to RN, RM, and RP, according to the table	#
+#	in the comments of the pwrten section.				#
+#									#
+#	A5. Form the final binary number by scaling the mantissa by	#
+#	the exponent factor.  This is done by multiplying the		#
+#	mantissa in FP0 by the factor in FP1 if the adjusted		#
+#	exponent sign is positive, and dividing FP0 by FP1 if		#
+#	it is negative.							#
+#									#
+#	Clean up and return. Check if the final mul or div was inexact.	#
+#	If so, set INEX1 in USER_FPSR.					#
+#									#
+#########################################################################
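+
+# A worked example (illustrative values): for a packed operand encoding
+# +1.2500000000000000E+2, A1 gives a binary exponent of 2 - 16 = -14,
+# which is negative, so it is stored as 14 with SE set in the working
+# copy.  A2 accumulates the 17 mantissa digits as the integer
+# 12,500,000,000,000,000 (1.25e16).  A3 is skipped since abs(-14) <= 27,
+# A4 forms 10^14 exactly, and A5 divides (SE is set):
+# 1.25e16 / 10^14 = 125.0, returned in fp0.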
+
+#
+#	PTENRN, PTENRM, and PTENRP are arrays of powers of 10 rounded
+#	to nearest, minus, and plus, respectively.  The tables include
+#	10**{1,2,4,8,16,32,64,128,256,512,1024,2048,4096}.  No rounding
+#	is required until the power is greater than 27; however, all
+#	tables include the first 5 for ease of indexing.
+#
+RTABLE:
+	byte		0,0,0,0
+	byte		2,3,2,3
+	byte		2,3,3,2
+	byte		3,2,2,3
+
+	set		FNIBS,7
+	set		FSTRT,0
+
+	set		ESTRT,4
+	set		EDIGITS,2
+
+	global		decbin
+decbin:
+	mov.l		0x0(%a0),FP_SCR0_EX(%a6) # make a copy of input
+	mov.l		0x4(%a0),FP_SCR0_HI(%a6) # so we don't alter it
+	mov.l		0x8(%a0),FP_SCR0_LO(%a6)
+
+	lea		FP_SCR0(%a6),%a0
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+	fmovm.x		&0x1,-(%sp)		# save fp1
+#
+# Calculate exponent:
+#  1. Copy bcd value in memory for use as a working copy.
+#  2. Calculate absolute value of exponent in d1 by mul and add.
+#  3. Correct for exponent sign.
+#  4. Subtract 16 to compensate for interpreting the mant as all integer digits.
+#     (i.e., all digits assumed left of the decimal point.)
+#
+# Register usage:
+#
+#  calc_e:
+#	(*)  d0: temp digit storage
+#	(*)  d1: accumulator for binary exponent
+#	(*)  d2: digit count
+#	(*)  d3: offset pointer
+#	( )  d4: first word of bcd
+#	( )  a0: pointer to working bcd value
+#	( )  a6: pointer to original bcd value
+#	(*)  FP_SCR1: working copy of original bcd value
+#	(*)  L_SCR1: copy of original exponent word
+#
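+# For example, exponent digits {0,0,2} accumulate as ((0*10+0)*10)+2 = 2;
+# after 16 is subtracted this is -14, so it is negated to 14 and SE is
+# set in both d4 and the working copy.
+#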
+calc_e:
+	mov.l		&EDIGITS,%d2		# loop count for the 3 exponent digits
+	mov.l		&ESTRT,%d3		# counter to pick up digits
+	mov.l		(%a0),%d4		# get first word of bcd
+	clr.l		%d1			# zero d1 for accumulator
+e_gd:
+	mulu.l		&0xa,%d1		# mul partial product by one digit place
+	bfextu		%d4{%d3:&4},%d0		# get the digit and zero extend into d0
+	add.l		%d0,%d1			# d1 = d1 + d0
+	addq.b		&4,%d3			# advance d3 to the next digit
+	dbf.w		%d2,e_gd		# if we have used all 3 digits, exit loop
+	btst		&30,%d4			# get SE
+	beq.b		e_pos			# don't negate if pos
+	neg.l		%d1			# negate before subtracting
+e_pos:
+	sub.l		&16,%d1			# sub to compensate for shift of mant
+	bge.b		e_save			# if still pos, do not neg
+	neg.l		%d1			# now negative, make pos and set SE
+	or.l		&0x40000000,%d4		# set SE in d4,
+	or.l		&0x40000000,(%a0)	# and in working bcd
+e_save:
+	mov.l		%d1,-(%sp)		# save exp on stack
+#
+#
+# Calculate mantissa:
+#  1. Calculate absolute value of mantissa in fp0 by mul and add.
+#  2. Correct for mantissa sign.
+#     (i.e., all digits assumed left of the decimal point.)
+#
+# Register usage:
+#
+#  calc_m:
+#	(*)  d0: temp digit storage
+#	(*)  d1: lword counter
+#	(*)  d2: digit count
+#	(*)  d3: offset pointer
+#	( )  d4: words 2 and 3 of bcd
+#	( )  a0: pointer to working bcd value
+#	( )  a6: pointer to original bcd value
+#	(*) fp0: mantissa accumulator
+#	( )  FP_SCR1: working copy of original bcd value
+#	( )  L_SCR1: copy of original exponent word
+#
+calc_m:
+	mov.l		&1,%d1			# word counter, init to 1
+	fmov.s		&0x00000000,%fp0	# accumulator
+#
+#
+#  Since the packed number has a long word between the first & second parts,
+#  get the integer digit then skip down & get the rest of the
+#  mantissa.  We will unroll the loop once.
+#
+	bfextu		(%a0){&28:&4},%d0	# integer part is ls digit in long word
+	fadd.b		%d0,%fp0		# add digit to sum in fp0
+#
+#
+#  Get the rest of the mantissa.
+#
+loadlw:
+	mov.l		(%a0,%d1.L*4),%d4	# load mantissa longword into d4
+	mov.l		&FSTRT,%d3		# counter to pick up digits
+	mov.l		&FNIBS,%d2		# reset number of digits per a0 ptr
+md2b:
+	fmul.s		&0x41200000,%fp0	# fp0 = fp0 * 10
+	bfextu		%d4{%d3:&4},%d0		# get the digit and zero extend
+	fadd.b		%d0,%fp0		# fp0 = fp0 + digit
+#
+#
+#  If all the digits (8) in that long word have been converted (d2=0),
+#  then inc d1 (=2) to point to the next long word and reset d3 to 0
+#  to initialize the digit offset, and set d2 to 7 for the digit count;
+#  else continue with this long word.
+#
+	addq.b		&4,%d3			# advance d3 to the next digit
+	dbf.w		%d2,md2b		# check for last digit in this lw
+nextlw:
+	addq.l		&1,%d1			# inc lw pointer in mantissa
+	cmp.l		%d1,&2			# test for last lw
+	ble.b		loadlw			# if not, get last one
+#
+#  Check the sign of the mant and make the value in fp0 the same sign.
+#
+m_sign:
+	btst		&31,(%a0)		# test sign of the mantissa
+	beq.b		ap_st_z			# if clear, go to append/strip zeros
+	fneg.x		%fp0			# if set, negate fp0
+#
+# Append/strip zeros:
+#
+#  For adjusted exponents which have an absolute value greater than 27*,
+#  this routine calculates the amount needed to normalize the mantissa
+#  for the adjusted exponent.  That number is subtracted from the exp
+#  if the exp was positive, and added if it was negative.  The purpose
+#  of this is to reduce the value of the exponent and the possibility
+#  of error in calculation of pwrten.
+#
+#  1. Branch on the sign of the adjusted exponent.
+#  2p.(positive exp)
+#   2. Check M16 and the digits in lwords 2 and 3 in descending order.
+#   3. Add one for each zero encountered until a non-zero digit.
+#   4. Subtract the count from the exp.
+#   5. Check if the exp has crossed zero in #3 above; make the exp abs
+#	   and set SE.
+#	6. Multiply the mantissa by 10**count.
+#  2n.(negative exp)
+#   2. Check the digits in lwords 3 and 2 in descending order.
+#   3. Add one for each zero encountered until a non-zero digit.
+#   4. Add the count to the exp.
+#   5. Check if the exp has crossed zero in #3 above; clear SE.
+#   6. Divide the mantissa by 10**count.
+#
+#  *Why 27?  If the adjusted exponent is within -28 < expA < 28, then
+#   any adjustment due to append/strip zeros will drive the resultant
+#   exponent towards zero.  Since all pwrten constants with a power
+#   of 27 or less are exact, there is no need to use this routine to
+#   attempt to lessen the resultant exponent.
+#
+# Register usage:
+#
+#  ap_st_z:
+#	(*)  d0: temp digit storage
+#	(*)  d1: zero count
+#	(*)  d2: digit count
+#	(*)  d3: offset pointer
+#	( )  d4: first word of bcd
+#	(*)  d5: lword counter
+#	( )  a0: pointer to working bcd value
+#	( )  FP_SCR1: working copy of original bcd value
+#	( )  L_SCR1: copy of original exponent word
+#
+#
+# First check the absolute value of the exponent to see if this
+# routine is necessary.  If so, then check the sign of the exponent
+# and do append (+) or strip (-) zeros accordingly.
+# This section handles a positive adjusted exponent.
+#
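+# For example, an adjusted exponent of +30 whose mantissa has a zero
+# integer digit and two leading zero fraction digits gives a zero count
+# of 3: the exponent drops to 27 (all powers of ten up to 10^27 are
+# exact) and the mantissa is multiplied by 10^3 to compensate.
+#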
+ap_st_z:
+	mov.l		(%sp),%d1		# load expA for range test
+	cmp.l		%d1,&27			# compare abs(expA) with 27
+	ble.w		pwrten			# if abs(expA) <28, skip ap/st zeros
+	btst		&30,(%a0)		# check sign of exp
+	bne.b		ap_st_n			# if neg, go to neg side
+	clr.l		%d1			# zero count reg
+	mov.l		(%a0),%d4		# load lword 1 to d4
+	bfextu		%d4{&28:&4},%d0		# get M16 in d0
+	bne.b		ap_p_fx			# if M16 is non-zero, go fix exp
+	addq.l		&1,%d1			# inc zero count
+	mov.l		&1,%d5			# init lword counter
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 2 to d4
+	bne.b		ap_p_cl			# if lw 2 is non-zero, check its digits
+	addq.l		&8,%d1			# and inc count by 8
+	addq.l		&1,%d5			# inc lword counter
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 3 to d4
+ap_p_cl:
+	clr.l		%d3			# init offset reg
+	mov.l		&7,%d2			# init digit counter
+ap_p_gd:
+	bfextu		%d4{%d3:&4},%d0		# get digit
+	bne.b		ap_p_fx			# if non-zero, go to fix exp
+	addq.l		&4,%d3			# point to next digit
+	addq.l		&1,%d1			# inc digit counter
+	dbf.w		%d2,ap_p_gd		# get next digit
+ap_p_fx:
+	mov.l		%d1,%d0			# copy counter to d0
+	mov.l		(%sp),%d1		# get adjusted exp from memory
+	sub.l		%d0,%d1			# subtract count from exp
+	bge.b		ap_p_fm			# if still pos, go fix mantissa
+	neg.l		%d1			# now it's neg; get abs
+	mov.l		(%a0),%d4		# load lword 1 to d4
+	or.l		&0x40000000,%d4		# and set SE in d4
+	or.l		&0x40000000,(%a0)	# and in memory
+#
+# Calculate the mantissa multiplier to compensate for the stripping of
+# zeros from the mantissa.
+#
+ap_p_fm:
+	lea.l		PTENRN(%pc),%a1		# get address of power-of-ten table
+	clr.l		%d3			# init table index
+	fmov.s		&0x3f800000,%fp1	# init fp1 to 1
+	mov.l		&3,%d2			# init d2 to count bits in counter
+ap_p_el:
+	asr.l		&1,%d0			# shift lsb into carry
+	bcc.b		ap_p_en			# if bit is zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1		# mul by 10**(d3_bit_no)
+ap_p_en:
+	add.l		&12,%d3			# inc d3 to next rtable entry
+	tst.l		%d0			# check if d0 is zero
+	bne.b		ap_p_el			# if not, get next bit
+	fmul.x		%fp1,%fp0		# mul mantissa by 10**(no_bits_shifted)
+	bra.b		pwrten			# go calc pwrten
+#
+# This section handles a negative adjusted exponent.
+#
+ap_st_n:
+	clr.l		%d1			# clr counter
+	mov.l		&2,%d5			# set up d5 to point to lword 3
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 3
+	bne.b		ap_n_cl			# if not zero, check digits
+	sub.l		&1,%d5			# dec d5 to point to lword 2
+	addq.l		&8,%d1			# inc counter by 8
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 2
+ap_n_cl:
+	mov.l		&28,%d3			# point to last digit
+	mov.l		&7,%d2			# init digit counter
+ap_n_gd:
+	bfextu		%d4{%d3:&4},%d0		# get digit
+	bne.b		ap_n_fx			# if non-zero, go to exp fix
+	subq.l		&4,%d3			# point to previous digit
+	addq.l		&1,%d1			# inc digit counter
+	dbf.w		%d2,ap_n_gd		# get next digit
+ap_n_fx:
+	mov.l		%d1,%d0			# copy counter to d0
+	mov.l		(%sp),%d1		# get adjusted exp from memory
+	sub.l		%d0,%d1			# subtract count from exp
+	bgt.b		ap_n_fm			# if still pos, go fix mantissa
+	neg.l		%d1			# take abs of exp and clr SE
+	mov.l		(%a0),%d4		# load lword 1 to d4
+	and.l		&0xbfffffff,%d4		# and clr SE in d4
+	and.l		&0xbfffffff,(%a0)	# and in memory
+#
+# Calculate the mantissa multiplier to compensate for the appending of
+# zeros to the mantissa.
+#
+ap_n_fm:
+	lea.l		PTENRN(%pc),%a1		# get address of power-of-ten table
+	clr.l		%d3			# init table index
+	fmov.s		&0x3f800000,%fp1	# init fp1 to 1
+	mov.l		&3,%d2			# init d2 to count bits in counter
+ap_n_el:
+	asr.l		&1,%d0			# shift lsb into carry
+	bcc.b		ap_n_en			# if bit is zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1		# mul by 10**(d3_bit_no)
+ap_n_en:
+	add.l		&12,%d3			# inc d3 to next rtable entry
+	tst.l		%d0			# check if d0 is zero
+	bne.b		ap_n_el			# if not, get next bit
+	fdiv.x		%fp1,%fp0		# div mantissa by 10**(no_bits_shifted)
+#
+#
+# Calculate power-of-ten factor from adjusted and shifted exponent.
+#
+# Register usage:
+#
+#  pwrten:
+#	(*)  d0: temp
+#	( )  d1: exponent
+#	(*)  d2: {FPCR[6:5],SM,SE} as index in RTABLE; temp
+#	(*)  d3: FPCR work copy
+#	( )  d4: first word of bcd
+#	(*)  a1: RTABLE pointer
+#  calc_p:
+#	(*)  d0: temp
+#	( )  d1: exponent
+#	(*)  d3: PWRTxx table index
+#	( )  a0: pointer to working copy of bcd
+#	(*)  a1: PWRTxx pointer
+#	(*) fp1: power-of-ten accumulator
+#
+# Pwrten calculates the exponent factor in the selected rounding mode
+# according to the following table:
+#
+#	Sign of Mant  Sign of Exp  Rounding Mode  PWRTEN Rounding Mode
+#
+#	ANY	  ANY	RN	RN
+#
+#	 +	   +	RP	RP
+#	 -	   +	RP	RM
+#	 +	   -	RP	RM
+#	 -	   -	RP	RP
+#
+#	 +	   +	RM	RM
+#	 -	   +	RM	RP
+#	 +	   -	RM	RP
+#	 -	   -	RM	RM
+#
+#	 +	   +	RZ	RM
+#	 -	   +	RZ	RM
+#	 +	   -	RZ	RP
+#	 -	   -	RZ	RP
+#
+#
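+# For example (reading the table above), a negative mantissa with a
+# positive exponent under user mode RP selects the RM-rounded table,
+# while mode RN always selects the RN table.
+#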
+pwrten:
+	mov.l		USER_FPCR(%a6),%d3	# get user's FPCR
+	bfextu		%d3{&26:&2},%d2		# isolate rounding mode bits
+	mov.l		(%a0),%d4		# reload 1st bcd word to d4
+	asl.l		&2,%d2			# format d2 to be
+	bfextu		%d4{&0:&2},%d0		# {FPCR[6],FPCR[5],SM,SE}
+	add.l		%d0,%d2			# in d2 as index into RTABLE
+	lea.l		RTABLE(%pc),%a1		# load rtable base
+	mov.b		(%a1,%d2),%d0		# load new rounding bits from table
+	clr.l		%d3			# clear d3 to force no exc and extended
+	bfins		%d0,%d3{&26:&2}		# stuff new rounding bits in FPCR
+	fmov.l		%d3,%fpcr		# write new FPCR
+	asr.l		&1,%d0			# write correct PTENxx table
+	bcc.b		not_rp			# to a1
+	lea.l		PTENRP(%pc),%a1		# it is RP
+	bra.b		calc_p			# go to init section
+not_rp:
+	asr.l		&1,%d0			# keep checking
+	bcc.b		not_rm
+	lea.l		PTENRM(%pc),%a1		# it is RM
+	bra.b		calc_p			# go to init section
+not_rm:
+	lea.l		PTENRN(%pc),%a1		# it is RN
+calc_p:
+	mov.l		%d1,%d0			# copy exp to d0;use d0
+	bpl.b		no_neg			# if exp is negative,
+	neg.l		%d0			# invert it
+	or.l		&0x40000000,(%a0)	# and set SE bit
+no_neg:
+	clr.l		%d3			# table index
+	fmov.s		&0x3f800000,%fp1	# init fp1 to 1
+e_loop:
+	asr.l		&1,%d0			# shift next bit into carry
+	bcc.b		e_next			# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1		# mul by 10**(d3_bit_no)
+e_next:
+	add.l		&12,%d3			# inc d3 to next rtable entry
+	tst.l		%d0			# check if d0 is zero
+	bne.b		e_loop			# not zero, continue shifting
+#
+#
+#  Check the sign of the adjusted exp and make the value in fp0 the
+#  same sign. If the exp was pos then multiply fp1*fp0;
+#  else divide fp0/fp1.
+#
+# Register Usage:
+#  norm:
+#	( )  a0: pointer to working bcd value
+#	(*) fp0: mantissa accumulator
+#	( ) fp1: scaling factor - 10**(abs(exp))
+#
+pnorm:
+	btst		&30,(%a0)		# test the sign of the exponent
+	beq.b		mul			# if clear, go to multiply
+div:
+	fdiv.x		%fp1,%fp0		# exp is negative, so divide mant by exp
+	bra.b		end_dec
+mul:
+	fmul.x		%fp1,%fp0		# exp is positive, so multiply by exp
+#
+#
+# Clean up and return with result in fp0.
+#
+# If the final mul/div in decbin incurred an inex exception,
+# it will be inex2, but will be reported as inex1 by get_op.
+#
+end_dec:
+	fmov.l		%fpsr,%d0		# get status register
+	bclr		&inex2_bit+8,%d0	# test for inex2 and clear it
+	beq.b		no_exc			# skip this if no exc
+	ori.w		&inx1a_mask,2+USER_FPSR(%a6) # set INEX1/AINEX
+no_exc:
+	add.l		&0x4,%sp		# clear 1 lw param
+	fmovm.x		(%sp)+,&0x40		# restore fp1
+	movm.l		(%sp)+,&0x3c		# restore d2-d5
+	fmov.l		&0x0,%fpcr
+	fmov.l		&0x0,%fpsr
+	rts
+
+#########################################################################
+# bindec(): Converts an input in extended precision format to bcd format#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to the input extended precision value in memory.	#
+#	     the input may be either normalized, unnormalized, or	#
+#	     denormalized.						#
+#	d0 = contains the k-factor sign-extended to 32-bits.		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR0(a6) = bcd format result on the stack.			#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	A1.	Set RM and size ext;  Set SIGMA = sign of input.	#
+#		The k-factor is saved for use in d7. Clear the		#
+#		BINDEC_FLG for separating normalized/denormalized	#
+#		input.  If input is unnormalized or denormalized,	#
+#		normalize it.						#
+#									#
+#	A2.	Set X = abs(input).					#
+#									#
+#	A3.	Compute ILOG.						#
+#		ILOG is the log base 10 of the input value.  It is	#
+#		approximated by adding e + 0.f when the original	#
+#		value is viewed as 2^^e * 1.f in extended precision.	#
+#		This value is stored in d6.				#
+#									#
+#	A4.	Clr INEX bit.						#
+#		The operation in A3 above may have set INEX2.		#
+#									#
+#	A5.	Set ICTR = 0;						#
+#		ICTR is a flag used in A13.  It must be set before the	#
+#		loop entry A6.						#
+#									#
+#	A6.	Calculate LEN.						#
+#		LEN is the number of digits to be displayed.  The	#
+#		k-factor can dictate either the total number of digits,	#
+#		if it is a positive number, or the number of digits	#
+#		after the decimal point which are to be included as	#
+#		significant.  See the 68882 manual for examples.	#
+#		If LEN is computed to be greater than 17, set OPERR in	#
+#		USER_FPSR.  LEN is stored in d4.			#
+#									#
+#	A7.	Calculate SCALE.					#
+#		SCALE is equal to 10^ISCALE, where ISCALE is the number	#
+#		of decimal places needed to insure LEN integer digits	#
+#		in the output before conversion to bcd. LAMBDA is the	#
+#		sign of ISCALE, used in A9. Fp1 contains		#
+#		10^^(abs(ISCALE)) using a rounding mode which is a	#
+#		function of the original rounding mode and the signs	#
+#		of ISCALE and X.  A table is given in the code.		#
+#									#
+#	A8.	Clr INEX; Force RZ.					#
+#		The operation in A3 above may have set INEX2.		#
+#		RZ mode is forced for the scaling operation to insure	#
+#		only one rounding error.  The grs bits are collected in #
+#		the INEX flag for use in A10.				#
+#									#
+#	A9.	Scale X -> Y.						#
+#		The mantissa is scaled to the desired number of		#
+#		significant digits.  The excess digits are collected	#
+#		in INEX2.						#
+#									#
+#	A10.	Or in INEX.						#
+#		If INEX is set, round error occurred.  This is		#
+#		compensated for by 'or-ing' in the INEX2 flag to	#
+#		the lsb of Y.						#
+#									#
+#	A11.	Restore original FPCR; set size ext.			#
+#		Perform FINT operation in the user's rounding mode.	#
+#		Keep the size to extended.				#
+#									#
+#	A12.	Calculate YINT = FINT(Y) according to user's rounding	#
+#		mode.  The FPSP routine sintd0 is used.  The output	#
+#		is in fp0.						#
+#									#
+#	A13.	Check for LEN digits.					#
+#		If the int operation results in more than LEN digits,	#
+#		or less than LEN -1 digits, adjust ILOG and repeat from	#
+#		A6.  This test occurs only on the first pass.  If the	#
+#		result is exactly 10^LEN, decrement ILOG and divide	#
+#		the mantissa by 10.					#
+#									#
+#	A14.	Convert the mantissa to bcd.				#
+#		The binstr routine is used to convert the LEN digit	#
+#		mantissa to bcd in memory.  The input to binstr is	#
+#		to be a fraction; i.e. (mantissa)/10^LEN and adjusted	#
+#		such that the decimal point is to the left of bit 63.	#
+#		The bcd digits are stored in the correct position in	#
+#		the final string area in memory.			#
+#									#
+#	A15.	Convert the exponent to bcd.				#
+#		As in A14 above, the exp is converted to bcd and the	#
+#		digits are stored in the final string.			#
+#		Test the length of the final exponent string.  If the	#
+#		length is 4, set operr.					#
+#									#
+#	A16.	Write sign bits to final string.			#
+#									#
+#########################################################################
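+
+# A worked example (illustrative values): converting roughly 123.45 with
+# a k-factor of -3 gives ILOG = 2 and LEN = 2 + 1 - (-3) = 6, so six
+# significant digits (three of them after the original decimal point)
+# end up in the bcd string; a k-factor of +4 would instead force exactly
+# four significant digits.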
+
+set	BINDEC_FLG,	EXC_TEMP	# DENORM flag
+
+# Constants in extended precision
+PLOG2:
+	long		0x3FFD0000,0x9A209A84,0xFBCFF798,0x00000000
+PLOG2UP1:
+	long		0x3FFD0000,0x9A209A84,0xFBCFF799,0x00000000
+
+# Constants in single precision
+FONE:
+	long		0x3F800000,0x00000000,0x00000000,0x00000000
+FTWO:
+	long		0x40000000,0x00000000,0x00000000,0x00000000
+FTEN:
+	long		0x41200000,0x00000000,0x00000000,0x00000000
+F4933:
+	long		0x459A2800,0x00000000,0x00000000,0x00000000
+
+RBDTBL:
+	byte		0,0,0,0
+	byte		3,3,2,2
+	byte		3,2,2,3
+	byte		2,3,3,2
+
+#	Implementation Notes:
+#
+#	The registers are used as follows:
+#
+#		d0: scratch; LEN input to binstr
+#		d1: scratch
+#		d2: upper 32-bits of mantissa for binstr
+#		d3: scratch;lower 32-bits of mantissa for binstr
+#		d4: LEN
+#		d5: LAMBDA/ICTR
+#		d6: ILOG
+#		d7: k-factor
+#		a0: ptr for original operand/final result
+#		a1: scratch pointer
+#		a2: pointer to FP_X; abs(original value) in ext
+#		fp0: scratch
+#		fp1: scratch
+#		fp2: scratch
+#		F_SCR1:
+#		F_SCR2:
+#		L_SCR1:
+#		L_SCR2:
+
+	global		bindec
+bindec:
+	movm.l		&0x3f20,-(%sp)	#  {%d2-%d7/%a2}
+	fmovm.x		&0x7,-(%sp)	#  {%fp0-%fp2}
+
+# A1. Set RM and size ext. Set SIGMA = sign input;
+#     The k-factor is saved for use in d7.  Clear BINDEC_FLG for
+#     separating  normalized/denormalized input.  If the input
+#     is a denormalized number, set the BINDEC_FLG memory word
+#     to signal denorm.  If the input is unnormalized, normalize
+#     the input and test for denormalized result.
+#
+	fmov.l		&rm_mode*0x10,%fpcr	# set RM and ext
+	mov.l		(%a0),L_SCR2(%a6)	# save exponent for sign check
+	mov.l		%d0,%d7		# move k-factor to d7
+
+	clr.b		BINDEC_FLG(%a6)	# clr norm/denorm flag
+	cmpi.b		STAG(%a6),&DENORM # is input a DENORM?
+	bne.w		A2_str		# no; input is a NORM
+
+#
+# Normalize the denorm
+#
+un_de_norm:
+	mov.w		(%a0),%d0
+	and.w		&0x7fff,%d0	# strip sign of normalized exp
+	mov.l		4(%a0),%d1
+	mov.l		8(%a0),%d2
+norm_loop:
+	sub.w		&1,%d0
+	lsl.l		&1,%d2
+	roxl.l		&1,%d1
+	tst.l		%d1
+	bge.b		norm_loop
+#
+# Test if the normalized input is denormalized
+#
+	tst.w		%d0
+	bgt.b		pos_exp		# if greater than zero, it is a norm
+	st		BINDEC_FLG(%a6)	# set flag for denorm
+pos_exp:
+	and.w		&0x7fff,%d0	# strip sign of normalized exp
+	mov.w		%d0,(%a0)
+	mov.l		%d1,4(%a0)
+	mov.l		%d2,8(%a0)
+
+# A2. Set X = abs(input).
+#
+A2_str:
+	mov.l		(%a0),FP_SCR1(%a6)	# move input to work space
+	mov.l		4(%a0),FP_SCR1+4(%a6)	# move input to work space
+	mov.l		8(%a0),FP_SCR1+8(%a6)	# move input to work space
+	and.l		&0x7fffffff,FP_SCR1(%a6)	# create abs(X)
+
+# A3. Compute ILOG.
+#     ILOG is the log base 10 of the input value.  It is approx-
+#     imated by adding e + 0.f when the original value is viewed
+#     as 2^^e * 1.f in extended precision.  This value is stored
+#     in d6.
+#
+# Register usage:
+#	Input/Output
+#	d0: k-factor/exponent
+#	d2: x/x
+#	d3: x/x
+#	d4: x/x
+#	d5: x/x
+#	d6: x/ILOG
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: x/x
+#	a2: x/x
+#	fp0: x/float(ILOG)
+#	fp1: x/x
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X)/Abs(X) with $3fff exponent
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
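+#
+# For example, X = 200 = 2^7 * 1.5625 gives (7 + 0.5625) * log10(2),
+# about 2.28, which rounds (RM) down to ILOG = 2, the floor of
+# log10(200).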
+
+	tst.b		BINDEC_FLG(%a6)	# check for denorm
+	beq.b		A3_cont		# if clr, continue with norm
+	mov.l		&-4933,%d6	# force ILOG = -4933
+	bra.b		A4_str
+A3_cont:
+	mov.w		FP_SCR1(%a6),%d0	# move exp to d0
+	mov.w		&0x3fff,FP_SCR1(%a6)	# replace exponent with 0x3fff
+	fmov.x		FP_SCR1(%a6),%fp0	# now fp0 has 1.f
+	sub.w		&0x3fff,%d0	# strip off bias
+	fadd.w		%d0,%fp0	# add in exp
+	fsub.s		FONE(%pc),%fp0	# subtract off 1.0
+	fbge.w		pos_res		# if pos, branch
+	fmul.x		PLOG2UP1(%pc),%fp0	# if neg, mul by LOG2UP1
+	fmov.l		%fp0,%d6	# put ILOG in d6 as a lword
+	bra.b		A4_str		# go move out ILOG
+pos_res:
+	fmul.x		PLOG2(%pc),%fp0	# if pos, mul by LOG2
+	fmov.l		%fp0,%d6	# put ILOG in d6 as a lword
+
+
+# A4. Clr INEX bit.
+#     The operation in A3 above may have set INEX2.
+
+A4_str:
+	fmov.l		&0,%fpsr	# zero all of fpsr - nothing needed
+
+
+# A5. Set ICTR = 0;
+#     ICTR is a flag used in A13.  It must be set before the
+#     loop entry A6. The lower word of d5 is used for ICTR.
+
+	clr.w		%d5		# clear ICTR
+
+# A6. Calculate LEN.
+#     LEN is the number of digits to be displayed.  The k-factor
+#     can dictate either the total number of digits, if it is
+#     a positive number, or the number of digits after the
+#     original decimal point which are to be included as
+#     significant.  See the 68882 manual for examples.
+#     If LEN is computed to be greater than 17, set OPERR in
+#     USER_FPSR.  LEN is stored in d4.
+#
+# Register usage:
+#	Input/Output
+#	d0: exponent/Unchanged
+#	d2: x/x/scratch
+#	d3: x/x
+#	d4: exc picture/LEN
+#	d5: ICTR/Unchanged
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: x/x
+#	a2: x/x
+#	fp0: float(ILOG)/Unchanged
+#	fp1: x/x
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
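+#
+# For example, k = 20 is clamped to LEN = 17 and OPERR is set, while a
+# computed LEN of zero or less is forced up to the minimum of 1.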
+
+A6_str:
+	tst.l		%d7		# branch on sign of k
+	ble.b		k_neg		# if k <= 0, LEN = ILOG + 1 - k
+	mov.l		%d7,%d4		# if k > 0, LEN = k
+	bra.b		len_ck		# skip to LEN check
+k_neg:
+	mov.l		%d6,%d4		# first load ILOG to d4
+	sub.l		%d7,%d4		# subtract off k
+	addq.l		&1,%d4		# add in the 1
+len_ck:
+	tst.l		%d4		# LEN check: branch on sign of LEN
+	ble.b		LEN_ng		# if neg, set LEN = 1
+	cmp.l		%d4,&17		# test if LEN > 17
+	ble.b		A7_str		# if not, forget it
+	mov.l		&17,%d4		# set max LEN = 17
+	tst.l		%d7		# if negative, never set OPERR
+	ble.b		A7_str		# if positive, continue
+	or.l		&opaop_mask,USER_FPSR(%a6)	# set OPERR & AIOP in USER_FPSR
+	bra.b		A7_str		# finished here
+LEN_ng:
+	mov.l		&1,%d4		# min LEN is 1
+
+
+# A7. Calculate SCALE.
+#     SCALE is equal to 10^ISCALE, where ISCALE is the number
+#     of decimal places needed to insure LEN integer digits
+#     in the output before conversion to bcd. LAMBDA is the sign
+#     of ISCALE, used in A9.  Fp1 contains 10^^(abs(ISCALE)) using
+#     the rounding mode as given in the following table (see
+#     Coonen, p. 7.23 as ref.; however, the SCALE variable is
+#     of opposite sign in bindec.sa from Coonen).
+#
+#	Initial					USE
+#	FPCR[6:5]	LAMBDA	SIGN(X)		FPCR[6:5]
+#	----------------------------------------------
+#	 RN	00	   0	   0		00/0	RN
+#	 RN	00	   0	   1		00/0	RN
+#	 RN	00	   1	   0		00/0	RN
+#	 RN	00	   1	   1		00/0	RN
+#	 RZ	01	   0	   0		11/3	RP
+#	 RZ	01	   0	   1		11/3	RP
+#	 RZ	01	   1	   0		10/2	RM
+#	 RZ	01	   1	   1		10/2	RM
+#	 RM	10	   0	   0		11/3	RP
+#	 RM	10	   0	   1		10/2	RM
+#	 RM	10	   1	   0		10/2	RM
+#	 RM	10	   1	   1		11/3	RP
+#	 RP	11	   0	   0		10/2	RM
+#	 RP	11	   0	   1		11/3	RP
+#	 RP	11	   1	   0		11/3	RP
+#	 RP	11	   1	   1		10/2	RM
+#
+# Register usage:
+#	Input/Output
+#	d0: exponent/scratch - final is 0
+#	d2: x/0 or 24 for A9
+#	d3: x/scratch - offset ptr into PTENRM array
+#	d4: LEN/Unchanged
+#	d5: 0/ICTR:LAMBDA
+#	d6: ILOG/ILOG or k if ((k<=0)&(ILOG<k))
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: x/ptr to PTENRM array
+#	a2: x/x
+#	fp0: float(ILOG)/Unchanged
+#	fp1: x/10^ISCALE
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
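+#
+# For example, ILOG = 2 and LEN = 6 give ISCALE = 2 + 1 - 6 = -3, so
+# LAMBDA is set and fp1 becomes 10^3; with ILOG = 5 and LEN = 3,
+# ISCALE = +3 and LAMBDA stays clear, but fp1 is still 10^3.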
+
+A7_str:
+	tst.l		%d7		# test sign of k
+	bgt.b		k_pos		# if pos and > 0, skip this
+	cmp.l		%d7,%d6		# test k - ILOG
+	blt.b		k_pos		# if ILOG >= k, skip this
+	mov.l		%d7,%d6		# if ((k<0) & (ILOG < k)) ILOG = k
+k_pos:
+	mov.l		%d6,%d0		# calc ILOG + 1 - LEN in d0
+	addq.l		&1,%d0		# add the 1
+	sub.l		%d4,%d0		# sub off LEN
+	swap		%d5		# use upper word of d5 for LAMBDA
+	clr.w		%d5		# set it zero initially
+	clr.w		%d2		# set up d2 for very small case
+	tst.l		%d0		# test sign of ISCALE
+	bge.b		iscale		# if pos, skip next inst
+	addq.w		&1,%d5		# if neg, set LAMBDA true
+	cmp.l		%d0,&0xffffecd4	# test iscale <= -4908
+	bgt.b		no_inf		# if false, skip rest
+	add.l		&24,%d0		# add in 24 to iscale
+	mov.l		&24,%d2		# put 24 in d2 for A9
+no_inf:
+	neg.l		%d0		# and take abs of ISCALE
+iscale:
+	fmov.s		FONE(%pc),%fp1	# init fp1 to 1
+	bfextu		USER_FPCR(%a6){&26:&2},%d1	# get initial rmode bits
+	lsl.w		&1,%d1		# put them in bits 2:1
+	add.w		%d5,%d1		# add in LAMBDA
+	lsl.w		&1,%d1		# put them in bits 3:1
+	tst.l		L_SCR2(%a6)	# test sign of original x
+	bge.b		x_pos		# if pos, don't set bit 0
+	addq.l		&1,%d1		# if neg, set bit 0
+x_pos:
+	lea.l		RBDTBL(%pc),%a2	# load rbdtbl base
+	mov.b		(%a2,%d1),%d3	# load d3 with new rmode
+	lsl.l		&4,%d3		# put bits in proper position
+	fmov.l		%d3,%fpcr	# load bits into fpu
+	lsr.l		&4,%d3		# put bits in proper position
+	tst.b		%d3		# decode new rmode for pten table
+	bne.b		not_rn		# if zero, it is RN
+	lea.l		PTENRN(%pc),%a1	# load a1 with RN table base
+	bra.b		rmode		# exit decode
+not_rn:
+	lsr.b		&1,%d3		# get lsb in carry
+	bcc.b		not_rp2		# if carry clear, it is RM
+	lea.l		PTENRP(%pc),%a1	# load a1 with RP table base
+	bra.b		rmode		# exit decode
+not_rp2:
+	lea.l		PTENRM(%pc),%a1	# load a1 with RM table base
+rmode:
+	clr.l		%d3		# clr table index
+e_loop2:
+	lsr.l		&1,%d0		# shift next bit into carry
+	bcc.b		e_next2		# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1	# mul by 10**(d3_bit_no)
+e_next2:
+	add.l		&12,%d3		# inc d3 to next pwrten table entry
+	tst.l		%d0		# test if ISCALE is zero
+	bne.b		e_loop2		# if not, loop
+
+# A8. Clr INEX; Force RZ.
+#     The operation in A3 above may have set INEX2.
+#     RZ mode is forced for the scaling operation to insure
+#     only one rounding error.  The grs bits are collected in
+#     the INEX flag for use in A10.
+#
+# Register usage:
+#	Input/Output
+
+	fmov.l		&0,%fpsr	# clr INEX
+	fmov.l		&rz_mode*0x10,%fpcr	# set RZ rounding mode
+
+# A9. Scale X -> Y.
+#     The mantissa is scaled to the desired number of significant
+#     digits.  The excess digits are collected in INEX2. If mul,
+#     Check d2 for excess 10 exponential value.  If not zero,
+#     the iscale value would have caused the pwrten calculation
+#     to overflow.  Only a negative iscale can cause this, so
+#     multiply by 10^(d2), which is now only allowed to be 24,
+#     with a multiply by 10^8 and 10^16, which is exact since
+#     10^24 is exact.  If the input was denormalized, we must
+#     create a busy stack frame with the mul command and the
+#     two operands, and allow the fpu to complete the multiply.
+#
+# Register usage:
+#	Input/Output
+#	d0: FPCR with RZ mode/Unchanged
+#	d2: 0 or 24/unchanged
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: ptr to PTENRM array/Unchanged
+#	a2: x/x
+#	fp0: float(ILOG)/X adjusted for SCALE (Y)
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
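+#
+# For example, X = 123.45 with ISCALE = -3 (LAMBDA set) takes the
+# multiply path: Y = 123.45 * 10^3 = 123450, which carries the desired
+# LEN = 6 integer digits; a positive ISCALE takes the divide path
+# instead.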
+
+A9_str:
+	fmov.x		(%a0),%fp0	# load X from memory
+	fabs.x		%fp0		# use abs(X)
+	tst.w		%d5		# LAMBDA is in lower word of d5
+	bne.b		sc_mul		# if neg (LAMBDA = 1), scale by mul
+	fdiv.x		%fp1,%fp0	# calculate X / SCALE -> Y to fp0
+	bra.w		A10_st		# branch to A10
+
+sc_mul:
+	tst.b		BINDEC_FLG(%a6)	# check for denorm
+	beq.w		A9_norm		# if norm, continue with mul
+
+# for DENORM, we must calculate:
+#	fp0 = input_op * 10^ISCALE * 10^24
+# since the input operand is a DENORM, we can't multiply it directly.
+# so, we do the multiplication of the exponents and mantissas separately.
+# in this way, we avoid underflow on intermediate stages of the
+# multiplication and guarantee a result without exception.
+	fmovm.x		&0x2,-(%sp)	# save 10^ISCALE to stack
+
+	mov.w		(%sp),%d3	# grab exponent
+	andi.w		&0x7fff,%d3	# clear sign
+	ori.w		&0x8000,(%a0)	# make DENORM exp negative
+	add.w		(%a0),%d3	# add DENORM exp to 10^ISCALE exp
+	subi.w		&0x3fff,%d3	# subtract BIAS
+	add.w		36(%a1),%d3
+	subi.w		&0x3fff,%d3	# subtract BIAS
+	add.w		48(%a1),%d3
+	subi.w		&0x3fff,%d3	# subtract BIAS
+
+	bmi.w		sc_mul_err	# if result is DENORM, punt!!!
+
+	andi.w		&0x8000,(%sp)	# keep sign
+	or.w		%d3,(%sp)	# insert new exponent
+	andi.w		&0x7fff,(%a0)	# clear sign bit on DENORM again
+	mov.l		0x8(%a0),-(%sp) # put input op mantissa on stk
+	mov.l		0x4(%a0),-(%sp)
+	mov.l		&0x3fff0000,-(%sp) # force exp to zero
+	fmovm.x		(%sp)+,&0x80	# load normalized DENORM into fp0
+	fmul.x		(%sp)+,%fp0
+
+#	fmul.x	36(%a1),%fp0	# multiply fp0 by 10^8
+#	fmul.x	48(%a1),%fp0	# multiply fp0 by 10^16
+	mov.l		36+8(%a1),-(%sp) # get 10^8 mantissa
+	mov.l		36+4(%a1),-(%sp)
+	mov.l		&0x3fff0000,-(%sp) # force exp to zero
+	mov.l		48+8(%a1),-(%sp) # get 10^16 mantissa
+	mov.l		48+4(%a1),-(%sp)
+	mov.l		&0x3fff0000,-(%sp)# force exp to zero
+	fmul.x		(%sp)+,%fp0	# multiply fp0 by 10^8
+	fmul.x		(%sp)+,%fp0	# multiply fp0 by 10^16
+	bra.b		A10_st
+
+sc_mul_err:
+	bra.b		sc_mul_err
+
+A9_norm:
+	tst.w		%d2		# test for small exp case
+	beq.b		A9_con		# if zero, continue as normal
+	fmul.x		36(%a1),%fp0	# multiply fp0 by 10^8
+	fmul.x		48(%a1),%fp0	# multiply fp0 by 10^16
+A9_con:
+	fmul.x		%fp1,%fp0	# calculate X * SCALE -> Y to fp0
+
+# A10. Or in INEX.
+#      If INEX is set, round error occurred.  This is compensated
+#      for by 'or-ing' in the INEX2 flag to the lsb of Y.
+#
+# Register usage:
+#	Input/Output
+#	d0: FPCR with RZ mode/FPSR with INEX2 isolated
+#	d2: x/x
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: x/ptr to FP_SCR1(a6)
+#	fp0: Y/Y with lsb adjusted
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/x
+
+A10_st:
+	fmov.l		%fpsr,%d0	# get FPSR
+	fmov.x		%fp0,FP_SCR1(%a6)	# move Y to memory
+	lea.l		FP_SCR1(%a6),%a2	# load a2 with ptr to FP_SCR1
+	btst		&9,%d0		# check if INEX2 set
+	beq.b		A11_st		# if clear, skip rest
+	or.l		&1,8(%a2)	# or in 1 to lsb of mantissa
+	fmov.x		FP_SCR1(%a6),%fp0	# write adjusted Y back to fpu
+
+
+# A11. Restore original FPCR; set size ext.
+#      Perform FINT operation in the user's rounding mode.  Keep
+#      the size to extended.  The sintdo entry point in the sint
+#      routine expects the FPCR value to be in USER_FPCR for
+#      mode and precision.  The original FPCR is saved in L_SCR1.
+
+A11_st:
+	mov.l		USER_FPCR(%a6),L_SCR1(%a6)	# save it for later
+	and.l		&0x00000030,USER_FPCR(%a6)	# set size to ext,
+#					;block exceptions
+
+
+# A12. Calculate YINT = FINT(Y) according to user's rounding mode.
+#      The FPSP routine sintd0 is used.  The output is in fp0.
+#
+# Register usage:
+#	Input/Output
+#	d0: FPSR with AINEX cleared/FPCR with size set to ext
+#	d2: x/x/scratch
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/Unchanged
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/src ptr for sintdo
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	a6: temp pointer to FP_SCR1(a6) - orig value saved and restored
+#	fp0: Y/YINT
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Y adjusted for inex/Y with original exponent
+#	L_SCR1:x/original USER_FPCR
+#	L_SCR2:first word of X packed/Unchanged
+
+A12_st:
+	movm.l	&0xc0c0,-(%sp)	# save regs used by sintd0	 {%d0-%d1/%a0-%a1}
+	mov.l	L_SCR1(%a6),-(%sp)
+	mov.l	L_SCR2(%a6),-(%sp)
+
+	lea.l		FP_SCR1(%a6),%a0	# a0 is ptr to FP_SCR1(a6)
+	fmov.x		%fp0,(%a0)	# move Y to memory at FP_SCR1(a6)
+	tst.l		L_SCR2(%a6)	# test sign of original operand
+	bge.b		do_fint12		# if pos, use Y
+	or.l		&0x80000000,(%a0)	# if neg, use -Y
+do_fint12:
+	mov.l	USER_FPSR(%a6),-(%sp)
+#	bsr	sintdo		# sint routine returns int in fp0
+
+	fmov.l	USER_FPCR(%a6),%fpcr
+	fmov.l	&0x0,%fpsr			# clear the AEXC bits!!!
+##	mov.l		USER_FPCR(%a6),%d0	# ext prec/keep rnd mode
+##	andi.l		&0x00000030,%d0
+##	fmov.l		%d0,%fpcr
+	fint.x		FP_SCR1(%a6),%fp0	# do fint()
+	fmov.l	%fpsr,%d0
+	or.w	%d0,FPSR_EXCEPT(%a6)
+##	fmov.l		&0x0,%fpcr
+##	fmov.l		%fpsr,%d0		# don't keep ccodes
+##	or.w		%d0,FPSR_EXCEPT(%a6)
+
+	mov.b	(%sp),USER_FPSR(%a6)
+	add.l	&4,%sp
+
+	mov.l	(%sp)+,L_SCR2(%a6)
+	mov.l	(%sp)+,L_SCR1(%a6)
+	movm.l	(%sp)+,&0x303	# restore regs used by sint	 {%d0-%d1/%a0-%a1}
+
+	mov.l	L_SCR2(%a6),FP_SCR1(%a6)	# restore original exponent
+	mov.l	L_SCR1(%a6),USER_FPCR(%a6)	# restore user's FPCR
+
+# A13. Check for LEN digits.
+#      If the int operation results in more than LEN digits,
+#      or in fewer than LEN digits, adjust ILOG and repeat from
+#      A6.  This test occurs only on the first pass.  If the
+#      result is exactly 10^LEN, increment ILOG and divide
+#      the mantissa by 10.  The calculation of 10^LEN cannot
+#      be inexact, since all powers of ten up to 10^27 are exact
+#      in extended precision, so the use of a precomputed power-of-ten
+#      table introduces no error.
+#
+#
+# Register usage:
+#	Input/Output
+#	d0: FPCR with size set to ext/scratch final = 0
+#	d2: x/x
+#	d3: x/scratch final = x
+#	d4: LEN/LEN adjusted
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG/ILOG adjusted
+#	d7: k-factor/Unchanged
+#	a0: pointer into memory for packed bcd string formation
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: int portion of Y/abs(YINT) adjusted
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/10^LEN
+#	F_SCR1:x/x
+#	F_SCR2:Y with original exponent/Unchanged
+#	L_SCR1:original USER_FPCR/Unchanged
+#	L_SCR2:first word of X packed/Unchanged
+
+A13_st:
+	swap		%d5		# put ICTR in lower word of d5
+	tst.w		%d5		# check if ICTR = 0
+	bne		not_zr		# if non-zero, go to second test
+#
+# Compute 10^(LEN-1)
+#
+	fmov.s		FONE(%pc),%fp2	# init fp2 to 1.0
+	mov.l		%d4,%d0		# put LEN in d0
+	subq.l		&1,%d0		# d0 = LEN -1
+	clr.l		%d3		# clr table index
+l_loop:
+	lsr.l		&1,%d0		# shift next bit into carry
+	bcc.b		l_next		# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp2	# mul by 10**(d3_bit_no)
+l_next:
+	add.l		&12,%d3		# inc d3 to next pwrten table entry
+	tst.l		%d0		# test if LEN is zero
+	bne.b		l_loop		# if not, loop
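+#
+# this loop is binary exponentiation over the PTENxx table: entry k
+# (each entry is 12 bytes) holds 10^(2^k), and each set bit of LEN-1
+# selects one factor.  a minimal C sketch of the same idea, using a
+# hypothetical table pten[k] = 10^(2^k):
+#
+#	long double p = 1.0L;			/* illustrative only */
+#	for (int k = 0; n != 0; n >>= 1, k++)
+#		if (n & 1)
+#			p *= pten[k];		/* p == 10^n for the original n */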
+#
+# 10^(LEN-1) is computed for this test and A14.  If the input was
+# denormalized, check only the case in which YINT > 10^LEN.
+#
+	tst.b		BINDEC_FLG(%a6)	# check if input was norm
+	beq.b		A13_con		# if norm, continue with checking
+	fabs.x		%fp0		# take abs of YINT
+	bra		test_2
+#
+# Compare abs(YINT) to 10^(LEN-1) and 10^LEN
+#
+A13_con:
+	fabs.x		%fp0		# take abs of YINT
+	fcmp.x		%fp0,%fp2	# compare abs(YINT) with 10^(LEN-1)
+	fbge.w		test_2		# if greater, do next test
+	subq.l		&1,%d6		# subtract 1 from ILOG
+	mov.w		&1,%d5		# set ICTR
+	fmov.l		&rm_mode*0x10,%fpcr	# set rmode to RM
+	fmul.s		FTEN(%pc),%fp2	# compute 10^LEN
+	bra.w		A6_str		# return to A6 and recompute YINT
+test_2:
+	fmul.s		FTEN(%pc),%fp2	# compute 10^LEN
+	fcmp.x		%fp0,%fp2	# compare abs(YINT) with 10^LEN
+	fblt.w		A14_st		# if less, all is ok, go to A14
+	fbgt.w		fix_ex		# if greater, fix and redo
+	fdiv.s		FTEN(%pc),%fp0	# if equal, divide by 10
+	addq.l		&1,%d6		# and inc ILOG
+	bra.b		A14_st		# and continue elsewhere
+fix_ex:
+	addq.l		&1,%d6		# increment ILOG by 1
+	mov.w		&1,%d5		# set ICTR
+	fmov.l		&rm_mode*0x10,%fpcr	# set rmode to RM
+	bra.w		A6_str		# return to A6 and recompute YINT
+#
+# Since ICTR <> 0, we have already been through one adjustment,
+# and shouldn't have another; this is to check if abs(YINT) = 10^LEN
+# 10^LEN is again computed using whatever table is in a1 since the
+# value calculated cannot be inexact.
+#
+not_zr:
+	fmov.s		FONE(%pc),%fp2	# init fp2 to 1.0
+	mov.l		%d4,%d0		# put LEN in d0
+	clr.l		%d3		# clr table index
+z_loop:
+	lsr.l		&1,%d0		# shift next bit into carry
+	bcc.b		z_next		# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp2	# mul by 10**(d3_bit_no)
+z_next:
+	add.l		&12,%d3		# inc d3 to next pwrten table entry
+	tst.l		%d0		# test if LEN is zero
+	bne.b		z_loop		# if not, loop
+	fabs.x		%fp0		# get abs(YINT)
+	fcmp.x		%fp0,%fp2	# check if abs(YINT) = 10^LEN
+	fbneq.w		A14_st		# if not, skip this
+	fdiv.s		FTEN(%pc),%fp0	# divide abs(YINT) by 10
+	addq.l		&1,%d6		# and inc ILOG by 1
+	addq.l		&1,%d4		# and inc LEN
+	fmul.s		FTEN(%pc),%fp2	# if LEN++, then get 10^LEN
+
+# A14. Convert the mantissa to bcd.
+#      The binstr routine is used to convert the LEN digit
+#      mantissa to bcd in memory.  The input to binstr is
+#      to be a fraction; i.e. (mantissa)/10^LEN and adjusted
+#      such that the decimal point is to the left of bit 63.
+#      The bcd digits are stored in the correct position in
+#      the final string area in memory.
+#
+#
+# Register usage:
+#	Input/Output
+#	d0: x/LEN call to binstr - final is 0
+#	d1: x/0
+#	d2: x/ms 32-bits of mant of abs(YINT)
+#	d3: x/ls 32-bits of mant of abs(YINT)
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG
+#	d7: k-factor/Unchanged
+#	a0: pointer into memory for packed bcd string formation
+#	    /ptr to first mantissa byte in result string
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: int portion of Y/abs(YINT) adjusted
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: 10^LEN/Unchanged
+#	F_SCR1:x/Work area for final result
+#	F_SCR2:Y with original exponent/Unchanged
+#	L_SCR1:original USER_FPCR/Unchanged
+#	L_SCR2:first word of X packed/Unchanged
+
+A14_st:
+	fmov.l		&rz_mode*0x10,%fpcr	# force rz for conversion
+	fdiv.x		%fp2,%fp0	# divide abs(YINT) by 10^LEN
+	lea.l		FP_SCR0(%a6),%a0
+	fmov.x		%fp0,(%a0)	# move abs(YINT)/10^LEN to memory
+	mov.l		4(%a0),%d2	# move 2nd word of FP_RES to d2
+	mov.l		8(%a0),%d3	# move 3rd word of FP_RES to d3
+	clr.l		4(%a0)		# zero word 2 of FP_RES
+	clr.l		8(%a0)		# zero word 3 of FP_RES
+	mov.l		(%a0),%d0	# move exponent to d0
+	swap		%d0		# put exponent in lower word
+	beq.b		no_sft		# if zero, don't shift
+	sub.l		&0x3ffd,%d0	# sub bias less 2 to make fract
+	tst.l		%d0		# check if > 1
+	bgt.b		no_sft		# if so, don't shift
+	neg.l		%d0		# make exp positive
+m_loop:
+	lsr.l		&1,%d2		# shift d2:d3 right, add 0s
+	roxr.l		&1,%d3		# the number of places
+	dbf.w		%d0,m_loop	# given in d0
+no_sft:
+	tst.l		%d2		# check for mantissa of zero
+	bne.b		no_zr		# if not, go on
+	tst.l		%d3		# continue zero check
+	beq.b		zer_m		# if zero, go directly to binstr
+no_zr:
+	clr.l		%d1		# put zero in d1 for addx
+	add.l		&0x00000080,%d3	# inc at bit 7
+	addx.l		%d1,%d2		# continue inc
+	and.l		&0xffffff80,%d3	# strip off lsb not used by 882
+zer_m:
+	mov.l		%d4,%d0		# put LEN in d0 for binstr call
+	addq.l		&3,%a0		# a0 points to M16 byte in result
+	bsr		binstr		# call binstr to convert mant
+
+
+# A15. Convert the exponent to bcd.
+#      As in A14 above, the exp is converted to bcd and the
+#      digits are stored in the final string.
+#
+#      Digits are stored in L_SCR1(a6) on return from BINDEC as:
+#
+#	 32               16 15                0
+#	-----------------------------------------
+#	|  0 | e3 | e2 | e1 | e4 |  X |  X |  X |
+#	-----------------------------------------
+#
+# And are moved into their proper places in FP_SCR0.  If digit e4
+# is non-zero, OPERR is signaled.  In all cases, all 4 digits are
+# written as specified in the 881/882 manual for packed decimal.
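+#
+# note that the bit-field offsets used below count from the most
+# significant bit, so FP_SCR0{&4:&12} is bits 27..16 of the first
+# longword and FP_SCR0{&16:&4} is bits 15..12.  a rough C equivalent
+# of one such insert (illustrative only, not the actual packing code):
+#
+#	/* place the low 12 bits of src at bits 27..16 of lword */
+#	lword = (lword & ~0x0fff0000) | ((src & 0xfff) << 16);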
+#
+# Register usage:
+#	Input/Output
+#	d0: x/LEN call to binstr - final is 0
+#	d1: x/scratch (0);shift count for final exponent packing
+#	d2: x/ms 32-bits of exp fraction/scratch
+#	d3: x/ls 32-bits of exp fraction
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG
+#	d7: k-factor/Unchanged
+#	a0: ptr to result string/ptr to L_SCR1(a6)
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: abs(YINT) adjusted/float(ILOG)
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: 10^LEN/Unchanged
+#	F_SCR1:Work area for final result/BCD result
+#	F_SCR2:Y with original exponent/ILOG/10^4
+#	L_SCR1:original USER_FPCR/Exponent digits on return from binstr
+#	L_SCR2:first word of X packed/Unchanged
+
+A15_st:
+	tst.b		BINDEC_FLG(%a6)	# check for denorm
+	beq.b		not_denorm
+	ftest.x		%fp0		# test for zero
+	fbeq.w		den_zero	# if zero, use k-factor or 4933
+	fmov.l		%d6,%fp0	# float ILOG
+	fabs.x		%fp0		# get abs of ILOG
+	bra.b		convrt
+den_zero:
+	tst.l		%d7		# check sign of the k-factor
+	blt.b		use_ilog	# if negative, use ILOG
+	fmov.s		F4933(%pc),%fp0	# force exponent to 4933
+	bra.b		convrt		# do it
+use_ilog:
+	fmov.l		%d6,%fp0	# float ILOG
+	fabs.x		%fp0		# get abs of ILOG
+	bra.b		convrt
+not_denorm:
+	ftest.x		%fp0		# test for zero
+	fbneq.w		not_zero	# if zero, force exponent
+	fmov.s		FONE(%pc),%fp0	# force exponent to 1
+	bra.b		convrt		# do it
+not_zero:
+	fmov.l		%d6,%fp0	# float ILOG
+	fabs.x		%fp0		# get abs of ILOG
+convrt:
+	fdiv.x		24(%a1),%fp0	# compute ILOG/10^4
+	fmov.x		%fp0,FP_SCR1(%a6)	# store fp0 in memory
+	mov.l		4(%a2),%d2	# move word 2 to d2
+	mov.l		8(%a2),%d3	# move word 3 to d3
+	mov.w		(%a2),%d0	# move exp to d0
+	beq.b		x_loop_fin	# if zero, skip the shift
+	sub.w		&0x3ffd,%d0	# subtract off bias
+	neg.w		%d0		# make exp positive
+x_loop:
+	lsr.l		&1,%d2		# shift d2:d3 right
+	roxr.l		&1,%d3		# the number of places
+	dbf.w		%d0,x_loop	# given in d0
+x_loop_fin:
+	clr.l		%d1		# put zero in d1 for addx
+	add.l		&0x00000080,%d3	# inc at bit 7
+	addx.l		%d1,%d2		# continue inc
+	and.l		&0xffffff80,%d3	# strip off lsb not used by 882
+	mov.l		&4,%d0		# put 4 in d0 for binstr call
+	lea.l		L_SCR1(%a6),%a0	# a0 is ptr to L_SCR1 for exp digits
+	bsr		binstr		# call binstr to convert exp
+	mov.l		L_SCR1(%a6),%d0	# load L_SCR1 lword to d0
+	mov.l		&12,%d1		# use d1 for shift count
+	lsr.l		%d1,%d0		# shift d0 right by 12
+	bfins		%d0,FP_SCR0(%a6){&4:&12}	# put e3:e2:e1 in FP_SCR0
+	lsr.l		%d1,%d0		# shift d0 right by 12
+	bfins		%d0,FP_SCR0(%a6){&16:&4}	# put e4 in FP_SCR0
+	tst.b		%d0		# check if e4 is zero
+	beq.b		A16_st		# if zero, skip rest
+	or.l		&opaop_mask,USER_FPSR(%a6)	# set OPERR & AIOP in USER_FPSR
+
+
+# A16. Write sign bits to final string.
+#	   Sigma is bit 31 of initial value; RHO is bit 31 of d6 (ILOG).
+#
+# Register usage:
+#	Input/Output
+#	d0: x/scratch - final is x
+#	d2: x/x
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG/ILOG adjusted
+#	d7: k-factor/Unchanged
+#	a0: ptr to L_SCR1(a6)/Unchanged
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: float(ILOG)/Unchanged
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: 10^LEN/Unchanged
+#	F_SCR1:BCD result with correct signs
+#	F_SCR2:ILOG/10^4
+#	L_SCR1:Exponent digits on return from binstr
+#	L_SCR2:first word of X packed/Unchanged
+
+A16_st:
+	clr.l		%d0		# clr d0 for collection of signs
+	and.b		&0x0f,FP_SCR0(%a6)	# clear first nibble of FP_SCR0
+	tst.l		L_SCR2(%a6)	# check sign of original mantissa
+	bge.b		mant_p		# if pos, don't set SM
+	mov.l		&2,%d0		# move 2 in to d0 for SM
+mant_p:
+	tst.l		%d6		# check sign of ILOG
+	bge.b		wr_sgn		# if pos, don't set SE
+	addq.l		&1,%d0		# set bit 0 in d0 for SE
+wr_sgn:
+	bfins		%d0,FP_SCR0(%a6){&0:&2}	# insert SM and SE into FP_SCR0
+
+# Clean up and restore all registers used.
+
+	fmov.l		&0,%fpsr	# clear possible inex2/ainex bits
+	fmovm.x		(%sp)+,&0xe0	#  {%fp0-%fp2}
+	movm.l		(%sp)+,&0x4fc	#  {%d2-%d7/%a2}
+	rts
+
+	global		PTENRN
+PTENRN:
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
+
+	global		PTENRP
+PTENRP:
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D6	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C18	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
+
+	global		PTENRM
+PTENRM:
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59D	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CDF	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8D	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C6	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE4	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979A	# 10 ^ 4096
+
+#########################################################################
+# binstr(): Converts a 64-bit binary integer to bcd.			#
+#									#
+# INPUT *************************************************************** #
+#	d2:d3 = 64-bit binary integer					#
+#	d0    = desired length (LEN)					#
+#	a0    = pointer to start in memory for bcd characters		#
+#		(This pointer must point to byte 4 of the first		#
+#		 lword of the packed decimal memory string.)		#
+#									#
+# OUTPUT ************************************************************** #
+#	a0 = pointer to LEN bcd digits representing the 64-bit integer.	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The 64-bit binary is assumed to have a decimal point before	#
+#	bit 63.  The fraction is multiplied by 10 using a mul by 2	#
+#	shift and a mul by 8 shift.  The bits shifted out of the	#
+#	msb form a decimal digit.  This process is iterated until	#
+#	LEN digits are formed.						#
+#									#
+# A1. Init d7 to 1.  D7 is the byte digit counter, and if 1, the	#
+#     digit formed will be assumed the least significant.  This is	#
+#     to force the first byte formed to have a 0 in the upper 4 bits.	#
+#									#
+# A2. Beginning of the loop:						#
+#     Copy the fraction in d2:d3 to d4:d5.				#
+#									#
+# A3. Multiply the fraction in d2:d3 by 8 using bit-field		#
+#     extracts and shifts.  The three msbs from d2 will go into d1.	#
+#									#
+# A4. Multiply the fraction in d4:d5 by 2 using shifts.  The msb	#
+#     will be collected by the carry.					#
+#									#
+# A5. Add using the carry the 64-bit quantities in d2:d3 and d4:d5	#
+#     into d2:d3.  D1 will contain the bcd digit formed.		#
+#									#
+# A6. Test d7.  If zero, the digit formed is the ms digit.  If non-	#
+#     zero, it is the ls digit.  Put the digit in its place in the	#
+#     upper word of d0.  If it is the ls digit, write the word		#
+#     from d0 to memory.						#
+#									#
+# A7. Decrement d6 (LEN counter) and repeat the loop until zero.	#
+#									#
+#########################################################################
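+
+#	A minimal C sketch of the same technique (illustrative only, not
+#	the FPSP code itself): the fraction occupies a 64-bit word with
+#	its binary point above bit 63; each pass computes frac*10 as
+#	frac*8 + frac*2, and whatever spills out past bit 63 is the next
+#	decimal digit, most significant digit first.  (The real routine
+#	packs two bcd digits per byte; this sketch stores one per byte.)
+#
+#	#include <stdint.h>
+#
+#	static void binstr_sketch(uint64_t frac, int len, unsigned char *out)
+#	{
+#		while (len--) {
+#			uint64_t by8 = frac << 3, by2 = frac << 1;
+#			unsigned digit = (unsigned)(frac >> 61)   /* bits lost by *8 */
+#				       + (unsigned)(frac >> 63);  /* bit  lost by *2 */
+#			frac = by8 + by2;
+#			if (frac < by8)		/* carry out of the 64-bit add */
+#				digit++;
+#			*out++ = (unsigned char)digit;	/* 0..9 */
+#		}
+#	}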
+
+#	Implementation Notes:
+#
+#	The registers are used as follows:
+#
+#		d0: LEN counter
+#		d1: temp used to form the digit
+#		d2: upper 32-bits of fraction for mul by 8
+#		d3: lower 32-bits of fraction for mul by 8
+#		d4: upper 32-bits of fraction for mul by 2
+#		d5: lower 32-bits of fraction for mul by 2
+#		d6: temp for bit-field extracts
+#		d7: byte digit formation word;digit count {0,1}
+#		a0: pointer into memory for packed bcd string formation
+#
+
+	global		binstr
+binstr:
+	movm.l		&0xff00,-(%sp)	#  {%d0-%d7}
+
+#
+# A1: Init d7
+#
+	mov.l		&1,%d7		# init d7 for second digit
+	subq.l		&1,%d0		# for dbf d0 would have LEN+1 passes
+#
+# A2. Copy d2:d3 to d4:d5.  Start loop.
+#
+loop:
+	mov.l		%d2,%d4		# copy the fraction before muls
+	mov.l		%d3,%d5		# to d4:d5
+#
+# A3. Multiply d2:d3 by 8; extract msbs into d1.
+#
+	bfextu		%d2{&0:&3},%d1	# copy 3 msbs of d2 into d1
+	asl.l		&3,%d2		# shift d2 left by 3 places
+	bfextu		%d3{&0:&3},%d6	# copy 3 msbs of d3 into d6
+	asl.l		&3,%d3		# shift d3 left by 3 places
+	or.l		%d6,%d2		# or in msbs from d3 into d2
+#
+# A4. Multiply d4:d5 by 2; add carry out to d1.
+#
+	asl.l		&1,%d5		# mul d5 by 2
+	roxl.l		&1,%d4		# mul d4 by 2
+	swap		%d6		# put 0 in d6 lower word
+	addx.w		%d6,%d1		# add in extend from mul by 2
+#
+# A5. Add mul by 8 to mul by 2.  D1 contains the digit formed.
+#
+	add.l		%d5,%d3		# add lower 32 bits
+	nop				# ERRATA FIX #13 (Rev. 1.2 6/6/90)
+	addx.l		%d4,%d2		# add with extend upper 32 bits
+	nop				# ERRATA FIX #13 (Rev. 1.2 6/6/90)
+	addx.w		%d6,%d1		# add in extend from add to d1
+	swap		%d6		# with d6 = 0; put 0 in upper word
+#
+# A6. Test d7 and branch.
+#
+	tst.w		%d7		# if zero, store digit & to loop
+	beq.b		first_d		# if non-zero, form byte & write
+sec_d:
+	swap		%d7		# bring first digit to word d7b
+	asl.w		&4,%d7		# first digit in upper 4 bits d7b
+	add.w		%d1,%d7		# add in ls digit to d7b
+	mov.b		%d7,(%a0)+	# store d7b byte in memory
+	swap		%d7		# put LEN counter in word d7a
+	clr.w		%d7		# set d7a to signal no digits done
+	dbf.w		%d0,loop	# do loop some more!
+	bra.b		end_bstr	# finished, so exit
+first_d:
+	swap		%d7		# put digit word in d7b
+	mov.w		%d1,%d7		# put new digit in d7b
+	swap		%d7		# put LEN counter in word d7a
+	addq.w		&1,%d7		# set d7a to signal first digit done
+	dbf.w		%d0,loop	# do loop some more!
+	swap		%d7		# put last digit in string
+	lsl.w		&4,%d7		# move it to upper 4 bits
+	mov.b		%d7,(%a0)+	# store it in memory string
+#
+# Clean up and return; the bcd string has been written to memory.
+#
+end_bstr:
+	movm.l		(%sp)+,&0xff	#  {%d0-%d7}
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	facc_in_b(): dmem_read_byte failed				#
+#	facc_in_w(): dmem_read_word failed				#
+#	facc_in_l(): dmem_read_long failed				#
+#	facc_in_d(): dmem_read of dbl prec failed			#
+#	facc_in_x(): dmem_read of ext prec failed			#
+#									#
+#	facc_out_b(): dmem_write_byte failed				#
+#	facc_out_w(): dmem_write_word failed				#
+#	facc_out_l(): dmem_write_long failed				#
+#	facc_out_d(): dmem_write of dbl prec failed			#
+#	facc_out_x(): dmem_write of ext prec failed			#
+#									#
+# XREF ****************************************************************	#
+#	_real_access() - exit through access error handler		#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Flow jumps here when an FP data fetch call gets an error	#
+# result. This means the operating system wants an access error frame	#
+# made out of the current exception stack frame.			#
+#	So, we first call restore() which makes sure that any updated	#
+# -(an)+ register gets returned to its pre-exception value and then	#
+# we change the stack to an access error stack frame.			#
+#									#
+#########################################################################
+
+facc_in_b:
+	movq.l		&0x1,%d0			# one byte
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0121,EXC_VOFF(%a6)		# set FSLW
+	bra.w		facc_finish
+
+facc_in_w:
+	movq.l		&0x2,%d0			# two bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0141,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_in_l:
+	movq.l		&0x4,%d0			# four bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0101,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_in_d:
+	movq.l		&0x8,%d0			# eight bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0161,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_in_x:
+	movq.l		&0xc,%d0			# twelve bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0161,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+################################################################
+
+facc_out_b:
+	movq.l		&0x1,%d0			# one byte
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00a1,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_w:
+	movq.l		&0x2,%d0			# two bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00c1,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_l:
+	movq.l		&0x4,%d0			# four bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x0081,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_d:
+	movq.l		&0x8,%d0			# eight bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00e1,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_x:
+	mov.l		&0xc,%d0			# twelve bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00e1,EXC_VOFF(%a6)		# set FSLW
+
+# here's where we actually create the access error frame from the
+# current exception stack frame.
+facc_finish:
+	mov.l		USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	mov.l		(%sp),-(%sp)		# store SR, hi(PC)
+	mov.l		0x8(%sp),0x4(%sp)	# store lo(PC)
+	mov.l		0xc(%sp),0x8(%sp)	# store EA
+	mov.l		&0x00000001,0xc(%sp)	# store FSLW
+	mov.w		0x6(%sp),0xc(%sp)	# fix FSLW (size)
+	mov.w		&0x4008,0x6(%sp)	# store voff
+
+	btst		&0x5,(%sp)		# supervisor or user mode?
+	beq.b		facc_out2		# user
+	bset		&0x2,0xd(%sp)		# set supervisor TM bit
+
+facc_out2:
+	bra.l		_real_access
+
+##################################################################
+
+# if the effective addressing mode was predecrement or postincrement,
+# the emulation has already updated An to its correct post-instruction
+# value. but since we're exiting to the access error handler, An must
+# be returned to its pre-instruction value. we do that here.
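+#
+# in C terms, a minimal sketch of the fix-up (size is the number of
+# bytes the faulted access would have transferred; names illustrative):
+#
+#	if (postincrement)
+#		an -= size;	/* undo the increment already applied */
+#	else			/* predecrement */
+#		an += size;	/* undo the decrement already applied */
+#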
+restore:
+	mov.b		EXC_OPWORD+0x1(%a6),%d1
+	andi.b		&0x38,%d1		# extract opmode
+	cmpi.b		%d1,&0x18		# postinc?
+	beq.w		rest_inc
+	cmpi.b		%d1,&0x20		# predec?
+	beq.w		rest_dec
+	rts
+
+rest_inc:
+	mov.b		EXC_OPWORD+0x1(%a6),%d1
+	andi.w		&0x0007,%d1		# fetch An
+
+	mov.w		(tbl_rest_inc.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_rest_inc.b,%pc,%d1.w*1)
+
+tbl_rest_inc:
+	short		ri_a0 - tbl_rest_inc
+	short		ri_a1 - tbl_rest_inc
+	short		ri_a2 - tbl_rest_inc
+	short		ri_a3 - tbl_rest_inc
+	short		ri_a4 - tbl_rest_inc
+	short		ri_a5 - tbl_rest_inc
+	short		ri_a6 - tbl_rest_inc
+	short		ri_a7 - tbl_rest_inc
+
+ri_a0:
+	sub.l		%d0,EXC_DREGS+0x8(%a6)	# fix stacked a0
+	rts
+ri_a1:
+	sub.l		%d0,EXC_DREGS+0xc(%a6)	# fix stacked a1
+	rts
+ri_a2:
+	sub.l		%d0,%a2			# fix a2
+	rts
+ri_a3:
+	sub.l		%d0,%a3			# fix a3
+	rts
+ri_a4:
+	sub.l		%d0,%a4			# fix a4
+	rts
+ri_a5:
+	sub.l		%d0,%a5			# fix a5
+	rts
+ri_a6:
+	sub.l		%d0,(%a6)		# fix stacked a6
+	rts
+# if it's a fmove out instruction, we don't have to fix a7
+# because we hadn't changed it yet. if it's an opclass two
+# instruction (data moved in) and the exception was in supervisor
+# mode, then a7 also wasn't updated. if it was user mode, then
+# restore the correct a7 which is in the USP currently.
+ri_a7:
+	cmpi.b		EXC_VOFF(%a6),&0x30	# move in or out?
+	bne.b		ri_a7_done		# out
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.b		ri_a7_done		# supervisor
+	movc		%usp,%a0		# restore USP
+	sub.l		%d0,%a0
+	movc		%a0,%usp
+ri_a7_done:
+	rts
+
+# need to invert adjustment value if the <ea> was predec
+rest_dec:
+	neg.l		%d0
+	bra.b		rest_inc
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
new file mode 100644
index 0000000..458925c
--- /dev/null
+++ b/arch/m68k/kernel/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the linux kernel.
+#
+
+ifndef CONFIG_SUN3
+  extra-y := head.o
+else
+  extra-y := sun3-head.o
+endif
+extra-y	+= vmlinux.lds
+
+obj-y		:= entry.o process.o traps.o ints.o signal.o ptrace.o \
+			sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o
+
+obj-$(CONFIG_PCI)	+= bios32.o
+obj-$(CONFIG_MODULES)	+= module.o
+
+EXTRA_AFLAGS := -traditional
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
new file mode 100644
index 0000000..cee3317
--- /dev/null
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -0,0 +1,109 @@
+/*
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ */
+
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/amigahw.h>
+#include <linux/font.h>
+
+#define DEFINE(sym, val) \
+	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
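+
+/*
+ * For example, DEFINE(TASK_MM, offsetof(struct task_struct, mm)) below makes
+ * the compiler emit an assembly line of the form
+ *	->TASK_MM <constant> offsetof(struct task_struct, mm)
+ * into the generated .s file; the build scripts then rewrite each "->" line
+ * into a #define when producing asm-offsets.h (the exact post-processing is
+ * done by kbuild, not by this file).
+ */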
+
+int main(void)
+{
+	/* offsets into the task struct */
+	DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+	DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+	DEFINE(TASK_WORK, offsetof(struct task_struct, thread.work));
+	DEFINE(TASK_NEEDRESCHED, offsetof(struct task_struct, thread.work.need_resched));
+	DEFINE(TASK_SYSCALL_TRACE, offsetof(struct task_struct, thread.work.syscall_trace));
+	DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, thread.work.sigpending));
+	DEFINE(TASK_NOTIFY_RESUME, offsetof(struct task_struct, thread.work.notify_resume));
+	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+	DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+
+	/* offsets into the thread struct */
+	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+	DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
+	DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
+	DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
+	DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
+	DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
+	DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
+	DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
+	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
+
+	/* offsets into the pt_regs */
+	DEFINE(PT_D0, offsetof(struct pt_regs, d0));
+	DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0));
+	DEFINE(PT_D1, offsetof(struct pt_regs, d1));
+	DEFINE(PT_D2, offsetof(struct pt_regs, d2));
+	DEFINE(PT_D3, offsetof(struct pt_regs, d3));
+	DEFINE(PT_D4, offsetof(struct pt_regs, d4));
+	DEFINE(PT_D5, offsetof(struct pt_regs, d5));
+	DEFINE(PT_A0, offsetof(struct pt_regs, a0));
+	DEFINE(PT_A1, offsetof(struct pt_regs, a1));
+	DEFINE(PT_A2, offsetof(struct pt_regs, a2));
+	DEFINE(PT_PC, offsetof(struct pt_regs, pc));
+	DEFINE(PT_SR, offsetof(struct pt_regs, sr));
+	/* bitfields are a bit difficult */
+	DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4);
+
+	/* offsets into the irq_handler struct */
+	DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler));
+	DEFINE(IRQ_DEVID, offsetof(struct irq_node, dev_id));
+	DEFINE(IRQ_NEXT, offsetof(struct irq_node, next));
+
+	/* offsets into the kernel_stat struct */
+	DEFINE(STAT_IRQ, offsetof(struct kernel_stat, irqs));
+
+	/* offsets into the irq_cpustat_t struct */
+	DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
+
+	/* offsets into the bi_record struct */
+	DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
+	DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
+	DEFINE(BIR_DATA, offsetof(struct bi_record, data));
+
+	/* offsets into font_desc (drivers/video/console/font.h) */
+	DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
+	DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
+	DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
+	DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height));
+	DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data));
+	DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
+
+	/* signal defines */
+	DEFINE(SIGSEGV, SIGSEGV);
+	DEFINE(SEGV_MAPERR, SEGV_MAPERR);
+	DEFINE(SIGTRAP, SIGTRAP);
+	DEFINE(TRAP_TRACE, TRAP_TRACE);
+
+	/* offsets into the custom struct */
+	DEFINE(CUSTOMBASE, &custom);
+	DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
+	DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
+	DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
+	DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq));
+	DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr));
+	DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat));
+	DEFINE(C_SERPER, offsetof(struct CUSTOM, serper));
+	DEFINE(CIAABASE, &ciaa);
+	DEFINE(CIABBASE, &ciab);
+	DEFINE(C_PRA, offsetof(struct CIA, pra));
+	DEFINE(ZTWOBASE, zTwoBase);
+
+	return 0;
+}
diff --git a/arch/m68k/kernel/bios32.c b/arch/m68k/kernel/bios32.c
new file mode 100644
index 0000000..a901685
--- /dev/null
+++ b/arch/m68k/kernel/bios32.c
@@ -0,0 +1,515 @@
+/*
+ * bios32.c - PCI BIOS functions for m68k systems.
+ *
+ * Written by Wout Klaren.
+ *
+ * Based on the DEC Alpha bios32.c by Dave Rusling and David Mosberger.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#if 0
+# define DBG_DEVS(args)		printk args
+#else
+# define DBG_DEVS(args)
+#endif
+
+#ifdef CONFIG_PCI
+
+/*
+ * PCI support for Linux/m68k. Currently only the Hades is supported.
+ *
+ * The support for PCI bridges in the DEC Alpha version has
+ * been removed in this version.
+ */
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/pci.h>
+#include <asm/uaccess.h>
+
+#define KB		1024
+#define MB		(1024*KB)
+#define GB		(1024*MB)
+
+#define MAJOR_REV	0
+#define MINOR_REV	5
+
+/*
+ * Align VAL to ALIGN, which must be a power of two.
+ */
+
+#define ALIGN(val,align)	(((val) + ((align) - 1)) & ~((align) - 1))
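+/*
+ * For example, ALIGN(0x1234, 0x1000) == 0x2000, while an already aligned
+ * value such as ALIGN(0x2000, 0x1000) is returned unchanged.
+ */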
+
+/*
+ * Offsets relative to the I/O and memory base addresses from where resources
+ * are allocated.
+ */
+
+#define IO_ALLOC_OFFSET		0x00004000
+#define MEM_ALLOC_OFFSET	0x04000000
+
+/*
+ * Declarations of hardware specific initialisation functions.
+ */
+
+extern struct pci_bus_info *init_hades_pci(void);
+
+/*
+ * Bus info structure of the PCI bus. A pointer to this structure is
+ * put in the sysdata member of the pci_bus structure.
+ */
+
+static struct pci_bus_info *bus_info;
+
+static int pci_modify = 1;		/* If set, lay out the PCI bus ourselves. */
+static int skip_vga;			/* If set, do not modify base addresses
+					   of VGA cards. */
+static int disable_pci_burst;		/* If set, do not allow PCI bursts. */
+
+static unsigned int io_base;
+static unsigned int mem_base;
+
+/*
+ * static void disable_dev(struct pci_dev *dev)
+ *
+ * Disable PCI device DEV so that it does not respond to I/O or memory
+ * accesses.
+ *
+ * Parameters:
+ *
+ * dev	- device to disable.
+ */
+
+static void __init disable_dev(struct pci_dev *dev)
+{
+	unsigned short cmd;
+
+	if (((dev->class >> 8 == PCI_CLASS_NOT_DEFINED_VGA) ||
+	     (dev->class >> 8 == PCI_CLASS_DISPLAY_VGA) ||
+	     (dev->class >> 8 == PCI_CLASS_DISPLAY_XGA)) && skip_vga)
+		return;
+
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+
+	cmd &= (~PCI_COMMAND_IO & ~PCI_COMMAND_MEMORY & ~PCI_COMMAND_MASTER);
+	pci_write_config_word(dev, PCI_COMMAND, cmd);
+}
+
+/*
+ * static void layout_dev(struct pci_dev *dev)
+ *
+ * Layout memory and I/O for a device.
+ *
+ * Parameters:
+ *
+ * dev	- device to layout memory and I/O for.
+ */
+
+static void __init layout_dev(struct pci_dev *dev)
+{
+	unsigned short cmd;
+	unsigned int base, mask, size, reg;
+	unsigned int alignto;
+	int i;
+
+	/*
+	 * Skip video cards if requested.
+	 */
+
+	if (((dev->class >> 8 == PCI_CLASS_NOT_DEFINED_VGA) ||
+	     (dev->class >> 8 == PCI_CLASS_DISPLAY_VGA) ||
+	     (dev->class >> 8 == PCI_CLASS_DISPLAY_XGA)) && skip_vga)
+		return;
+
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+
+	for (reg = PCI_BASE_ADDRESS_0, i = 0; reg <= PCI_BASE_ADDRESS_5; reg += 4, i++)
+	{
+		/*
+		 * Figure out how much space and of what type this
+		 * device wants.
+		 */
+
+		pci_write_config_dword(dev, reg, 0xffffffff);
+		pci_read_config_dword(dev, reg, &base);
+
+		if (!base)
+		{
+			/* this base-address register is unused */
+			dev->resource[i].start = 0;
+			dev->resource[i].end = 0;
+			dev->resource[i].flags = 0;
+			continue;
+		}
+
+		/*
+		 * We've read the base address register back after
+		 * writing all ones and so now we must decode it.
+		 */
+
+		if (base & PCI_BASE_ADDRESS_SPACE_IO)
+		{
+			/*
+			 * I/O space base address register.
+			 */
+
+			cmd |= PCI_COMMAND_IO;
+
+			base &= PCI_BASE_ADDRESS_IO_MASK;
+			mask = (~base << 1) | 0x1;
+			size = (mask & base) & 0xffffffff;
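+
+			/*
+			 * Worked example (illustrative): a 256-byte I/O BAR
+			 * reads back 0xffffff01 after the all-ones write;
+			 * masking off the low control bits leaves
+			 * base = 0xffffff00, so mask = (~base << 1) | 1
+			 * is 0x1ff and size = mask & base = 0x100.  The
+			 * memory branch below decodes its BARs the same way.
+			 */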
+
+			/*
+			 * Align to multiple of size of minimum base.
+			 */
+
+			alignto = max_t(unsigned int, 0x040, size);
+			base = ALIGN(io_base, alignto);
+			io_base = base + size;
+			pci_write_config_dword(dev, reg, base | PCI_BASE_ADDRESS_SPACE_IO);
+
+			dev->resource[i].start = base;
+			dev->resource[i].end = dev->resource[i].start + size - 1;
+			dev->resource[i].flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
+
+			DBG_DEVS(("layout_dev: IO address: %lX\n", base));
+		}
+		else
+		{
+			unsigned int type;
+
+			/*
+			 * Memory space base address register.
+			 */
+
+			cmd |= PCI_COMMAND_MEMORY;
+			type = base & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+			base &= PCI_BASE_ADDRESS_MEM_MASK;
+			mask = (~base << 1) | 0x1;
+			size = (mask & base) & 0xffffffff;
+			switch (type)
+			{
+			case PCI_BASE_ADDRESS_MEM_TYPE_32:
+			case PCI_BASE_ADDRESS_MEM_TYPE_64:
+				break;
+
+			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+				printk("bios32 WARNING: slot %d, function %d "
+				       "requests memory below 1MB---don't "
+				       "know how to do that.\n",
+				       PCI_SLOT(dev->devfn),
+				       PCI_FUNC(dev->devfn));
+				continue;
+			}
+
+			/*
+			 * Align to multiple of size of minimum base.
+			 */
+
+			alignto = max_t(unsigned int, 0x1000, size);
+			base = ALIGN(mem_base, alignto);
+			mem_base = base + size;
+			pci_write_config_dword(dev, reg, base);
+
+			dev->resource[i].start = base;
+			dev->resource[i].end = dev->resource[i].start + size - 1;
+			dev->resource[i].flags = IORESOURCE_MEM;
+
+			if (type == PCI_BASE_ADDRESS_MEM_TYPE_64)
+			{
+				/*
+				 * 64-bit address, set the highest 32 bits
+				 * to zero.
+				 */
+
+				reg += 4;
+				pci_write_config_dword(dev, reg, 0);
+
+				i++;
+				dev->resource[i].start = 0;
+				dev->resource[i].end = 0;
+				dev->resource[i].flags = 0;
+			}
+		}
+	}
+
+	/*
+	 * Enable device:
+	 */
+
+	if (dev->class >> 8 == PCI_CLASS_NOT_DEFINED ||
+	    dev->class >> 8 == PCI_CLASS_NOT_DEFINED_VGA ||
+	    dev->class >> 8 == PCI_CLASS_DISPLAY_VGA ||
+	    dev->class >> 8 == PCI_CLASS_DISPLAY_XGA)
+	{
+		/*
+		 * All of these (may) have I/O scattered all around
+		 * and may not use i/o-base address registers at all.
+		 * So we just have to always enable I/O to these
+		 * devices.
+		 */
+		cmd |= PCI_COMMAND_IO;
+	}
+
+	pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MASTER);
+
+	pci_write_config_byte(dev, PCI_LATENCY_TIMER, (disable_pci_burst) ? 0 : 32);
+
+	if (bus_info != NULL)
+		bus_info->conf_device(dev);	/* Machine dependent configuration. */
+
+	DBG_DEVS(("layout_dev: bus %d  slot 0x%x  VID 0x%x  DID 0x%x  class 0x%x\n",
+		  dev->bus->number, PCI_SLOT(dev->devfn), dev->vendor, dev->device, dev->class));
+}
+
+/*
+ * static void layout_bus(struct pci_bus *bus)
+ *
+ * Layout memory and I/O for all devices on the given bus.
+ *
+ * Parameters:
+ *
+ * bus	- bus.
+ */
+
+static void __init layout_bus(struct pci_bus *bus)
+{
+	unsigned int bio, bmem;
+	struct pci_dev *dev;
+
+	DBG_DEVS(("layout_bus: starting bus %d\n", bus->number));
+
+	if (!bus->devices && !bus->children)
+		return;
+
+	/*
+	 * Align the current bases on appropriate boundaries (4K for
+	 * IO and 1MB for memory).
+	 */
+
+	bio = io_base = ALIGN(io_base, 4*KB);
+	bmem = mem_base = ALIGN(mem_base, 1*MB);
+
+	/*
+	 * PCI devices might have been set up by a PCI BIOS emulation
+	 * running under TOS. In these cases there is a
+	 * window during which two devices may have an overlapping
+	 * address range. To avoid this causing trouble, we first
+	 * turn off the I/O and memory address decoders for all PCI
+	 * devices.  They'll be re-enabled only once all address
+	 * decoders are programmed consistently.
+	 */
+
+	DBG_DEVS(("layout_bus: disable_dev for bus %d\n", bus->number));
+
+	for (dev = bus->devices; dev; dev = dev->sibling)
+	{
+		if ((dev->class >> 16 != PCI_BASE_CLASS_BRIDGE) ||
+		    (dev->class >> 8 == PCI_CLASS_BRIDGE_PCMCIA))
+			disable_dev(dev);
+	}
+
+	/*
+	 * Allocate space to each device:
+	 */
+
+	DBG_DEVS(("layout_bus: starting bus %d devices\n", bus->number));
+
+	for (dev = bus->devices; dev; dev = dev->sibling)
+	{
+		if ((dev->class >> 16 != PCI_BASE_CLASS_BRIDGE) ||
+		    (dev->class >> 8 == PCI_CLASS_BRIDGE_PCMCIA))
+			layout_dev(dev);
+	}
+
+	DBG_DEVS(("layout_bus: bus %d finished\n", bus->number));
+}
+
+/*
+ * static void pcibios_fixup(void)
+ *
+ * Layout memory and I/O of all devices on the PCI bus if 'pci_modify' is
+ * true. This might be necessary because not every m68k machine with a PCI
+ * bus has a PCI BIOS. This function should be called right after
+ * pci_scan_bus() in pcibios_init().
+ */
+
+static void __init pcibios_fixup(void)
+{
+	if (pci_modify)
+	{
+		/*
+		 * Set base addresses for allocation of I/O and memory space.
+		 */
+
+		io_base = bus_info->io_space.start + IO_ALLOC_OFFSET;
+		mem_base = bus_info->mem_space.start + MEM_ALLOC_OFFSET;
+
+		/*
+		 * Scan the tree, allocating PCI memory and I/O space.
+		 */
+
+		layout_bus(pci_bus_b(pci_root.next));
+	}
+
+	/*
+	 * Fix interrupt assignments, etc.
+	 */
+
+	bus_info->fixup(pci_modify);
+}
+
+/*
+ * static void pcibios_claim_resources(struct pci_bus *bus)
+ *
+ * Claim all resources that are assigned to devices on the given bus.
+ *
+ * Parameters:
+ *
+ * bus	- bus.
+ */
+
+static void __init pcibios_claim_resources(struct pci_bus *bus)
+{
+	struct pci_dev *dev;
+	int i;
+
+	while (bus)
+	{
+		for (dev = bus->devices; (dev != NULL); dev = dev->sibling)
+		{
+			for (i = 0; i < PCI_NUM_RESOURCES; i++)
+			{
+				struct resource *r = &dev->resource[i];
+				struct resource *pr;
+				struct pci_bus_info *bus_info = (struct pci_bus_info *) dev->sysdata;
+
+				if ((r->start == 0) || (r->parent != NULL))
+					continue;
+#if 1
+				if (r->flags & IORESOURCE_IO)
+					pr = &bus_info->io_space;
+				else
+					pr = &bus_info->mem_space;
+#else
+				if (r->flags & IORESOURCE_IO)
+					pr = &ioport_resource;
+				else
+					pr = &iomem_resource;
+#endif
+				if (request_resource(pr, r) < 0)
+				{
+					printk(KERN_ERR "PCI: Address space collision on region %d of device %s\n", i, dev->name);
+				}
+			}
+		}
+
+		if (bus->children)
+			pcibios_claim_resources(bus->children);
+
+		bus = bus->next;
+	}
+}
+
+/*
+ * int pcibios_assign_resource(struct pci_dev *dev, int i)
+ *
+ * Assign a new address to a PCI resource.
+ *
+ * Parameters:
+ *
+ * dev	- device.
+ * i	- resource.
+ *
+ * Result: 0 if successful.
+ */
+
+int __init pcibios_assign_resource(struct pci_dev *dev, int i)
+{
+	struct resource *r = &dev->resource[i];
+	struct resource *pr = pci_find_parent_resource(dev, r);
+	unsigned long size = r->end + 1;
+
+	if (!pr)
+		return -EINVAL;
+
+	if (r->flags & IORESOURCE_IO)
+	{
+		if (size > 0x100)
+			return -EFBIG;
+
+		if (allocate_resource(pr, r, size, bus_info->io_space.start +
+				      IO_ALLOC_OFFSET,  bus_info->io_space.end, 1024))
+			return -EBUSY;
+	}
+	else
+	{
+		if (allocate_resource(pr, r, size, bus_info->mem_space.start +
+				      MEM_ALLOC_OFFSET, bus_info->mem_space.end, size))
+			return -EBUSY;
+	}
+
+	if (i < 6)
+		pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, r->start);
+
+	return 0;
+}
+
+void __init pcibios_fixup_bus(struct pci_bus *bus)
+{
+	struct pci_dev *dev;
+	void *sysdata;
+
+	sysdata = (bus->parent) ? bus->parent->sysdata : bus->sysdata;
+
+	for (dev = bus->devices; (dev != NULL); dev = dev->sibling)
+		dev->sysdata = sysdata;
+}
+
+void __init pcibios_init(void)
+{
+	printk("Linux/m68k PCI BIOS32 revision %x.%02x\n", MAJOR_REV, MINOR_REV);
+
+	bus_info = NULL;
+#ifdef CONFIG_HADES
+	if (MACH_IS_HADES)
+		bus_info = init_hades_pci();
+#endif
+	if (bus_info != NULL)
+	{
+		printk("PCI: Probing PCI hardware\n");
+		pci_scan_bus(0, bus_info->m68k_pci_ops, bus_info);
+		pcibios_fixup();
+		pcibios_claim_resources(pci_root);
+	}
+	else
+		printk("PCI: No PCI bus detected\n");
+}
+
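+/*
+ * Kernel command line options; these are presumably handed to us through
+ * the generic "pci=" boot option parsing, e.g. booting with "pci=nomodify"
+ * or "pci=skipvga".  Any string we do not recognise is passed back
+ * unchanged.
+ */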
+char * __init pcibios_setup(char *str)
+{
+	if (!strcmp(str, "nomodify"))
+	{
+		pci_modify = 0;
+		return NULL;
+	}
+	else if (!strcmp(str, "skipvga"))
+	{
+		skip_vga = 1;
+		return NULL;
+	}
+	else if (!strcmp(str, "noburst"))
+	{
+		disable_pci_burst = 1;
+		return NULL;
+	}
+
+	return str;
+}
+#endif /* CONFIG_PCI */
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
new file mode 100644
index 0000000..e964015
--- /dev/null
+++ b/arch/m68k/kernel/entry.S
@@ -0,0 +1,712 @@
+/* -*- mode: asm -*-
+ *
+ *  linux/arch/m68k/kernel/entry.S
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file README.legal in the main directory of this archive
+ * for more details.
+ *
+ * Linux/m68k support by Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
+ *
+ */
+
+/*
+ * entry.S  contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ */
+
+/*
+ * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
+ *               all pointers that used to be 'current' are now entry
+ *               number 0 in the 'current_set' list.
+ *
+ *  6/05/00 RZ:	 added writeback completion after return from sighandler
+ *		 for 68040
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/entry.h>
+#include <asm/errno.h>
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/traps.h>
+#include <asm/unistd.h>
+
+#include <asm/offsets.h>
+
+.globl system_call, buserr, trap
+.globl resume, ret_from_exception
+.globl ret_from_signal
+.globl inthandler, sys_call_table
+.globl sys_fork, sys_clone, sys_vfork
+.globl ret_from_interrupt, bad_interrupt
+
+.text
+ENTRY(buserr)
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@-		| stack frame pointer argument
+	bsrl	buserr_c
+	addql	#4,%sp
+	jra	ret_from_exception
+
+ENTRY(trap)
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@-		| stack frame pointer argument
+	bsrl	trap_c
+	addql	#4,%sp
+	jra	ret_from_exception
+
+	| After a fork we jump here directly from resume,
+	| so that %d1 contains the previous task
+	| schedule_tail now used regardless of CONFIG_SMP
+ENTRY(ret_from_fork)
+	movel	%d1,%sp@-
+	jsr	schedule_tail
+	addql	#4,%sp
+	jra	ret_from_exception
+
+badsys:
+	movel	#-ENOSYS,%sp@(PT_D0)
+	jra	ret_from_exception
+
+do_trace:
+	movel	#-ENOSYS,%sp@(PT_D0)	| needed for strace
+	subql	#4,%sp
+	SAVE_SWITCH_STACK
+	jbsr	syscall_trace
+	RESTORE_SWITCH_STACK
+	addql	#4,%sp
+	movel	%sp@(PT_ORIG_D0),%d1
+	movel	#-ENOSYS,%d0
+	cmpl	#NR_syscalls,%d1
+	jcc	1f
+	jbsr	@(sys_call_table,%d1:l:4)@(0)
+1:	movel	%d0,%sp@(PT_D0)		| save the return value
+	subql	#4,%sp			| dummy return address
+	SAVE_SWITCH_STACK
+	jbsr	syscall_trace
+
+ret_from_signal:
+	RESTORE_SWITCH_STACK
+	addql	#4,%sp
+/* on 68040 complete pending writebacks if any */
+#ifdef CONFIG_M68040
+	bfextu	%sp@(PT_VECTOR){#0,#4},%d0
+	subql	#7,%d0				| bus error frame ?
+	jbne	1f
+	movel	%sp,%sp@-
+	jbsr	berr_040cleanup
+	addql	#4,%sp
+1:
+#endif
+	jra	ret_from_exception
+
+ENTRY(system_call)
+	SAVE_ALL_SYS
+
+	GET_CURRENT(%d1)
+	| save top of frame
+	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
+
+	tstb	%curptr@(TASK_SYSCALL_TRACE)
+	jne	do_trace
+	cmpl	#NR_syscalls,%d0
+	jcc	badsys
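+	| the jbsr below uses a memory-indirect addressing mode: the target
+	| address is fetched from sys_call_table + 4*%d0, i.e. the handler
+	| for this syscall number; roughly "sys_call_table[d0]();" in C
+	| (illustrative C, the table itself is defined at the end of this file)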
+	jbsr	@(sys_call_table,%d0:l:4)@(0)
+	movel	%d0,%sp@(PT_D0)		| save the return value
+
+	|oriw	#0x0700,%sr
+	movel	%curptr@(TASK_WORK),%d0
+	jne	syscall_exit_work
+1:	RESTORE_ALL
+
+syscall_exit_work:
+	btst	#5,%sp@(PT_SR)		| check if returning to kernel
+	bnes	1b			| if so, skip resched, signals
+	tstw	%d0
+	jeq	do_signal_return
+	tstb	%d0
+	jne	do_delayed_trace
+
+	pea	resume_userspace
+	jmp	schedule
+
+ret_from_exception:
+	btst	#5,%sp@(PT_SR)		| check if returning to kernel
+	bnes	1f			| if so, skip resched, signals
+	| only allow interrupts when we are really the last one on the
+	| kernel stack, otherwise stack overflow can occur during
+	| heavy interrupt load
+	andw	#ALLOWINT,%sr
+
+resume_userspace:
+	movel	%curptr@(TASK_WORK),%d0
+	lsrl	#8,%d0
+	jne	exit_work
+1:	RESTORE_ALL
+
+exit_work:
+	| save top of frame
+	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
+	tstb	%d0
+	jeq	do_signal_return
+
+	pea	resume_userspace
+	jmp	schedule
+
+do_signal_return:
+	|andw	#ALLOWINT,%sr
+	subql	#4,%sp			| dummy return address
+	SAVE_SWITCH_STACK
+	pea	%sp@(SWITCH_STACK_SIZE)
+	clrl	%sp@-
+	bsrl	do_signal
+	addql	#8,%sp
+	RESTORE_SWITCH_STACK
+	addql	#4,%sp
+	jbra	resume_userspace
+
+do_delayed_trace:
+	bclr	#7,%sp@(PT_SR)		| clear trace bit in SR
+	pea	1			| send SIGTRAP
+	movel	%curptr,%sp@-
+	pea	LSIGTRAP
+	jbsr	send_sig
+	addql	#8,%sp
+	addql	#4,%sp
+	jbra	resume_userspace
+
+
+#if 0
+#ifdef CONFIG_AMIGA
+ami_inthandler:
+	addql	#1,irq_stat+CPUSTAT_LOCAL_IRQ_COUNT
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+
+	bfextu	%sp@(PT_VECTOR){#4,#12},%d0
+	movel	%d0,%a0
+	addql	#1,%a0@(kstat+STAT_IRQ-VECOFF(VEC_SPUR))
+	movel	%a0@(autoirq_list-VECOFF(VEC_SPUR)),%a0
+
+| amiga vector int handler get the req mask instead of irq vector
+	lea	CUSTOMBASE,%a1
+	movew	%a1@(C_INTREQR),%d0
+	andw	%a1@(C_INTENAR),%d0
+
+| prepare stack (push frame pointer, dev_id & req mask)
+	pea	%sp@
+	movel	%a0@(IRQ_DEVID),%sp@-
+	movel	%d0,%sp@-
+	pea	%pc@(ret_from_interrupt:w)
+	jbra	@(IRQ_HANDLER,%a0)@(0)
+
+ENTRY(nmi_handler)
+	rte
+#endif
+#endif
+
+/*
+** This is the main interrupt handler, responsible for calling process_int()
+*/
+inthandler:
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	addqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+2)
+					|  put exception # in d0
+	bfextu %sp@(PT_VECTOR){#4,#10},%d0
+
+	movel	%sp,%sp@-
+	movel	%d0,%sp@-		|  put vector # on stack
+#if defined(MACH_Q40_ONLY) && defined(CONFIG_BLK_DEV_FD)
+	btstb	#4,0xff000000		| Q40 floppy needs very special treatment ...
+	jbeq	1f
+	btstb	#3,0xff000004
+	jbeq	1f
+	jbsr	floppy_hardint
+	jbra	3f
+1:
+#endif
+	jbsr	process_int		|  process the IRQ
+3:	addql	#8,%sp			|  pop parameters off stack
+
+ret_from_interrupt:
+	subqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+2)
+	jeq	1f
+2:
+	RESTORE_ALL
+1:
+	moveq	#(~ALLOWINT>>8)&0xff,%d0
+	andb	%sp@(PT_SR),%d0
+	jne	2b
+
+	/* check if we need to do software interrupts */
+	tstl	irq_stat+CPUSTAT_SOFTIRQ_PENDING
+	jeq	ret_from_exception
+	pea	ret_from_exception
+	jra	do_softirq
+
+
+/* Handler for uninitialized and spurious interrupts */
+
+bad_interrupt:
+	addql	#1,num_spurious
+	rte
+
+ENTRY(sys_fork)
+	SAVE_SWITCH_STACK
+	pea	%sp@(SWITCH_STACK_SIZE)
+	jbsr	m68k_fork
+	addql	#4,%sp
+	RESTORE_SWITCH_STACK
+	rts
+
+ENTRY(sys_clone)
+	SAVE_SWITCH_STACK
+	pea	%sp@(SWITCH_STACK_SIZE)
+	jbsr	m68k_clone
+	addql	#4,%sp
+	RESTORE_SWITCH_STACK
+	rts
+
+ENTRY(sys_vfork)
+	SAVE_SWITCH_STACK
+	pea	%sp@(SWITCH_STACK_SIZE)
+	jbsr	m68k_vfork
+	addql	#4,%sp
+	RESTORE_SWITCH_STACK
+	rts
+
+ENTRY(sys_sigsuspend)
+	SAVE_SWITCH_STACK
+	pea	%sp@(SWITCH_STACK_SIZE)
+	jbsr	do_sigsuspend
+	addql	#4,%sp
+	RESTORE_SWITCH_STACK
+	rts
+
+ENTRY(sys_rt_sigsuspend)
+	SAVE_SWITCH_STACK
+	pea	%sp@(SWITCH_STACK_SIZE)
+	jbsr	do_rt_sigsuspend
+	addql	#4,%sp
+	RESTORE_SWITCH_STACK
+	rts
+
+ENTRY(sys_sigreturn)
+	SAVE_SWITCH_STACK
+	jbsr	do_sigreturn
+	RESTORE_SWITCH_STACK
+	rts
+
+ENTRY(sys_rt_sigreturn)
+	SAVE_SWITCH_STACK
+	jbsr	do_rt_sigreturn
+	RESTORE_SWITCH_STACK
+	rts
+
+resume:
+	/*
+	 * Beware - when entering resume, prev (the current task) is
+	 * in a0, next (the new task) is in a1, so don't change these
+	 * registers until their contents are no longer needed.
+	 */
+
+	/* save sr */
+	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)
+
+	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
+	movec	%sfc,%d0
+	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)
+
+	/* save usp */
+	/* it is better to use a movel here instead of a movew 8*) */
+	movec	%usp,%d0
+	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)
+
+	/* save non-scratch registers on stack */
+	SAVE_SWITCH_STACK
+
+	/* save current kernel stack pointer */
+	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)
+
+	/* save floating point context */
+#ifndef CONFIG_M68KFPU_EMU_ONLY
+#ifdef CONFIG_M68KFPU_EMU
+	tstl	m68k_fputype
+	jeq	3f
+#endif
+	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)
+
+#if defined(CONFIG_M68060)
+#if !defined(CPU_M68060_ONLY)
+	btst	#3,m68k_cputype+3
+	beqs	1f
+#endif
+	/* The 060 FPU keeps status in bits 15-8 of the first longword */
+	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
+	jeq	3f
+#if !defined(CPU_M68060_ONLY)
+	jra	2f
+#endif
+#endif /* CONFIG_M68060 */
+#if !defined(CPU_M68060_ONLY)
+1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
+	jeq	3f
+#endif
+2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
+	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
+3:
+#endif	/* CONFIG_M68KFPU_EMU_ONLY */
+	/* Return previous task in %d1 */
+	movel	%curptr,%d1
+
+	/* switch to new task (a1 contains new task) */
+	movel	%a1,%curptr
+
+	/* restore floating point context */
+#ifndef CONFIG_M68KFPU_EMU_ONLY
+#ifdef CONFIG_M68KFPU_EMU
+	tstl	m68k_fputype
+	jeq	4f
+#endif
+#if defined(CONFIG_M68060)
+#if !defined(CPU_M68060_ONLY)
+	btst	#3,m68k_cputype+3
+	beqs	1f
+#endif
+	/* The 060 FPU keeps status in bits 15-8 of the first longword */
+	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
+	jeq	3f
+#if !defined(CPU_M68060_ONLY)
+	jra	2f
+#endif
+#endif /* CONFIG_M68060 */
+#if !defined(CPU_M68060_ONLY)
+1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
+	jeq	3f
+#endif
+2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
+	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
+3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
+4:
+#endif	/* CONFIG_M68KFPU_EMU_ONLY */
+
+	/* restore the kernel stack pointer */
+	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp
+
+	/* restore non-scratch registers */
+	RESTORE_SWITCH_STACK
+
+	/* restore user stack pointer */
+	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
+	movel	%a0,%usp
+
+	/* restore fs (sfc,%dfc) */
+	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
+	movec	%a0,%sfc
+	movec	%a0,%dfc
+
+	/* restore status register */
+	movew	%a1@(TASK_THREAD+THREAD_SR),%sr
+
+	rts
+
+.data
+ALIGN
+sys_call_table:
+	.long sys_ni_syscall	/* 0  -  old "setup()" system call*/
+	.long sys_exit
+	.long sys_fork
+	.long sys_read
+	.long sys_write
+	.long sys_open		/* 5 */
+	.long sys_close
+	.long sys_waitpid
+	.long sys_creat
+	.long sys_link
+	.long sys_unlink	/* 10 */
+	.long sys_execve
+	.long sys_chdir
+	.long sys_time
+	.long sys_mknod
+	.long sys_chmod		/* 15 */
+	.long sys_chown16
+	.long sys_ni_syscall				/* old break syscall holder */
+	.long sys_stat
+	.long sys_lseek
+	.long sys_getpid	/* 20 */
+	.long sys_mount
+	.long sys_oldumount
+	.long sys_setuid16
+	.long sys_getuid16
+	.long sys_stime		/* 25 */
+	.long sys_ptrace
+	.long sys_alarm
+	.long sys_fstat
+	.long sys_pause
+	.long sys_utime		/* 30 */
+	.long sys_ni_syscall				/* old stty syscall holder */
+	.long sys_ni_syscall				/* old gtty syscall holder */
+	.long sys_access
+	.long sys_nice
+	.long sys_ni_syscall	/* 35 */	/* old ftime syscall holder */
+	.long sys_sync
+	.long sys_kill
+	.long sys_rename
+	.long sys_mkdir
+	.long sys_rmdir		/* 40 */
+	.long sys_dup
+	.long sys_pipe
+	.long sys_times
+	.long sys_ni_syscall				/* old prof syscall holder */
+	.long sys_brk		/* 45 */
+	.long sys_setgid16
+	.long sys_getgid16
+	.long sys_signal
+	.long sys_geteuid16
+	.long sys_getegid16	/* 50 */
+	.long sys_acct
+	.long sys_umount				/* recycled never used phys() */
+	.long sys_ni_syscall				/* old lock syscall holder */
+	.long sys_ioctl
+	.long sys_fcntl		/* 55 */
+	.long sys_ni_syscall				/* old mpx syscall holder */
+	.long sys_setpgid
+	.long sys_ni_syscall				/* old ulimit syscall holder */
+	.long sys_ni_syscall
+	.long sys_umask		/* 60 */
+	.long sys_chroot
+	.long sys_ustat
+	.long sys_dup2
+	.long sys_getppid
+	.long sys_getpgrp	/* 65 */
+	.long sys_setsid
+	.long sys_sigaction
+	.long sys_sgetmask
+	.long sys_ssetmask
+	.long sys_setreuid16	/* 70 */
+	.long sys_setregid16
+	.long sys_sigsuspend
+	.long sys_sigpending
+	.long sys_sethostname
+	.long sys_setrlimit	/* 75 */
+	.long sys_old_getrlimit
+	.long sys_getrusage
+	.long sys_gettimeofday
+	.long sys_settimeofday
+	.long sys_getgroups16	/* 80 */
+	.long sys_setgroups16
+	.long old_select
+	.long sys_symlink
+	.long sys_lstat
+	.long sys_readlink	/* 85 */
+	.long sys_uselib
+	.long sys_swapon
+	.long sys_reboot
+	.long old_readdir
+	.long old_mmap		/* 90 */
+	.long sys_munmap
+	.long sys_truncate
+	.long sys_ftruncate
+	.long sys_fchmod
+	.long sys_fchown16	/* 95 */
+	.long sys_getpriority
+	.long sys_setpriority
+	.long sys_ni_syscall				/* old profil syscall holder */
+	.long sys_statfs
+	.long sys_fstatfs	/* 100 */
+	.long sys_ni_syscall				/* ioperm for i386 */
+	.long sys_socketcall
+	.long sys_syslog
+	.long sys_setitimer
+	.long sys_getitimer	/* 105 */
+	.long sys_newstat
+	.long sys_newlstat
+	.long sys_newfstat
+	.long sys_ni_syscall
+	.long sys_ni_syscall	/* 110 */	/* iopl for i386 */
+	.long sys_vhangup
+	.long sys_ni_syscall				/* obsolete idle() syscall */
+	.long sys_ni_syscall				/* vm86old for i386 */
+	.long sys_wait4
+	.long sys_swapoff	/* 115 */
+	.long sys_sysinfo
+	.long sys_ipc
+	.long sys_fsync
+	.long sys_sigreturn
+	.long sys_clone		/* 120 */
+	.long sys_setdomainname
+	.long sys_newuname
+	.long sys_cacheflush				/* modify_ldt for i386 */
+	.long sys_adjtimex
+	.long sys_mprotect	/* 125 */
+	.long sys_sigprocmask
+	.long sys_ni_syscall		/* old "create_module" */
+	.long sys_init_module
+	.long sys_delete_module
+	.long sys_ni_syscall	/* 130 - old "get_kernel_syms" */
+	.long sys_quotactl
+	.long sys_getpgid
+	.long sys_fchdir
+	.long sys_bdflush
+	.long sys_sysfs		/* 135 */
+	.long sys_personality
+	.long sys_ni_syscall				/* for afs_syscall */
+	.long sys_setfsuid16
+	.long sys_setfsgid16
+	.long sys_llseek	/* 140 */
+	.long sys_getdents
+	.long sys_select
+	.long sys_flock
+	.long sys_msync
+	.long sys_readv		/* 145 */
+	.long sys_writev
+	.long sys_getsid
+	.long sys_fdatasync
+	.long sys_sysctl
+	.long sys_mlock		/* 150 */
+	.long sys_munlock
+	.long sys_mlockall
+	.long sys_munlockall
+	.long sys_sched_setparam
+	.long sys_sched_getparam	/* 155 */
+	.long sys_sched_setscheduler
+	.long sys_sched_getscheduler
+	.long sys_sched_yield
+	.long sys_sched_get_priority_max
+	.long sys_sched_get_priority_min  /* 160 */
+	.long sys_sched_rr_get_interval
+	.long sys_nanosleep
+	.long sys_mremap
+	.long sys_setresuid16
+	.long sys_getresuid16	/* 165 */
+	.long sys_getpagesize
+	.long sys_ni_syscall		/* old sys_query_module */
+	.long sys_poll
+	.long sys_nfsservctl
+	.long sys_setresgid16	/* 170 */
+	.long sys_getresgid16
+	.long sys_prctl
+	.long sys_rt_sigreturn
+	.long sys_rt_sigaction
+	.long sys_rt_sigprocmask	/* 175 */
+	.long sys_rt_sigpending
+	.long sys_rt_sigtimedwait
+	.long sys_rt_sigqueueinfo
+	.long sys_rt_sigsuspend
+	.long sys_pread64	/* 180 */
+	.long sys_pwrite64
+	.long sys_lchown16
+	.long sys_getcwd
+	.long sys_capget
+	.long sys_capset	/* 185 */
+	.long sys_sigaltstack
+	.long sys_sendfile
+	.long sys_ni_syscall				/* streams1 */
+	.long sys_ni_syscall				/* streams2 */
+	.long sys_vfork		/* 190 */
+	.long sys_getrlimit
+	.long sys_mmap2
+	.long sys_truncate64
+	.long sys_ftruncate64
+	.long sys_stat64	/* 195 */
+	.long sys_lstat64
+	.long sys_fstat64
+	.long sys_chown
+	.long sys_getuid
+	.long sys_getgid	/* 200 */
+	.long sys_geteuid
+	.long sys_getegid
+	.long sys_setreuid
+	.long sys_setregid
+	.long sys_getgroups	/* 205 */
+	.long sys_setgroups
+	.long sys_fchown
+	.long sys_setresuid
+	.long sys_getresuid
+	.long sys_setresgid	/* 210 */
+	.long sys_getresgid
+	.long sys_lchown
+	.long sys_setuid
+	.long sys_setgid
+	.long sys_setfsuid	/* 215 */
+	.long sys_setfsgid
+	.long sys_pivot_root
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_getdents64	/* 220 */
+	.long sys_gettid
+	.long sys_tkill
+	.long sys_setxattr
+	.long sys_lsetxattr
+	.long sys_fsetxattr	/* 225 */
+	.long sys_getxattr
+	.long sys_lgetxattr
+	.long sys_fgetxattr
+	.long sys_listxattr
+	.long sys_llistxattr	/* 230 */
+	.long sys_flistxattr
+	.long sys_removexattr
+	.long sys_lremovexattr
+	.long sys_fremovexattr
+	.long sys_futex		/* 235 */
+	.long sys_sendfile64
+	.long sys_mincore
+	.long sys_madvise
+	.long sys_fcntl64
+	.long sys_readahead	/* 240 */
+	.long sys_io_setup
+	.long sys_io_destroy
+	.long sys_io_getevents
+	.long sys_io_submit
+	.long sys_io_cancel	/* 245 */
+	.long sys_fadvise64
+	.long sys_exit_group
+	.long sys_lookup_dcookie
+	.long sys_epoll_create
+	.long sys_epoll_ctl	/* 250 */
+	.long sys_epoll_wait
+	.long sys_remap_file_pages
+	.long sys_set_tid_address
+	.long sys_timer_create
+	.long sys_timer_settime	/* 255 */
+	.long sys_timer_gettime
+	.long sys_timer_getoverrun
+	.long sys_timer_delete
+	.long sys_clock_settime
+	.long sys_clock_gettime	/* 260 */
+	.long sys_clock_getres
+	.long sys_clock_nanosleep
+	.long sys_statfs64
+	.long sys_fstatfs64
+	.long sys_tgkill	/* 265 */
+	.long sys_utimes
+	.long sys_fadvise64_64
+	.long sys_mbind	
+	.long sys_get_mempolicy
+	.long sys_set_mempolicy	/* 270 */
+	.long sys_mq_open
+	.long sys_mq_unlink
+	.long sys_mq_timedsend
+	.long sys_mq_timedreceive
+	.long sys_mq_notify	/* 275 */
+	.long sys_mq_getsetattr
+	.long sys_waitid
+	.long sys_ni_syscall	/* for sys_vserver */
+	.long sys_add_key
+	.long sys_request_key	/* 280 */
+	.long sys_keyctl
+
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
new file mode 100644
index 0000000..7cd6de1
--- /dev/null
+++ b/arch/m68k/kernel/head.S
@@ -0,0 +1,3940 @@
+/* -*- mode: asm -*-
+**
+** head.S -- This file contains the initial boot code for the
+**	     Linux/68k kernel.
+**
+** Copyright 1993 by Hamish Macdonald
+**
+** 68040 fixes by Michael Rausch
+** 68060 fixes by Roman Hodek
+** MMU cleanup by Randy Thelen
+** Final MMU cleanup by Roman Zippel
+**
+** Atari support by Andreas Schwab, using ideas of Robert de Vries
+** and Bjoern Brauel
+** VME Support by Richard Hirst
+**
+** 94/11/14 Andreas Schwab: put kernel at PAGESIZE
+** 94/11/18 Andreas Schwab: remove identity mapping of STRAM for Atari
+** ++ Bjoern & Roman: ATARI-68040 support for the Medusa
+** 95/11/18 Richard Hirst: Added MVME166 support
+** 96/04/26 Guenther Kelleter: fixed identity mapping for Falcon with
+**			      Magnum- and FX-alternate ram
+** 98/04/25 Phil Blundell: added HP300 support
+** 1998/08/30 David Kilzer: Added support for font_desc structures
+**            for linux-2.1.115
+** 9/02/11  Richard Zidlicky: added Q40 support (initial version 99/01/01)
+** 2004/05/13 Kars de Jong: Finalised HP300 support
+**
+** This file is subject to the terms and conditions of the GNU General Public
+** License. See the file README.legal in the main directory of this archive
+** for more details.
+**
+*/
+
+/*
+ * Linux startup code.
+ *
+ * At this point, the boot loader has:
+ * Disabled interrupts
+ * Disabled caches
+ * Put us in supervisor state.
+ *
+ * The kernel setup code takes the following steps:
+ * .  Raise interrupt level
+ * .  Set up initial kernel memory mapping.
+ *    .  This sets up a mapping of the 4M of memory the kernel is located in.
+ *    .  It also does a mapping of any initial machine specific areas.
+ * .  Enable the MMU
+ * .  Enable cache memories
+ * .  Jump to kernel startup
+ *
+ * Much of the file restructuring was to accomplish:
+ * 1) Remove register dependency throughout the file.
+ * 2) Increase use of subroutines to perform functions
+ * 3) Increase readability of the code
+ *
+ * Of course, readability is a subjective issue, so it will never be
+ * argued that that goal was accomplished.  It was merely a goal.
+ * A key way to help make code more readable is to give good
+ * documentation.  So, the first thing you will find is exhaustive
+ * write-ups on the structure of the file, and the features of the
+ * functional subroutines.
+ *
+ * General Structure:
+ * ------------------
+ *	Without a doubt the single largest chunk of head.S is spent
+ * mapping the kernel and I/O physical space into the logical range
+ * for the kernel.
+ *	There are new subroutines and data structures to make MMU
+ * support cleaner and easier to understand.
+ *	First, you will find a routine called "mmu_map" which maps
+ * a logical to a physical region for some length given a cache
+ * type on behalf of the caller.  This routine makes writing the
+ * actual per-machine specific code very simple.
+ *	A central part of the code, but not a subroutine in itself,
+ * is the mmu_init code which is broken down into mapping the kernel
+ * (the same for all machines) and mapping machine-specific I/O
+ * regions.
+ *	Also, there will be a description of engaging the MMU and
+ * caches.
+ *	You will notice that there is a chunk of code which
+ * can emit the entire MMU mapping of the machine.  This is present
+ * only in debug modes and can be very helpful.
+ *	Further, there is a new console driver in head.S that is
+ * also only engaged in debug mode.  Currently, it's only supported
+ * on the Macintosh class of machines.  However, it is hoped that
+ * others will plug in support for specific machines.
+ *
+ * ######################################################################
+ *
+ * mmu_map
+ * -------
+ *	mmu_map was written for two key reasons.  First, it was clear
+ * that it was very difficult to read the previous code for mapping
+ * regions of memory.  Second, the Macintosh required such extensive
+ * memory allocations that it didn't make sense to propagate the
+ * existing code any further.
+ *	mmu_map requires some parameters:
+ *
+ *	mmu_map (logical, physical, length, cache_type)
+ *
+ *	While this essentially describes the function in the abstract, you'll
+ * find a more in-depth description of the other parameters at the implementation site.
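+ *
+ *	As one concrete illustration, the first real call further down in
+ * this file maps the first 4MB of kernel code and data:
+ *
+ *	mmu_map	#PAGE_OFFSET,%pc@(L(phys_kernel_start)),#4*1024*1024,
+ *		%pc@(m68k_supervisor_cachemode)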
+ *
+ * mmu_get_root_table_entry
+ * ------------------------
+ * mmu_get_ptr_table_entry
+ * -----------------------
+ * mmu_get_page_table_entry
+ * ------------------------
+ *
+ *	These routines are used by other mmu routines to get a pointer into
+ * a table; if necessary, a new table is allocated. They work basically
+ * like pmd_alloc() and pte_alloc() in <asm/pgtable.h>. The root table of
+ * course only needs to be allocated once, in mmu_get_root_table_entry,
+ * so some mmu specific initialization is also done there. The second page
+ * at the start of the kernel (the first page is unmapped later) is used for
+ * the kernel_pg_dir. It must be at a position known at link time (as it's used
+ * to initialize the init task struct) and since it needs special cache
+ * settings, it is easiest to use this page; the rest of the page is used
+ * for further pointer tables.
+ * mmu_get_page_table_entry always allocates a whole page for page tables; this
+ * means 1024 pages, and so 4MB of memory, can be mapped. It doesn't make sense
+ * to manage page tables in smaller pieces as nearly all mappings have that
+ * size.
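+ *	(Sketching the arithmetic: one 4096-byte page holds 1024 four-byte
+ * page table entries, each mapping one 4K page, which is where the 1024
+ * pages / 4MB figure comes from.)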
+ *
+ * ######################################################################
+ *
+ *
+ * ######################################################################
+ *
+ * mmu_engage
+ * ----------
+ *	Thanks to a small helper routine, enabling the mmu became quite simple
+ * and there is only one way left. mmu_engage builds a complete new mapping
+ * that includes only the absolute minimum needed to jump to the final
+ * position and to restore the original mapping.
+ * As this code doesn't need a transparent translation register anymore, all
+ * such registers are free to be used by machines that need them for
+ * other purposes.
+ *
+ * ######################################################################
+ *
+ * mmu_print
+ * ---------
+ *	This algorithm will print out the page tables of the system as
+ * appropriate for an 030 or an 040.  This is useful for debugging purposes
+ * and as such is enclosed in #ifdef MMU_PRINT/#endif clauses.
+ *
+ * ######################################################################
+ *
+ * console_init
+ * ------------
+ *	The console is also able to be turned off.  The console in head.S
+ * is specifically for debugging and can be very useful.  It is surrounded by
+ * #ifdef CONSOLE/#endif clauses so it doesn't have to ship in known-good
+ * kernels.  Its basic algorithm is to determine the size of the screen
+ * (in height/width and bit depth) and then use that information for
+ * displaying an 8x8 font or an 8x16 (widthxheight).  I prefer the 8x8 for
+ * debugging so I can see more good data.  But it was trivial to add support
+ * for both fonts, so I included it.
+ *	Also, the algorithm for plotting pixels is abstracted so that in
+ * theory other platforms could add support for different kinds of frame
+ * buffers.  This could be very useful.
+ *
+ * console_put_penguin
+ * -------------------
+ *	An important part of any Linux bring up is the penguin and there's
+ * nothing like getting the Penguin on the screen!  This algorithm will work
+ * on any machine for which there is a console_plot_pixel.
+ *
+ * console_scroll
+ * --------------
+ *	My hope is that the scroll algorithm does the right thing on the
+ * various platforms, but it wouldn't be hard to add the test conditions
+ * and new code if it doesn't.
+ *
+ * console_putc
+ * -------------
+ *
+ * ######################################################################
+ *
+ *	Register usage has greatly simplified within head.S. Every subroutine
+ * saves and restores all registers that it modifies (except any register it
+ * returns a value in, of course). So the only register that needs to be initialized
+ * is the stack pointer.
+ * All other init code and data is now placed in the init section, so it will
+ * be automatically freed at the end of the kernel initialization.
+ *
+ * ######################################################################
+ *
+ * options
+ * -------
+ *	There are many options available in a build of this file.  I've
+ * taken the time to describe them here to save you the time of searching
+ * for them and trying to understand what they mean.
+ *
+ * CONFIG_xxx:	These are the obvious machine configuration defines created
+ * during configuration.  These are defined in include/linux/autoconf.h.
+ *
+ * CONSOLE:	There is support for head.S console in this file.  This
+ * console can talk to a Mac frame buffer, but could easily be extended
+ * to support other platforms.
+ *
+ * TEST_MMU:	This is a test harness for running on any given machine but
+ * getting an MMU dump for another class of machine.  The classes of machines
+ * that can be tested are any of the makes (Atari, Amiga, Mac, VME, etc.)
+ * and any of the models (030, 040, 060, etc.).
+ *
+ *	NOTE:	TEST_MMU is NOT permanent!  It is scheduled to be removed
+ *		when head.S boots on Atari, Amiga, Macintosh, and VME
+ *		machines.  At that point the underlying logic will be
+ *		believed to be solid enough to be trusted, and TEST_MMU
+ *		can be dropped.  Do note that that will clean up the
+ *		head.S code significantly as large blocks of #if/#else
+ *		clauses can be removed.
+ *
+ * MMU_NOCACHE_KERNEL:	On the Macintosh platform there was an inquiry into
+ * determining why devices don't appear to work.  A test case was to remove
+ * the cacheability of the kernel bits.
+ *
+ * MMU_PRINT:	There is a routine built into head.S that can display the
+ * MMU data structures.  It outputs its result through the serial_putc
+ * interface.  So wherever that winds up driving data, that's where the
+ * mmu struct will appear.  On the Macintosh that's typically the console.
+ *
+ * SERIAL_DEBUG:	There are a series of putc() macro statements
+ * scattered throughout the code to give progress status to the
+ * person sitting at the console.  This constant determines whether those
+ * are used.
+ *
+ * DEBUG:	This is the standard DEBUG flag that can be set for building
+ *		the kernel.  It has the effect of adding additional tests into
+ *		the code.
+ *
+ * FONT_6x11:
+ * FONT_8x8:
+ * FONT_8x16:
+ *		In theory these could be determined at run time or handed
+ *		over by the booter.  But, let's be real, it's a fine hard
+ *		coded value.  (But, you will notice the code is run-time
+ *		flexible!)  A pointer to the font's struct font_desc
+ *		is kept locally in Lconsole_font.  It is used to determine
+ *		font size information dynamically.
+ *
+ * Atari constants:
+ * USE_PRINTER:	Use the printer port for serial debug.
+ * USE_SCC_B:	Use the SCC port A (Serial2) for serial debug.
+ * USE_SCC_A:	Use the SCC port B (Modem2) for serial debug.
+ * USE_MFP:	Use the ST-MFP port (Modem1) for serial debug.
+ *
+ * Macintosh constants:
+ * MAC_SERIAL_DEBUG:	Turns on serial debug output for the Macintosh.
+ * MAC_USE_SCC_A:	Use the SCC port A (modem) for serial debug.
+ * MAC_USE_SCC_B:	Use the SCC port B (printer) for serial debug (default).
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/bootinfo.h>
+#include <asm/setup.h>
+#include <asm/entry.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/offsets.h>
+
+#ifdef CONFIG_MAC
+
+#include <asm/machw.h>
+
+/*
+ * Macintosh console support
+ */
+
+#define CONSOLE
+#define CONSOLE_PENGUIN
+
+/*
+ * Macintosh serial debug support; outputs boot info to the printer
+ *   and/or modem serial ports
+ */
+#undef MAC_SERIAL_DEBUG
+
+/*
+ * Macintosh serial debug port selection; define one or both;
+ *   requires MAC_SERIAL_DEBUG to be defined
+ */
+#define MAC_USE_SCC_A		/* Macintosh modem serial port */
+#define MAC_USE_SCC_B		/* Macintosh printer serial port */
+
+#endif	/* CONFIG_MAC */
+
+#undef MMU_PRINT
+#undef MMU_NOCACHE_KERNEL
+#define SERIAL_DEBUG
+#undef DEBUG
+
+/*
+ * For the head.S console, there are three supported fonts, 6x11, 8x16 and 8x8.
+ * The 8x8 font is harder to read but fits more on the screen.
+ */
+#define FONT_8x8	/* default */
+/* #define FONT_8x16 */	/* 2nd choice */
+/* #define FONT_6x11 */	/* 3rd choice */
+
+.globl kernel_pg_dir
+.globl availmem
+.globl m68k_pgtable_cachemode
+.globl m68k_supervisor_cachemode
+#ifdef CONFIG_MVME16x
+.globl mvme_bdid
+#endif
+#ifdef CONFIG_Q40
+.globl q40_mem_cptr
+#endif
+
+CPUTYPE_040	= 1	/* indicates an 040 */
+CPUTYPE_060	= 2	/* indicates an 060 */
+CPUTYPE_0460	= 3	/* if either above are set, this is set */
+CPUTYPE_020	= 4	/* indicates an 020 */
+
+/* Translation control register */
+TC_ENABLE = 0x8000
+TC_PAGE8K = 0x4000
+TC_PAGE4K = 0x0000
+
+/* Transparent translation registers */
+TTR_ENABLE	= 0x8000	/* enable transparent translation */
+TTR_ANYMODE	= 0x4000	/* user and kernel mode access */
+TTR_KERNELMODE	= 0x2000	/* only kernel mode access */
+TTR_USERMODE	= 0x0000	/* only user mode access */
+TTR_CI		= 0x0400	/* inhibit cache */
+TTR_RW		= 0x0200	/* read/write mode */
+TTR_RWM		= 0x0100	/* read/write mask */
+TTR_FCB2	= 0x0040	/* function code base bit 2 */
+TTR_FCB1	= 0x0020	/* function code base bit 1 */
+TTR_FCB0	= 0x0010	/* function code base bit 0 */
+TTR_FCM2	= 0x0004	/* function code mask bit 2 */
+TTR_FCM1	= 0x0002	/* function code mask bit 1 */
+TTR_FCM0	= 0x0001	/* function code mask bit 0 */
+
+/* Cache Control registers */
+CC6_ENABLE_D	= 0x80000000	/* enable data cache (680[46]0) */
+CC6_FREEZE_D	= 0x40000000	/* freeze data cache (68060) */
+CC6_ENABLE_SB	= 0x20000000	/* enable store buffer (68060) */
+CC6_PUSH_DPI	= 0x10000000	/* disable CPUSH invalidation (68060) */
+CC6_HALF_D	= 0x08000000	/* half-cache mode for data cache (68060) */
+CC6_ENABLE_B	= 0x00800000	/* enable branch cache (68060) */
+CC6_CLRA_B	= 0x00400000	/* clear all entries in branch cache (68060) */
+CC6_CLRU_B	= 0x00200000	/* clear user entries in branch cache (68060) */
+CC6_ENABLE_I	= 0x00008000	/* enable instruction cache (680[46]0) */
+CC6_FREEZE_I	= 0x00004000	/* freeze instruction cache (68060) */
+CC6_HALF_I	= 0x00002000	/* half-cache mode for instruction cache (68060) */
+CC3_ALLOC_WRITE	= 0x00002000	/* write allocate mode(68030) */
+CC3_ENABLE_DB	= 0x00001000	/* enable data burst (68030) */
+CC3_CLR_D	= 0x00000800	/* clear data cache (68030) */
+CC3_CLRE_D	= 0x00000400	/* clear entry in data cache (68030) */
+CC3_FREEZE_D	= 0x00000200	/* freeze data cache (68030) */
+CC3_ENABLE_D	= 0x00000100	/* enable data cache (68030) */
+CC3_ENABLE_IB	= 0x00000010	/* enable instruction burst (68030) */
+CC3_CLR_I	= 0x00000008	/* clear instruction cache (68030) */
+CC3_CLRE_I	= 0x00000004	/* clear entry in instruction cache (68030) */
+CC3_FREEZE_I	= 0x00000002	/* freeze instruction cache (68030) */
+CC3_ENABLE_I	= 0x00000001	/* enable instruction cache (68030) */
+
+/* Miscellaneous definitions */
+PAGESIZE	= 4096
+PAGESHIFT	= 12
+
+ROOT_TABLE_SIZE	= 128
+PTR_TABLE_SIZE	= 128
+PAGE_TABLE_SIZE	= 64
+ROOT_INDEX_SHIFT = 25
+PTR_INDEX_SHIFT  = 18
+PAGE_INDEX_SHIFT = 12
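+
+/* (Sanity check on the sizes above: 128 root entries x 128 pointer entries
+ * x 64 page entries x 4K pages = 2^7 * 2^7 * 2^6 * 2^12 = 2^32, i.e. the
+ * full 32-bit logical address space.) */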
+
+#ifdef DEBUG
+/* When debugging use readable names for labels */
+#ifdef __STDC__
+#define L(name) .head.S.##name
+#else
+#define L(name) .head.S./**/name
+#endif
+#else
+#ifdef __STDC__
+#define L(name) .L##name
+#else
+#define L(name) .L/**/name
+#endif
+#endif
+
+/* The __INITDATA stuff is a no-op when ftrace or kgdb are turned on */
+#ifndef __INITDATA
+#define __INITDATA	.data
+#define __FINIT		.previous
+#endif
+
+/* Several macros to make the writing of subroutines easier:
+ * - func_start marks the beginning of the routine; it sets up the frame
+ *   register and saves the registers, and it also defines another macro
+ *   to automatically restore the registers again.
+ * - func_return marks the end of the routine and simply calls the prepared
+ *   macro to restore registers and jump back to the caller.
+ * - func_define generates another macro to automatically put arguments
+ *   onto the stack, call the subroutine and clean up the stack again.
+ */
+
+/* Within subroutines these macros can be used to access the arguments
+ * on the stack. With STACK some allocated memory on the stack can be
+ * accessed and ARG0 points to the return address (used by mmu_engage).
+ */
+#define	STACK	%a6@(stackstart)
+#define ARG0	%a6@(4)
+#define ARG1	%a6@(8)
+#define ARG2	%a6@(12)
+#define ARG3	%a6@(16)
+#define ARG4	%a6@(20)
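+
+/* (After the "linkw %a6,#-stack" done by func_start, %a6@(0) holds the
+ * caller's frame pointer and %a6@(4) the return address, so ARG1 at
+ * %a6@(8) is the first argument pushed by the calling macro.) */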
+
+.macro	func_start	name,saveregs,stack=0
+L(\name):
+	linkw	%a6,#-\stack
+	moveml	\saveregs,%sp@-
+.set	stackstart,-\stack
+
+.macro	func_return_\name
+	moveml	%sp@+,\saveregs
+	unlk	%a6
+	rts
+.endm
+.endm
+
+.macro	func_return	name
+	func_return_\name
+.endm
+
+.macro	func_call	name
+	jbsr	L(\name)
+.endm
+
+.macro	move_stack	nr,arg1,arg2,arg3,arg4
+.if	\nr
+	move_stack	"(\nr-1)",\arg2,\arg3,\arg4
+	movel	\arg1,%sp@-
+.endif
+.endm
+
+.macro	func_define	name,nr=0
+.macro	\name	arg1,arg2,arg3,arg4
+	move_stack	\nr,\arg1,\arg2,\arg3,\arg4
+	func_call	\name
+.if	\nr
+	lea	%sp@(\nr*4),%sp
+.endif
+.endm
+.endm
+
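+/* An illustrative sketch of how these macros fit together, using
+ * get_bi_record (a real subroutine defined later in this file) as the
+ * example: its body is bracketed by
+ *
+ *	func_start	get_bi_record,%d1
+ *		... code that reads its argument via ARG1 ...
+ *	func_return	get_bi_record
+ *
+ * and callers go through a small wrapper macro (see get_bi_record below)
+ * that pushes the argument, does func_call get_bi_record and pops the
+ * stack again; that is exactly the shape of macro which func_define
+ * generates for the routines listed here.
+ */
+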
+func_define	mmu_map,4
+func_define	mmu_map_tt,4
+func_define	mmu_fixup_page_mmu_cache,1
+func_define	mmu_temp_map,2
+func_define	mmu_engage
+func_define	mmu_get_root_table_entry,1
+func_define	mmu_get_ptr_table_entry,2
+func_define	mmu_get_page_table_entry,2
+func_define	mmu_print
+func_define	get_new_page
+#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
+func_define	set_leds
+#endif
+
+.macro	mmu_map_eq	arg1,arg2,arg3
+	mmu_map	\arg1,\arg1,\arg2,\arg3
+.endm
+
+.macro	get_bi_record	record
+	pea	\record
+	func_call	get_bi_record
+	addql	#4,%sp
+.endm
+
+func_define	serial_putc,1
+func_define	console_putc,1
+
+func_define	console_init
+func_define	console_put_stats
+func_define	console_put_penguin
+func_define	console_plot_pixel,3
+func_define	console_scroll
+
+.macro	putc	ch
+#if defined(CONSOLE) || defined(SERIAL_DEBUG)
+	pea	\ch
+#endif
+#ifdef CONSOLE
+	func_call	console_putc
+#endif
+#ifdef SERIAL_DEBUG
+	func_call	serial_putc
+#endif
+#if defined(CONSOLE) || defined(SERIAL_DEBUG)
+	addql	#4,%sp
+#endif
+.endm
+
+.macro	dputc	ch
+#ifdef DEBUG
+	putc	\ch
+#endif
+.endm
+
+func_define	putn,1
+
+.macro	dputn	nr
+#ifdef DEBUG
+	putn	\nr
+#endif
+.endm
+
+.macro	puts		string
+#if defined(CONSOLE) || defined(SERIAL_DEBUG)
+	__INITDATA
+.Lstr\@:
+	.string	"\string"
+	__FINIT
+	pea	%pc@(.Lstr\@)
+	func_call	puts
+	addql	#4,%sp
+#endif
+.endm
+
+.macro	dputs	string
+#ifdef DEBUG
+	puts	"\string"
+#endif
+.endm
+
+#define is_not_amiga(lab) cmpl &MACH_AMIGA,%pc@(m68k_machtype); jne lab
+#define is_not_atari(lab) cmpl &MACH_ATARI,%pc@(m68k_machtype); jne lab
+#define is_not_mac(lab) cmpl &MACH_MAC,%pc@(m68k_machtype); jne lab
+#define is_not_mvme147(lab) cmpl &MACH_MVME147,%pc@(m68k_machtype); jne lab
+#define is_not_mvme16x(lab) cmpl &MACH_MVME16x,%pc@(m68k_machtype); jne lab
+#define is_not_bvme6000(lab) cmpl &MACH_BVME6000,%pc@(m68k_machtype); jne lab
+#define is_mvme147(lab) cmpl &MACH_MVME147,%pc@(m68k_machtype); jeq lab
+#define is_mvme16x(lab) cmpl &MACH_MVME16x,%pc@(m68k_machtype); jeq lab
+#define is_bvme6000(lab) cmpl &MACH_BVME6000,%pc@(m68k_machtype); jeq lab
+#define is_not_hp300(lab) cmpl &MACH_HP300,%pc@(m68k_machtype); jne lab
+#define is_not_apollo(lab) cmpl &MACH_APOLLO,%pc@(m68k_machtype); jne lab
+#define is_not_q40(lab) cmpl &MACH_Q40,%pc@(m68k_machtype); jne lab
+#define is_not_sun3x(lab) cmpl &MACH_SUN3X,%pc@(m68k_machtype); jne lab
+
+#define hasnt_leds(lab) cmpl &MACH_HP300,%pc@(m68k_machtype); \
+			jeq 42f; \
+			cmpl &MACH_APOLLO,%pc@(m68k_machtype); \
+			jne lab ;\
+		42:\
+
+#define is_040_or_060(lab)	btst &CPUTYPE_0460,%pc@(L(cputype)+3); jne lab
+#define is_not_040_or_060(lab)	btst &CPUTYPE_0460,%pc@(L(cputype)+3); jeq lab
+#define is_040(lab)		btst &CPUTYPE_040,%pc@(L(cputype)+3); jne lab
+#define is_060(lab)		btst &CPUTYPE_060,%pc@(L(cputype)+3); jne lab
+#define is_not_060(lab)		btst &CPUTYPE_060,%pc@(L(cputype)+3); jeq lab
+#define is_020(lab)		btst &CPUTYPE_020,%pc@(L(cputype)+3); jne lab
+#define is_not_020(lab)		btst &CPUTYPE_020,%pc@(L(cputype)+3); jeq lab
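+
+/* (The "+3" above picks out the least significant byte of the big-endian
+ * long stored at L(cputype), where these small bit numbers live, since
+ * btst on a memory operand tests a single byte.) */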
+
+/* On the HP300 we use the on-board LEDs for debug output before
+   the console is running.  Writing a 1 bit turns the corresponding LED
+   _off_ - on the 340 bit 7 is towards the back panel of the machine.  */
+.macro	leds	mask
+#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
+	hasnt_leds(.Lled\@)
+	pea	\mask
+	func_call	set_leds
+	addql	#4,%sp
+.Lled\@:
+#endif
+.endm
+
+.text
+ENTRY(_stext)
+/*
+ * Version numbers of the bootinfo interface
+ * The area from _stext to _start will later be used as kernel pointer table
+ */
+	bras	1f	/* Jump over bootinfo version numbers */
+
+	.long	BOOTINFOV_MAGIC
+	.long	MACH_AMIGA, AMIGA_BOOTI_VERSION
+	.long	MACH_ATARI, ATARI_BOOTI_VERSION
+	.long	MACH_MVME147, MVME147_BOOTI_VERSION
+	.long	MACH_MVME16x, MVME16x_BOOTI_VERSION
+	.long	MACH_BVME6000, BVME6000_BOOTI_VERSION
+	.long	MACH_MAC, MAC_BOOTI_VERSION
+	.long	MACH_Q40, Q40_BOOTI_VERSION
+	.long	MACH_HP300, HP300_BOOTI_VERSION
+	.long	0
+1:	jra	__start
+
+.equ	kernel_pg_dir,_stext
+
+.equ	.,_stext+PAGESIZE
+
+ENTRY(_start)
+	jra	__start
+__INIT
+ENTRY(__start)
+/*
+ * Setup initial stack pointer
+ */
+	lea	%pc@(_stext),%sp
+
+/*
+ * Record the CPU and machine type.
+ */
+	get_bi_record	BI_MACHTYPE
+	lea	%pc@(m68k_machtype),%a1
+	movel	%a0@,%a1@
+
+	get_bi_record	BI_FPUTYPE
+	lea	%pc@(m68k_fputype),%a1
+	movel	%a0@,%a1@
+
+	get_bi_record	BI_MMUTYPE
+	lea	%pc@(m68k_mmutype),%a1
+	movel	%a0@,%a1@
+
+	get_bi_record	BI_CPUTYPE
+	lea	%pc@(m68k_cputype),%a1
+	movel	%a0@,%a1@
+
+	leds	0x1
+
+#ifdef CONFIG_MAC
+/*
+ * For Macintosh, we need to determine the display parameters early (at least
+ * while debugging it).
+ */
+
+	is_not_mac(L(test_notmac))
+
+	get_bi_record	BI_MAC_VADDR
+	lea	%pc@(L(mac_videobase)),%a1
+	movel	%a0@,%a1@
+
+	get_bi_record	BI_MAC_VDEPTH
+	lea	%pc@(L(mac_videodepth)),%a1
+	movel	%a0@,%a1@
+
+	get_bi_record	BI_MAC_VDIM
+	lea	%pc@(L(mac_dimensions)),%a1
+	movel	%a0@,%a1@
+
+	get_bi_record	BI_MAC_VROW
+	lea	%pc@(L(mac_rowbytes)),%a1
+	movel	%a0@,%a1@
+
+#ifdef MAC_SERIAL_DEBUG
+	get_bi_record	BI_MAC_SCCBASE
+	lea	%pc@(L(mac_sccbase)),%a1
+	movel	%a0@,%a1@
+#endif /* MAC_SERIAL_DEBUG */
+
+#if 0
+	/*
+	 * Clear the screen
+	 */
+	lea	%pc@(L(mac_videobase)),%a0
+	movel	%a0@,%a1
+	lea	%pc@(L(mac_dimensions)),%a0
+	movel	%a0@,%d1
+	swap	%d1		/* #rows is high bytes */
+	andl	#0xFFFF,%d1	/* rows */
+	subl	#10,%d1
+	lea	%pc@(L(mac_rowbytes)),%a0
+loopy2:
+	movel	%a0@,%d0
+	subql	#1,%d0
+loopx2:
+	moveb	#0x55, %a1@+
+	dbra	%d0,loopx2
+	dbra	%d1,loopy2
+#endif
+
+L(test_notmac):
+#endif /* CONFIG_MAC */
+
+
+/*
+ * There are ultimately two pieces of information we want for all kinds of
+ * processors: CpuType and CacheBits.  The CPUTYPE was passed in from the booter
+ * and is converted here from a booter type definition to a separate bit
+ * number which allows for the standard is_0x0 macro tests.
+ */
+	movel	%pc@(m68k_cputype),%d0
+	/*
+	 * Assume it's an 030
+	 */
+	clrl	%d1
+
+	/*
+	 * Test the BootInfo cputype for 060
+	 */
+	btst	#CPUB_68060,%d0
+	jeq	1f
+	bset	#CPUTYPE_060,%d1
+	bset	#CPUTYPE_0460,%d1
+	jra	3f
+1:
+	/*
+	 * Test the BootInfo cputype for 040
+	 */
+	btst	#CPUB_68040,%d0
+	jeq	2f
+	bset	#CPUTYPE_040,%d1
+	bset	#CPUTYPE_0460,%d1
+	jra	3f
+2:
+	/*
+	 * Test the BootInfo cputype for 020
+	 */
+	btst	#CPUB_68020,%d0
+	jeq	3f
+	bset	#CPUTYPE_020,%d1
+	jra	3f
+3:
+	/*
+	 * Record the cpu type
+	 */
+	lea	%pc@(L(cputype)),%a0
+	movel	%d1,%a0@
+
+	/*
+	 * NOTE:
+	 *
+	 * Now the macros are valid:
+	 *	is_040_or_060
+	 *	is_not_040_or_060
+	 *	is_040
+	 *	is_060
+	 *	is_not_060
+	 */
+
+	/*
+	 * Determine the cache mode for pages holding MMU tables
+	 * and for supervisor mode, unused for '020 and '030
+	 */
+	clrl	%d0
+	clrl	%d1
+
+	is_not_040_or_060(L(save_cachetype))
+
+	/*
+	 * '040 or '060
+	 * d1 := cacheable write-through
+	 * NOTE: The 68040 manual strongly recommends non-cached for MMU tables,
+	 * but we have been using write-through since at least 2.0.29 so I
+	 * guess it is OK.
+	 */
+#ifdef CONFIG_060_WRITETHROUGH
+	/*
+	 * If this is a 68060 board using drivers with cache coherency
+	 * problems, then supervisor memory accesses need to be write-through
+	 * also; otherwise, we want copyback.
+	 */
+
+	is_not_060(1f)
+	movel	#_PAGE_CACHE040W,%d0
+	jra	L(save_cachetype)
+#endif /* CONFIG_060_WRITETHROUGH */
+1:
+	movew	#_PAGE_CACHE040,%d0
+
+	movel	#_PAGE_CACHE040W,%d1
+
+L(save_cachetype):
+	/* Save cache mode for supervisor mode and page tables
+	 */
+	lea	%pc@(m68k_supervisor_cachemode),%a0
+	movel	%d0,%a0@
+	lea	%pc@(m68k_pgtable_cachemode),%a0
+	movel	%d1,%a0@
+
+/*
+ * raise interrupt level
+ */
+	movew	#0x2700,%sr
+
+/*
+   If running on an Atari, determine the I/O base of the
+   serial port and test if we are running on a Medusa or Hades.
+   This test is necessary here, because on the Hades the serial
+   port is only accessible in the high I/O memory area.
+
+   The test whether it is a Medusa is done by writing to the byte at
+   phys. 0x0. This should result in a bus error on all other machines.
+
+   ...should, but doesn't. The Afterburner040 for the Falcon has the
+   same behaviour (0x0..0x7 are no ROM shadow). So we have to do
+   another test to distinguish Medusa and AB040. This is a
+   read attempt for 0x00ff82fe phys. that should bus error on a Falcon
+   (+AB040), but is in the range where the Medusa always asserts DTACK.
+
+   The test for the Hades is done by reading address 0xb0000000. This
+   should give a bus error on the Medusa.
+ */
+
+#ifdef CONFIG_ATARI
+	is_not_atari(L(notypetest))
+
+	/* get special machine type (Medusa/Hades/AB40) */
+	moveq	#0,%d3 /* default if tag doesn't exist */
+	get_bi_record	BI_ATARI_MCH_TYPE
+	tstl	%d0
+	jbmi	1f
+	movel	%a0@,%d3
+	lea	%pc@(atari_mch_type),%a0
+	movel	%d3,%a0@
+1:
+	/* On the Hades, the iobase must be set up before opening the
+	 * serial port. There are no I/O regs at 0x00ffxxxx at all. */
+	moveq	#0,%d0
+	cmpl	#ATARI_MACH_HADES,%d3
+	jbne	1f
+	movel	#0xff000000,%d0		/* Hades I/O base addr: 0xff000000 */
+1:	lea     %pc@(L(iobase)),%a0
+	movel   %d0,%a0@
+
+L(notypetest):
+#endif
+
+#ifdef CONFIG_VME
+	is_mvme147(L(getvmetype))
+	is_bvme6000(L(getvmetype))
+	is_not_mvme16x(L(gvtdone))
+
+	/* See if the loader has specified the BI_VME_TYPE tag.  Recent
+	 * versions of VMELILO and TFTPLILO do this.  We have to do this
+	 * early so we know how to handle console output.  If the tag
+	 * doesn't exist then we use the Bug for output on MVME16x.
+	 */
+L(getvmetype):
+	get_bi_record	BI_VME_TYPE
+	tstl	%d0
+	jbmi	1f
+	movel	%a0@,%d3
+	lea	%pc@(vme_brdtype),%a0
+	movel	%d3,%a0@
+1:
+#ifdef CONFIG_MVME16x
+	is_not_mvme16x(L(gvtdone))
+
+	/* Need to get the BRD_ID info to differentiate between 162, 167,
+	 * etc.  This is available as a BI_VME_BRDINFO tag with later
+	 * versions of VMELILO and TFTPLILO, otherwise we call the Bug.
+	 */
+	get_bi_record	BI_VME_BRDINFO
+	tstl	%d0
+	jpl	1f
+
+	/* Get pointer to board ID data from Bug */
+	movel	%d2,%sp@-
+	trap	#15
+	.word	0x70		/* trap 0x70 - .BRD_ID */
+	movel	%sp@+,%a0
+1:
+	lea	%pc@(mvme_bdid),%a1
+	/* Structure is 32 bytes long */
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+#endif
+
+L(gvtdone):
+
+#endif
+
+#ifdef CONFIG_HP300
+	is_not_hp300(L(nothp))
+
+	/* Get the address of the UART for serial debugging */
+	get_bi_record	BI_HP300_UART_ADDR
+	tstl	%d0
+	jbmi	1f
+	movel	%a0@,%d3
+	lea	%pc@(L(uartbase)),%a0
+	movel	%d3,%a0@
+	get_bi_record	BI_HP300_UART_SCODE
+	tstl	%d0
+	jbmi	1f
+	movel	%a0@,%d3
+	lea	%pc@(L(uart_scode)),%a0
+	movel	%d3,%a0@
+1:
+L(nothp):
+#endif
+
+/*
+ * Initialize serial port
+ */
+	jbsr	L(serial_init)
+
+/*
+ * Initialize console
+ */
+#ifdef CONFIG_MAC
+	is_not_mac(L(nocon))
+#ifdef CONSOLE
+	console_init
+#ifdef CONSOLE_PENGUIN
+	console_put_penguin
+#endif	/* CONSOLE_PENGUIN */
+	console_put_stats
+#endif	/* CONSOLE */
+L(nocon):
+#endif	/* CONFIG_MAC */
+
+
+	putc	'\n'
+	putc	'A'
+	leds	0x2
+	dputn	%pc@(L(cputype))
+	dputn	%pc@(m68k_supervisor_cachemode)
+	dputn	%pc@(m68k_pgtable_cachemode)
+	dputc	'\n'
+
+/*
+ * Save physical start address of kernel
+ */
+	lea	%pc@(L(phys_kernel_start)),%a0
+	lea	%pc@(_stext),%a1
+	subl	#_stext,%a1
+	addl	#PAGE_OFFSET,%a1
+	movel	%a1,%a0@
+
+	putc	'B'
+
+	leds	0x4
+
+/*
+ *	mmu_init
+ *
+ *	This block of code does what's necessary to map in the various kinds
+ *	of machines for execution of Linux.
+ *	First map the first 4 MB of kernel code & data
+ */
+
+	mmu_map	#PAGE_OFFSET,%pc@(L(phys_kernel_start)),#4*1024*1024,\
+		%pc@(m68k_supervisor_cachemode)
+
+	putc	'C'
+
+#ifdef CONFIG_AMIGA
+
+L(mmu_init_amiga):
+
+	is_not_amiga(L(mmu_init_not_amiga))
+/*
+ * mmu_init_amiga
+ */
+
+	putc	'D'
+
+	is_not_040_or_060(1f)
+
+	/*
+	 * 040: Map the 16Meg range physical 0x0 to logical 0x8000.0000
+	 */
+	mmu_map		#0x80000000,#0,#0x01000000,#_PAGE_NOCACHE_S
+	/*
+	 * Map the Zorro III I/O space with transparent translation
+	 * for frame buffer memory etc.
+	 */
+	mmu_map_tt	#1,#0x40000000,#0x20000000,#_PAGE_NOCACHE_S
+
+	jbra	L(mmu_init_done)
+
+1:
+	/*
+	 * 030:	Map the 32Meg range physical 0x0 to logical 0x8000.0000
+	 */
+	mmu_map		#0x80000000,#0,#0x02000000,#_PAGE_NOCACHE030
+	mmu_map_tt	#1,#0x40000000,#0x20000000,#_PAGE_NOCACHE030
+
+	jbra	L(mmu_init_done)
+
+L(mmu_init_not_amiga):
+#endif
+
+#ifdef CONFIG_ATARI
+
+L(mmu_init_atari):
+
+	is_not_atari(L(mmu_init_not_atari))
+
+	putc	'E'
+
+/* On the Atari, we map the I/O region (phys. 0x00ffxxxx) by mapping
+   the last 16 MB of virtual address space to the first 16 MB (i.e.
+   0xffxxxxxx -> 0x00xxxxxx). For this, an additional pointer table is
+   needed. I/O ranges are marked non-cacheable.
+
+   For the Medusa it is better to map the I/O region transparently
+   (i.e. 0xffxxxxxx -> 0xffxxxxxx), because some I/O registers are
+   accessible only in the high area.
+
+   On the Hades all I/O registers are only accessible in the high
+   area.
+*/
+
+	/* I/O base addr for non-Medusa, non-Hades: 0x00000000 */
+	moveq	#0,%d0
+	movel	%pc@(atari_mch_type),%d3
+	cmpl	#ATARI_MACH_MEDUSA,%d3
+	jbeq	2f
+	cmpl	#ATARI_MACH_HADES,%d3
+	jbne	1f
+2:	movel	#0xff000000,%d0 /* Medusa/Hades base addr: 0xff000000 */
+1:	movel	%d0,%d3
+
+	is_040_or_060(L(spata68040))
+
+	/* Map everything non-cacheable, though not all parts really
+	 * need to disable caches (crucial only for 0xff8000..0xffffff
+	 * (standard I/O) and 0xf00000..0xf3ffff (IDE)). The remainder
+	 * isn't really used, except for sometimes peeking into the
+	 * ROMs (mirror at phys. 0x0), so caching isn't necessary for
+	 * this. */
+	mmu_map	#0xff000000,%d3,#0x01000000,#_PAGE_NOCACHE030
+
+	jbra	L(mmu_init_done)
+
+L(spata68040):
+
+	mmu_map	#0xff000000,%d3,#0x01000000,#_PAGE_NOCACHE_S
+
+	jbra	L(mmu_init_done)
+
+L(mmu_init_not_atari):
+#endif
+
+#ifdef CONFIG_Q40
+	is_not_q40(L(notq40))
+	/*
+	 * add transparent mapping for 0xff00 0000 - 0xffff ffff
+	 * (non-cached, serialized, etc.); this includes the master chip,
+	 * DAC, RTC and ISA ports.
+	 * 0xfe000000-0xfeffffff is for screen and ROM.
+	 */
+
+	putc    'Q'
+
+	mmu_map_tt	#0,#0xfe000000,#0x01000000,#_PAGE_CACHE040W
+	mmu_map_tt	#1,#0xff000000,#0x01000000,#_PAGE_NOCACHE_S
+
+	jbra	L(mmu_init_done)
+
+L(notq40):
+#endif
+
+#ifdef CONFIG_HP300
+	is_not_hp300(L(nothp300))
+
+	/* On the HP300, we map the ROM, INTIO and DIO regions (phys. 0x00xxxxxx)
+	 * by mapping 32MB (on 020/030) or 16 MB (on 040) from 0xf0xxxxxx -> 0x00xxxxxx.
+	 * The ROM mapping is needed because the LEDs are mapped there too.
+	 */
+
+	is_040(1f)
+
+	/*
+	 * 030: Map the 32Meg range physical 0x0 to logical 0xf000.0000
+	 */
+	mmu_map	#0xf0000000,#0,#0x02000000,#_PAGE_NOCACHE030
+
+	jbra	L(mmu_init_done)
+
+1:
+	/*
+	 * 040: Map the 16Meg range physical 0x0 to logical 0xf000.0000
+	 */
+	mmu_map #0xf0000000,#0,#0x01000000,#_PAGE_NOCACHE_S
+
+	jbra	L(mmu_init_done)
+
+L(nothp300):
+#endif /* CONFIG_HP300 */
+
+#ifdef CONFIG_MVME147
+
+	is_not_mvme147(L(not147))
+
+	/*
+	 * On MVME147 we have already created kernel page tables for
+	 * 4MB of RAM at address 0, so we now need to do a transparent
+	 * mapping of the top of memory space.  Make it 0.5GByte for now,
+	 * so we can access on-board i/o areas.
+	 */
+
+	mmu_map_tt	#1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE030
+
+	jbra	L(mmu_init_done)
+
+L(not147):
+#endif /* CONFIG_MVME147 */
+
+#ifdef CONFIG_MVME16x
+
+	is_not_mvme16x(L(not16x))
+
+	/*
+	 * On MVME16x we have already created kernel page tables for
+	 * 4MB of RAM at address 0, so we now need to do a transparent
+	 * mapping of the top of memory space.  Make it 0.5GByte for now.
+	 * Supervisor only access, so transparent mapping doesn't
+	 * clash with User code virtual address space.
+	 * This covers IO devices, PROM and SRAM.  The PROM and SRAM
+	 * mapping is needed to allow 167Bug to run.
+	 * IO is in the range 0xfff00000 to 0xfffeffff.
+	 * PROM is 0xff800000->0xffbfffff and SRAM is
+	 * 0xffe00000->0xffe1ffff.
+	 */
+
+	mmu_map_tt	#1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE_S
+
+	jbra	L(mmu_init_done)
+
+L(not16x):
+#endif	/* CONFIG_MVME16x */
+
+#ifdef CONFIG_BVME6000
+
+	is_not_bvme6000(L(not6000))
+
+	/*
+	 * On BVME6000 we have already created kernel page tables for
+	 * 4MB of RAM at address 0, so we now need to do a transparent
+	 * mapping of the top of memory space.  Make it 0.5GByte for now,
+	 * so we can access on-board i/o areas.
+	 * Supervisor only access, so transparent mapping doesn't
+	 * clash with User code virtual address space.
+	 */
+
+	mmu_map_tt	#1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE_S
+
+	jbra	L(mmu_init_done)
+
+L(not6000):
+#endif /* CONFIG_BVME6000 */
+
+/*
+ * mmu_init_mac
+ *
+ * The Macintosh mappings are less clear.
+ *
+ * Even as of this writing, it is unclear how the
+ * Macintosh mappings will be done.  However, as
+ * the first author of this code I'm proposing the
+ * following model:
+ *
+ * Map the kernel (that's already done),
+ * Map the I/O (on most machines that's the
+ * 0x5000.0000 ... 0x5300.0000 range),
+ * Map the video frame buffer using as few pages
+ * as absolutely necessary (this requirement mostly stems from
+ * the fact that when the frame buffer is at
+ * 0x0000.0000 then we know there is valid RAM just
+ * above the screen that we don't want to waste!).
+ *
+ * By the way, if the frame buffer is at 0x0000.0000
+ * then the Macintosh is known as an RBV based Mac.
+ *
+ * By the way 2, the code currently maps in a bunch of
+ * regions.  But I'd like to cut that out.  (And move most
+ * of the mappings up into the kernel proper ... or only
+ * map what's necessary.)
+ */
+
+#ifdef CONFIG_MAC
+
+L(mmu_init_mac):
+
+	is_not_mac(L(mmu_init_not_mac))
+
+	putc	'F'
+
+	is_not_040_or_060(1f)
+
+	moveq	#_PAGE_NOCACHE_S,%d3
+	jbra	2f
+1:
+	moveq	#_PAGE_NOCACHE030,%d3
+2:
+	/*
+	 * Mac Note: screen address of logical 0xF000.0000 -> <screen physical>
+	 *	     we simply map the 4MB that contains the videomem
+	 */
+
+	movel	#VIDEOMEMMASK,%d0
+	andl	%pc@(L(mac_videobase)),%d0
+
+	mmu_map		#VIDEOMEMBASE,%d0,#VIDEOMEMSIZE,%d3
+	/* ROM from 4000 0000 to 4200 0000 (only for mac_reset()) */
+	mmu_map_eq	#0x40000000,#0x02000000,%d3
+	/* IO devices (incl. serial port) from 5000 0000 to 5300 0000 */
+	mmu_map_eq	#0x50000000,#0x03000000,%d3
+	/* Nubus slot space (video at 0xF0000000, rom at 0xF0F80000) */
+	mmu_map_tt	#1,#0xf8000000,#0x08000000,%d3
+
+	jbra	L(mmu_init_done)
+
+L(mmu_init_not_mac):
+#endif
+
+#ifdef CONFIG_SUN3X
+	is_not_sun3x(L(notsun3x))
+
+	/* oh, the pain..  We're gonna want the prom code after
+	 * starting the MMU, so we copy the mappings, translating
+	 * from 8k -> 4k pages as we go.
+	 */
+
+	/* copy maps from 0xfee00000 to 0xff000000 */
+	movel	#0xfee00000, %d0
+	moveq	#ROOT_INDEX_SHIFT, %d1
+	lsrl	%d1,%d0
+	mmu_get_root_table_entry	%d0
+
+	movel	#0xfee00000, %d0
+	moveq	#PTR_INDEX_SHIFT, %d1
+	lsrl	%d1,%d0
+	andl	#PTR_TABLE_SIZE-1, %d0
+	mmu_get_ptr_table_entry		%a0,%d0
+
+	movel	#0xfee00000, %d0
+	moveq	#PAGE_INDEX_SHIFT, %d1
+	lsrl	%d1,%d0
+	andl	#PAGE_TABLE_SIZE-1, %d0
+	mmu_get_page_table_entry	%a0,%d0
+
+	/* this is where the prom page table lives */
+	movel	0xfefe00d4, %a1
+	movel	%a1@, %a1
+
+	movel	#((0x200000 >> 13)-1), %d1
+
+1:
+	movel	%a1@+, %d3
+	movel	%d3,%a0@+
+	addl	#0x1000,%d3
+	movel	%d3,%a0@+
+
+	dbra	%d1,1b
+
+	/* setup tt1 for I/O */
+	mmu_map_tt	#1,#0x40000000,#0x40000000,#_PAGE_NOCACHE_S
+	jbra	L(mmu_init_done)
+
+L(notsun3x):
+#endif
+
+#ifdef CONFIG_APOLLO
+	is_not_apollo(L(notapollo))
+
+	putc	'P'
+	mmu_map         #0x80000000,#0,#0x02000000,#_PAGE_NOCACHE030
+
+L(notapollo):
+	jbra	L(mmu_init_done)
+#endif
+
+L(mmu_init_done):
+
+	putc	'G'
+	leds	0x8
+
+/*
+ * mmu_fixup
+ *
+ * On the 040 class machines, all pages that are used for the
+ * mmu have to be fixed up. According to Motorola, pages holding mmu
+ * tables should be non-cacheable on a '040 and write-through on a
+ * '060. But analysis of the reasons for this, and practical
+ * experience, showed that write-through also works on a '040.
+ *
+ * Memory allocated so far goes from kernel_end to memory_start and is
+ * used for all kinds of tables; for those pages the cache attributes
+ * are now fixed.
+ */
+L(mmu_fixup):
+
+	is_not_040_or_060(L(mmu_fixup_done))
+
+#ifdef MMU_NOCACHE_KERNEL
+	jbra	L(mmu_fixup_done)
+#endif
+
+	/* first fix the page at the start of the kernel, that
+	 * contains also kernel_pg_dir.
+	 */
+	movel	%pc@(L(phys_kernel_start)),%d0
+	subl	#PAGE_OFFSET,%d0
+	lea	%pc@(_stext),%a0
+	subl	%d0,%a0
+	mmu_fixup_page_mmu_cache	%a0
+
+	movel	%pc@(L(kernel_end)),%a0
+	subl	%d0,%a0
+	movel	%pc@(L(memory_start)),%a1
+	subl	%d0,%a1
+	bra	2f
+1:
+	mmu_fixup_page_mmu_cache	%a0
+	addw	#PAGESIZE,%a0
+2:
+	cmpl	%a0,%a1
+	jgt	1b
+
+L(mmu_fixup_done):
+
+#ifdef MMU_PRINT
+	mmu_print
+#endif
+
+/*
+ * mmu_engage
+ *
+ * This chunk of code performs the gruesome task of engaging the MMU.
+ * The reason it's gruesome is that when the MMU becomes engaged it
+ * maps logical addresses to physical addresses.  The Program Counter
+ * register is then passed through the MMU before the next instruction
+ * is fetched (the instruction following the engage MMU instruction).
+ * This may mean one of two things:
+ * 1. The Program Counter falls within the logical address space of
+ *    the kernel of which there are two sub-possibilities:
+ *    A. The PC maps to the correct instruction (logical PC == physical
+ *       code location), or
+ *    B. The PC does not map through and the processor will read some
+ *       data (or instruction) which is not the logically next instr.
+ *    As you can imagine, A is good and B is bad.
+ * Alternatively,
+ * 2. The Program Counter does not map through the MMU.  The processor
+ *    will take a Bus Error.
+ * Clearly, 2 is bad.
+ * It doesn't take a whiz kid to figure out that you want 1.A.
+ * This code creates that possibility.
+ * There are two possible 1.A. states (we now ignore the other above states):
+ * A. The kernel is located at physical memory addressed the same as
+ *    the logical memory for the kernel, i.e., 0x01000.
+ *    B. The kernel is located somewhere else, e.g. 0x0400.0000.
+ *
+ *    Under some conditions the Macintosh can look like A or B.
+ * [A friend and I once noted that Apple hardware engineers should be
+ * wacked twice each day: once when they show up at work (as in, Whack!,
+ * "This is for the screwy hardware we know you're going to design today."),
+ * and also at the end of the day (as in, Whack! "I don't know what
+ * you designed today, but I'm sure it wasn't good."). -- rst]
+ *
+ * This code works on the following premise:
+ * If the kernel start (%d5) is within the first 16 Meg of RAM,
+ * then create a mapping for the kernel at logical 0x8000.0000 to
+ * the physical location of the pc.  And, create a transparent
+ * translation register for the first 16 Meg.  Then, after the MMU
+ * is engaged, the PC can be moved up into the 0x8000.0000 range
+ * and then the transparent translation can be turned off and then
+ * the PC can jump to the correct logical location and it will be
+ * home (finally).  This is essentially the code that the Amiga used
+ * to use.  Now, it's generalized for all processors.  Which means
+ * that a fresh (but temporary) mapping has to be created.  The mapping
+ * is made in page 0 (an as-yet unused location -- except for the
+ * stack!).  This temporary mapping will only require 1 pointer table
+ * and a single page table (it can map 256K).
+ *
+ * OK, alternatively, imagine that the Program Counter is not within
+ * the first 16 Meg.  Then, just use Transparent Translation registers
+ * to do the right thing.
+ *
+ * Last, if _start is already at 0x01000, then there's nothing special
+ * to do (in other words, in a degenerate case of the first case above,
+ * do nothing).
+ *
+ * Let's do it.
+ *
+ *
+ */
+
+	putc	'H'
+
+	mmu_engage
+
+/*
+ * After this point no new memory is allocated and
+ * the start of available memory is stored in availmem.
+ * (The bootmem allocator now requires the physical address.)
+ */
+
+	movel	L(memory_start),availmem
+
+#ifdef CONFIG_AMIGA
+	is_not_amiga(1f)
+	/* fixup the Amiga custom register location before printing */
+	clrl	L(custom)
+1:
+#endif
+
+#ifdef CONFIG_ATARI
+	is_not_atari(1f)
+	/* fixup the Atari iobase register location before printing */
+	movel	#0xff000000,L(iobase)
+1:
+#endif
+
+#ifdef CONFIG_MAC
+	is_not_mac(1f)
+	movel	#~VIDEOMEMMASK,%d0
+	andl	L(mac_videobase),%d0
+	addl	#VIDEOMEMBASE,%d0
+	movel	%d0,L(mac_videobase)
+#if defined(CONSOLE)
+	movel	%pc@(L(phys_kernel_start)),%d0
+	subl	#PAGE_OFFSET,%d0
+	subl	%d0,L(console_font)
+	subl	%d0,L(console_font_data)
+#endif
+#ifdef MAC_SERIAL_DEBUG
+	orl	#0x50000000,L(mac_sccbase)
+#endif
+1:
+#endif
+
+#ifdef CONFIG_HP300
+	is_not_hp300(1f)
+	/*
+	 * Fix up the iobase register to point to the new location of the LEDs.
+	 */
+	movel	#0xf0000000,L(iobase)
+
+	/*
+	 * Energise the FPU and caches.
+	 */
+	is_040(1f)
+	movel	#0x60,0xf05f400c
+	jbra	2f
+
+	/*
+	 * 040: slightly different, apparently.
+	 */
+1:	movew	#0,0xf05f400e
+	movew	#0x64,0xf05f400e
+2:
+#endif
+
+#ifdef CONFIG_SUN3X
+	is_not_sun3x(1f)
+
+	/* enable copro */
+	oriw	#0x4000,0x61000000
+1:
+#endif
+
+#ifdef CONFIG_APOLLO
+	is_not_apollo(1f)
+
+	/*
+	 * Fix up the iobase before printing
+	 */
+	movel	#0x80000000,L(iobase)
+1:
+#endif
+
+	putc	'I'
+	leds	0x10
+
+/*
+ * Enable caches
+ */
+
+	is_not_040_or_060(L(cache_not_680460))
+
+L(cache680460):
+	.chip	68040
+	nop
+	cpusha	%bc
+	nop
+
+	is_060(L(cache68060))
+
+	movel	#CC6_ENABLE_D+CC6_ENABLE_I,%d0
+	/* MMU stuff works in copyback mode now, so enable the cache */
+	movec	%d0,%cacr
+	jra	L(cache_done)
+
+L(cache68060):
+	movel	#CC6_ENABLE_D+CC6_ENABLE_I+CC6_ENABLE_SB+CC6_PUSH_DPI+CC6_ENABLE_B+CC6_CLRA_B,%d0
+	/* MMU stuff works in copyback mode now, so enable the cache */
+	movec	%d0,%cacr
+	/* enable superscalar dispatch in PCR */
+	moveq	#1,%d0
+	.chip	68060
+	movec	%d0,%pcr
+
+	jbra	L(cache_done)
+L(cache_not_680460):
+L(cache68030):
+	.chip	68030
+	movel	#CC3_ENABLE_DB+CC3_CLR_D+CC3_ENABLE_D+CC3_ENABLE_IB+CC3_CLR_I+CC3_ENABLE_I,%d0
+	movec	%d0,%cacr
+
+	jra	L(cache_done)
+	.chip	68k
+L(cache_done):
+
+	putc	'J'
+
+/*
+ * Setup initial stack pointer
+ */
+	lea	init_task,%curptr
+	lea	init_thread_union+THREAD_SIZE,%sp
+
+	putc	'K'
+
+	subl	%a6,%a6		/* clear a6 for gdb */
+
+/*
+ * The new 64bit printf support requires an early exception initialization.
+ */
+	jbsr	base_trap_init
+
+/* jump to the kernel start */
+
+	putc	'\n'
+	leds	0x55
+
+	jbsr	start_kernel
+
+/*
+ * Find a tag record in the bootinfo structure
+ * The bootinfo structure is located right after the kernel bss
+ * Returns: d0: size (-1 if not found)
+ *          a0: data pointer (end-of-records if not found)
+ */
+func_start	get_bi_record,%d1
+
+	movel	ARG1,%d0
+	lea	%pc@(_end),%a0
+1:	tstw	%a0@(BIR_TAG)
+	jeq	3f
+	cmpw	%a0@(BIR_TAG),%d0
+	jeq	2f
+	addw	%a0@(BIR_SIZE),%a0
+	jra	1b
+2:	moveq	#0,%d0
+	movew	%a0@(BIR_SIZE),%d0
+	lea	%a0@(BIR_DATA),%a0
+	jra	4f
+3:	moveq	#-1,%d0
+	lea	%a0@(BIR_SIZE),%a0
+4:
+func_return	get_bi_record
+
+
+/*
+ *	MMU Initialization Begins Here
+ *
+ *	The structure of the MMU tables on the 68k machines
+ *	is thus:
+ *	Root Table
+ *		Logical addresses are translated through
+ *	a hierarchical translation mechanism where the high-order
+ *	seven bits of the logical address (LA) are used as an
+ *	index into the "root table."  Each entry in the root
+ *	table has a bit which specifies if it's a valid pointer to a
+ *	pointer table.  Each entry defines a 32 Meg range of memory.
+ *	If an entry is invalid then that logical range of 32M is
+ *	invalid and references to that range of memory (when the MMU
+ *	is enabled) will fault.  If the entry is valid, then it does
+ *	one of two things.  On 040/060 class machines, it points to
+ *	a pointer table which then describes more finely the memory
+ *	within that 32M range.  On 020/030 class machines, a technique
+ *	called "early terminating descriptors" are used.  This technique
+ *	allows an entire 32Meg to be described by a single entry in the
+ *	root table.  Thus, this entry in the root table contains the
+ *	physical address of the memory or I/O at the logical address
+ *	which the entry represents and it also contains the necessary
+ *	cache bits for this region.
+ *
+ *	Pointer Tables
+ *		Per the Root Table, there will be one or more
+ *	pointer tables.  Each pointer table defines a 32M range.
+ *	Not all of the 32M range need be defined.  Again, the next
+ *	seven bits of the logical address are used as an index into
+ *	the pointer table to point to page tables (if the pointer
+ *	is valid).  There will undoubtedly be more than one
+ *	pointer table for the kernel because each pointer table
+ *	defines a range of only 32M.  Valid pointer table entries
+ *	point to page tables, or are early terminating entries
+ *	themselves.
+ *
+ *	Page Tables
+ *		Per the Pointer Tables, each page table entry points
+ *	to the physical page in memory that supports the logical
+ *	address that translates to the particular index.
+ *
+ *	In short, the Logical Address gets translated as follows:
+ *		bits 31..25 - index into the Root Table
+ *		bits 24..18 - index into the Pointer Table
+ *		bits 17..12 - index into the Page Table
+ *		bits 11..0  - offset into a particular 4K page
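+ *
+ *	(Worked out with the ROOT/PTR/PAGE_INDEX_SHIFT constants defined
+ *	earlier in this file:
+ *		root index    = LA >> 25           (7 bits, 128 entries)
+ *		pointer index = (LA >> 18) & 0x7f  (7 bits, 128 entries)
+ *		page index    = (LA >> 12) & 0x3f  (6 bits,  64 entries)
+ *		offset        = LA & 0xfff
+ *	so a logical address of 0x80000000, for instance, selects root table
+ *	entry 64 and index 0 in the lower tables.)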
+ *
+ *	The algorithms which follow do one thing: they abstract
+ *	the MMU hardware.  For example, there are three kinds of
+ *	cache settings that are relevant.  Either, memory is
+ *	being mapped in which case it is either Kernel Code (or
+ *	the RamDisk) or it is MMU data.  On the 030, the MMU data
+ *	option also describes the kernel.  Or, I/O is being mapped
+ *	in which case it has its own kind of cache bits.  There
+ *	are constants which abstract these notions from the code that
+ *	actually makes the call to map some range of memory.
+ *
+ *
+ *
+ */
+
+#ifdef MMU_PRINT
+/*
+ *	mmu_print
+ *
+ *	This algorithm will print out the current MMU mappings.
+ *
+ *	Input:
+ *		%a5 points to the root table.  Everything else is calculated
+ *			from this.
+ */
+
+#define mmu_next_valid		0
+#define mmu_start_logical	4
+#define mmu_next_logical	8
+#define mmu_start_physical	12
+#define mmu_next_physical	16
+
+#define MMU_PRINT_INVALID		-1
+#define MMU_PRINT_VALID			1
+#define MMU_PRINT_UNINITED		0
+
+#define putZc(z,n)		jbne 1f; putc z; jbra 2f; 1: putc n; 2:
+
+func_start	mmu_print,%a0-%a6/%d0-%d7
+
+	movel	%pc@(L(kernel_pgdir_ptr)),%a5
+	lea	%pc@(L(mmu_print_data)),%a0
+	movel	#MMU_PRINT_UNINITED,%a0@(mmu_next_valid)
+
+	is_not_040_or_060(mmu_030_print)
+
+mmu_040_print:
+	puts	"\nMMU040\n"
+	puts	"rp:"
+	putn	%a5
+	putc	'\n'
+#if 0
+	/*
+	 * The following #if/#endif block is a tight algorithm for dumping the 040
+	 * MMU Map in gory detail.  It really isn't that practical unless the
+	 * MMU Map algorithm appears to go awry and you need to debug it at the
+	 * entry per entry level.
+	 */
+	movel	#ROOT_TABLE_SIZE,%d5
+#if 0
+	movel	%a5@+,%d7		| Burn an entry to skip the kernel mappings,
+	subql	#1,%d5			| they (might) work
+#endif
+1:	tstl	%d5
+	jbeq	mmu_print_done
+	subq	#1,%d5
+	movel	%a5@+,%d7
+	btst	#1,%d7
+	jbeq	1b
+
+2:	putn	%d7
+	andil	#0xFFFFFE00,%d7
+	movel	%d7,%a4
+	movel	#PTR_TABLE_SIZE,%d4
+	putc	' '
+3:	tstl	%d4
+	jbeq	11f
+	subq	#1,%d4
+	movel	%a4@+,%d7
+	btst	#1,%d7
+	jbeq	3b
+
+4:	putn	%d7
+	andil	#0xFFFFFF00,%d7
+	movel	%d7,%a3
+	movel	#PAGE_TABLE_SIZE,%d3
+5:	movel	#8,%d2
+6:	tstl	%d3
+	jbeq	31f
+	subq	#1,%d3
+	movel	%a3@+,%d6
+	btst	#0,%d6
+	jbeq	6b
+7:	tstl	%d2
+	jbeq	8f
+	subq	#1,%d2
+	putc	' '
+	jbra	91f
+8:	putc	'\n'
+	movel	#8+1+8+1+1,%d2
+9:	putc	' '
+	dbra	%d2,9b
+	movel	#7,%d2
+91:	putn	%d6
+	jbra	6b
+
+31:	putc	'\n'
+	movel	#8+1,%d2
+32:	putc	' '
+	dbra	%d2,32b
+	jbra	3b
+
+11:	putc	'\n'
+	jbra	1b
+#endif /* MMU 040 Dumping code that's gory and detailed */
+
+	lea	%pc@(kernel_pg_dir),%a5
+	movel	%a5,%a0			/* a0 has the address of the root table ptr */
+	movel	#0x00000000,%a4		/* logical address */
+	moveql	#0,%d0
+40:
+	/* Increment the logical address and preserve in d5 */
+	movel	%a4,%d5
+	addil	#PAGESIZE<<13,%d5
+	movel	%a0@+,%d6
+	btst	#1,%d6
+	jbne	41f
+	jbsr	mmu_print_tuple_invalidate
+	jbra	48f
+41:
+	movel	#0,%d1
+	andil	#0xfffffe00,%d6
+	movel	%d6,%a1
+42:
+	movel	%a4,%d5
+	addil	#PAGESIZE<<6,%d5
+	movel	%a1@+,%d6
+	btst	#1,%d6
+	jbne	43f
+	jbsr	mmu_print_tuple_invalidate
+	jbra	47f
+43:
+	movel	#0,%d2
+	andil	#0xffffff00,%d6
+	movel	%d6,%a2
+44:
+	movel	%a4,%d5
+	addil	#PAGESIZE,%d5
+	movel	%a2@+,%d6
+	btst	#0,%d6
+	jbne	45f
+	jbsr	mmu_print_tuple_invalidate
+	jbra	46f
+45:
+	moveml	%d0-%d1,%sp@-
+	movel	%a4,%d0
+	movel	%d6,%d1
+	andil	#0xfffff4e0,%d1
+	lea	%pc@(mmu_040_print_flags),%a6
+	jbsr	mmu_print_tuple
+	moveml	%sp@+,%d0-%d1
+46:
+	movel	%d5,%a4
+	addq	#1,%d2
+	cmpib	#64,%d2
+	jbne	44b
+47:
+	movel	%d5,%a4
+	addq	#1,%d1
+	cmpib	#128,%d1
+	jbne	42b
+48:
+	movel	%d5,%a4			/* move to the next logical address */
+	addq	#1,%d0
+	cmpib	#128,%d0
+	jbne	40b
+
+	.chip	68040
+	movec	%dtt1,%d0
+	movel	%d0,%d1
+	andiw	#0x8000,%d1		/* is it valid ? */
+	jbeq	1f			/* No, bail out */
+
+	movel	%d0,%d1
+	andil	#0xff000000,%d1		/* Get the address */
+	putn	%d1
+	puts	"=="
+	putn	%d1
+
+	movel	%d0,%d6
+	jbsr	mmu_040_print_flags_tt
+1:
+	movec	%dtt0,%d0
+	movel	%d0,%d1
+	andiw	#0x8000,%d1		/* is it valid ? */
+	jbeq	1f			/* No, bail out */
+
+	movel	%d0,%d1
+	andil	#0xff000000,%d1		/* Get the address */
+	putn	%d1
+	puts	"=="
+	putn	%d1
+
+	movel	%d0,%d6
+	jbsr	mmu_040_print_flags_tt
+1:
+	.chip	68k
+
+	jbra	mmu_print_done
+
+mmu_040_print_flags:
+	btstl	#10,%d6
+	putZc(' ','G')	/* global bit */
+	btstl	#7,%d6
+	putZc(' ','S')	/* supervisor bit */
+mmu_040_print_flags_tt:
+	btstl	#6,%d6
+	jbne	3f
+	putc	'C'
+	btstl	#5,%d6
+	putZc('w','c')	/* write through or copy-back */
+	jbra	4f
+3:
+	putc	'N'
+	btstl	#5,%d6
+	putZc('s',' ')	/* serialized non-cacheable, or non-cacheable */
+4:
+	rts
+
+mmu_030_print_flags:
+	btstl	#6,%d6
+	putZc('C','I')	/* write through or copy-back */
+	rts
+
+mmu_030_print:
+	puts	"\nMMU030\n"
+	puts	"\nrp:"
+	putn	%a5
+	putc	'\n'
+	movel	%a5,%d0
+	andil	#0xfffffff0,%d0
+	movel	%d0,%a0
+	movel	#0x00000000,%a4		/* logical address */
+	movel	#0,%d0
+30:
+	movel	%a4,%d5
+	addil	#PAGESIZE<<13,%d5
+	movel	%a0@+,%d6
+	btst	#1,%d6			/* is it a table ptr? */
+	jbne	31f			/* yes */
+	btst	#0,%d6			/* is it early terminating? */
+	jbeq	1f			/* no */
+	jbsr	mmu_030_print_helper
+	jbra	38f
+1:
+	jbsr	mmu_print_tuple_invalidate
+	jbra	38f
+31:
+	movel	#0,%d1
+	andil	#0xfffffff0,%d6
+	movel	%d6,%a1
+32:
+	movel	%a4,%d5
+	addil	#PAGESIZE<<6,%d5
+	movel	%a1@+,%d6
+	btst	#1,%d6			/* is it a table ptr? */
+	jbne	33f			/* yes */
+	btst	#0,%d6			/* is it a page descriptor? */
+	jbeq	1f			/* no */
+	jbsr	mmu_030_print_helper
+	jbra	37f
+1:
+	jbsr	mmu_print_tuple_invalidate
+	jbra	37f
+33:
+	movel	#0,%d2
+	andil	#0xfffffff0,%d6
+	movel	%d6,%a2
+34:
+	movel	%a4,%d5
+	addil	#PAGESIZE,%d5
+	movel	%a2@+,%d6
+	btst	#0,%d6
+	jbne	35f
+	jbsr	mmu_print_tuple_invalidate
+	jbra	36f
+35:
+	jbsr	mmu_030_print_helper
+36:
+	movel	%d5,%a4
+	addq	#1,%d2
+	cmpib	#64,%d2
+	jbne	34b
+37:
+	movel	%d5,%a4
+	addq	#1,%d1
+	cmpib	#128,%d1
+	jbne	32b
+38:
+	movel	%d5,%a4			/* move to the next logical address */
+	addq	#1,%d0
+	cmpib	#128,%d0
+	jbne	30b
+
+mmu_print_done:
+	puts	"\n\n"
+
+func_return	mmu_print
+
+
+mmu_030_print_helper:
+	moveml	%d0-%d1,%sp@-
+	movel	%a4,%d0
+	movel	%d6,%d1
+	lea	%pc@(mmu_030_print_flags),%a6
+	jbsr	mmu_print_tuple
+	moveml	%sp@+,%d0-%d1
+	rts
+
+mmu_print_tuple_invalidate:
+	moveml	%a0/%d7,%sp@-
+
+	lea	%pc@(L(mmu_print_data)),%a0
+	tstl	%a0@(mmu_next_valid)
+	jbmi	mmu_print_tuple_invalidate_exit
+
+	movel	#MMU_PRINT_INVALID,%a0@(mmu_next_valid)
+
+	putn	%a4
+
+	puts	"##\n"
+
+mmu_print_tuple_invalidate_exit:
+	moveml	%sp@+,%a0/%d7
+	rts
+
+
+mmu_print_tuple:
+	moveml	%d0-%d7/%a0,%sp@-
+
+	lea	%pc@(L(mmu_print_data)),%a0
+
+	tstl	%a0@(mmu_next_valid)
+	jble	mmu_print_tuple_print
+
+	cmpl	%a0@(mmu_next_physical),%d1
+	jbeq	mmu_print_tuple_increment
+
+mmu_print_tuple_print:
+	putn	%d0
+	puts	"->"
+	putn	%d1
+
+	movel	%d1,%d6
+	jbsr	%a6@
+
+mmu_print_tuple_record:
+	movel	#MMU_PRINT_VALID,%a0@(mmu_next_valid)
+
+	movel	%d1,%a0@(mmu_next_physical)
+
+mmu_print_tuple_increment:
+	movel	%d5,%d7
+	subl	%a4,%d7
+	addl	%d7,%a0@(mmu_next_physical)
+
+mmu_print_tuple_exit:
+	moveml	%sp@+,%d0-%d7/%a0
+	rts
+
+mmu_print_machine_cpu_types:
+	puts	"machine: "
+
+	is_not_amiga(1f)
+	puts	"amiga"
+	jbra	9f
+1:
+	is_not_atari(2f)
+	puts	"atari"
+	jbra	9f
+2:
+	is_not_mac(3f)
+	puts	"macintosh"
+	jbra	9f
+3:	puts	"unknown"
+9:	putc	'\n'
+
+	puts	"cputype: 0"
+	is_not_060(1f)
+	putc	'6'
+	jbra	9f
+1:
+	is_not_040_or_060(2f)
+	putc	'4'
+	jbra	9f
+2:	putc	'3'
+9:	putc	'0'
+	putc	'\n'
+
+	rts
+#endif /* MMU_PRINT */
+
+/*
+ * mmu_map_tt
+ *
+ * This function works on all 680x0 machines.
+ * On the 030, 040 & 060 it will attempt to use the Transparent
+ * Translation registers (tt0/tt1).
+ * On 020 it will call the standard mmu_map which will use early
+ * terminating descriptors.
+ */
+func_start	mmu_map_tt,%d0/%d1/%a0,4
+
+	dputs	"mmu_map_tt:"
+	dputn	ARG1
+	dputn	ARG2
+	dputn	ARG3
+	dputn	ARG4
+	dputc	'\n'
+
+	is_020(L(do_map))
+
+	/* Extract the highest bit set
+	 */
+	bfffo	ARG3{#0,#32},%d1
+	cmpw	#8,%d1
+	jcc	L(do_map)
+
+	/* And get the mask
+	 */
+	moveq	#-1,%d0
+	lsrl	%d1,%d0
+	lsrl	#1,%d0
+
+	/* Mask the address
+	 */
+	movel	%d0,%d1
+	notl	%d1
+	andl	ARG2,%d1
+
+	/* Generate the upper 16bit of the tt register
+	 */
+	lsrl	#8,%d0
+	orl	%d0,%d1
+	clrw	%d1
+
+	is_040_or_060(L(mmu_map_tt_040))
+
+	/* set 030 specific bits (read/write access for supervisor mode
+	 * (highest function code set, lower two bits masked))
+	 */
+	orw	#TTR_ENABLE+TTR_RWM+TTR_FCB2+TTR_FCM1+TTR_FCM0,%d1
+	movel	ARG4,%d0
+	btst	#6,%d0
+	jeq	1f
+	orw	#TTR_CI,%d1
+
+1:	lea	STACK,%a0
+	dputn	%d1
+	movel	%d1,%a0@
+	.chip	68030
+	tstl	ARG1
+	jne	1f
+	pmove	%a0@,%tt0
+	jra	2f
+1:	pmove	%a0@,%tt1
+2:	.chip	68k
+	jra	L(mmu_map_tt_done)
+
+	/* set 040 specific bits
+	 */
+L(mmu_map_tt_040):
+	orw	#TTR_ENABLE+TTR_KERNELMODE,%d1
+	orl	ARG4,%d1
+	dputn	%d1
+
+	.chip	68040
+	tstl	ARG1
+	jne	1f
+	movec	%d1,%itt0
+	movec	%d1,%dtt0
+	jra	2f
+1:	movec	%d1,%itt1
+	movec	%d1,%dtt1
+2:	.chip	68k
+
+	jra	L(mmu_map_tt_done)
+
+L(do_map):
+	mmu_map_eq	ARG2,ARG3,ARG4
+
+L(mmu_map_tt_done):
+
+func_return	mmu_map_tt
+
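For orientation, here is a rough C equivalent of the value the 040/060 branch of mmu_map_tt above assembles before loading the transparent-translation registers. It is a sketch only: the enable, kernel-mode and cache bits are passed in as `lowbits' instead of being spelled out with the kernel's TTR_* constants.

static unsigned long make_ttr_sketch(unsigned long addr, unsigned long size,
				     unsigned long lowbits)
{
	unsigned int lz;
	unsigned long mask, ttr;

	if (size == 0)
		return 0;
	lz = __builtin_clz((unsigned int)size);	/* bfffo: leading zero count   */
	if (lz >= 8)		/* block below 16MB: the assembly falls back   */
		return 0;	/* to the ordinary mmu_map path (L(do_map))    */

	mask = 0xffffffffUL >> (lz + 1);	/* bits spanned by the block   */
	ttr  = (addr & ~mask) | (mask >> 8);	/* base and address-mask field */
	ttr &= 0xffff0000UL;			/* clrw: keep the upper word   */
	return ttr | lowbits;			/* TTR_ENABLE etc. from caller */
}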
+/*
+ *	mmu_map
+ *
 *	This routine will map a range of memory using a pointer
 *	table, allocating the pages on the fly from the kernel.
 *	The pointer table does not have to be linked into the root
 *	table already; this routine will do that if necessary.
+ *
+ *	NOTE
+ *	This routine will assert failure and use the serial_putc
+ *	routines in the case of a run-time error.  For example,
+ *	if the address is already mapped.
+ *
+ *	NOTE-2
+ *	This routine will use early terminating descriptors
+ *	where possible for the 68020+68851 and 68030 type
+ *	processors.
+ */
+func_start	mmu_map,%d0-%d4/%a0-%a4
+
+	dputs	"\nmmu_map:"
+	dputn	ARG1
+	dputn	ARG2
+	dputn	ARG3
+	dputn	ARG4
+	dputc	'\n'
+
+	/* Get logical address and round it down to 256KB
+	 */
+	movel	ARG1,%d0
+	andl	#-(PAGESIZE*PAGE_TABLE_SIZE),%d0
+	movel	%d0,%a3
+
+	/* Get the end address
+	 */
+	movel	ARG1,%a4
+	addl	ARG3,%a4
+	subql	#1,%a4
+
+	/* Get physical address and round it down to 256KB
+	 */
+	movel	ARG2,%d0
+	andl	#-(PAGESIZE*PAGE_TABLE_SIZE),%d0
+	movel	%d0,%a2
+
+	/* Add page attributes to the physical address
+	 */
+	movel	ARG4,%d0
+	orw	#_PAGE_PRESENT+_PAGE_ACCESSED+_PAGE_DIRTY,%d0
+	addw	%d0,%a2
+
+	dputn	%a2
+	dputn	%a3
+	dputn	%a4
+
+	is_not_040_or_060(L(mmu_map_030))
+
+	addw	#_PAGE_GLOBAL040,%a2
+/*
+ *	MMU 040 & 060 Support
+ *
+ *	The MMU usage for the 040 and 060 is different enough from
+ *	the 030 and 68851 that there is separate code.  This comment
+ *	block describes the data structures and algorithms built by
+ *	this code.
+ *
+ *	The 040 does not support early terminating descriptors, as
+ *	the 030 does.  Therefore, a third level of table is needed
+ *	for the 040, and that would be the page table.  In Linux,
+ *	page tables are allocated directly from the memory above the
+ *	kernel.
+ *
+ */
+
+L(mmu_map_040):
+	/* Calculate the offset into the root table
+	 */
+	movel	%a3,%d0
+	moveq	#ROOT_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	mmu_get_root_table_entry	%d0
+
+	/* Calculate the offset into the pointer table
+	 */
+	movel	%a3,%d0
+	moveq	#PTR_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PTR_TABLE_SIZE-1,%d0
+	mmu_get_ptr_table_entry		%a0,%d0
+
+	/* Calculate the offset into the page table
+	 */
+	movel	%a3,%d0
+	moveq	#PAGE_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PAGE_TABLE_SIZE-1,%d0
+	mmu_get_page_table_entry	%a0,%d0
+
+	/* The page table entry must not be busy
+	 */
+	tstl	%a0@
+	jne	L(mmu_map_error)
+
+	/* Do the mapping and advance the pointers
+	 */
+	movel	%a2,%a0@
+2:
+	addw	#PAGESIZE,%a2
+	addw	#PAGESIZE,%a3
+
+	/* Ready with mapping?
+	 */
+	lea	%a3@(-1),%a0
+	cmpl	%a0,%a4
+	jhi	L(mmu_map_040)
+	jra	L(mmu_map_done)
+
+L(mmu_map_030):
+	/* Calculate the offset into the root table
+	 */
+	movel	%a3,%d0
+	moveq	#ROOT_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	mmu_get_root_table_entry	%d0
+
+	/* Check if logical address 32MB aligned,
+	 * so we can try to map it once
+	 */
+	movel	%a3,%d0
+	andl	#(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE-1)&(-ROOT_TABLE_SIZE),%d0
+	jne	1f
+
+	/* Is there enough to map for 32MB at once
+	 */
+	lea	%a3@(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE-1),%a1
+	cmpl	%a1,%a4
+	jcs	1f
+
+	addql	#1,%a1
+
+	/* The root table entry must not be busy
+	 */
+	tstl	%a0@
+	jne	L(mmu_map_error)
+
+	/* Do the mapping and advance the pointers
+	 */
+	dputs	"early term1"
+	dputn	%a2
+	dputn	%a3
+	dputn	%a1
+	dputc	'\n'
+	movel	%a2,%a0@
+
+	movel	%a1,%a3
+	lea	%a2@(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE),%a2
+	jra	L(mmu_mapnext_030)
+1:
+	/* Calculate the offset into the pointer table
+	 */
+	movel	%a3,%d0
+	moveq	#PTR_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PTR_TABLE_SIZE-1,%d0
+	mmu_get_ptr_table_entry		%a0,%d0
+
+	/* The pointer table entry must not be busy
+	 */
+	tstl	%a0@
+	jne	L(mmu_map_error)
+
+	/* Do the mapping and advance the pointers
+	 */
+	dputs	"early term2"
+	dputn	%a2
+	dputn	%a3
+	dputc	'\n'
+	movel	%a2,%a0@
+
+	addl	#PAGE_TABLE_SIZE*PAGESIZE,%a2
+	addl	#PAGE_TABLE_SIZE*PAGESIZE,%a3
+
+L(mmu_mapnext_030):
+	/* Ready with mapping?
+	 */
+	lea	%a3@(-1),%a0
+	cmpl	%a0,%a4
+	jhi	L(mmu_map_030)
+	jra	L(mmu_map_done)
+
+L(mmu_map_error):
+
+	dputs	"mmu_map error:"
+	dputn	%a2
+	dputn	%a3
+	dputc	'\n'
+
+L(mmu_map_done):
+
+func_return	mmu_map
+
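In rough C terms, the 040 branch of mmu_map above selects one entry per table level with the index arithmetic below. The shift values mirror the 7-bit root / 7-bit pointer / 6-bit page index split with 4096-byte pages described further down in this file; they are assumptions standing in for the kernel's ROOT/PTR/PAGE_INDEX_SHIFT constants, shown for illustration only.

#define SK_PAGE_SHIFT	12			/* 4096-byte pages          */
#define SK_PTR_SHIFT	(SK_PAGE_SHIFT + 6)	/* 64-entry page tables     */
#define SK_ROOT_SHIFT	(SK_PTR_SHIFT + 7)	/* 128-entry pointer tables */

static void split_vaddr(unsigned long vaddr, unsigned int *root,
			unsigned int *ptr, unsigned int *page)
{
	*root = vaddr >> SK_ROOT_SHIFT;			/* top 7 bits  */
	*ptr  = (vaddr >> SK_PTR_SHIFT) & 0x7f;		/* next 7 bits */
	*page = (vaddr >> SK_PAGE_SHIFT) & 0x3f;	/* next 6 bits */
	/* mmu_map then stores phys | _PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_DIRTY
	 * into the page-table slot selected by these three indices. */
}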
+/*
+ *	mmu_fixup
+ *
+ *	On the 040 class machines, all pages that are used for the
+ *	mmu have to be fixed up.
+ */
+
+func_start	mmu_fixup_page_mmu_cache,%d0/%a0
+
+	dputs	"mmu_fixup_page_mmu_cache"
+	dputn	ARG1
+
+	/* Calculate the offset into the root table
+	 */
+	movel	ARG1,%d0
+	moveq	#ROOT_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	mmu_get_root_table_entry	%d0
+
+	/* Calculate the offset into the pointer table
+	 */
+	movel	ARG1,%d0
+	moveq	#PTR_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PTR_TABLE_SIZE-1,%d0
+	mmu_get_ptr_table_entry		%a0,%d0
+
+	/* Calculate the offset into the page table
+	 */
+	movel	ARG1,%d0
+	moveq	#PAGE_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PAGE_TABLE_SIZE-1,%d0
+	mmu_get_page_table_entry	%a0,%d0
+
+	movel	%a0@,%d0
+	andil	#_CACHEMASK040,%d0
+	orl	%pc@(m68k_pgtable_cachemode),%d0
+	movel	%d0,%a0@
+
+	dputc	'\n'
+
+func_return	mmu_fixup_page_mmu_cache
+
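A minimal C sketch of the fixup above: the descriptor's 040 cache-mode bits are masked off and replaced with the value in m68k_pgtable_cachemode. The mask and mode are passed in here rather than taken from the kernel's definitions.

static void fixup_pte_cachemode_sketch(unsigned long *pte,
				       unsigned long cachemask040,
				       unsigned long pgtable_cachemode)
{
	/* andil #_CACHEMASK040 followed by orl m68k_pgtable_cachemode */
	*pte = (*pte & cachemask040) | pgtable_cachemode;
}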
+/*
+ *	mmu_temp_map
+ *
+ *	Create a temporary mapping to enable the mmu; this way we
+ *	don't need any transparent translation tricks.
+ */
+
+func_start	mmu_temp_map,%d0/%d1/%a0/%a1
+
+	dputs	"mmu_temp_map"
+	dputn	ARG1
+	dputn	ARG2
+	dputc	'\n'
+
+	lea	%pc@(L(temp_mmap_mem)),%a1
+
+	/* Calculate the offset in the root table
+	 */
+	movel	ARG2,%d0
+	moveq	#ROOT_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	mmu_get_root_table_entry	%d0
+
+	/* Check if the table is temporary allocated, so we have to reuse it
+	 */
+	movel	%a0@,%d0
+	cmpl	%pc@(L(memory_start)),%d0
+	jcc	1f
+
+	/* Temporary allocate a ptr table and insert it into the root table
+	 */
+	movel	%a1@,%d0
+	addl	#PTR_TABLE_SIZE*4,%a1@
+	orw	#_PAGE_TABLE+_PAGE_ACCESSED,%d0
+	movel	%d0,%a0@
+	dputs	" (new)"
+1:
+	dputn	%d0
+	/* Mask the root table entry for the ptr table
+	 */
+	andw	#-ROOT_TABLE_SIZE,%d0
+	movel	%d0,%a0
+
+	/* Calculate the offset into the pointer table
+	 */
+	movel	ARG2,%d0
+	moveq	#PTR_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PTR_TABLE_SIZE-1,%d0
+	lea	%a0@(%d0*4),%a0
+	dputn	%a0
+
+	/* Check if a temporary page table is already allocated
+	 */
+	movel	%a0@,%d0
+	jne	1f
+
+	/* Temporary allocate a page table and insert it into the ptr table
+	 */
+	movel	%a1@,%d0
+	/* The 512 should be PAGE_TABLE_SIZE*4, but that violates the
+	   alignment restriction for pointer tables on the '0[46]0.  */
+	addl	#512,%a1@
+	orw	#_PAGE_TABLE+_PAGE_ACCESSED,%d0
+	movel	%d0,%a0@
+	dputs	" (new)"
+1:
+	dputn	%d0
+	/* Mask the ptr table entry for the page table
+	 */
+	andw	#-PTR_TABLE_SIZE,%d0
+	movel	%d0,%a0
+
+	/* Calculate the offset into the page table
+	 */
+	movel	ARG2,%d0
+	moveq	#PAGE_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PAGE_TABLE_SIZE-1,%d0
+	lea	%a0@(%d0*4),%a0
+	dputn	%a0
+
+	/* Insert the address into the page table
+	 */
+	movel	ARG1,%d0
+	andw	#-PAGESIZE,%d0
+	orw	#_PAGE_PRESENT+_PAGE_ACCESSED+_PAGE_DIRTY,%d0
+	movel	%d0,%a0@
+	dputn	%d0
+
+	dputc	'\n'
+
+func_return	mmu_temp_map
+
+func_start	mmu_engage,%d0-%d2/%a0-%a3
+
+	moveq	#ROOT_TABLE_SIZE-1,%d0
+	/* Temporarily use a different root table.  */
+	lea	%pc@(L(kernel_pgdir_ptr)),%a0
+	movel	%a0@,%a2
+	movel	%pc@(L(memory_start)),%a1
+	movel	%a1,%a0@
+	movel	%a2,%a0
+1:
+	movel	%a0@+,%a1@+
+	dbra	%d0,1b
+
+	lea	%pc@(L(temp_mmap_mem)),%a0
+	movel	%a1,%a0@
+
+	movew	#PAGESIZE-1,%d0
+1:
+	clrl	%a1@+
+	dbra	%d0,1b
+
+	lea	%pc@(1b),%a0
+	movel	#1b,%a1
+	/* Skip temp mappings if phys == virt */
+	cmpl	%a0,%a1
+	jeq	1f
+
+	mmu_temp_map	%a0,%a0
+	mmu_temp_map	%a0,%a1
+
+	addw	#PAGESIZE,%a0
+	addw	#PAGESIZE,%a1
+	mmu_temp_map	%a0,%a0
+	mmu_temp_map	%a0,%a1
+1:
+	movel	%pc@(L(memory_start)),%a3
+	movel	%pc@(L(phys_kernel_start)),%d2
+
+	is_not_040_or_060(L(mmu_engage_030))
+
+L(mmu_engage_040):
+	.chip	68040
+	nop
+	cinva	%bc
+	nop
+	pflusha
+	nop
+	movec	%a3,%srp
+	movel	#TC_ENABLE+TC_PAGE4K,%d0
+	movec	%d0,%tc		/* enable the MMU */
+	jmp	1f:l
+1:	nop
+	movec	%a2,%srp
+	nop
+	cinva	%bc
+	nop
+	pflusha
+	.chip	68k
+	jra	L(mmu_engage_cleanup)
+
+L(mmu_engage_030_temp):
+	.space	12
+L(mmu_engage_030):
+	.chip	68030
+	lea	%pc@(L(mmu_engage_030_temp)),%a0
+	movel	#0x80000002,%a0@
+	movel	%a3,%a0@(4)
+	movel	#0x0808,%d0
+	movec	%d0,%cacr
+	pmove	%a0@,%srp
+	pflusha
+	/*
+	 * enable, super root enable, 4096 byte pages, 7 bit root index,
+	 * 7 bit pointer index, 6 bit page table index.
+	 */
+	movel	#0x82c07760,%a0@(8)
+	pmove	%a0@(8),%tc	/* enable the MMU */
+	jmp	1f:l
+1:	movel	%a2,%a0@(4)
+	movel	#0x0808,%d0
+	movec	%d0,%cacr
+	pmove	%a0@,%srp
+	pflusha
+	.chip	68k
+
+L(mmu_engage_cleanup):
+	subl	#PAGE_OFFSET,%d2
+	subl	%d2,%a2
+	movel	%a2,L(kernel_pgdir_ptr)
+	subl	%d2,%fp
+	subl	%d2,%sp
+	subl	%d2,ARG0
+
+func_return	mmu_engage
+
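The 030 translation-control value written above, 0x82c07760, is simply the fields from the inline comment packed together. A sketch of the composition; the bit positions follow the 68030 TC register layout and are local assumptions made for illustration, not constants defined in this file.

/* assumed 68030 TC field positions, for illustration only */
#define TC_E(x)		((unsigned long)(x) << 31)	/* enable            */
#define TC_SRE(x)	((unsigned long)(x) << 25)	/* super root enable */
#define TC_PS(x)	((unsigned long)(x) << 20)	/* page size = 2^x   */
#define TC_IS(x)	((unsigned long)(x) << 16)	/* initial shift     */
#define TC_TIA(x)	((unsigned long)(x) << 12)	/* root index bits   */
#define TC_TIB(x)	((unsigned long)(x) << 8)	/* pointer index bits*/
#define TC_TIC(x)	((unsigned long)(x) << 4)	/* page index bits   */

/* enable, SRE, 4K pages, 7-bit root, 7-bit ptr, 6-bit page == 0x82c07760 */
static const unsigned long tc_030_sketch =
	TC_E(1) | TC_SRE(1) | TC_PS(12) | TC_IS(0) |
	TC_TIA(7) | TC_TIB(7) | TC_TIC(6);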
+func_start	mmu_get_root_table_entry,%d0/%a1
+
+#if 0
+	dputs	"mmu_get_root_table_entry:"
+	dputn	ARG1
+	dputs	" ="
+#endif
+
+	movel	%pc@(L(kernel_pgdir_ptr)),%a0
+	tstl	%a0
+	jne	2f
+
+	dputs	"\nmmu_init:"
+
+	/* Find the start of free memory; get_bi_record does this for us,
+	 * as the bootinfo structure is located directly behind the kernel
+	 * and we simply search for the last entry.
+	 */
+	get_bi_record	BI_LAST
+	addw	#PAGESIZE-1,%a0
+	movel	%a0,%d0
+	andw	#-PAGESIZE,%d0
+
+	dputn	%d0
+
+	lea	%pc@(L(memory_start)),%a0
+	movel	%d0,%a0@
+	lea	%pc@(L(kernel_end)),%a0
+	movel	%d0,%a0@
+
+	/* We have to return the first page at _stext since the init code
+	 * in mm/init.c simply expects kernel_pg_dir there; the rest of the
+	 * page is used for further ptr tables in get_ptr_table.
+	 */
+	lea	%pc@(_stext),%a0
+	lea	%pc@(L(mmu_cached_pointer_tables)),%a1
+	movel	%a0,%a1@
+	addl	#ROOT_TABLE_SIZE*4,%a1@
+
+	lea	%pc@(L(mmu_num_pointer_tables)),%a1
+	addql	#1,%a1@
+
+	/* clear the page
+	 */
+	movel	%a0,%a1
+	movew	#PAGESIZE/4-1,%d0
+1:
+	clrl	%a1@+
+	dbra	%d0,1b
+
+	lea	%pc@(L(kernel_pgdir_ptr)),%a1
+	movel	%a0,%a1@
+
+	dputn	%a0
+	dputc	'\n'
+2:
+	movel	ARG1,%d0
+	lea	%a0@(%d0*4),%a0
+
+#if 0
+	dputn	%a0
+	dputc	'\n'
+#endif
+
+func_return	mmu_get_root_table_entry
+
+
+
+func_start	mmu_get_ptr_table_entry,%d0/%a1
+
+#if 0
+	dputs	"mmu_get_ptr_table_entry:"
+	dputn	ARG1
+	dputn	ARG2
+	dputs	" ="
+#endif
+
+	movel	ARG1,%a0
+	movel	%a0@,%d0
+	jne	2f
+
+	/* Keep track of the number of pointer tables we use
+	 */
+	dputs	"\nmmu_get_new_ptr_table:"
+	lea	%pc@(L(mmu_num_pointer_tables)),%a0
+	movel	%a0@,%d0
+	addql	#1,%a0@
+
+	/* See if there is a free pointer table in our cache of pointer tables
+	 */
+	lea	%pc@(L(mmu_cached_pointer_tables)),%a1
+	andw	#7,%d0
+	jne	1f
+
+	/* Get a new pointer table page from above the kernel memory
+	 */
+	get_new_page
+	movel	%a0,%a1@
+1:
+	/* There is an unused pointer table in our cache... use it
+	 */
+	movel	%a1@,%d0
+	addl	#PTR_TABLE_SIZE*4,%a1@
+
+	dputn	%d0
+	dputc	'\n'
+
+	/* Insert the new pointer table into the root table
+	 */
+	movel	ARG1,%a0
+	orw	#_PAGE_TABLE+_PAGE_ACCESSED,%d0
+	movel	%d0,%a0@
+2:
+	/* Extract the pointer table entry
+	 */
+	andw	#-PTR_TABLE_SIZE,%d0
+	movel	%d0,%a0
+	movel	ARG2,%d0
+	lea	%a0@(%d0*4),%a0
+
+#if 0
+	dputn	%a0
+	dputc	'\n'
+#endif
+
+func_return	mmu_get_ptr_table_entry
+
+
+func_start	mmu_get_page_table_entry,%d0/%a1
+
+#if 0
+	dputs	"mmu_get_page_table_entry:"
+	dputn	ARG1
+	dputn	ARG2
+	dputs	" ="
+#endif
+
+	movel	ARG1,%a0
+	movel	%a0@,%d0
+	jne	2f
+
+	/* If the page table entry doesn't exist, we allocate a completely new
+	 * page and use it as one contiguous big page table, which can cover
+	 * 4MB of memory; nearly all mappings have that alignment.
+	 */
+	get_new_page
+	addw	#_PAGE_TABLE+_PAGE_ACCESSED,%a0
+
+	/* align pointer table entry for a page of page tables
+	 */
+	movel	ARG1,%d0
+	andw	#-(PAGESIZE/PAGE_TABLE_SIZE),%d0
+	movel	%d0,%a1
+
+	/* Insert the page tables into the pointer entries
+	 */
+	moveq	#PAGESIZE/PAGE_TABLE_SIZE/4-1,%d0
+1:
+	movel	%a0,%a1@+
+	lea	%a0@(PAGE_TABLE_SIZE*4),%a0
+	dbra	%d0,1b
+
+	/* Now we can get the initialized pointer table entry
+	 */
+	movel	ARG1,%a0
+	movel	%a0@,%d0
+2:
+	/* Extract the page table entry
+	 */
+	andw	#-PAGE_TABLE_SIZE,%d0
+	movel	%d0,%a0
+	movel	ARG2,%d0
+	lea	%a0@(%d0*4),%a0
+
+#if 0
+	dputn	%a0
+	dputc	'\n'
+#endif
+
+func_return	mmu_get_page_table_entry
+
+/*
+ *	get_new_page
+ *
+ *	Return a new page from the memory start and clear it.
+ */
+func_start	get_new_page,%d0/%a1
+
+	dputs	"\nget_new_page:"
+
+	/* allocate the page and adjust memory_start
+	 */
+	lea	%pc@(L(memory_start)),%a0
+	movel	%a0@,%a1
+	addl	#PAGESIZE,%a0@
+
+	/* clear the new page
+	 */
+	movel	%a1,%a0
+	movew	#PAGESIZE/4-1,%d0
+1:
+	clrl	%a1@+
+	dbra	%d0,1b
+
+	dputn	%a0
+	dputc	'\n'
+
+func_return	get_new_page
+
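A C-level sketch of get_new_page above: a simple bump allocator over the memory-start pointer that returns a zeroed page. The variable and page size below are stand-ins for L(memory_start) and PAGESIZE.

#include <string.h>

#define SKETCH_PAGESIZE 4096

static unsigned long memory_start_sketch;	/* stand-in for L(memory_start) */

static void *get_new_page_sketch(void)
{
	void *page = (void *)memory_start_sketch;

	memory_start_sketch += SKETCH_PAGESIZE;	/* bump the allocation pointer */
	memset(page, 0, SKETCH_PAGESIZE);	/* the clrl loop in the assembly */
	return page;
}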
+
+
+/*
+ * Debug output support
+ * Atarians have a choice between the parallel port, the serial port
+ * from the MFP or a serial port of the SCC
+ */
+
+#ifdef CONFIG_MAC
+
+L(scc_initable_mac):
+	.byte	9,12		/* Reset */
+	.byte	4,0x44		/* x16, 1 stopbit, no parity */
+	.byte	3,0xc0		/* receiver: 8 bpc */
+	.byte	5,0xe2		/* transmitter: 8 bpc, assert dtr/rts */
+	.byte	9,0		/* no interrupts */
+	.byte	10,0		/* NRZ */
+	.byte	11,0x50		/* use baud rate generator */
+	.byte	12,10,13,0	/* 9600 baud */
+	.byte	14,1		/* Baud rate generator enable */
+	.byte	3,0xc1		/* enable receiver */
+	.byte	5,0xea		/* enable transmitter */
+	.byte	-1
+	.even
+#endif
+
+#ifdef CONFIG_ATARI
+/* #define USE_PRINTER */
+/* #define USE_SCC_B */
+/* #define USE_SCC_A */
+#define USE_MFP
+
+#if defined(USE_SCC_A) || defined(USE_SCC_B)
+#define USE_SCC
+/* Initialisation table for SCC */
+L(scc_initable):
+	.byte	9,12		/* Reset */
+	.byte	4,0x44		/* x16, 1 stopbit, no parity */
+	.byte	3,0xc0		/* receiver: 8 bpc */
+	.byte	5,0xe2		/* transmitter: 8 bpc, assert dtr/rts */
+	.byte	9,0		/* no interrupts */
+	.byte	10,0		/* NRZ */
+	.byte	11,0x50		/* use baud rate generator */
+	.byte	12,24,13,0	/* 9600 baud */
+	.byte	14,2,14,3	/* use master clock for BRG, enable */
+	.byte	3,0xc1		/* enable receiver */
+	.byte	5,0xea		/* enable transmitter */
+	.byte	-1
+	.even
+#endif
+
+#ifdef USE_PRINTER
+
+LPSG_SELECT	= 0xff8800
+LPSG_READ	= 0xff8800
+LPSG_WRITE	= 0xff8802
+LPSG_IO_A	= 14
+LPSG_IO_B	= 15
+LPSG_CONTROL	= 7
+LSTMFP_GPIP	= 0xfffa01
+LSTMFP_DDR	= 0xfffa05
+LSTMFP_IERB	= 0xfffa09
+
+#elif defined(USE_SCC_B)
+
+LSCC_CTRL	= 0xff8c85
+LSCC_DATA	= 0xff8c87
+
+#elif defined(USE_SCC_A)
+
+LSCC_CTRL	= 0xff8c81
+LSCC_DATA	= 0xff8c83
+
+#elif defined(USE_MFP)
+
+LMFP_UCR     = 0xfffa29
+LMFP_TDCDR   = 0xfffa1d
+LMFP_TDDR    = 0xfffa25
+LMFP_TSR     = 0xfffa2d
+LMFP_UDR     = 0xfffa2f
+
+#endif
+#endif	/* CONFIG_ATARI */
+
+/*
+ * Serial port output support.
+ */
+
+/*
+ * Initialize serial port hardware for 9600/8/1
+ */
+func_start	serial_init,%d0/%d1/%a0/%a1
+	/*
+	 *	Some of the register usage that follows
+	 *	CONFIG_AMIGA
+	 *		a0 = pointer to boot info record
+	 *		d0 = boot info offset
+	 *	CONFIG_ATARI
+	 *		a0 = address of SCC
+	 *		a1 = Liobase address/address of scc_initable
+	 *		d0 = init data for serial port
+	 *	CONFIG_MAC
+	 *		a0 = address of SCC
+	 *		a1 = address of scc_initable_mac
+	 *		d0 = init data for serial port
+	 */
+
+#ifdef CONFIG_AMIGA
+#define SERIAL_DTR	7
+#define SERIAL_CNTRL	CIABBASE+C_PRA
+
+	is_not_amiga(1f)
+	lea	%pc@(L(custom)),%a0
+	movel	#-ZTWOBASE,%a0@
+	bclr	#SERIAL_DTR,SERIAL_CNTRL-ZTWOBASE
+	get_bi_record	BI_AMIGA_SERPER
+	movew	%a0@,CUSTOMBASE+C_SERPER-ZTWOBASE
+|	movew	#61,CUSTOMBASE+C_SERPER-ZTWOBASE
+1:
+#endif
+#ifdef CONFIG_ATARI
+	is_not_atari(4f)
+	movel	%pc@(L(iobase)),%a1
+#if defined(USE_PRINTER)
+	bclr	#0,%a1@(LSTMFP_IERB)
+	bclr	#0,%a1@(LSTMFP_DDR)
+	moveb	#LPSG_CONTROL,%a1@(LPSG_SELECT)
+	moveb	#0xff,%a1@(LPSG_WRITE)
+	moveb	#LPSG_IO_B,%a1@(LPSG_SELECT)
+	clrb	%a1@(LPSG_WRITE)
+	moveb	#LPSG_IO_A,%a1@(LPSG_SELECT)
+	moveb	%a1@(LPSG_READ),%d0
+	bset	#5,%d0
+	moveb	%d0,%a1@(LPSG_WRITE)
+#elif defined(USE_SCC)
+	lea	%a1@(LSCC_CTRL),%a0
+	lea	%pc@(L(scc_initable)),%a1
+2:	moveb	%a1@+,%d0
+	jmi	3f
+	moveb	%d0,%a0@
+	moveb	%a1@+,%a0@
+	jra	2b
+3:	clrb	%a0@
+#elif defined(USE_MFP)
+	bclr	#1,%a1@(LMFP_TSR)
+	moveb   #0x88,%a1@(LMFP_UCR)
+	andb	#0x70,%a1@(LMFP_TDCDR)
+	moveb   #2,%a1@(LMFP_TDDR)
+	orb	#1,%a1@(LMFP_TDCDR)
+	bset	#1,%a1@(LMFP_TSR)
+#endif
+	jra	L(serial_init_done)
+4:
+#endif
+#ifdef CONFIG_MAC
+	is_not_mac(L(serial_init_not_mac))
+#ifdef MAC_SERIAL_DEBUG
+#if !defined(MAC_USE_SCC_A) && !defined(MAC_USE_SCC_B)
+#define MAC_USE_SCC_B
+#endif
+#define mac_scc_cha_b_ctrl_offset	0x0
+#define mac_scc_cha_a_ctrl_offset	0x2
+#define mac_scc_cha_b_data_offset	0x4
+#define mac_scc_cha_a_data_offset	0x6
+
+#ifdef MAC_USE_SCC_A
+	/* Initialize channel A */
+	movel	%pc@(L(mac_sccbase)),%a0
+	lea	%pc@(L(scc_initable_mac)),%a1
+5:	moveb	%a1@+,%d0
+	jmi	6f
+	moveb	%d0,%a0@(mac_scc_cha_a_ctrl_offset)
+	moveb	%a1@+,%a0@(mac_scc_cha_a_ctrl_offset)
+	jra	5b
+6:
+#endif	/* MAC_USE_SCC_A */
+
+#ifdef MAC_USE_SCC_B
+	/* Initialize channel B */
+#ifndef MAC_USE_SCC_A	/* Load mac_sccbase only if needed */
+	movel	%pc@(L(mac_sccbase)),%a0
+#endif	/* MAC_USE_SCC_A */
+	lea	%pc@(L(scc_initable_mac)),%a1
+7:	moveb	%a1@+,%d0
+	jmi	8f
+	moveb	%d0,%a0@(mac_scc_cha_b_ctrl_offset)
+	moveb	%a1@+,%a0@(mac_scc_cha_b_ctrl_offset)
+	jra	7b
+8:
+#endif	/* MAC_USE_SCC_B */
+#endif	/* MAC_SERIAL_DEBUG */
+
+	jra	L(serial_init_done)
+L(serial_init_not_mac):
+#endif	/* CONFIG_MAC */
+
+#ifdef CONFIG_Q40
+	is_not_q40(2f)
+/* debug output goes into SRAM, so we don't do it unless requested
+   - check for '%LX$' signature in SRAM   */
+	lea	%pc@(q40_mem_cptr),%a1
+	move.l	#0xff020010,%a1@  /* must be inited - also used by debug=mem */
+	move.l	#0xff020000,%a1
+	cmp.b	#'%',%a1@
+	bne	2f	/*nodbg*/
+	addq.w	#4,%a1
+	cmp.b	#'L',%a1@
+	bne	2f	/*nodbg*/
+	addq.w	#4,%a1
+	cmp.b	#'X',%a1@
+	bne	2f	/*nodbg*/
+	addq.w	#4,%a1
+	cmp.b	#'$',%a1@
+	bne	2f	/*nodbg*/
+	/* signature OK */
+	lea	%pc@(L(q40_do_debug)),%a1
+	tas	%a1@
+/*nodbg: q40_do_debug is 0 by default*/
+2:
+#endif
+
+#ifdef CONFIG_APOLLO
+/* We count on the PROM initializing SIO1 */
+#endif
+
+#ifdef CONFIG_HP300
+/* We count on the boot loader initialising the UART */
+#endif
+
+L(serial_init_done):
+func_return	serial_init
+
+/*
+ * Output character on serial port.
+ */
+func_start	serial_putc,%d0/%d1/%a0/%a1
+
+	movel	ARG1,%d0
+	cmpib	#'\n',%d0
+	jbne	1f
+
+	/* A little safe recursion is good for the soul */
+	serial_putc	#'\r'
+1:
+
+#ifdef CONFIG_AMIGA
+	is_not_amiga(2f)
+	andw	#0x00ff,%d0
+	oriw	#0x0100,%d0
+	movel	%pc@(L(custom)),%a0
+	movew	%d0,%a0@(CUSTOMBASE+C_SERDAT)
+1:	movew	%a0@(CUSTOMBASE+C_SERDATR),%d0
+	andw	#0x2000,%d0
+	jeq	1b
+	jra	L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_MAC
+	is_not_mac(5f)
+
+#ifdef MAC_SERIAL_DEBUG
+
+#ifdef MAC_USE_SCC_A
+	movel	%pc@(L(mac_sccbase)),%a1
+3:	btst	#2,%a1@(mac_scc_cha_a_ctrl_offset)
+	jeq	3b
+	moveb	%d0,%a1@(mac_scc_cha_a_data_offset)
+#endif	/* MAC_USE_SCC_A */
+
+#ifdef MAC_USE_SCC_B
+#ifndef MAC_USE_SCC_A	/* Load mac_sccbase only if needed */
+	movel	%pc@(L(mac_sccbase)),%a1
+#endif	/* MAC_USE_SCC_A */
+4:	btst	#2,%a1@(mac_scc_cha_b_ctrl_offset)
+	jeq	4b
+	moveb	%d0,%a1@(mac_scc_cha_b_data_offset)
+#endif	/* MAC_USE_SCC_B */
+
+#endif	/* MAC_SERIAL_DEBUG */
+
+	jra	L(serial_putc_done)
+5:
+#endif	/* CONFIG_MAC */
+
+#ifdef CONFIG_ATARI
+	is_not_atari(4f)
+	movel	%pc@(L(iobase)),%a1
+#if defined(USE_PRINTER)
+3:	btst	#0,%a1@(LSTMFP_GPIP)
+	jne	3b
+	moveb	#LPSG_IO_B,%a1@(LPSG_SELECT)
+	moveb	%d0,%a1@(LPSG_WRITE)
+	moveb	#LPSG_IO_A,%a1@(LPSG_SELECT)
+	moveb	%a1@(LPSG_READ),%d0
+	bclr	#5,%d0
+	moveb	%d0,%a1@(LPSG_WRITE)
+	nop
+	nop
+	bset	#5,%d0
+	moveb	%d0,%a1@(LPSG_WRITE)
+#elif defined(USE_SCC)
+3:	btst	#2,%a1@(LSCC_CTRL)
+	jeq	3b
+	moveb	%d0,%a1@(LSCC_DATA)
+#elif defined(USE_MFP)
+3:	btst	#7,%a1@(LMFP_TSR)
+	jeq	3b
+	moveb	%d0,%a1@(LMFP_UDR)
+#endif
+	jra	L(serial_putc_done)
+4:
+#endif	/* CONFIG_ATARI */
+
+#ifdef CONFIG_MVME147
+	is_not_mvme147(2f)
+1:	btst	#2,M147_SCC_CTRL_A
+	jeq	1b
+	moveb	%d0,M147_SCC_DATA_A
+	jbra	L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_MVME16x
+	is_not_mvme16x(2f)
+	/*
+	 * If the loader gave us a board type then we can use that to
+	 * select an appropriate output routine; otherwise we just use
+	 * the Bug code.  If we have to use the Bug that means the Bug
+	 * workspace has to be valid, which means the Bug has to use
+	 * the SRAM, which is non-standard.
+	 */
+	moveml	%d0-%d7/%a2-%a6,%sp@-
+	movel	vme_brdtype,%d1
+	jeq	1f			| No tag - use the Bug
+	cmpi	#VME_TYPE_MVME162,%d1
+	jeq	6f
+	cmpi	#VME_TYPE_MVME172,%d1
+	jne	5f
+	/* 162/172; it's an SCC */
+6:	btst	#2,M162_SCC_CTRL_A
+	nop
+	nop
+	nop
+	jeq	6b
+	moveb	#8,M162_SCC_CTRL_A
+	nop
+	nop
+	nop
+	moveb	%d0,M162_SCC_CTRL_A
+	jra	3f
+5:
+	/* 166/167/177; it's a CD2401 */
+	moveb	#0,M167_CYCAR
+	moveb	M167_CYIER,%d2
+	moveb	#0x02,M167_CYIER
+7:
+	btst	#5,M167_PCSCCTICR
+	jeq	7b
+	moveb	M167_PCTPIACKR,%d1
+	moveb	M167_CYLICR,%d1
+	jeq	8f
+	moveb	#0x08,M167_CYTEOIR
+	jra	7b
+8:
+	moveb	%d0,M167_CYTDR
+	moveb	#0,M167_CYTEOIR
+	moveb	%d2,M167_CYIER
+	jra	3f
+1:
+	moveb	%d0,%sp@-
+	trap	#15
+	.word	0x0020	/* TRAP 0x020 */
+3:
+	moveml	%sp@+,%d0-%d7/%a2-%a6
+	jbra	L(serial_putc_done)
+2:
+#endif /* CONFIG_MVME16x */
+
+#ifdef CONFIG_BVME6000
+	is_not_bvme6000(2f)
+	/*
+	 * The BVME6000 machine has a serial port ...
+	 */
+1:	btst	#2,BVME_SCC_CTRL_A
+	jeq	1b
+	moveb	%d0,BVME_SCC_DATA_A
+	jbra	L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_SUN3X
+	is_not_sun3x(2f)
+	movel	%d0,-(%sp)
+	movel	0xFEFE0018,%a1
+	jbsr	(%a1)
+	addq	#4,%sp
+	jbra	L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_Q40
+	is_not_q40(2f)
+	tst.l	%pc@(L(q40_do_debug))	/* only debug if requested */
+	beq	2f
+	lea	%pc@(q40_mem_cptr),%a1
+	move.l	%a1@,%a0
+	move.b	%d0,%a0@
+	addq.l	#4,%a0
+	move.l	%a0,%a1@
+	jbra    L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_APOLLO
+	is_not_apollo(2f)
+	movl    %pc@(L(iobase)),%a1
+	moveb	%d0,%a1@(LTHRB0)
+1:      moveb   %a1@(LSRB0),%d0
+	andb	#0x4,%d0
+	beq	1b
+	jbra	L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_HP300
+	is_not_hp300(3f)
+	movl    %pc@(L(iobase)),%a1
+	addl	%pc@(L(uartbase)),%a1
+	movel	%pc@(L(uart_scode)),%d1	/* Check the scode */
+	jmi	3f			/* Unset? Exit */
+	cmpi	#256,%d1		/* APCI scode? */
+	jeq	2f
+1:      moveb   %a1@(DCALSR),%d1	/* Output to DCA */
+	andb	#0x20,%d1
+	beq	1b
+	moveb	%d0,%a1@(DCADATA)
+	jbra	L(serial_putc_done)
+2:	moveb	%a1@(APCILSR),%d1	/* Output to APCI */
+	andb	#0x20,%d1
+	beq	2b
+	moveb	%d0,%a1@(APCIDATA)
+	jbra	L(serial_putc_done)
+3:
+#endif
+	
+L(serial_putc_done):
+func_return	serial_putc
+
+/*
+ * Output a string.
+ */
+func_start	puts,%d0/%a0
+
+	movel	ARG1,%a0
+	jra	2f
+1:
+#ifdef CONSOLE
+	console_putc	%d0
+#endif
+#ifdef SERIAL_DEBUG
+	serial_putc	%d0
+#endif
+2:	moveb	%a0@+,%d0
+	jne	1b
+
+func_return	puts
+
+/*
+ * Output number in hex notation.
+ */
+
+func_start	putn,%d0-%d2
+
+	putc	' '
+
+	movel	ARG1,%d0
+	moveq	#7,%d1
+1:	roll	#4,%d0
+	move	%d0,%d2
+	andb	#0x0f,%d2
+	addb	#'0',%d2
+	cmpb	#'9',%d2
+	jls	2f
+	addb	#'A'-('9'+1),%d2
+2:
+#ifdef CONSOLE
+	console_putc	%d2
+#endif
+#ifdef SERIAL_DEBUG
+	serial_putc	%d2
+#endif
+	dbra	%d1,1b
+
+func_return	putn
+
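The hex conversion done by putn above, sketched in C: rotate the 32-bit value left one nibble at a time and map each nibble to '0'-'9' or 'A'-'F'. The emit callback stands in for the console/serial putc macros.

static void putn_sketch(unsigned int v, void (*emit)(char))
{
	int i;
	char c;

	emit(' ');
	for (i = 0; i < 8; i++) {
		v = (v << 4) | (v >> 28);	/* roll #4: rotate left a nibble */
		c = '0' + (v & 0x0f);
		if (c > '9')
			c += 'A' - ('9' + 1);	/* 10..15 become 'A'..'F' */
		emit(c);
	}
}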
+#ifdef CONFIG_MAC
+/*
+ *	mac_serial_print
+ *
+ *	This routine takes its parameters on the stack.  It then
+ *	turns around and calls the internal routine.  This routine
+ *	is used until the Linux console driver initializes itself.
+ *
+ *	The calling parameters are:
+ *		void mac_serial_print(const char *str);
+ *
+ *	This routine does NOT understand variable arguments, only
+ *	simple strings!
+ */
+ENTRY(mac_serial_print)
+	moveml	%d0/%a0,%sp@-
+#if 1
+	move	%sr,%sp@-
+	ori	#0x0700,%sr
+#endif
+	movel	%sp@(10),%a0		/* fetch parameter */
+	jra	2f
+1:	serial_putc	%d0
+2:	moveb	%a0@+,%d0
+	jne	1b
+#if 1
+	move	%sp@+,%sr
+#endif
+	moveml	%sp@+,%d0/%a0
+	rts
+#endif /* CONFIG_MAC */
+
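A hypothetical caller of mac_serial_print above, matching the convention described in its comment (one plain string, no format arguments); early_mac_debug is a placeholder name, not an existing function.

extern void mac_serial_print(const char *str);

static void early_mac_debug(void)
{
	mac_serial_print("head.S: reached checkpoint\n");	/* literal string only */
}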
+#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
+func_start	set_leds,%d0/%a0
+	movel	ARG1,%d0
+#ifdef CONFIG_HP300
+	is_not_hp300(1f)
+	movel	%pc@(L(iobase)),%a0
+	moveb	%d0,%a0@(0x1ffff)
+	jra	2f
+#endif
+1:
+#ifdef CONFIG_APOLLO
+	movel   %pc@(L(iobase)),%a0
+	lsll    #8,%d0
+	eorw    #0xff00,%d0
+	moveb	%d0,%a0@(LCPUCTRL)
+#endif
+2:
+func_return	set_leds
+#endif
+
+#ifdef CONSOLE
+/*
+ *	For continuity, see the data alignment
+ *	to which this structure is tied.
+ */
+#define Lconsole_struct_cur_column	0
+#define Lconsole_struct_cur_row		4
+#define Lconsole_struct_num_columns	8
+#define Lconsole_struct_num_rows	12
+#define Lconsole_struct_left_edge	16
+#define Lconsole_struct_penguin_putc	20
+
+func_start	console_init,%a0-%a4/%d0-%d7
+	/*
+	 *	Some of the register usage that follows
+	 *		a0 = pointer to boot_info
+	 *		a1 = pointer to screen
+	 *		a2 = pointer to Lconsole_globals
+	 *		d3 = pixel width of screen
+	 *		d4 = pixel height of screen
+	 *		(d3,d4) ~= (x,y) of a point just below
+	 *			and to the right of the screen
+	 *			NOT on the screen!
+	 *		d5 = number of bytes per scan line
+	 *		d6 = number of bytes on the entire screen
+	 */
+
+	lea	%pc@(L(console_globals)),%a2
+	movel	%pc@(L(mac_videobase)),%a1
+	movel	%pc@(L(mac_rowbytes)),%d5
+	movel	%pc@(L(mac_dimensions)),%d3	/* -> low byte */
+	movel	%d3,%d4
+	swap	%d4		/* -> high byte */
+	andl	#0xffff,%d3	/* d3 = screen width in pixels */
+	andl	#0xffff,%d4	/* d4 = screen height in pixels */
+
+	movel	%d5,%d6
+|	subl	#20,%d6
+	mulul	%d4,%d6		/* scan line bytes x num scan lines */
+	divul	#8,%d6		/* we'll clear 8 bytes at a time */
+	moveq	#-1,%d0		/* Mac_black */
+	subq	#1,%d6
+
+L(console_clear_loop):
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	dbra	%d6,L(console_clear_loop)
+
+	/* Calculate font size */
+
+#if   defined(FONT_8x8) && defined(CONFIG_FONT_8x8)
+	lea	%pc@(font_vga_8x8),%a0
+#elif defined(FONT_8x16) && defined(CONFIG_FONT_8x16)
+	lea	%pc@(font_vga_8x16),%a0
+#elif defined(FONT_6x11) && defined(CONFIG_FONT_6x11)
+	lea	%pc@(font_vga_6x11),%a0
+#elif defined(CONFIG_FONT_8x8) /* default */
+	lea	%pc@(font_vga_8x8),%a0
+#else /* no compiled-in font */
+	lea	0,%a0
+#endif
+
+	/*
+	 *	At this point we make a shift in register usage
+	 *	a1 = address of console_font pointer
+	 */
+	lea	%pc@(L(console_font)),%a1
+	movel	%a0,%a1@	/* store pointer to struct fbcon_font_desc in console_font */
+	tstl	%a0
+	jeq	1f
+	lea	%pc@(L(console_font_data)),%a4
+	movel	%a0@(FONT_DESC_DATA),%d0
+	subl	#L(console_font),%a1
+	addl	%a1,%d0
+	movel	%d0,%a4@
+
+	/*
+	 *	Calculate global maximums.
+	 *	Note - we can use either an 8 x 16 or an 8 x 8
+	 *	character font; 6 x 11 is also supported.
+	 */
+		/* ASSERT: a0 = contents of Lconsole_font */
+	movel	%d3,%d0				/* screen width in pixels */
+	divul	%a0@(FONT_DESC_WIDTH),%d0	/* d0 = max num chars per row */
+
+	movel	%d4,%d1				/* screen height in pixels */
+	divul	%a0@(FONT_DESC_HEIGHT),%d1	/* d1 = max num rows */
+
+	movel	%d0,%a2@(Lconsole_struct_num_columns)
+	movel	%d1,%a2@(Lconsole_struct_num_rows)
+
+	/*
+	 *	Clear the current row and column
+	 */
+	clrl	%a2@(Lconsole_struct_cur_column)
+	clrl	%a2@(Lconsole_struct_cur_row)
+	clrl	%a2@(Lconsole_struct_left_edge)
+
+	/*
+	 * Initialization is complete
+	 */
+1:
+func_return	console_init
+
+func_start	console_put_stats,%a0/%d7
+	/*
+	 *	Some of the register usage that follows
+	 *		a0 = pointer to boot_info
+	 *		d7 = value of boot_info fields
+	 */
+	puts	"\nMacLinux\n\n"
+
+#ifdef SERIAL_DEBUG
+	puts	" vidaddr:"
+	putn	%pc@(L(mac_videobase))		/* video addr. */
+
+	puts	"\n  _stext:"
+	lea	%pc@(_stext),%a0
+	putn	%a0
+
+	puts	"\nbootinfo:"
+	lea	%pc@(_end),%a0
+	putn	%a0
+
+	puts	"\ncpuid:"
+	putn	%pc@(L(cputype))
+	putc	'\n'
+
+#ifdef MAC_SERIAL_DEBUG
+	putn	%pc@(L(mac_sccbase))
+	putc	'\n'
+#endif
+#  if defined(MMU_PRINT)
+	jbsr	mmu_print_machine_cpu_types
+#  endif /* MMU_PRINT */
+#endif /* SERIAL_DEBUG */
+
+func_return	console_put_stats
+
+#ifdef CONSOLE_PENGUIN
+func_start	console_put_penguin,%a0-%a1/%d0-%d7
+	/*
+	 *	Get 'that_penguin' onto the screen in the upper right corner
+	 *	penguin is 64 x 74 pixels, align against right edge of screen
+	 */
+	lea	%pc@(L(mac_dimensions)),%a0
+	movel	%a0@,%d0
+	andil	#0xffff,%d0
+	subil	#64,%d0		/* snug up against the right edge */
+	clrl	%d1		/* start at the top */
+	movel	#73,%d7
+	lea	%pc@(L(that_penguin)),%a1
+L(console_penguin_row):
+	movel	#31,%d6
+L(console_penguin_pixel_pair):
+	moveb	%a1@,%d2
+	lsrb	#4,%d2
+	console_plot_pixel %d0,%d1,%d2
+	addq	#1,%d0
+	moveb	%a1@+,%d2
+	console_plot_pixel %d0,%d1,%d2
+	addq	#1,%d0
+	dbra	%d6,L(console_penguin_pixel_pair)
+
+	subil	#64,%d0
+	addq	#1,%d1
+	dbra	%d7,L(console_penguin_row)
+
+func_return	console_put_penguin
+
+/* include penguin bitmap */
+L(that_penguin):
+#include "../mac/mac_penguin.S"
+#endif
+
+	/*
+	 * Calculate source and destination addresses
+	 *	output	a1 = dest
+	 *		a2 = source
+	 */
+
+func_start	console_scroll,%a0-%a4/%d0-%d7
+	lea	%pc@(L(mac_videobase)),%a0
+	movel	%a0@,%a1
+	movel	%a1,%a2
+	lea	%pc@(L(mac_rowbytes)),%a0
+	movel	%a0@,%d5
+	movel	%pc@(L(console_font)),%a0
+	tstl	%a0
+	jeq	1f
+	mulul	%a0@(FONT_DESC_HEIGHT),%d5	/* account for # scan lines per character */
+	addal	%d5,%a2
+
+	/*
+	 * Get dimensions
+	 */
+	lea	%pc@(L(mac_dimensions)),%a0
+	movel	%a0@,%d3
+	movel	%d3,%d4
+	swap	%d4
+	andl	#0xffff,%d3	/* d3 = screen width in pixels */
+	andl	#0xffff,%d4	/* d4 = screen height in pixels */
+
+	/*
+	 * Calculate number of bytes to move
+	 */
+	lea	%pc@(L(mac_rowbytes)),%a0
+	movel	%a0@,%d6
+	movel	%pc@(L(console_font)),%a0
+	subl	%a0@(FONT_DESC_HEIGHT),%d4	/* we're not scrolling the top row! */
+	mulul	%d4,%d6		/* scan line bytes x num scan lines */
+	divul	#32,%d6		/* we'll move 8 longs at a time */
+	subq	#1,%d6
+
+L(console_scroll_loop):
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	dbra	%d6,L(console_scroll_loop)
+
+	lea	%pc@(L(mac_rowbytes)),%a0
+	movel	%a0@,%d6
+	movel	%pc@(L(console_font)),%a0
+	mulul	%a0@(FONT_DESC_HEIGHT),%d6	/* scan line bytes x font height */
+	divul	#32,%d6			/* we'll move 8 longs at a time */
+	subq	#1,%d6
+
+	moveq	#-1,%d0
+L(console_scroll_clear_loop):
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	dbra	%d6,L(console_scroll_clear_loop)
+
+1:
+func_return	console_scroll
+
+
+func_start	console_putc,%a0/%a1/%d0-%d7
+
+	is_not_mac(L(console_exit))
+	tstl	%pc@(L(console_font))
+	jeq	L(console_exit)
+
+	/* Output character in d7 on console.
+	 */
+	movel	ARG1,%d7
+	cmpib	#'\n',%d7
+	jbne	1f
+
+	/* A little safe recursion is good for the soul */
+	console_putc	#'\r'
+1:
+	lea	%pc@(L(console_globals)),%a0
+
+	cmpib	#10,%d7
+	jne	L(console_not_lf)
+	movel	%a0@(Lconsole_struct_cur_row),%d0
+	addil	#1,%d0
+	movel	%d0,%a0@(Lconsole_struct_cur_row)
+	movel	%a0@(Lconsole_struct_num_rows),%d1
+	cmpl	%d1,%d0
+	jcs	1f
+	subil	#1,%d0
+	movel	%d0,%a0@(Lconsole_struct_cur_row)
+	console_scroll
+1:
+	jra	L(console_exit)
+
+L(console_not_lf):
+	cmpib	#13,%d7
+	jne	L(console_not_cr)
+	clrl	%a0@(Lconsole_struct_cur_column)
+	jra	L(console_exit)
+
+L(console_not_cr):
+	cmpib	#1,%d7
+	jne	L(console_not_home)
+	clrl	%a0@(Lconsole_struct_cur_row)
+	clrl	%a0@(Lconsole_struct_cur_column)
+	jra	L(console_exit)
+
+/*
+ *	At this point we know that the %d7 character is going to be
+ *	rendered on the screen.  Register usage is -
+ *		a0 = pointer to console globals
+ *		a1 = font data
+ *		d0 = cursor column
+ *		d1 = cursor row to draw the character
+ *		d7 = character number
+ */
+L(console_not_home):
+	movel	%a0@(Lconsole_struct_cur_column),%d0
+	addql	#1,%a0@(Lconsole_struct_cur_column)
+	movel	%a0@(Lconsole_struct_num_columns),%d1
+	cmpl	%d1,%d0
+	jcs	1f
+	console_putc	#'\n'	/* recursion is OK! */
+1:
+	movel	%a0@(Lconsole_struct_cur_row),%d1
+
+	/*
+	 *	At this point we make a shift in register usage
+	 *	a0 = address of pointer to font data (fbcon_font_desc)
+	 */
+	movel	%pc@(L(console_font)),%a0
+	movel	%pc@(L(console_font_data)),%a1	/* Load fbcon_font_desc.data into a1 */
+	andl	#0x000000ff,%d7
+		/* ASSERT: a0 = contents of Lconsole_font */
+	mulul	%a0@(FONT_DESC_HEIGHT),%d7	/* d7 = index into font data */
+	addl	%d7,%a1			/* a1 = points to char image */
+
+	/*
+	 *	At this point we make a shift in register usage
+	 *	d0 = pixel coordinate, x
+	 *	d1 = pixel coordinate, y
+	 *	d2 = (bit 0) 1/0 for white/black (!) pixel on screen
+	 *	d3 = font scan line data (8 pixels)
+	 *	d6 = count down for the font's pixel width (8)
+	 *	d7 = count down for the font's pixel count in height
+	 */
+		/* ASSERT: a0 = contents of Lconsole_font */
+	mulul	%a0@(FONT_DESC_WIDTH),%d0
+	mulul	%a0@(FONT_DESC_HEIGHT),%d1
+	movel	%a0@(FONT_DESC_HEIGHT),%d7	/* Load fbcon_font_desc.height into d7 */
+	subq	#1,%d7
+L(console_read_char_scanline):
+	moveb	%a1@+,%d3
+
+		/* ASSERT: a0 = contents of Lconsole_font */
+	movel	%a0@(FONT_DESC_WIDTH),%d6	/* Load fbcon_font_desc.width into d6 */
+	subql	#1,%d6
+
+L(console_do_font_scanline):
+	lslb	#1,%d3
+	scsb	%d2		/* convert 1 bit into a byte */
+	console_plot_pixel %d0,%d1,%d2
+	addq	#1,%d0
+	dbra	%d6,L(console_do_font_scanline)
+
+		/* ASSERT: a0 = contents of Lconsole_font */
+	subl	%a0@(FONT_DESC_WIDTH),%d0
+	addq	#1,%d1
+	dbra	%d7,L(console_read_char_scanline)
+
+L(console_exit):
+func_return	console_putc
+
+	/*
+	 *	Input:
+	 *		d0 = x coordinate
+	 *		d1 = y coordinate
+	 *		d2 = (bit 0) 1/0 for white/black (!)
+	 *	All registers are preserved
+	 */
+func_start	console_plot_pixel,%a0-%a1/%d0-%d4
+
+	movel	%pc@(L(mac_videobase)),%a1
+	movel	%pc@(L(mac_videodepth)),%d3
+	movel	ARG1,%d0
+	movel	ARG2,%d1
+	mulul	%pc@(L(mac_rowbytes)),%d1
+	movel	ARG3,%d2
+
+	/*
+	 *	Register usage:
+	 *		d0 = x coord becomes byte offset into frame buffer
+	 *		d1 = y coord
+	 *		d2 = black or white (0/1)
+	 *		d3 = video depth
+	 *		d4 = temp of x (d0) for many bit depths
+	 */
+L(test_1bit):
+	cmpb	#1,%d3
+	jbne	L(test_2bit)
+	movel	%d0,%d4		/* we need the low order 3 bits! */
+	divul	#8,%d0
+	addal	%d0,%a1
+	addal	%d1,%a1
+	andb	#7,%d4
+	eorb	#7,%d4		/* reverse the x-coordinate w/ screen-bit # */
+	andb	#1,%d2
+	jbne	L(white_1)
+	bsetb	%d4,%a1@
+	jbra	L(console_plot_pixel_exit)
+L(white_1):
+	bclrb	%d4,%a1@
+	jbra	L(console_plot_pixel_exit)
+
+L(test_2bit):
+	cmpb	#2,%d3
+	jbne	L(test_4bit)
+	movel	%d0,%d4		/* we need the low order 2 bits! */
+	divul	#4,%d0
+	addal	%d0,%a1
+	addal	%d1,%a1
+	andb	#3,%d4
+	eorb	#3,%d4		/* reverse the x-coordinate w/ screen-bit # */
+	lsll	#1,%d4		/* ! */
+	andb	#1,%d2
+	jbne	L(white_2)
+	bsetb	%d4,%a1@
+	addq	#1,%d4
+	bsetb	%d4,%a1@
+	jbra	L(console_plot_pixel_exit)
+L(white_2):
+	bclrb	%d4,%a1@
+	addq	#1,%d4
+	bclrb	%d4,%a1@
+	jbra	L(console_plot_pixel_exit)
+
+L(test_4bit):
+	cmpb	#4,%d3
+	jbne	L(test_8bit)
+	movel	%d0,%d4		/* we need the low order bit! */
+	divul	#2,%d0
+	addal	%d0,%a1
+	addal	%d1,%a1
+	andb	#1,%d4
+	eorb	#1,%d4
+	lsll	#2,%d4		/* ! */
+	andb	#1,%d2
+	jbne	L(white_4)
+	bsetb	%d4,%a1@
+	addq	#1,%d4
+	bsetb	%d4,%a1@
+	addq	#1,%d4
+	bsetb	%d4,%a1@
+	addq	#1,%d4
+	bsetb	%d4,%a1@
+	jbra	L(console_plot_pixel_exit)
+L(white_4):
+	bclrb	%d4,%a1@
+	addq	#1,%d4
+	bclrb	%d4,%a1@
+	addq	#1,%d4
+	bclrb	%d4,%a1@
+	addq	#1,%d4
+	bclrb	%d4,%a1@
+	jbra	L(console_plot_pixel_exit)
+
+L(test_8bit):
+	cmpb	#8,%d3
+	jbne	L(test_16bit)
+	addal	%d0,%a1
+	addal	%d1,%a1
+	andb	#1,%d2
+	jbne	L(white_8)
+	moveb	#0xff,%a1@
+	jbra	L(console_plot_pixel_exit)
+L(white_8):
+	clrb	%a1@
+	jbra	L(console_plot_pixel_exit)
+
+L(test_16bit):
+	cmpb	#16,%d3
+	jbne	L(console_plot_pixel_exit)
+	addal	%d0,%a1
+	addal	%d0,%a1
+	addal	%d1,%a1
+	andb	#1,%d2
+	jbne	L(white_16)
+	clrw	%a1@
+	jbra	L(console_plot_pixel_exit)
+L(white_16):
+	movew	#0x0fff,%a1@
+	jbra	L(console_plot_pixel_exit)
+
+L(console_plot_pixel_exit):
+func_return	console_plot_pixel
+#endif /* CONSOLE */
+
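A C sketch of the 1 bit-per-pixel case of console_plot_pixel above; the 2-, 4-, 8- and 16-bit cases follow the same pattern with wider pixels. The parameters stand in for L(mac_videobase), L(mac_rowbytes) and the routine's arguments, and the white/black convention matches the comment above (bit 0 set means white, because a set framebuffer bit shows black here).

static void plot_pixel_1bpp_sketch(unsigned char *videobase,
				   unsigned long rowbytes,
				   unsigned long x, unsigned long y,
				   int white)	/* bit 0: 1 = white, 0 = black */
{
	unsigned char *byte = videobase + y * rowbytes + x / 8;
	unsigned int bit = 7 - (x & 7);		/* leftmost pixel is the MSB */

	if (white & 1)
		*byte &= ~(1 << bit);		/* bclrb: cleared bit shows white */
	else
		*byte |= 1 << bit;		/* bsetb: set bit shows black */
}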
+#if 0
+/*
+ * This is some old code lying around.  I don't believe
+ * it's used or important anymore.  My guess is it contributed
+ * to getting to this point, but it's done for now.
+ * It was still in the 2.1.77 head.S, so it's still here.
+ * (And still not used!)
+ */
+L(showtest):
+	moveml	%a0/%d7,%sp@-
+	puts	"A="
+	putn	%a1
+
+	.long	0xf0119f15		| ptestr	#5,%a1@,#7,%a0
+
+	puts	"DA="
+	putn	%a0
+
+	puts	"D="
+	putn	%a0@
+
+	puts	"S="
+	lea	%pc@(L(mmu)),%a0
+	.long	0xf0106200		| pmove		%psr,%a0@
+	clrl	%d7
+	movew	%a0@,%d7
+	putn	%d7
+
+	putc	'\n'
+	moveml	%sp@+,%a0/%d7
+	rts
+#endif	/* 0 */
+
+__INITDATA
+	.align	4
+
+#if defined(CONFIG_ATARI) || defined(CONFIG_AMIGA) || \
+    defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
+L(custom):
+L(iobase):
+	.long 0
+#endif
+
+#if defined(CONSOLE)
+L(console_globals):
+	.long	0		/* cursor column */
+	.long	0		/* cursor row */
+	.long	0		/* max num columns */
+	.long	0		/* max num rows */
+	.long	0		/* left edge */
+	.long	0		/* mac putc */
+L(console_font):
+	.long	0		/* pointer to console font (struct font_desc) */
+L(console_font_data):
+	.long	0		/* pointer to console font data */
+#endif /* CONSOLE */
+
+#if defined(MMU_PRINT)
+L(mmu_print_data):
+	.long	0		/* valid flag */
+	.long	0		/* start logical */
+	.long	0		/* next logical */
+	.long	0		/* start physical */
+	.long	0		/* next physical */
+#endif /* MMU_PRINT */
+
+L(cputype):
+	.long	0
+L(mmu_cached_pointer_tables):
+	.long	0
+L(mmu_num_pointer_tables):
+	.long	0
+L(phys_kernel_start):
+	.long	0
+L(kernel_end):
+	.long	0
+L(memory_start):
+	.long	0
+L(kernel_pgdir_ptr):
+	.long	0
+L(temp_mmap_mem):
+	.long	0
+
+#if defined (CONFIG_MVME147)
+M147_SCC_CTRL_A = 0xfffe3002
+M147_SCC_DATA_A = 0xfffe3003
+#endif
+
+#if defined (CONFIG_MVME16x)
+M162_SCC_CTRL_A = 0xfff45005
+M167_CYCAR = 0xfff450ee
+M167_CYIER = 0xfff45011
+M167_CYLICR = 0xfff45026
+M167_CYTEOIR = 0xfff45085
+M167_CYTDR = 0xfff450f8
+M167_PCSCCTICR = 0xfff4201e
+M167_PCTPIACKR = 0xfff42025
+#endif
+
+#if defined (CONFIG_BVME6000)
+BVME_SCC_CTRL_A	= 0xffb0000b
+BVME_SCC_DATA_A	= 0xffb0000f
+#endif
+
+#if defined(CONFIG_MAC)
+L(mac_booter_data):
+	.long	0
+L(mac_videobase):
+	.long	0
+L(mac_videodepth):
+	.long	0
+L(mac_dimensions):
+	.long	0
+L(mac_rowbytes):
+	.long	0
+#ifdef MAC_SERIAL_DEBUG
+L(mac_sccbase):
+	.long	0
+#endif /* MAC_SERIAL_DEBUG */
+#endif
+
+#if defined (CONFIG_APOLLO)
+LSRB0        = 0x10412
+LTHRB0       = 0x10416
+LCPUCTRL     = 0x10100
+#endif
+
+#if defined(CONFIG_HP300)
+DCADATA	     = 0x11
+DCALSR	     = 0x1b
+APCIDATA     = 0x00
+APCILSR      = 0x14
+L(uartbase):
+	.long	0
+L(uart_scode):
+	.long	-1
+#endif
+
+__FINIT
+	.data
+	.align	4
+
+availmem:
+	.long	0
+m68k_pgtable_cachemode:
+	.long	0
+m68k_supervisor_cachemode:
+	.long	0
+#if defined(CONFIG_MVME16x)
+mvme_bdid:
+	.long	0,0,0,0,0,0,0,0
+#endif
+#if defined(CONFIG_Q40)
+q40_mem_cptr:
+	.long	0
+L(q40_do_debug):
+	.long	0
+#endif
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
new file mode 100644
index 0000000..514d323
--- /dev/null
+++ b/arch/m68k/kernel/ints.c
@@ -0,0 +1,281 @@
+/*
+ * linux/arch/m68k/kernel/ints.c -- Linux/m68k general interrupt handling code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * 07/03/96: Timer initialization, and thus mach_sched_init(),
+ *           removed from request_irq() and moved to init_time().
+ *           We should therefore consider renaming our add_isr() and
+ *           remove_isr() to request_irq() and free_irq()
+ *           respectively, so they are compliant with the other
+ *           architectures.                                     /Jes
+ * 11/07/96: Changed all add_/remove_isr() to request_/free_irq() calls.
+ *           Removed irq list support; if any machine needs an irq server
+ *           it must implement this itself (as is already done), so that
+ *           only default handlers are used with mach_default_handler.
+ *           request_irq got some flags different from other architectures:
+ *           - IRQ_FLG_REPLACE : Replace an existing handler (the default one
+ *                               can be replaced without this flag)
+ *           - IRQ_FLG_LOCK : handler can't be replaced
+ *           There are other machine-dependent flags; see there.
+ *           If you want to replace a default handler you should know what
+ *           you're doing, since it might handle other irq sources
+ *           which must still be served                         /Roman Zippel
+ */
+
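An illustrative (not real) user of the interface described above. The handler signature matches the one used throughout this file; IRQ3, IRQ_FLG_REPLACE and IRQ_HANDLED come from the m68k and interrupt headers, while sample_int and sample_attach are placeholders.

static irqreturn_t sample_int(int irq, void *dev_id, struct pt_regs *fp)
{
	/* acknowledge and handle the device here */
	return IRQ_HANDLED;
}

static int __init sample_attach(void)
{
	/* replace the default autovector handler for level 3 */
	return cpu_request_irq(IRQ3, sample_int, IRQ_FLG_REPLACE,
			       "sample device", NULL);
}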
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <asm/setup.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/page.h>
+#include <asm/machdep.h>
+
+#ifdef CONFIG_Q40
+#include <asm/q40ints.h>
+#endif
+
+/* table for system interrupt handlers */
+static irq_handler_t irq_list[SYS_IRQS];
+
+static const char *default_names[SYS_IRQS] = {
+	[0] = "spurious int",
+	[1] = "int1 handler",
+	[2] = "int2 handler",
+	[3] = "int3 handler",
+	[4] = "int4 handler",
+	[5] = "int5 handler",
+	[6] = "int6 handler",
+	[7] = "int7 handler"
+};
+
+/* The number of spurious interrupts */
+volatile unsigned int num_spurious;
+
+#define NUM_IRQ_NODES 100
+static irq_node_t nodes[NUM_IRQ_NODES];
+
+static void dummy_enable_irq(unsigned int irq);
+static void dummy_disable_irq(unsigned int irq);
+static int dummy_request_irq(unsigned int irq,
+		irqreturn_t (*handler) (int, void *, struct pt_regs *),
+		unsigned long flags, const char *devname, void *dev_id);
+static void dummy_free_irq(unsigned int irq, void *dev_id);
+
+void (*enable_irq) (unsigned int) = dummy_enable_irq;
+void (*disable_irq) (unsigned int) = dummy_disable_irq;
+
+int (*mach_request_irq) (unsigned int, irqreturn_t (*)(int, void *, struct pt_regs *),
+                      unsigned long, const char *, void *) = dummy_request_irq;
+void (*mach_free_irq) (unsigned int, void *) = dummy_free_irq;
+
+void init_irq_proc(void);
+
+/*
+ * void init_IRQ(void)
+ *
+ * Parameters:	None
+ *
+ * Returns:	Nothing
+ *
+ * This function should be called during kernel startup to initialize
+ * the IRQ handling routines.
+ */
+
+void __init init_IRQ(void)
+{
+	int i;
+
+	for (i = 0; i < SYS_IRQS; i++) {
+		if (mach_default_handler)
+			irq_list[i].handler = (*mach_default_handler)[i];
+		irq_list[i].flags   = 0;
+		irq_list[i].dev_id  = NULL;
+		irq_list[i].devname = default_names[i];
+	}
+
+	for (i = 0; i < NUM_IRQ_NODES; i++)
+		nodes[i].handler = NULL;
+
+	mach_init_IRQ ();
+}
+
+irq_node_t *new_irq_node(void)
+{
+	irq_node_t *node;
+	short i;
+
+	for (node = nodes, i = NUM_IRQ_NODES-1; i >= 0; node++, i--)
+		if (!node->handler)
+			return node;
+
+	printk ("new_irq_node: out of nodes\n");
+	return NULL;
+}
+
+/*
+ * We will keep these functions until I have convinced Linus to move
+ * the declaration of them from include/linux/sched.h to
+ * include/asm/irq.h.
+ */
+int request_irq(unsigned int irq,
+		irqreturn_t (*handler) (int, void *, struct pt_regs *),
+		unsigned long flags, const char *devname, void *dev_id)
+{
+	return mach_request_irq(irq, handler, flags, devname, dev_id);
+}
+
+EXPORT_SYMBOL(request_irq);
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+	mach_free_irq(irq, dev_id);
+}
+
+EXPORT_SYMBOL(free_irq);
+
+int cpu_request_irq(unsigned int irq,
+                    irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                    unsigned long flags, const char *devname, void *dev_id)
+{
+	if (irq < IRQ1 || irq > IRQ7) {
+		printk("%s: Incorrect IRQ %d from %s\n",
+		       __FUNCTION__, irq, devname);
+		return -ENXIO;
+	}
+
+#if 0
+	if (!(irq_list[irq].flags & IRQ_FLG_STD)) {
+		if (irq_list[irq].flags & IRQ_FLG_LOCK) {
+			printk("%s: IRQ %d from %s is not replaceable\n",
+			       __FUNCTION__, irq, irq_list[irq].devname);
+			return -EBUSY;
+		}
+		if (!(flags & IRQ_FLG_REPLACE)) {
+			printk("%s: %s can't replace IRQ %d from %s\n",
+			       __FUNCTION__, devname, irq, irq_list[irq].devname);
+			return -EBUSY;
+		}
+	}
+#endif
+
+	irq_list[irq].handler = handler;
+	irq_list[irq].flags   = flags;
+	irq_list[irq].dev_id  = dev_id;
+	irq_list[irq].devname = devname;
+	return 0;
+}
+
+void cpu_free_irq(unsigned int irq, void *dev_id)
+{
+	if (irq < IRQ1 || irq > IRQ7) {
+		printk("%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
+		return;
+	}
+
+	if (irq_list[irq].dev_id != dev_id)
+		printk("%s: Removing probably wrong IRQ %d from %s\n",
+		       __FUNCTION__, irq, irq_list[irq].devname);
+
+	irq_list[irq].handler = (*mach_default_handler)[irq];
+	irq_list[irq].flags   = 0;
+	irq_list[irq].dev_id  = NULL;
+	irq_list[irq].devname = default_names[irq];
+}
+
+/*
+ * Do we need these probe functions on the m68k?
+ *
+ *  ... may be useful with ISA devices
+ */
+unsigned long probe_irq_on (void)
+{
+#ifdef CONFIG_Q40
+	if (MACH_IS_Q40)
+		return q40_probe_irq_on();
+#endif
+	return 0;
+}
+
+EXPORT_SYMBOL(probe_irq_on);
+
+int probe_irq_off (unsigned long irqs)
+{
+#ifdef CONFIG_Q40
+	if (MACH_IS_Q40)
+		return q40_probe_irq_off(irqs);
+#endif
+	return 0;
+}
+
+EXPORT_SYMBOL(probe_irq_off);
+
+static void dummy_enable_irq(unsigned int irq)
+{
+	printk("calling uninitialized enable_irq()\n");
+}
+
+static void dummy_disable_irq(unsigned int irq)
+{
+	printk("calling uninitialized disable_irq()\n");
+}
+
+static int dummy_request_irq(unsigned int irq,
+		irqreturn_t (*handler) (int, void *, struct pt_regs *),
+		unsigned long flags, const char *devname, void *dev_id)
+{
+	printk("calling uninitialized request_irq()\n");
+	return 0;
+}
+
+static void dummy_free_irq(unsigned int irq, void *dev_id)
+{
+	printk("calling uninitialized disable_irq()\n");
+}
+
+asmlinkage void process_int(unsigned long vec, struct pt_regs *fp)
+{
+	if (vec >= VEC_INT1 && vec <= VEC_INT7 && !MACH_IS_BVME6000) {
+		vec -= VEC_SPUR;
+		kstat_cpu(0).irqs[vec]++;
+		irq_list[vec].handler(vec, irq_list[vec].dev_id, fp);
+	} else {
+		if (mach_process_int)
+			mach_process_int(vec, fp);
+		else
+			panic("Can't process interrupt vector %ld\n", vec);
+		return;
+	}
+}
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+	int i = *(loff_t *) v;
+
+	/* autovector interrupts */
+	if (i < SYS_IRQS) {
+		if (mach_default_handler) {
+			seq_printf(p, "auto %2d: %10u ", i,
+			               i ? kstat_cpu(0).irqs[i] : num_spurious);
+			seq_puts(p, "  ");
+			seq_printf(p, "%s\n", irq_list[i].devname);
+		}
+	} else if (i == SYS_IRQS)
+		mach_get_irq_list(p, v);
+	return 0;
+}
+
+void init_irq_proc(void)
+{
+	/* Insert /proc/irq driver here */
+}
+
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
new file mode 100644
index 0000000..fe837e3
--- /dev/null
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -0,0 +1,88 @@
+#include <linux/module.h>
+#include <linux/linkage.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/config.h>
+
+#include <asm/setup.h>
+#include <asm/machdep.h>
+#include <asm/pgalloc.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/semaphore.h>
+#include <asm/checksum.h>
+
+asmlinkage long long __ashldi3 (long long, int);
+asmlinkage long long __ashrdi3 (long long, int);
+asmlinkage long long __lshrdi3 (long long, int);
+asmlinkage long long __muldi3 (long long, long long);
+extern char m68k_debug_device[];
+
+extern void dump_thread(struct pt_regs *, struct user *);
+
+/* platform dependent support */
+
+EXPORT_SYMBOL(m68k_machtype);
+EXPORT_SYMBOL(m68k_cputype);
+EXPORT_SYMBOL(m68k_is040or060);
+EXPORT_SYMBOL(m68k_realnum_memory);
+EXPORT_SYMBOL(m68k_memory);
+#ifndef CONFIG_SUN3
+EXPORT_SYMBOL(cache_push);
+EXPORT_SYMBOL(cache_clear);
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+EXPORT_SYMBOL(mm_vtop);
+EXPORT_SYMBOL(mm_ptov);
+EXPORT_SYMBOL(mm_end_of_chunk);
+#else
+EXPORT_SYMBOL(m68k_memoffset);
+#endif /* !CONFIG_SINGLE_MEMORY_CHUNK */
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(kernel_set_cachemode);
+#endif /* !CONFIG_SUN3 */
+EXPORT_SYMBOL(m68k_debug_device);
+EXPORT_SYMBOL(mach_hwclk);
+EXPORT_SYMBOL(mach_get_ss);
+EXPORT_SYMBOL(mach_get_rtc_pll);
+EXPORT_SYMBOL(mach_set_rtc_pll);
+#ifdef CONFIG_INPUT_M68K_BEEP_MODULE
+EXPORT_SYMBOL(mach_beep);
+#endif
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(kernel_thread);
+#ifdef CONFIG_VME
+EXPORT_SYMBOL(vme_brdtype);
+#endif
+
+/* The following are special because they're not called
+   explicitly (the C compiler generates them).  Fortunately,
+   their interface isn't gonna change any time soon now, so
+   it's OK to leave it out of version control.  */
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(__muldi3);
+
+EXPORT_SYMBOL(__down_failed);
+EXPORT_SYMBOL(__down_failed_interruptible);
+EXPORT_SYMBOL(__down_failed_trylock);
+EXPORT_SYMBOL(__up_wakeup);
+
+EXPORT_SYMBOL(get_wchan);
diff --git a/arch/m68k/kernel/module.c b/arch/m68k/kernel/module.c
new file mode 100644
index 0000000..3b1a2ff
--- /dev/null
+++ b/arch/m68k/kernel/module.c
@@ -0,0 +1,128 @@
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt...)
+#endif
+
+void *module_alloc(unsigned long size)
+{
+	if (size == 0)
+		return NULL;
+	return vmalloc(size);
+}
+
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+	vfree(module_region);
+	/* FIXME: If module_region == mod->init_region, trim exception
+           table entries. */
+}
+
+/* We don't need anything special. */
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+			      Elf_Shdr *sechdrs,
+			      char *secstrings,
+			      struct module *mod)
+{
+	return 0;
+}
+
+int apply_relocate(Elf32_Shdr *sechdrs,
+		   const char *strtab,
+		   unsigned int symindex,
+		   unsigned int relsec,
+		   struct module *me)
+{
+	unsigned int i;
+	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
+	Elf32_Sym *sym;
+	uint32_t *location;
+
+	DEBUGP("Applying relocate section %u to %u\n", relsec,
+	       sechdrs[relsec].sh_info);
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		/* This is where to make the change */
+		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+			+ rel[i].r_offset;
+		/* This is the symbol it is referring to.  Note that all
+		   undefined symbols have been resolved.  */
+		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+			+ ELF32_R_SYM(rel[i].r_info);
+
+		switch (ELF32_R_TYPE(rel[i].r_info)) {
+		case R_68K_32:
+			/* We add the value into the location given */
+			*location += sym->st_value;
+			break;
+		case R_68K_PC32:
+			/* Add the value, subtract its position */
+			*location += sym->st_value - (uint32_t)location;
+			break;
+		default:
+			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+			       me->name, ELF32_R_TYPE(rel[i].r_info));
+			return -ENOEXEC;
+		}
+	}
+	return 0;
+}
+
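Both relocation routines in this file reduce to the same arithmetic, sketched below. For the REL case above the addend is the value already stored at the target location; for the RELA case below it is rel[i].r_addend.

static uint32_t m68k_reloc_sketch(unsigned int type, uint32_t addend,
				  uint32_t symval, uint32_t location)
{
	switch (type) {
	case R_68K_32:			/* absolute 32-bit reference */
		return addend + symval;
	case R_68K_PC32:		/* 32-bit PC-relative reference */
		return addend + symval - location;
	default:			/* the real code reports -ENOEXEC */
		return 0;
	}
}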
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+		       const char *strtab,
+		       unsigned int symindex,
+		       unsigned int relsec,
+		       struct module *me)
+{
+	unsigned int i;
+	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+	Elf32_Sym *sym;
+	uint32_t *location;
+
+	DEBUGP("Applying relocate_add section %u to %u\n", relsec,
+	       sechdrs[relsec].sh_info);
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		/* This is where to make the change */
+		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+			+ rel[i].r_offset;
+		/* This is the symbol it is referring to.  Note that all
+		   undefined symbols have been resolved.  */
+		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+			+ ELF32_R_SYM(rel[i].r_info);
+
+		switch (ELF32_R_TYPE(rel[i].r_info)) {
+		case R_68K_32:
+			/* We add the value into the location given */
+			*location = rel[i].r_addend + sym->st_value;
+			break;
+		case R_68K_PC32:
+			/* Add the value, subtract its position */
+			*location = rel[i].r_addend + sym->st_value - (uint32_t)location;
+			break;
+		default:
+			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+			       me->name, ELF32_R_TYPE(rel[i].r_info));
+			return -ENOEXEC;
+		}
+	}
+	return 0;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+		    const Elf_Shdr *sechdrs,
+		    struct module *me)
+{
+	return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+}
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
new file mode 100644
index 0000000..93b043e
--- /dev/null
+++ b/arch/m68k/kernel/process.c
@@ -0,0 +1,405 @@
+/*
+ *  linux/arch/m68k/kernel/process.c
+ *
+ *  Copyright (C) 1995  Hamish Macdonald
+ *
+ *  68060 fixes by Jesper Skov
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/reboot.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+#include <asm/machdep.h>
+#include <asm/setup.h>
+#include <asm/pgtable.h>
+
+/*
+ * Initial task/thread structure. Make this a per-architecture thing,
+ * because different architectures tend to have different
+ * alignment requirements and potentially different initial
+ * setup.
+ */
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+union thread_union init_thread_union
+__attribute__((section(".data.init_task"), aligned(THREAD_SIZE)))
+       = { INIT_THREAD_INFO(init_task) };
+
+/* initial task structure */
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
+
+asmlinkage void ret_from_fork(void);
+
+
+/*
+ * Return saved PC from a blocked thread
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+	struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
+	/* Check whether the thread is blocked in resume() */
+	if (in_sched_functions(sw->retpc))
+		return ((unsigned long *)sw->a6)[1];
+	else
+		return sw->retpc;
+}
+
+/*
+ * The idle loop on an m68k..
+ */
+void default_idle(void)
+{
+	if (!need_resched())
+#if defined(MACH_ATARI_ONLY) && !defined(CONFIG_HADES)
+		/* block out HSYNC on the atari (falcon) */
+		__asm__("stop #0x2200" : : : "cc");
+#else
+		__asm__("stop #0x2000" : : : "cc");
+#endif
+}
+
+void (*idle)(void) = default_idle;
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+	/* endless idle loop with no priority at all */
+	while (1) {
+		while (!need_resched())
+			idle();
+		schedule();
+	}
+}
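+
+/*
+ * Illustrative sketch with a hypothetical platform: a board could install
+ * its own low-power loop by overriding the "idle" hook above from its
+ * configuration code, e.g.
+ *
+ *	static void example_platform_idle(void)
+ *	{
+ *		if (!need_resched())
+ *			__asm__("stop #0x2000" : : : "cc");
+ *	}
+ *
+ *	idle = example_platform_idle;
+ */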
+
+void machine_restart(char * __unused)
+{
+	if (mach_reset)
+		mach_reset();
+	for (;;);
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+void machine_halt(void)
+{
+	if (mach_halt)
+		mach_halt();
+	for (;;);
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+void machine_power_off(void)
+{
+	if (mach_power_off)
+		mach_power_off();
+	for (;;);
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+void show_regs(struct pt_regs * regs)
+{
+	printk("\n");
+	printk("Format %02x  Vector: %04x  PC: %08lx  Status: %04x    %s\n",
+	       regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
+	printk("ORIG_D0: %08lx  D0: %08lx  A2: %08lx  A1: %08lx\n",
+	       regs->orig_d0, regs->d0, regs->a2, regs->a1);
+	printk("A0: %08lx  D5: %08lx  D4: %08lx\n",
+	       regs->a0, regs->d5, regs->d4);
+	printk("D3: %08lx  D2: %08lx  D1: %08lx\n",
+	       regs->d3, regs->d2, regs->d1);
+	if (!(regs->sr & PS_S))
+		printk("USP: %08lx\n", rdusp());
+}
+
+/*
+ * Create a kernel thread
+ */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+	int pid;
+	mm_segment_t fs;
+
+	fs = get_fs();
+	set_fs (KERNEL_DS);
+
+	{
+	register long retval __asm__ ("d0");
+	register long clone_arg __asm__ ("d1") = flags | CLONE_VM | CLONE_UNTRACED;
+
+	retval = __NR_clone;
+	__asm__ __volatile__
+	  ("clrl %%d2\n\t"
+	   "trap #0\n\t"		/* Linux/m68k system call */
+	   "tstl %0\n\t"		/* child or parent */
+	   "jne 1f\n\t"			/* parent - jump */
+	   "lea %%sp@(%c7),%6\n\t"	/* reload current */
+	   "movel %6@,%6\n\t"
+	   "movel %3,%%sp@-\n\t"	/* push argument */
+	   "jsr %4@\n\t"		/* call fn */
+	   "movel %0,%%d1\n\t"		/* pass exit value */
+	   "movel %2,%%d0\n\t"		/* exit */
+	   "trap #0\n"
+	   "1:"
+	   : "+d" (retval)
+	   : "i" (__NR_clone), "i" (__NR_exit),
+	     "r" (arg), "a" (fn), "d" (clone_arg), "r" (current),
+	     "i" (-THREAD_SIZE)
+	   : "d2");
+
+	pid = retval;
+	}
+
+	set_fs (fs);
+	return pid;
+}
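+
+/*
+ * Usage sketch with a hypothetical caller: kernel_thread() returns the pid
+ * of the new thread, or a negative error code from the clone system call.
+ *
+ *	static int example_thread(void *data)
+ *	{
+ *		... do the work, then exit ...
+ *		return 0;
+ *	}
+ *
+ *	pid = kernel_thread(example_thread, NULL, CLONE_FS | CLONE_FILES);
+ *	if (pid < 0)
+ *		printk(KERN_ERR "example: kernel_thread failed (%d)\n", pid);
+ */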
+
+void flush_thread(void)
+{
+	unsigned long zero = 0;
+	set_fs(USER_DS);
+	current->thread.fs = __USER_DS;
+	if (!FPU_IS_EMU)
+		asm volatile (".chip 68k/68881\n\t"
+			      "frestore %0@\n\t"
+			      ".chip 68k" : : "a" (&zero));
+}
+
+/*
+ * "m68k_fork()".. By the time we get here, the
+ * non-volatile registers have also been saved on the
+ * stack. We do some ugly pointer stuff here.. (see
+ * also copy_thread)
+ */
+
+asmlinkage int m68k_fork(struct pt_regs *regs)
+{
+	return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
+}
+
+asmlinkage int m68k_vfork(struct pt_regs *regs)
+{
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0,
+		       NULL, NULL);
+}
+
+asmlinkage int m68k_clone(struct pt_regs *regs)
+{
+	unsigned long clone_flags;
+	unsigned long newsp;
+	int *parent_tidptr, *child_tidptr;
+
+	/* syscall2 puts clone_flags in d1 and usp in d2 */
+	clone_flags = regs->d1;
+	newsp = regs->d2;
+	parent_tidptr = (int *)regs->d3;
+	child_tidptr = (int *)regs->d4;
+	if (!newsp)
+		newsp = rdusp();
+	return do_fork(clone_flags, newsp, regs, 0,
+		       parent_tidptr, child_tidptr);
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+		 unsigned long unused,
+		 struct task_struct * p, struct pt_regs * regs)
+{
+	struct pt_regs * childregs;
+	struct switch_stack * childstack, *stack;
+	unsigned long stack_offset, *retp;
+
+	stack_offset = THREAD_SIZE - sizeof(struct pt_regs);
+	childregs = (struct pt_regs *) ((unsigned long) (p->thread_info) + stack_offset);
+
+	*childregs = *regs;
+	childregs->d0 = 0;
+
+	retp = ((unsigned long *) regs);
+	stack = ((struct switch_stack *) retp) - 1;
+
+	childstack = ((struct switch_stack *) childregs) - 1;
+	*childstack = *stack;
+	childstack->retpc = (unsigned long)ret_from_fork;
+
+	p->thread.usp = usp;
+	p->thread.ksp = (unsigned long)childstack;
+	/*
+	 * Must save the current SFC/DFC value, NOT the value when
+	 * the parent was last descheduled - RGH  10-08-96
+	 */
+	p->thread.fs = get_fs().seg;
+
+	if (!FPU_IS_EMU) {
+		/* Copy the current fpu state */
+		asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
+
+		if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2])
+		  asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
+				"fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
+				: : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
+				: "memory");
+		/* Restore the state in case the fpu was busy */
+		asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
+	}
+
+	return 0;
+}
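+
+/*
+ * For illustration, the child kernel stack built above ends up laid out as
+ *
+ *	thread_info ... [ switch_stack ][ pt_regs ]   (top of THREAD_SIZE area)
+ *
+ * with p->thread.ksp pointing at the switch_stack, so the first switch to
+ * the child resumes in ret_from_fork with a copy of the parent's registers
+ * and d0 already forced to 0, the child's return value from fork().
+ */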
+
+/* Fill in the fpu structure for a core dump.  */
+
+int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
+{
+	char fpustate[216];
+
+	if (FPU_IS_EMU) {
+		int i;
+
+		memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
+		memcpy(fpu->fpregs, current->thread.fp, 96);
+		/* Convert internal fpu reg representation
+		 * into long double format
+		 */
+		for (i = 0; i < 24; i += 3)
+			fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
+			                 ((fpu->fpregs[i] & 0x0000ffff) << 16);
+		return 1;
+	}
+
+	/* First dump the fpu context to avoid protocol violation.  */
+	asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
+	if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
+		return 0;
+
+	asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
+		:: "m" (fpu->fpcntl[0])
+		: "memory");
+	asm volatile ("fmovemx %/fp0-%/fp7,%0"
+		:: "m" (fpu->fpregs[0])
+		: "memory");
+	return 1;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+	struct switch_stack *sw;
+
+/* changed the size calculations - should hopefully work better. lbt */
+	dump->magic = CMAGIC;
+	dump->start_code = 0;
+	dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
+	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
+	dump->u_dsize = ((unsigned long) (current->mm->brk +
+					  (PAGE_SIZE-1))) >> PAGE_SHIFT;
+	dump->u_dsize -= dump->u_tsize;
+	dump->u_ssize = 0;
+
+	if (dump->start_stack < TASK_SIZE)
+		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
+
+	dump->u_ar0 = (struct user_regs_struct *)((int)&dump->regs - (int)dump);
+	sw = ((struct switch_stack *)regs) - 1;
+	dump->regs.d1 = regs->d1;
+	dump->regs.d2 = regs->d2;
+	dump->regs.d3 = regs->d3;
+	dump->regs.d4 = regs->d4;
+	dump->regs.d5 = regs->d5;
+	dump->regs.d6 = sw->d6;
+	dump->regs.d7 = sw->d7;
+	dump->regs.a0 = regs->a0;
+	dump->regs.a1 = regs->a1;
+	dump->regs.a2 = regs->a2;
+	dump->regs.a3 = sw->a3;
+	dump->regs.a4 = sw->a4;
+	dump->regs.a5 = sw->a5;
+	dump->regs.a6 = sw->a6;
+	dump->regs.d0 = regs->d0;
+	dump->regs.orig_d0 = regs->orig_d0;
+	dump->regs.stkadj = regs->stkadj;
+	dump->regs.sr = regs->sr;
+	dump->regs.pc = regs->pc;
+	dump->regs.fmtvec = (regs->format << 12) | regs->vector;
+	/* dump floating point stuff */
+	dump->u_fpvalid = dump_fpu (regs, &dump->m68kfp);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(char *name, char **argv, char **envp)
+{
+	int error;
+	char * filename;
+	struct pt_regs *regs = (struct pt_regs *) &name;
+
+	lock_kernel();
+	filename = getname(name);
+	error = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		goto out;
+	error = do_execve(filename, argv, envp, regs);
+	putname(filename);
+out:
+	unlock_kernel();
+	return error;
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+	unsigned long fp, pc;
+	unsigned long stack_page;
+	int count = 0;
+	if (!p || p == current || p->state == TASK_RUNNING)
+		return 0;
+
+	stack_page = (unsigned long)(p->thread_info);
+	fp = ((struct switch_stack *)p->thread.ksp)->a6;
+	do {
+		if (fp < stack_page+sizeof(struct thread_info) ||
+		    fp >= 8184+stack_page)
+			return 0;
+		pc = ((unsigned long *)fp)[1];
+		if (!in_sched_functions(pc))
+			return pc;
+		fp = *(unsigned long *) fp;
+	} while (count++ < 16);
+	return 0;
+}
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
new file mode 100644
index 0000000..0beb533
--- /dev/null
+++ b/arch/m68k/kernel/ptrace.c
@@ -0,0 +1,393 @@
+/*
+ *  linux/arch/m68k/kernel/ptrace.c
+ *
+ *  Copyright (C) 1994 by Hamish Macdonald
+ *  Taken from linux/kernel/ptrace.c and modified for M680x0.
+ *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file COPYING in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/config.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+
+/*
+ * This does not yet catch signals sent when the child dies;
+ * that needs to be done in exit.c or in signal.c.
+ */
+
+/* determines which bits in the SR the user has access to. */
+/* 1 = access 0 = no access */
+#define SR_MASK 0x001f
+
+/* sets the trace bits. */
+#define TRACE_BITS 0x8000
+
+/* Find the stack offset for a register, relative to thread.esp0. */
+#define PT_REG(reg)	((long)&((struct pt_regs *)0)->reg)
+#define SW_REG(reg)	((long)&((struct switch_stack *)0)->reg \
+			 - sizeof(struct switch_stack))
+/* Mapping from PT_xxx to the stack offset at which the register is
+   saved.  Notice that usp has no stack-slot and needs to be treated
+   specially (see get_reg/put_reg below). */
+static int regoff[] = {
+	[0]	= PT_REG(d1),
+	[1]	= PT_REG(d2),
+	[2]	= PT_REG(d3),
+	[3]	= PT_REG(d4),
+	[4]	= PT_REG(d5),
+	[5]	= SW_REG(d6),
+	[6]	= SW_REG(d7),
+	[7]	= PT_REG(a0),
+	[8]	= PT_REG(a1),
+	[9]	= PT_REG(a2),
+	[10]	= SW_REG(a3),
+	[11]	= SW_REG(a4),
+	[12]	= SW_REG(a5),
+	[13]	= SW_REG(a6),
+	[14]	= PT_REG(d0),
+	[15]	= -1,
+	[16]	= PT_REG(orig_d0),
+	[17]	= PT_REG(sr),
+	[18]	= PT_REG(pc),
+};
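+
+/*
+ * For reference, PT_REG() and SW_REG() are offsetof()-style constructs:
+ * PT_REG(d1) is the byte offset of d1 within struct pt_regs, while
+ * SW_REG(a3) is a negative offset, because the switch_stack frame is saved
+ * just below the pt_regs that thread.esp0 points at.
+ */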
+
+/*
+ * Get contents of register REGNO in task TASK.
+ */
+static inline long get_reg(struct task_struct *task, int regno)
+{
+	unsigned long *addr;
+
+	if (regno == PT_USP)
+		addr = &task->thread.usp;
+	else if (regno < sizeof(regoff)/sizeof(regoff[0]))
+		addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
+	else
+		return 0;
+	return *addr;
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ */
+static inline int put_reg(struct task_struct *task, int regno,
+			  unsigned long data)
+{
+	unsigned long *addr;
+
+	if (regno == PT_USP)
+		addr = &task->thread.usp;
+	else if (regno < sizeof(regoff)/sizeof(regoff[0]))
+		addr = (unsigned long *) (task->thread.esp0 + regoff[regno]);
+	else
+		return -1;
+	*addr = data;
+	return 0;
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure the single step bit is not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+	unsigned long tmp;
+	/* make sure the single step bit is not set. */
+	tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16);
+	put_reg(child, PT_SR, tmp);
+	child->thread.work.delayed_trace = 0;
+	child->thread.work.syscall_trace = 0;
+}
+
+asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
+{
+	struct task_struct *child;
+	int ret;
+
+	lock_kernel();
+	ret = -EPERM;
+	if (request == PTRACE_TRACEME) {
+		/* are we already being traced? */
+		if (current->ptrace & PT_PTRACED)
+			goto out;
+		/* set the ptrace bit in the process flags. */
+		current->ptrace |= PT_PTRACED;
+		ret = 0;
+		goto out;
+	}
+	ret = -ESRCH;
+	read_lock(&tasklist_lock);
+	child = find_task_by_pid(pid);
+	if (child)
+		get_task_struct(child);
+	read_unlock(&tasklist_lock);
+	if (!child)
+		goto out;
+
+	ret = -EPERM;
+	if (pid == 1)		/* you may not mess with init */
+		goto out_tsk;
+
+	if (request == PTRACE_ATTACH) {
+		ret = ptrace_attach(child);
+		goto out_tsk;
+	}
+
+	ret = ptrace_check_attach(child, request == PTRACE_KILL);
+	if (ret < 0)
+		goto out_tsk;
+
+	switch (request) {
+	/* when I and D space are separate, these will need to be fixed. */
+		case PTRACE_PEEKTEXT: /* read word at location addr. */
+		case PTRACE_PEEKDATA: {
+			unsigned long tmp;
+			int copied;
+
+			copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+			ret = -EIO;
+			if (copied != sizeof(tmp))
+				break;
+			ret = put_user(tmp,(unsigned long *) data);
+			break;
+		}
+
+	/* read the word at location addr in the USER area. */
+		case PTRACE_PEEKUSR: {
+			unsigned long tmp;
+
+			ret = -EIO;
+			if ((addr & 3) || addr < 0 ||
+			    addr > sizeof(struct user) - 3)
+				break;
+
+			tmp = 0;  /* Default return condition */
+			addr = addr >> 2; /* temporary hack. */
+			ret = -EIO;
+			if (addr < 19) {
+				tmp = get_reg(child, addr);
+				if (addr == PT_SR)
+					tmp >>= 16;
+			} else if (addr >= 21 && addr < 49) {
+				tmp = child->thread.fp[addr - 21];
+#ifdef CONFIG_M68KFPU_EMU
+				/* Convert internal fpu reg representation
+				 * into long double format
+				 */
+				if (FPU_IS_EMU && (addr < 45) && !(addr % 3))
+					tmp = ((tmp & 0xffff0000) << 15) |
+					      ((tmp & 0x0000ffff) << 16);
+#endif
+			} else
+				break;
+			ret = put_user(tmp,(unsigned long *) data);
+			break;
+		}
+
+      /* when I and D space are separate, this will have to be fixed. */
+		case PTRACE_POKETEXT: /* write the word at location addr. */
+		case PTRACE_POKEDATA:
+			ret = 0;
+			if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
+				break;
+			ret = -EIO;
+			break;
+
+		case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+			ret = -EIO;
+			if ((addr & 3) || addr < 0 ||
+			    addr > sizeof(struct user) - 3)
+				break;
+
+			addr = addr >> 2; /* temporary hack. */
+
+			if (addr == PT_SR) {
+				data &= SR_MASK;
+				data <<= 16;
+				data |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
+			}
+			if (addr < 19) {
+				if (put_reg(child, addr, data))
+					break;
+				ret = 0;
+				break;
+			}
+			if (addr >= 21 && addr < 48)
+			{
+#ifdef CONFIG_M68KFPU_EMU
+				/* Convert long double format
+				 * into internal fpu reg representation
+				 */
+				if (FPU_IS_EMU && (addr < 45) && !(addr % 3)) {
+					data = (unsigned long)data << 15;
+					data = (data & 0xffff0000) |
+					       ((data & 0x0000ffff) >> 1);
+				}
+#endif
+				child->thread.fp[addr - 21] = data;
+				ret = 0;
+			}
+			break;
+
+		case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+		case PTRACE_CONT: { /* restart after signal. */
+			long tmp;
+
+			ret = -EIO;
+			if ((unsigned long) data > _NSIG)
+				break;
+			if (request == PTRACE_SYSCALL) {
+					child->thread.work.syscall_trace = ~0;
+			} else {
+					child->thread.work.syscall_trace = 0;
+			}
+			child->exit_code = data;
+			/* make sure the single step bit is not set. */
+			tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16);
+			put_reg(child, PT_SR, tmp);
+			child->thread.work.delayed_trace = 0;
+			wake_up_process(child);
+			ret = 0;
+			break;
+		}
+
+/*
+ * Make the child exit.  The best we can do is send it a SIGKILL;
+ * perhaps it should be recorded in the status that it wants to
+ * exit.
+ */
+		case PTRACE_KILL: {
+			long tmp;
+
+			ret = 0;
+			if (child->exit_state == EXIT_ZOMBIE) /* already dead */
+				break;
+			child->exit_code = SIGKILL;
+	/* make sure the single step bit is not set. */
+			tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16);
+			put_reg(child, PT_SR, tmp);
+			child->thread.work.delayed_trace = 0;
+			wake_up_process(child);
+			break;
+		}
+
+		case PTRACE_SINGLESTEP: {  /* set the trap flag. */
+			long tmp;
+
+			ret = -EIO;
+			if ((unsigned long) data > _NSIG)
+				break;
+			child->thread.work.syscall_trace = 0;
+			tmp = get_reg(child, PT_SR) | (TRACE_BITS << 16);
+			put_reg(child, PT_SR, tmp);
+			child->thread.work.delayed_trace = 1;
+
+			child->exit_code = data;
+	/* give it a chance to run. */
+			wake_up_process(child);
+			ret = 0;
+			break;
+		}
+
+		case PTRACE_DETACH:	/* detach a process that was attached. */
+			ret = ptrace_detach(child, data);
+			break;
+
+		case PTRACE_GETREGS: { /* Get all gp regs from the child. */
+			int i;
+			unsigned long tmp;
+			for (i = 0; i < 19; i++) {
+			    tmp = get_reg(child, i);
+			    if (i == PT_SR)
+				tmp >>= 16;
+			    if (put_user(tmp, (unsigned long *) data)) {
+				ret = -EFAULT;
+				break;
+			    }
+			    data += sizeof(long);
+			}
+			ret = 0;
+			break;
+		}
+
+		case PTRACE_SETREGS: { /* Set all gp regs in the child. */
+			int i;
+			unsigned long tmp;
+			for (i = 0; i < 19; i++) {
+			    if (get_user(tmp, (unsigned long *) data)) {
+				ret = -EFAULT;
+				break;
+			    }
+			    if (i == PT_SR) {
+				tmp &= SR_MASK;
+				tmp <<= 16;
+				tmp |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
+			    }
+			    put_reg(child, i, tmp);
+			    data += sizeof(long);
+			}
+			ret = 0;
+			break;
+		}
+
+		case PTRACE_GETFPREGS: { /* Get the child FPU state. */
+			ret = 0;
+			if (copy_to_user((void *)data, &child->thread.fp,
+					 sizeof(struct user_m68kfp_struct)))
+				ret = -EFAULT;
+			break;
+		}
+
+		case PTRACE_SETFPREGS: { /* Set the child FPU state. */
+			ret = 0;
+			if (copy_from_user(&child->thread.fp, (void *)data,
+					   sizeof(struct user_m68kfp_struct)))
+				ret = -EFAULT;
+			break;
+		}
+
+		default:
+			ret = ptrace_request(child, request, addr, data);
+			break;
+	}
+out_tsk:
+	put_task_struct(child);
+out:
+	unlock_kernel();
+	return ret;
+}
+
+asmlinkage void syscall_trace(void)
+{
+	if (!current->thread.work.delayed_trace &&
+	    !current->thread.work.syscall_trace)
+		return;
+	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+				 ? 0x80 : 0));
+	/*
+	 * this isn't the same as continuing with a signal, but it will do
+	 * for normal use.  strace only continues with a signal if the
+	 * stopping signal is not SIGTRAP.  -brl
+	 */
+	if (current->exit_code) {
+		send_sig(current->exit_code, current, 1);
+		current->exit_code = 0;
+	}
+}
diff --git a/arch/m68k/kernel/semaphore.c b/arch/m68k/kernel/semaphore.c
new file mode 100644
index 0000000..1ebb79b
--- /dev/null
+++ b/arch/m68k/kernel/semaphore.c
@@ -0,0 +1,133 @@
+/*
+ *  Generic semaphore code. Buyer beware. Do your own
+ * specific changes in <asm/semaphore-helper.h>
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/semaphore-helper.h>
+
+#ifndef CONFIG_RMW_INSNS
+spinlock_t semaphore_wake_lock;
+#endif
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to sleep, while the "waking" variable is
+ * incremented when the "up()" code goes to wake up waiting
+ * processes.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * waking_non_zero() (from asm/semaphore.h) must execute
+ * atomically.
+ *
+ * When __up() is called, the count was negative before
+ * incrementing it, and we need to wake up somebody.
+ *
+ * This routine adds one to the count of processes that need to
+ * wake up and exit.  ALL waiting processes actually wake up but
+ * only the one that gets to the "waking" field first will gate
+ * through and acquire the semaphore.  The others will go back
+ * to sleep.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+void __up(struct semaphore *sem)
+{
+	wake_one_more(sem);
+	wake_up(&sem->wait);
+}
+
+/*
+ * Perform the "down" function.  Return zero for semaphore acquired,
+ * return negative if we were signalled out of the function.
+ *
+ * If called from __down, the return is ignored and the wait loop is
+ * not interruptible.  This means that a task waiting on a semaphore
+ * using "down()" cannot be killed until someone does an "up()" on
+ * the semaphore.
+ *
+ * If called from __down_interruptible, the return value gets checked
+ * upon return.  If the return value is negative then the task continues
+ * with the negative value in the return register (it can be tested by
+ * the caller).
+ *
+ * Either form may be used in conjunction with "up()".
+ *
+ */
+
+
+#define DOWN_HEAD(task_state)						\
+									\
+									\
+	current->state = (task_state);					\
+	add_wait_queue(&sem->wait, &wait);				\
+									\
+	/*								\
+	 * Ok, we're set up.  sem->count is known to be less than zero	\
+	 * so we must wait.						\
+	 *								\
+	 * We can let go the lock for purposes of waiting.		\
+	 * We re-acquire it after awaking so as to protect		\
+	 * all semaphore operations.					\
+	 *								\
+	 * If "up()" is called before we call waking_non_zero() then	\
+	 * we will catch it right away.  If it is called later then	\
+	 * we will have to go through a wakeup cycle to catch it.	\
+	 *								\
+	 * Multiple waiters contend for the semaphore lock to see	\
+	 * who gets to gate through and who has to wait some more.	\
+	 */								\
+	for (;;) {
+
+#define DOWN_TAIL(task_state)			\
+		current->state = (task_state);	\
+	}					\
+	current->state = TASK_RUNNING;		\
+	remove_wait_queue(&sem->wait, &wait);
+
+void __sched __down(struct semaphore * sem)
+{
+	DECLARE_WAITQUEUE(wait, current);
+
+	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
+	if (waking_non_zero(sem))
+		break;
+	schedule();
+	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
+}
+
+int __sched __down_interruptible(struct semaphore * sem)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	int ret = 0;
+
+	DOWN_HEAD(TASK_INTERRUPTIBLE)
+
+	ret = waking_non_zero_interruptible(sem, current);
+	if (ret)
+	{
+		if (ret == 1)
+			/* ret != 0 only if we get interrupted -arca */
+			ret = 0;
+		break;
+	}
+	schedule();
+	DOWN_TAIL(TASK_INTERRUPTIBLE)
+	return ret;
+}
+
+int __down_trylock(struct semaphore * sem)
+{
+	return waking_non_zero_trylock(sem);
+}
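+
+/*
+ * Usage sketch (hypothetical semaphore name): the slow paths above back the
+ * usual counting-semaphore API from <asm/semaphore.h>.
+ *
+ *	static DECLARE_MUTEX(example_sem);	count initialized to 1
+ *
+ *	down(&example_sem);			may sleep uninterruptibly
+ *	... critical section ...
+ *	up(&example_sem);			__up() runs only on contention
+ *
+ *	if (down_interruptible(&example_sem))	nonzero if a signal arrived
+ *		return -ERESTARTSYS;
+ */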
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
new file mode 100644
index 0000000..d6ca992
--- /dev/null
+++ b/arch/m68k/kernel/setup.c
@@ -0,0 +1,545 @@
+/*
+ *  linux/arch/m68k/kernel/setup.c
+ *
+ *  Copyright (C) 1995  Hamish Macdonald
+ */
+
+/*
+ * This file handles the architecture-dependent parts of system setup
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/console.h>
+#include <linux/genhd.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <linux/initrd.h>
+
+#include <asm/bootinfo.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#ifdef CONFIG_AMIGA
+#include <asm/amigahw.h>
+#endif
+#ifdef CONFIG_ATARI
+#include <asm/atarihw.h>
+#include <asm/atari_stram.h>
+#endif
+#ifdef CONFIG_SUN3X
+#include <asm/dvma.h>
+#endif
+
+unsigned long m68k_machtype;
+unsigned long m68k_cputype;
+unsigned long m68k_fputype;
+unsigned long m68k_mmutype;
+#ifdef CONFIG_VME
+unsigned long vme_brdtype;
+#endif
+
+int m68k_is040or060;
+
+extern int end;
+extern unsigned long availmem;
+
+int m68k_num_memory;
+int m68k_realnum_memory;
+unsigned long m68k_memoffset;
+struct mem_info m68k_memory[NUM_MEMINFO];
+
+static struct mem_info m68k_ramdisk;
+
+static char m68k_command_line[CL_SIZE];
+
+char m68k_debug_device[6] = "";
+
+void (*mach_sched_init) (irqreturn_t (*handler)(int, void *, struct pt_regs *)) __initdata = NULL;
+/* machine dependent irq functions */
+void (*mach_init_IRQ) (void) __initdata = NULL;
+irqreturn_t (*(*mach_default_handler)[]) (int, void *, struct pt_regs *);
+void (*mach_get_model) (char *model);
+int (*mach_get_hardware_list) (char *buffer);
+int (*mach_get_irq_list) (struct seq_file *, void *);
+irqreturn_t (*mach_process_int) (int, struct pt_regs *);
+/* machine dependent timer functions */
+unsigned long (*mach_gettimeoffset) (void);
+int (*mach_hwclk) (int, struct rtc_time*);
+int (*mach_set_clock_mmss) (unsigned long);
+unsigned int (*mach_get_ss)(void);
+int (*mach_get_rtc_pll)(struct rtc_pll_info *);
+int (*mach_set_rtc_pll)(struct rtc_pll_info *);
+void (*mach_reset)( void );
+void (*mach_halt)( void );
+void (*mach_power_off)( void );
+long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
+#if defined(CONFIG_AMIGA_FLOPPY) || defined(CONFIG_ATARI_FLOPPY)
+void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
+#endif
+#ifdef CONFIG_HEARTBEAT
+void (*mach_heartbeat) (int);
+EXPORT_SYMBOL(mach_heartbeat);
+#endif
+#ifdef CONFIG_M68K_L2_CACHE
+void (*mach_l2_flush) (int);
+#endif
+#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
+void (*mach_beep)(unsigned int, unsigned int);
+#endif
+#if defined(CONFIG_ISA) && defined(MULTI_ISA)
+int isa_type;
+int isa_sex;
+#endif
+
+extern int amiga_parse_bootinfo(const struct bi_record *);
+extern int atari_parse_bootinfo(const struct bi_record *);
+extern int mac_parse_bootinfo(const struct bi_record *);
+extern int q40_parse_bootinfo(const struct bi_record *);
+extern int bvme6000_parse_bootinfo(const struct bi_record *);
+extern int mvme16x_parse_bootinfo(const struct bi_record *);
+extern int mvme147_parse_bootinfo(const struct bi_record *);
+extern int hp300_parse_bootinfo(const struct bi_record *);
+
+extern void config_amiga(void);
+extern void config_atari(void);
+extern void config_mac(void);
+extern void config_sun3(void);
+extern void config_apollo(void);
+extern void config_mvme147(void);
+extern void config_mvme16x(void);
+extern void config_bvme6000(void);
+extern void config_hp300(void);
+extern void config_q40(void);
+extern void config_sun3x(void);
+
+extern void mac_debugging_short (int, short);
+extern void mac_debugging_long  (int, long);
+
+#define MASK_256K 0xfffc0000
+
+extern void paging_init(void);
+
+static void __init m68k_parse_bootinfo(const struct bi_record *record)
+{
+    while (record->tag != BI_LAST) {
+	int unknown = 0;
+	const unsigned long *data = record->data;
+	switch (record->tag) {
+	    case BI_MACHTYPE:
+	    case BI_CPUTYPE:
+	    case BI_FPUTYPE:
+	    case BI_MMUTYPE:
+		/* Already set up by head.S */
+		break;
+
+	    case BI_MEMCHUNK:
+		if (m68k_num_memory < NUM_MEMINFO) {
+		    m68k_memory[m68k_num_memory].addr = data[0];
+		    m68k_memory[m68k_num_memory].size = data[1];
+		    m68k_num_memory++;
+		} else
+		    printk("m68k_parse_bootinfo: too many memory chunks\n");
+		break;
+
+	    case BI_RAMDISK:
+		m68k_ramdisk.addr = data[0];
+		m68k_ramdisk.size = data[1];
+		break;
+
+	    case BI_COMMAND_LINE:
+		strlcpy(m68k_command_line, (const char *)data, sizeof(m68k_command_line));
+		break;
+
+	    default:
+		if (MACH_IS_AMIGA)
+		    unknown = amiga_parse_bootinfo(record);
+		else if (MACH_IS_ATARI)
+		    unknown = atari_parse_bootinfo(record);
+		else if (MACH_IS_MAC)
+		    unknown = mac_parse_bootinfo(record);
+		else if (MACH_IS_Q40)
+		    unknown = q40_parse_bootinfo(record);
+		else if (MACH_IS_BVME6000)
+		    unknown = bvme6000_parse_bootinfo(record);
+		else if (MACH_IS_MVME16x)
+		    unknown = mvme16x_parse_bootinfo(record);
+		else if (MACH_IS_MVME147)
+		    unknown = mvme147_parse_bootinfo(record);
+		else if (MACH_IS_HP300)
+		    unknown = hp300_parse_bootinfo(record);
+		else
+		    unknown = 1;
+	}
+	if (unknown)
+	    printk("m68k_parse_bootinfo: unknown tag 0x%04x ignored\n",
+		   record->tag);
+	record = (struct bi_record *)((unsigned long)record+record->size);
+    }
+
+    m68k_realnum_memory = m68k_num_memory;
+#ifdef CONFIG_SINGLE_MEMORY_CHUNK
+    if (m68k_num_memory > 1) {
+	printk("Ignoring last %i chunks of physical memory\n",
+	       (m68k_num_memory - 1));
+	m68k_num_memory = 1;
+    }
+    m68k_memoffset = m68k_memory[0].addr-PAGE_OFFSET;
+#endif
+}
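+
+/*
+ * For illustration: the bootinfo area walked above is a packed sequence of
+ * struct bi_record entries that the boot loader places right after the
+ * kernel bss.  Each record is advanced past by its own size field and the
+ * list is terminated by a record whose tag is BI_LAST, roughly
+ *
+ *	[ tag | size | data ... ][ tag | size | data ... ] ... [ BI_LAST ]
+ */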
+
+void __init setup_arch(char **cmdline_p)
+{
+	extern int _etext, _edata, _end;
+#ifndef CONFIG_SUN3
+	unsigned long endmem, startmem;
+#endif
+	int i;
+	char *p, *q;
+
+	/* The bootinfo is located right after the kernel bss */
+	m68k_parse_bootinfo((const struct bi_record *)&_end);
+
+	if (CPU_IS_040)
+		m68k_is040or060 = 4;
+	else if (CPU_IS_060)
+		m68k_is040or060 = 6;
+
+	/* FIXME: m68k_fputype is passed in by Penguin booter, which can
+	 * be confused by software FPU emulation. BEWARE.
+	 * We should really do our own FPU check at startup.
+	 * [what do we do with buggy 68LC040s? if we have problems
+	 *  with them, we should add a test to check_bugs() below] */
+#ifndef CONFIG_M68KFPU_EMU_ONLY
+	/* clear the fpu if we have one */
+	if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) {
+		volatile int zero = 0;
+		asm __volatile__ ("frestore %0" : : "m" (zero));
+	}
+#endif
+
+	if (CPU_IS_060) {
+		u32 pcr;
+
+		asm (".chip 68060; movec %%pcr,%0; .chip 68k"
+		     : "=d" (pcr));
+		if (((pcr >> 8) & 0xff) <= 5) {
+			printk("Enabling workaround for errata I14\n");
+			asm (".chip 68060; movec %0,%%pcr; .chip 68k"
+			     : : "d" (pcr | 0x20));
+		}
+	}
+
+	init_mm.start_code = PAGE_OFFSET;
+	init_mm.end_code = (unsigned long) &_etext;
+	init_mm.end_data = (unsigned long) &_edata;
+	init_mm.brk = (unsigned long) &_end;
+
+	*cmdline_p = m68k_command_line;
+	memcpy(saved_command_line, *cmdline_p, CL_SIZE);
+
+	/* Parse the command line for arch-specific options.
+	 * For the m68k, this is currently only "debug=xxx" to enable printing
+	 * certain kernel messages to some machine-specific device.
+	 */
+	for( p = *cmdline_p; p && *p; ) {
+	    i = 0;
+	    if (!strncmp( p, "debug=", 6 )) {
+		strlcpy( m68k_debug_device, p+6, sizeof(m68k_debug_device) );
+		if ((q = strchr( m68k_debug_device, ' ' ))) *q = 0;
+		i = 1;
+	    }
+#ifdef CONFIG_ATARI
+	    /* This option must be parsed very early */
+	    if (!strncmp( p, "switches=", 9 )) {
+		extern void atari_switches_setup( const char *, int );
+		atari_switches_setup( p+9, (q = strchr( p+9, ' ' )) ?
+				           (q - (p+9)) : strlen(p+9) );
+		i = 1;
+	    }
+#endif
+
+	    if (i) {
+		/* option processed, delete it */
+		if ((q = strchr( p, ' ' )))
+		    strcpy( p, q+1 );
+		else
+		    *p = 0;
+	    } else {
+		if ((p = strchr( p, ' ' ))) ++p;
+	    }
+	}
+
+	switch (m68k_machtype) {
+#ifdef CONFIG_AMIGA
+	    case MACH_AMIGA:
+		config_amiga();
+		break;
+#endif
+#ifdef CONFIG_ATARI
+	    case MACH_ATARI:
+		config_atari();
+		break;
+#endif
+#ifdef CONFIG_MAC
+	    case MACH_MAC:
+		config_mac();
+		break;
+#endif
+#ifdef CONFIG_SUN3
+	    case MACH_SUN3:
+		config_sun3();
+		break;
+#endif
+#ifdef CONFIG_APOLLO
+	    case MACH_APOLLO:
+		config_apollo();
+		break;
+#endif
+#ifdef CONFIG_MVME147
+	    case MACH_MVME147:
+		config_mvme147();
+		break;
+#endif
+#ifdef CONFIG_MVME16x
+	    case MACH_MVME16x:
+		config_mvme16x();
+		break;
+#endif
+#ifdef CONFIG_BVME6000
+	    case MACH_BVME6000:
+		config_bvme6000();
+		break;
+#endif
+#ifdef CONFIG_HP300
+	    case MACH_HP300:
+		config_hp300();
+		break;
+#endif
+#ifdef CONFIG_Q40
+	    case MACH_Q40:
+	        config_q40();
+		break;
+#endif
+#ifdef CONFIG_SUN3X
+	    case MACH_SUN3X:
+		config_sun3x();
+		break;
+#endif
+	    default:
+		panic ("No configuration setup");
+	}
+
+#ifndef CONFIG_SUN3
+	startmem = m68k_memory[0].addr;
+	endmem = startmem + m68k_memory[0].size;
+	high_memory = (void *)PAGE_OFFSET;
+	for (i = 0; i < m68k_num_memory; i++) {
+		m68k_memory[i].size &= MASK_256K;
+		if (m68k_memory[i].addr < startmem)
+			startmem = m68k_memory[i].addr;
+		if (m68k_memory[i].addr+m68k_memory[i].size > endmem)
+			endmem = m68k_memory[i].addr+m68k_memory[i].size;
+		high_memory += m68k_memory[i].size;
+	}
+
+	availmem += init_bootmem_node(NODE_DATA(0), availmem >> PAGE_SHIFT,
+				      startmem >> PAGE_SHIFT, endmem >> PAGE_SHIFT);
+
+	for (i = 0; i < m68k_num_memory; i++)
+		free_bootmem(m68k_memory[i].addr, m68k_memory[i].size);
+
+	reserve_bootmem(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (m68k_ramdisk.size) {
+		reserve_bootmem(m68k_ramdisk.addr, m68k_ramdisk.size);
+		initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
+		initrd_end = initrd_start + m68k_ramdisk.size;
+		printk ("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
+	}
+#endif
+
+#ifdef CONFIG_ATARI
+	if (MACH_IS_ATARI)
+		atari_stram_reserve_pages((void *)availmem);
+#endif
+#ifdef CONFIG_SUN3X
+	if (MACH_IS_SUN3X) {
+		dvma_init();
+	}
+#endif
+
+#endif /* !CONFIG_SUN3 */
+
+	paging_init();
+
+/* set ISA defs as early as possible */
+#if defined(CONFIG_ISA) && defined(MULTI_ISA)
+#if defined(CONFIG_Q40)
+	if (MACH_IS_Q40) {
+	    isa_type = Q40_ISA;
+	    isa_sex = 0;
+	}
+#elif defined(CONFIG_GG2)
+	if (MACH_IS_AMIGA && AMIGAHW_PRESENT(GG2_ISA)){
+	    isa_type = GG2_ISA;
+	    isa_sex = 0;
+	}
+#elif defined(CONFIG_AMIGA_PCMCIA)
+	if (MACH_IS_AMIGA && AMIGAHW_PRESENT(PCMCIA)){
+	    isa_type = AG_ISA;
+	    isa_sex = 1;
+	}
+#endif
+#endif
+}
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+    const char *cpu, *mmu, *fpu;
+    unsigned long clockfreq, clockfactor;
+
+#define LOOP_CYCLES_68020	(8)
+#define LOOP_CYCLES_68030	(8)
+#define LOOP_CYCLES_68040	(3)
+#define LOOP_CYCLES_68060	(1)
+
+    if (CPU_IS_020) {
+	cpu = "68020";
+	clockfactor = LOOP_CYCLES_68020;
+    } else if (CPU_IS_030) {
+	cpu = "68030";
+	clockfactor = LOOP_CYCLES_68030;
+    } else if (CPU_IS_040) {
+	cpu = "68040";
+	clockfactor = LOOP_CYCLES_68040;
+    } else if (CPU_IS_060) {
+	cpu = "68060";
+	clockfactor = LOOP_CYCLES_68060;
+    } else {
+	cpu = "680x0";
+	clockfactor = 0;
+    }
+
+#ifdef CONFIG_M68KFPU_EMU_ONLY
+    fpu="none(soft float)";
+#else
+    if (m68k_fputype & FPU_68881)
+	fpu = "68881";
+    else if (m68k_fputype & FPU_68882)
+	fpu = "68882";
+    else if (m68k_fputype & FPU_68040)
+	fpu = "68040";
+    else if (m68k_fputype & FPU_68060)
+	fpu = "68060";
+    else if (m68k_fputype & FPU_SUNFPA)
+	fpu = "Sun FPA";
+    else
+	fpu = "none";
+#endif
+
+    if (m68k_mmutype & MMU_68851)
+	mmu = "68851";
+    else if (m68k_mmutype & MMU_68030)
+	mmu = "68030";
+    else if (m68k_mmutype & MMU_68040)
+	mmu = "68040";
+    else if (m68k_mmutype & MMU_68060)
+	mmu = "68060";
+    else if (m68k_mmutype & MMU_SUN3)
+	mmu = "Sun-3";
+    else if (m68k_mmutype & MMU_APOLLO)
+	mmu = "Apollo";
+    else
+	mmu = "unknown";
+
+    clockfreq = loops_per_jiffy*HZ*clockfactor;
+
+    seq_printf(m, "CPU:\t\t%s\n"
+		   "MMU:\t\t%s\n"
+		   "FPU:\t\t%s\n"
+		   "Clocking:\t%lu.%1luMHz\n"
+		   "BogoMips:\t%lu.%02lu\n"
+		   "Calibration:\t%lu loops\n",
+		   cpu, mmu, fpu,
+		   clockfreq/1000000,(clockfreq/100000)%10,
+		   loops_per_jiffy/(500000/HZ),(loops_per_jiffy/(5000/HZ))%100,
+		   loops_per_jiffy);
+    return 0;
+}
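+
+/*
+ * Worked example with hypothetical numbers: on a 68040 (LOOP_CYCLES_68040
+ * is 3) with HZ == 100 and loops_per_jiffy == 83333 the code above prints
+ *
+ *	Clocking:	83333 * 100 * 3 Hz  ->  24.9MHz
+ *	BogoMips:	83333 / (500000/100) =  16.66
+ *
+ * so the reported clock is only as accurate as the delay-loop calibration.
+ */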
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	return *pos < 1 ? (void *)1 : NULL;
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return NULL;
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+struct seq_operations cpuinfo_op = {
+	.start =	c_start,
+	.next =		c_next,
+	.stop =		c_stop,
+	.show =		show_cpuinfo,
+};
+
+int get_hardware_list(char *buffer)
+{
+    int len = 0;
+    char model[80];
+    unsigned long mem;
+    int i;
+
+    if (mach_get_model)
+	mach_get_model(model);
+    else
+	strcpy(model, "Unknown m68k");
+
+    len += sprintf(buffer+len, "Model:\t\t%s\n", model);
+    for (mem = 0, i = 0; i < m68k_num_memory; i++)
+	mem += m68k_memory[i].size;
+    len += sprintf(buffer+len, "System Memory:\t%ldK\n", mem>>10);
+
+    if (mach_get_hardware_list)
+	len += mach_get_hardware_list(buffer+len);
+
+    return(len);
+}
+
+
+#if defined(CONFIG_AMIGA_FLOPPY) || defined(CONFIG_ATARI_FLOPPY)
+void __init floppy_setup(char *str, int *ints)
+{
+	if (mach_floppy_setup)
+		mach_floppy_setup (str, ints);
+}
+
+#endif
+
+void check_bugs(void)
+{
+#ifndef CONFIG_M68KFPU_EMU
+	if (m68k_fputype == 0) {
+		printk( KERN_EMERG "*** YOU DO NOT HAVE A FLOATING POINT UNIT, "
+				"WHICH IS REQUIRED BY LINUX/M68K ***\n" );
+		printk( KERN_EMERG "Upgrade your hardware or join the FPU "
+				"emulation project\n" );
+		panic( "no FPU" );
+	}
+#endif /* !CONFIG_M68KFPU_EMU */
+}
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
new file mode 100644
index 0000000..9c636a4
--- /dev/null
+++ b/arch/m68k/kernel/signal.c
@@ -0,0 +1,1025 @@
+/*
+ *  linux/arch/m68k/kernel/signal.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Linux/m68k support by Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
+ *
+ * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
+ *
+ * mathemu support by Roman Zippel
+ *  (Note: fpstate in the signal context is completely ignored for the emulator
+ *         and the internal floating point format is put on stack)
+ */
+
+/*
+ * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
+ * Atari :-) Current limitation: Only one sigstack can be active at one time.
+ * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
+ * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
+ * signal handlers!
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/highuid.h>
+#include <linux/personality.h>
+#include <linux/tty.h>
+#include <linux/binfmts.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/traps.h>
+#include <asm/ucontext.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
+
+const int frame_extra_sizes[16] = {
+  [1]	= -1, /* sizeof(((struct frame *)0)->un.fmt1), */
+  [2]	= sizeof(((struct frame *)0)->un.fmt2),
+  [3]	= sizeof(((struct frame *)0)->un.fmt3),
+  [4]	= sizeof(((struct frame *)0)->un.fmt4),
+  [5]	= -1, /* sizeof(((struct frame *)0)->un.fmt5), */
+  [6]	= -1, /* sizeof(((struct frame *)0)->un.fmt6), */
+  [7]	= sizeof(((struct frame *)0)->un.fmt7),
+  [8]	= -1, /* sizeof(((struct frame *)0)->un.fmt8), */
+  [9]	= sizeof(((struct frame *)0)->un.fmt9),
+  [10]	= sizeof(((struct frame *)0)->un.fmta),
+  [11]	= sizeof(((struct frame *)0)->un.fmtb),
+  [12]	= -1, /* sizeof(((struct frame *)0)->un.fmtc), */
+  [13]	= -1, /* sizeof(((struct frame *)0)->un.fmtd), */
+  [14]	= -1, /* sizeof(((struct frame *)0)->un.fmte), */
+  [15]	= -1, /* sizeof(((struct frame *)0)->un.fmtf), */
+};
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int do_sigsuspend(struct pt_regs *regs)
+{
+	old_sigset_t mask = regs->d3;
+	sigset_t saveset;
+
+	mask &= _BLOCKABLE;
+	saveset = current->blocked;
+	siginitset(&current->blocked, mask);
+	recalc_sigpending();
+
+	regs->d0 = -EINTR;
+	while (1) {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule();
+		if (do_signal(&saveset, regs))
+			return -EINTR;
+	}
+}
+
+asmlinkage int
+do_rt_sigsuspend(struct pt_regs *regs)
+{
+	sigset_t *unewset = (sigset_t *)regs->d1;
+	size_t sigsetsize = (size_t)regs->d2;
+	sigset_t saveset, newset;
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(sigset_t))
+		return -EINVAL;
+
+	if (copy_from_user(&newset, unewset, sizeof(newset)))
+		return -EFAULT;
+	sigdelsetmask(&newset, ~_BLOCKABLE);
+
+	saveset = current->blocked;
+	current->blocked = newset;
+	recalc_sigpending();
+
+	regs->d0 = -EINTR;
+	while (1) {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule();
+		if (do_signal(&saveset, regs))
+			return -EINTR;
+	}
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction *act,
+	      struct old_sigaction *oact)
+{
+	struct k_sigaction new_ka, old_ka;
+	int ret;
+
+	if (act) {
+		old_sigset_t mask;
+		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+			return -EFAULT;
+		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
+		__get_user(mask, &act->sa_mask);
+		siginitset(&new_ka.sa.sa_mask, mask);
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+			return -EFAULT;
+		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+	}
+
+	return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t *uss, stack_t *uoss)
+{
+	return do_sigaltstack(uss, uoss, rdusp());
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ *
+ * Keep the return code on the stack quadword aligned!
+ * That makes the cache flush below easier.
+ */
+
+struct sigframe
+{
+	char *pretcode;
+	int sig;
+	int code;
+	struct sigcontext *psc;
+	char retcode[8];
+	unsigned long extramask[_NSIG_WORDS-1];
+	struct sigcontext sc;
+};
+
+struct rt_sigframe
+{
+	char *pretcode;
+	int sig;
+	struct siginfo *pinfo;
+	void *puc;
+	char retcode[8];
+	struct siginfo info;
+	struct ucontext uc;
+};
+
+
+static unsigned char fpu_version;	/* version number of fpu, set by setup_frame */
+
+static inline int restore_fpu_state(struct sigcontext *sc)
+{
+	int err = 1;
+
+	if (FPU_IS_EMU) {
+	    /* restore registers */
+	    memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
+	    memcpy(current->thread.fp, sc->sc_fpregs, 24);
+	    return 0;
+	}
+
+	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+	    /* Verify the frame format.  */
+	    if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
+		goto out;
+	    if (CPU_IS_020_OR_030) {
+		if (m68k_fputype & FPU_68881 &&
+		    !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
+		    goto out;
+		if (m68k_fputype & FPU_68882 &&
+		    !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
+		    goto out;
+	    } else if (CPU_IS_040) {
+		if (!(sc->sc_fpstate[1] == 0x00 ||
+                      sc->sc_fpstate[1] == 0x28 ||
+                      sc->sc_fpstate[1] == 0x60))
+		    goto out;
+	    } else if (CPU_IS_060) {
+		if (!(sc->sc_fpstate[3] == 0x00 ||
+                      sc->sc_fpstate[3] == 0x60 ||
+		      sc->sc_fpstate[3] == 0xe0))
+		    goto out;
+	    } else
+		goto out;
+
+	    __asm__ volatile (".chip 68k/68881\n\t"
+			      "fmovemx %0,%%fp0-%%fp1\n\t"
+			      "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+			      ".chip 68k"
+			      : /* no outputs */
+			      : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
+	}
+	__asm__ volatile (".chip 68k/68881\n\t"
+			  "frestore %0\n\t"
+			  ".chip 68k" : : "m" (*sc->sc_fpstate));
+	err = 0;
+
+out:
+	return err;
+}
+
+#define FPCONTEXT_SIZE	216
+#define uc_fpstate	uc_filler[0]
+#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
+#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]
+
+static inline int rt_restore_fpu_state(struct ucontext *uc)
+{
+	unsigned char fpstate[FPCONTEXT_SIZE];
+	int context_size = CPU_IS_060 ? 8 : 0;
+	fpregset_t fpregs;
+	int err = 1;
+
+	if (FPU_IS_EMU) {
+		/* restore fpu control register */
+		if (__copy_from_user(current->thread.fpcntl,
+				uc->uc_mcontext.fpregs.f_fpcntl, 12))
+			goto out;
+		/* restore all other fpu register */
+		if (__copy_from_user(current->thread.fp,
+				uc->uc_mcontext.fpregs.f_fpregs, 96))
+			goto out;
+		return 0;
+	}
+
+	if (__get_user(*(long *)fpstate, (long *)&uc->uc_fpstate))
+		goto out;
+	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
+		if (!CPU_IS_060)
+			context_size = fpstate[1];
+		/* Verify the frame format.  */
+		if (!CPU_IS_060 && (fpstate[0] != fpu_version))
+			goto out;
+		if (CPU_IS_020_OR_030) {
+			if (m68k_fputype & FPU_68881 &&
+			    !(context_size == 0x18 || context_size == 0xb4))
+				goto out;
+			if (m68k_fputype & FPU_68882 &&
+			    !(context_size == 0x38 || context_size == 0xd4))
+				goto out;
+		} else if (CPU_IS_040) {
+			if (!(context_size == 0x00 ||
+			      context_size == 0x28 ||
+			      context_size == 0x60))
+				goto out;
+		} else if (CPU_IS_060) {
+			if (!(fpstate[3] == 0x00 ||
+			      fpstate[3] == 0x60 ||
+			      fpstate[3] == 0xe0))
+				goto out;
+		} else
+			goto out;
+		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
+				     sizeof(fpregs)))
+			goto out;
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "fmovemx %0,%%fp0-%%fp7\n\t"
+				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+				  ".chip 68k"
+				  : /* no outputs */
+				  : "m" (*fpregs.f_fpregs),
+				    "m" (*fpregs.f_fpcntl));
+	}
+	if (context_size &&
+	    __copy_from_user(fpstate + 4, (long *)&uc->uc_fpstate + 1,
+			     context_size))
+		goto out;
+	__asm__ volatile (".chip 68k/68881\n\t"
+			  "frestore %0\n\t"
+			  ".chip 68k" : : "m" (*fpstate));
+	err = 0;
+
+out:
+	return err;
+}
+
+static inline int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext *usc, void *fp,
+		   int *pd0)
+{
+	int fsize, formatvec;
+	struct sigcontext context;
+	int err;
+
+	/* get previous context */
+	if (copy_from_user(&context, usc, sizeof(context)))
+		goto badframe;
+
+	/* restore passed registers */
+	regs->d1 = context.sc_d1;
+	regs->a0 = context.sc_a0;
+	regs->a1 = context.sc_a1;
+	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
+	regs->pc = context.sc_pc;
+	regs->orig_d0 = -1;		/* disable syscall checks */
+	wrusp(context.sc_usp);
+	formatvec = context.sc_formatvec;
+	regs->format = formatvec >> 12;
+	regs->vector = formatvec & 0xfff;
+
+	err = restore_fpu_state(&context);
+
+	fsize = frame_extra_sizes[regs->format];
+	if (fsize < 0) {
+		/*
+		 * user process trying to return with weird frame format
+		 */
+#ifdef DEBUG
+		printk("user process returning with weird frame format\n");
+#endif
+		goto badframe;
+	}
+
+	/* OK.	Make room on the supervisor stack for the extra junk,
+	 * if necessary.
+	 */
+
+	if (fsize) {
+		struct switch_stack *sw = (struct switch_stack *)regs - 1;
+		regs->d0 = context.sc_d0;
+#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
+		__asm__ __volatile__
+			("   movel %0,%/a0\n\t"
+			 "   subl %1,%/a0\n\t"     /* make room on stack */
+			 "   movel %/a0,%/sp\n\t"  /* set stack pointer */
+			 /* move switch_stack and pt_regs */
+			 "1: movel %0@+,%/a0@+\n\t"
+			 "   dbra %2,1b\n\t"
+			 "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
+			 "   lsrl  #2,%1\n\t"
+			 "   subql #1,%1\n\t"
+			 "2: movesl %4@+,%2\n\t"
+			 "3: movel %2,%/a0@+\n\t"
+			 "   dbra %1,2b\n\t"
+			 "   bral ret_from_signal\n"
+			 "4:\n"
+			 ".section __ex_table,\"a\"\n"
+			 "   .align 4\n"
+			 "   .long 2b,4b\n"
+			 "   .long 3b,4b\n"
+			 ".previous"
+			 : /* no outputs, it doesn't ever return */
+			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
+			   "n" (frame_offset), "a" (fp)
+			 : "a0");
+#undef frame_offset
+		/*
+		 * If we ever get here an exception occurred while
+		 * building the above stack-frame.
+		 */
+		goto badframe;
+	}
+
+	*pd0 = context.sc_d0;
+	return err;
+
+badframe:
+	return 1;
+}
+
+static inline int
+rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
+		    struct ucontext *uc, int *pd0)
+{
+	int fsize, temp;
+	greg_t *gregs = uc->uc_mcontext.gregs;
+	unsigned long usp;
+	int err;
+
+	err = __get_user(temp, &uc->uc_mcontext.version);
+	if (temp != MCONTEXT_VERSION)
+		goto badframe;
+	/* restore passed registers */
+	err |= __get_user(regs->d0, &gregs[0]);
+	err |= __get_user(regs->d1, &gregs[1]);
+	err |= __get_user(regs->d2, &gregs[2]);
+	err |= __get_user(regs->d3, &gregs[3]);
+	err |= __get_user(regs->d4, &gregs[4]);
+	err |= __get_user(regs->d5, &gregs[5]);
+	err |= __get_user(sw->d6, &gregs[6]);
+	err |= __get_user(sw->d7, &gregs[7]);
+	err |= __get_user(regs->a0, &gregs[8]);
+	err |= __get_user(regs->a1, &gregs[9]);
+	err |= __get_user(regs->a2, &gregs[10]);
+	err |= __get_user(sw->a3, &gregs[11]);
+	err |= __get_user(sw->a4, &gregs[12]);
+	err |= __get_user(sw->a5, &gregs[13]);
+	err |= __get_user(sw->a6, &gregs[14]);
+	err |= __get_user(usp, &gregs[15]);
+	wrusp(usp);
+	err |= __get_user(regs->pc, &gregs[16]);
+	err |= __get_user(temp, &gregs[17]);
+	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
+	regs->orig_d0 = -1;		/* disable syscall checks */
+	err |= __get_user(temp, &uc->uc_formatvec);
+	regs->format = temp >> 12;
+	regs->vector = temp & 0xfff;
+
+	err |= rt_restore_fpu_state(uc);
+
+	if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
+		goto badframe;
+
+	fsize = frame_extra_sizes[regs->format];
+	if (fsize < 0) {
+		/*
+		 * user process trying to return with weird frame format
+		 */
+#ifdef DEBUG
+		printk("user process returning with weird frame format\n");
+#endif
+		goto badframe;
+	}
+
+	/* OK.	Make room on the supervisor stack for the extra junk,
+	 * if necessary.
+	 */
+
+	if (fsize) {
+#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
+		__asm__ __volatile__
+			("   movel %0,%/a0\n\t"
+			 "   subl %1,%/a0\n\t"     /* make room on stack */
+			 "   movel %/a0,%/sp\n\t"  /* set stack pointer */
+			 /* move switch_stack and pt_regs */
+			 "1: movel %0@+,%/a0@+\n\t"
+			 "   dbra %2,1b\n\t"
+			 "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
+			 "   lsrl  #2,%1\n\t"
+			 "   subql #1,%1\n\t"
+			 "2: movesl %4@+,%2\n\t"
+			 "3: movel %2,%/a0@+\n\t"
+			 "   dbra %1,2b\n\t"
+			 "   bral ret_from_signal\n"
+			 "4:\n"
+			 ".section __ex_table,\"a\"\n"
+			 "   .align 4\n"
+			 "   .long 2b,4b\n"
+			 "   .long 3b,4b\n"
+			 ".previous"
+			 : /* no outputs, it doesn't ever return */
+			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
+			   "n" (frame_offset), "a" (&uc->uc_extra)
+			 : "a0");
+#undef frame_offset
+		/*
+		 * If we ever get here an exception occurred while
+		 * building the above stack-frame.
+		 */
+		goto badframe;
+	}
+
+	*pd0 = regs->d0;
+	return err;
+
+badframe:
+	return 1;
+}
+
+asmlinkage int do_sigreturn(unsigned long __unused)
+{
+	struct switch_stack *sw = (struct switch_stack *) &__unused;
+	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
+	unsigned long usp = rdusp();
+	struct sigframe *frame = (struct sigframe *)(usp - 4);
+	sigset_t set;
+	int d0;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
+	    (_NSIG_WORDS > 1 &&
+	     __copy_from_user(&set.sig[1], &frame->extramask,
+			      sizeof(frame->extramask))))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	current->blocked = set;
+	recalc_sigpending();
+
+	if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
+		goto badframe;
+	return d0;
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+asmlinkage int do_rt_sigreturn(unsigned long __unused)
+{
+	struct switch_stack *sw = (struct switch_stack *) &__unused;
+	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
+	unsigned long usp = rdusp();
+	struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4);
+	sigset_t set;
+	int d0;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	current->blocked = set;
+	recalc_sigpending();
+
+	if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
+		goto badframe;
+	return d0;
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
+{
+	if (FPU_IS_EMU) {
+		/* save registers */
+		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
+		memcpy(sc->sc_fpregs, current->thread.fp, 24);
+		return;
+	}
+
+	__asm__ volatile (".chip 68k/68881\n\t"
+			  "fsave %0\n\t"
+			  ".chip 68k"
+			  : : "m" (*sc->sc_fpstate) : "memory");
+
+	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+		fpu_version = sc->sc_fpstate[0];
+		if (CPU_IS_020_OR_030 &&
+		    regs->vector >= (VEC_FPBRUC * 4) &&
+		    regs->vector <= (VEC_FPNAN * 4)) {
+			/* Clear pending exception in 68882 idle frame */
+			if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
+				sc->sc_fpstate[0x38] |= 1 << 3;
+		}
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "fmovemx %%fp0-%%fp1,%0\n\t"
+				  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+				  ".chip 68k"
+				  : "=m" (*sc->sc_fpregs),
+				    "=m" (*sc->sc_fpcntl)
+				  : /* no inputs */
+				  : "memory");
+	}
+}
+
+static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
+{
+	unsigned char fpstate[FPCONTEXT_SIZE];
+	int context_size = CPU_IS_060 ? 8 : 0;
+	int err = 0;
+
+	if (FPU_IS_EMU) {
+		/* save fpu control register */
+		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
+				current->thread.fpcntl, 12);
+		/* save all other fpu register */
+		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
+				current->thread.fp, 96);
+		return err;
+	}
+
+	__asm__ volatile (".chip 68k/68881\n\t"
+			  "fsave %0\n\t"
+			  ".chip 68k"
+			  : : "m" (*fpstate) : "memory");
+
+	err |= __put_user(*(long *)fpstate, (long *)&uc->uc_fpstate);
+	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
+		fpregset_t fpregs;
+		if (!CPU_IS_060)
+			context_size = fpstate[1];
+		fpu_version = fpstate[0];
+		if (CPU_IS_020_OR_030 &&
+		    regs->vector >= (VEC_FPBRUC * 4) &&
+		    regs->vector <= (VEC_FPNAN * 4)) {
+			/* Clear pending exception in 68882 idle frame */
+			if (*(unsigned short *) fpstate == 0x1f38)
+				fpstate[0x38] |= 1 << 3;
+		}
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "fmovemx %%fp0-%%fp7,%0\n\t"
+				  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+				  ".chip 68k"
+				  : "=m" (*fpregs.f_fpregs),
+				    "=m" (*fpregs.f_fpcntl)
+				  : /* no inputs */
+				  : "memory");
+		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
+				    sizeof(fpregs));
+	}
+	if (context_size)
+		err |= copy_to_user((long *)&uc->uc_fpstate + 1, fpstate + 4,
+				    context_size);
+	return err;
+}
+
+static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+			     unsigned long mask)
+{
+	sc->sc_mask = mask;
+	sc->sc_usp = rdusp();
+	sc->sc_d0 = regs->d0;
+	sc->sc_d1 = regs->d1;
+	sc->sc_a0 = regs->a0;
+	sc->sc_a1 = regs->a1;
+	sc->sc_sr = regs->sr;
+	sc->sc_pc = regs->pc;
+	sc->sc_formatvec = regs->format << 12 | regs->vector;
+	save_fpu_state(sc, regs);
+}
+
+static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
+{
+	struct switch_stack *sw = (struct switch_stack *)regs - 1;
+	greg_t *gregs = uc->uc_mcontext.gregs;
+	int err = 0;
+
+	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
+	err |= __put_user(regs->d0, &gregs[0]);
+	err |= __put_user(regs->d1, &gregs[1]);
+	err |= __put_user(regs->d2, &gregs[2]);
+	err |= __put_user(regs->d3, &gregs[3]);
+	err |= __put_user(regs->d4, &gregs[4]);
+	err |= __put_user(regs->d5, &gregs[5]);
+	err |= __put_user(sw->d6, &gregs[6]);
+	err |= __put_user(sw->d7, &gregs[7]);
+	err |= __put_user(regs->a0, &gregs[8]);
+	err |= __put_user(regs->a1, &gregs[9]);
+	err |= __put_user(regs->a2, &gregs[10]);
+	err |= __put_user(sw->a3, &gregs[11]);
+	err |= __put_user(sw->a4, &gregs[12]);
+	err |= __put_user(sw->a5, &gregs[13]);
+	err |= __put_user(sw->a6, &gregs[14]);
+	err |= __put_user(rdusp(), &gregs[15]);
+	err |= __put_user(regs->pc, &gregs[16]);
+	err |= __put_user(regs->sr, &gregs[17]);
+	err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
+	err |= rt_save_fpu_state(uc, regs);
+	return err;
+}
+
+static inline void push_cache (unsigned long vaddr)
+{
+	/*
+	 * Using the old cache_push_v() was really a big waste.
+	 *
+	 * What we are trying to do is to flush 8 bytes to ram.
+	 * Flushing 2 cache lines of 16 bytes is much cheaper than
+	 * flushing 1 or 2 pages, as previously done in
+	 * cache_push_v().
+	 *                                                     Jes
+	 */
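+	/* (push_cache() is called on the 8-byte retcode trampoline written by
+	 * setup_frame()/setup_rt_frame(); being long-word aligned, it touches
+	 * at most two 16-byte cache lines.) */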
+	if (CPU_IS_040) {
+		unsigned long temp;
+
+		__asm__ __volatile__ (".chip 68040\n\t"
+				      "nop\n\t"
+				      "ptestr (%1)\n\t"
+				      "movec %%mmusr,%0\n\t"
+				      ".chip 68k"
+				      : "=r" (temp)
+				      : "a" (vaddr));
+
+		temp &= PAGE_MASK;
+		temp |= vaddr & ~PAGE_MASK;
+
+		__asm__ __volatile__ (".chip 68040\n\t"
+				      "nop\n\t"
+				      "cpushl %%bc,(%0)\n\t"
+				      ".chip 68k"
+				      : : "a" (temp));
+	}
+	else if (CPU_IS_060) {
+		unsigned long temp;
+		__asm__ __volatile__ (".chip 68060\n\t"
+				      "plpar (%0)\n\t"
+				      ".chip 68k"
+				      : "=a" (temp)
+				      : "0" (vaddr));
+		__asm__ __volatile__ (".chip 68060\n\t"
+				      "cpushl %%bc,(%0)\n\t"
+				      ".chip 68k"
+				      : : "a" (temp));
+	}
+	else {
+		/*
+		 * 68030/68020 have no writeback cache;
+		 * we still need to clear the icache.
+		 * Note that vaddr is guaranteed to be long word aligned.
+		 */
+		unsigned long temp;
+		asm volatile ("movec %%cacr,%0" : "=r" (temp));
+		temp += 4;
+		asm volatile ("movec %0,%%caar\n\t"
+			      "movec %1,%%cacr"
+			      : : "r" (vaddr), "r" (temp));
+		asm volatile ("movec %0,%%caar\n\t"
+			      "movec %1,%%cacr"
+			      : : "r" (vaddr + 4), "r" (temp));
+	}
+}
+
+static inline void *
+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
+{
+	unsigned long usp;
+
+	/* Default to using normal stack.  */
+	usp = rdusp();
+
+	/* This is the X/Open sanctioned signal stack switching.  */
+	if (ka->sa.sa_flags & SA_ONSTACK) {
+		if (!on_sig_stack(usp))
+			usp = current->sas_ss_sp + current->sas_ss_size;
+	}
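+	/* round down so the signal frame ends up 8-byte aligned */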
+	return (void *)((usp - frame_size) & -8UL);
+}
+
+static void setup_frame (int sig, struct k_sigaction *ka,
+			 sigset_t *set, struct pt_regs *regs)
+{
+	struct sigframe *frame;
+	int fsize = frame_extra_sizes[regs->format];
+	struct sigcontext context;
+	int err = 0;
+
+	if (fsize < 0) {
+#ifdef DEBUG
+		printk ("setup_frame: Unknown frame format %#x\n",
+			regs->format);
+#endif
+		goto give_sigsegv;
+	}
+
+	frame = get_sigframe(ka, regs, sizeof(*frame) + fsize);
+
+	if (fsize) {
+		err |= copy_to_user (frame + 1, regs + 1, fsize);
+		regs->stkadj = fsize;
+	}
+
+	err |= __put_user((current_thread_info()->exec_domain
+			   && current_thread_info()->exec_domain->signal_invmap
+			   && sig < 32
+			   ? current_thread_info()->exec_domain->signal_invmap[sig]
+			   : sig),
+			  &frame->sig);
+
+	err |= __put_user(regs->vector, &frame->code);
+	err |= __put_user(&frame->sc, &frame->psc);
+
+	if (_NSIG_WORDS > 1)
+		err |= copy_to_user(frame->extramask, &set->sig[1],
+				    sizeof(frame->extramask));
+
+	setup_sigcontext(&context, regs, set->sig[0]);
+	err |= copy_to_user (&frame->sc, &context, sizeof(context));
+
+	/* Set up to return from userspace.  */
+	err |= __put_user(frame->retcode, &frame->pretcode);
+	/* moveq #,d0; trap #0 */
+	err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
+			  (long *)(frame->retcode));
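+	/* i.e. the big-endian long 0x70XX4e40: "moveq #__NR_sigreturn,%d0"
+	 * (immediate in the low byte of the first word) followed by "trap #0" */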
+
+	if (err)
+		goto give_sigsegv;
+
+	push_cache ((unsigned long) &frame->retcode);
+
+	/* Set up registers for signal handler */
+	wrusp ((unsigned long) frame);
+	regs->pc = (unsigned long) ka->sa.sa_handler;
+
+adjust_stack:
+	/* Prepare to skip over the extra stuff in the exception frame.  */
+	if (regs->stkadj) {
+		struct pt_regs *tregs =
+			(struct pt_regs *)((ulong)regs + regs->stkadj);
+#ifdef DEBUG
+		printk("Performing stackadjust=%04x\n", regs->stkadj);
+#endif
+		/* This must be copied with decreasing addresses to
+                   handle overlaps.  */
+		tregs->vector = 0;
+		tregs->format = 0;
+		tregs->pc = regs->pc;
+		tregs->sr = regs->sr;
+	}
+	return;
+
+give_sigsegv:
+	force_sigsegv(sig, current);
+	goto adjust_stack;
+}
+
+static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
+			    sigset_t *set, struct pt_regs *regs)
+{
+	struct rt_sigframe *frame;
+	int fsize = frame_extra_sizes[regs->format];
+	int err = 0;
+
+	if (fsize < 0) {
+#ifdef DEBUG
+		printk ("setup_frame: Unknown frame format %#x\n",
+			regs->format);
+#endif
+		goto give_sigsegv;
+	}
+
+	frame = get_sigframe(ka, regs, sizeof(*frame));
+
+	if (fsize) {
+		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
+		regs->stkadj = fsize;
+	}
+
+	err |= __put_user((current_thread_info()->exec_domain
+			   && current_thread_info()->exec_domain->signal_invmap
+			   && sig < 32
+			   ? current_thread_info()->exec_domain->signal_invmap[sig]
+			   : sig),
+			  &frame->sig);
+	err |= __put_user(&frame->info, &frame->pinfo);
+	err |= __put_user(&frame->uc, &frame->puc);
+	err |= copy_siginfo_to_user(&frame->info, info);
+
+	/* Create the ucontext.  */
+	err |= __put_user(0, &frame->uc.uc_flags);
+	err |= __put_user(0, &frame->uc.uc_link);
+	err |= __put_user((void *)current->sas_ss_sp,
+			  &frame->uc.uc_stack.ss_sp);
+	err |= __put_user(sas_ss_flags(rdusp()),
+			  &frame->uc.uc_stack.ss_flags);
+	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+	err |= rt_setup_ucontext(&frame->uc, regs);
+	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
+
+	/* Set up to return from userspace.  */
+	err |= __put_user(frame->retcode, &frame->pretcode);
+	/* moveq #,d0; notb d0; trap #0 */
+	err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
+			  (long *)(frame->retcode + 0));
+	err |= __put_user(0x4e40, (short *)(frame->retcode + 4));
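+	/* moveq sign-extends its 8-bit immediate, so the rt_sigreturn number
+	 * (too large for a positive moveq immediate) is stored complemented
+	 * and flipped back with "notb %d0" before "trap #0" */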
+
+	if (err)
+		goto give_sigsegv;
+
+	push_cache ((unsigned long) &frame->retcode);
+
+	/* Set up registers for signal handler */
+	wrusp ((unsigned long) frame);
+	regs->pc = (unsigned long) ka->sa.sa_handler;
+
+adjust_stack:
+	/* Prepare to skip over the extra stuff in the exception frame.  */
+	if (regs->stkadj) {
+		struct pt_regs *tregs =
+			(struct pt_regs *)((ulong)regs + regs->stkadj);
+#ifdef DEBUG
+		printk("Performing stackadjust=%04x\n", regs->stkadj);
+#endif
+		/* This must be copied with decreasing addresses to
+                   handle overlaps.  */
+		tregs->vector = 0;
+		tregs->format = 0;
+		tregs->pc = regs->pc;
+		tregs->sr = regs->sr;
+	}
+	return;
+
+give_sigsegv:
+	force_sigsegv(sig, current);
+	goto adjust_stack;
+}
+
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+	switch (regs->d0) {
+	case -ERESTARTNOHAND:
+		if (!has_handler)
+			goto do_restart;
+		regs->d0 = -EINTR;
+		break;
+
+	case -ERESTARTSYS:
+		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+			regs->d0 = -EINTR;
+			break;
+		}
+	/* fallthrough */
+	case -ERESTARTNOINTR:
+	do_restart:
+		regs->d0 = regs->orig_d0;
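+		/* the syscall trap instruction is 2 bytes, so backing up the
+		 * pc makes the process re-execute it on return to user mode */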
+		regs->pc -= 2;
+		break;
+	}
+}
+
+void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
+{
+	if (regs->orig_d0 < 0)
+		return;
+	switch (regs->d0) {
+	case -ERESTARTNOHAND:
+	case -ERESTARTSYS:
+	case -ERESTARTNOINTR:
+		regs->d0 = regs->orig_d0;
+		regs->orig_d0 = -1;
+		regs->pc -= 2;
+		break;
+	}
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
+	      sigset_t *oldset, struct pt_regs *regs)
+{
+	/* are we from a system call? */
+	if (regs->orig_d0 >= 0)
+		/* If so, check system call restarting.. */
+		handle_restart(regs, ka, 1);
+
+	/* set up the stack frame */
+	if (ka->sa.sa_flags & SA_SIGINFO)
+		setup_rt_frame(sig, ka, info, oldset, regs);
+	else
+		setup_frame(sig, ka, oldset, regs);
+
+	if (ka->sa.sa_flags & SA_ONESHOT)
+		ka->sa.sa_handler = SIG_DFL;
+
+	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NODEFER))
+		sigaddset(&current->blocked,sig);
+	recalc_sigpending();
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init, even with a SIGKILL, even by
+ * mistake.
+ */
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
+{
+	siginfo_t info;
+	struct k_sigaction ka;
+	int signr;
+
+	current->thread.esp0 = (unsigned long) regs;
+
+	if (!oldset)
+		oldset = &current->blocked;
+
+	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+	if (signr > 0) {
+		/* Whee!  Actually deliver the signal.  */
+		handle_signal(signr, &ka, &info, oldset, regs);
+		return 1;
+	}
+
+	/* Did we come from a system call? */
+	if (regs->orig_d0 >= 0)
+		/* Restart the system call - no handlers present */
+		handle_restart(regs, NULL, 0);
+
+	return 0;
+}
diff --git a/arch/m68k/kernel/sun3-head.S b/arch/m68k/kernel/sun3-head.S
new file mode 100644
index 0000000..bffd69a
--- /dev/null
+++ b/arch/m68k/kernel/sun3-head.S
@@ -0,0 +1,104 @@
+#include <linux/linkage.h>
+
+#include <asm/entry.h>
+#include <asm/page.h>
+#include <asm/contregs.h>
+#include <asm/sun3-head.h>
+
+PSL_HIGHIPL     = 0x2700
+NBSG            = 0x20000
+ICACHE_ONLY	= 0x00000009
+CACHES_OFF	= 0x00000008	| actually a clear and disable --m
+#define MAS_STACK INT_STACK
+ROOT_TABLE_SIZE = 128
+PAGESIZE	= 8192
+SUN3_INVALID_PMEG = 255
+.globl bootup_user_stack
+.globl bootup_kernel_stack
+.globl pg0
+.globl swapper_pg_dir
+.globl kernel_pmd_table
+.globl availmem
+.global m68k_pgtable_cachemode
+.global kpt
+| todo: all these should be in bss!
+swapper_pg_dir:                .skip 0x2000
+pg0:                           .skip 0x2000
+kernel_pmd_table:              .skip 0x2000
+
+.globl kernel_pg_dir
+.equ    kernel_pg_dir,kernel_pmd_table
+
+	.section .head
+ENTRY(_stext)
+ENTRY(_start)
+
+/* Firstly, disable interrupts and set up function codes. */
+	movew	#PSL_HIGHIPL, %sr
+	moveq	#FC_CONTROL, %d0
+	movec	%d0, %sfc
+	movec	%d0, %dfc
+
+/* Make sure we're in context zero. */
+	moveq	#0, %d0
+	movsb	%d0, AC_CONTEXT
+
+/* map everything the bootloader left us into high memory, clean up the
+   excess later */
+	lea	(AC_SEGMAP+0),%a0
+	lea	(AC_SEGMAP+KERNBASE),%a1
+1:
+	movsb	%a0@, %d1
+	movsb	%d1, %a1@
+	cmpib	#SUN3_INVALID_PMEG, %d1
+	beq	2f
+	addl	#NBSG,%a0
+	addl	#NBSG,%a1
+	jmp	1b
+
+2:
+
+/* Disable caches and jump to high code. */
+	moveq	#ICACHE_ONLY,%d0	| Cache disabled until we're ready to enable it
+	movc	%d0, %cacr	|   is this the right value? (yes --m)
+	jmp	1f:l
+
+/* Following code executes at high addresses (0xE000xxx). */
+1:	lea	init_task,%curptr			| get initial thread...
+	lea	init_thread_union+THREAD_SIZE,%sp	| ...and its stack.
+
+/* copy bootinfo records from the loader to _end */
+	lea	_end, %a1
+	lea	BI_START, %a0
+	/* number of longs to copy */
+	movel	%a0@, %d0
+1:	addl	#4, %a0
+	movel   %a0@, %a1@
+	addl	#4, %a1
+	dbf	%d0, 1b
+
+/* Point MSP at an invalid page to trap if it's used. --m */
+	movl	#(PAGESIZE),%d0
+	movc	%d0,%msp
+	moveq	#-1,%d0
+	movsb	%d0,(AC_SEGMAP+0x0)
+
+	jbsr	sun3_init
+
+	jbsr	base_trap_init
+
+        jbsr    start_kernel
+	trap	#15
+
+        .data
+        .even
+kpt:
+        .long 0
+availmem:
+        .long 0
+| todo: remove next two. --m
+is_medusa:
+        .long 0
+m68k_pgtable_cachemode:
+        .long 0
+
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
new file mode 100644
index 0000000..2ed7b78
--- /dev/null
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -0,0 +1,671 @@
+/*
+ * linux/arch/m68k/kernel/sys_m68k.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/m68k
+ * platform.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/cachectl.h>
+#include <asm/traps.h>
+#include <asm/ipc.h>
+#include <asm/page.h>
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way unix traditionally does this, though.
+ */
+asmlinkage int sys_pipe(unsigned long * fildes)
+{
+	int fd[2];
+	int error;
+
+	error = do_pipe(fd);
+	if (!error) {
+		if (copy_to_user(fildes, fd, 2*sizeof(int)))
+			error = -EFAULT;
+	}
+	return error;
+}
+
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+	unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags,
+	unsigned long fd, unsigned long pgoff)
+{
+	int error = -EBADF;
+	struct file * file = NULL;
+
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	if (!(flags & MAP_ANONYMOUS)) {
+		file = fget(fd);
+		if (!file)
+			goto out;
+	}
+
+	down_write(&current->mm->mmap_sem);
+	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+	up_write(&current->mm->mmap_sem);
+
+	if (file)
+		fput(file);
+out:
+	return error;
+}
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags,
+	unsigned long fd, unsigned long pgoff)
+{
+	return do_mmap2(addr, len, prot, flags, fd, pgoff);
+}
+
+/*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls. Linux/m68k cloned Linux/i386, which historically could not
+ * handle more than 4 system call parameters, so these system calls
+ * used a memory block for parameter passing.
+ */
+
+struct mmap_arg_struct {
+	unsigned long addr;
+	unsigned long len;
+	unsigned long prot;
+	unsigned long flags;
+	unsigned long fd;
+	unsigned long offset;
+};
+
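+/*
+ * Illustrative only: a legacy caller fills a mmap_arg_struct in user memory
+ * and passes its address as the single argument, e.g.
+ *
+ *	struct mmap_arg_struct args = { addr, len, prot, flags, fd, offset };
+ *	old_mmap(&args);
+ */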
+asmlinkage int old_mmap(struct mmap_arg_struct *arg)
+{
+	struct mmap_arg_struct a;
+	int error = -EFAULT;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		goto out;
+
+	error = -EINVAL;
+	if (a.offset & ~PAGE_MASK)
+		goto out;
+
+	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+out:
+	return error;
+}
+
+#if 0
+struct mmap_arg_struct64 {
+	__u32 addr;
+	__u32 len;
+	__u32 prot;
+	__u32 flags;
+	__u64 offset; /* 64 bits */
+	__u32 fd;
+};
+
+asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
+{
+	int error = -EFAULT;
+	struct file * file = NULL;
+	struct mmap_arg_struct64 a;
+	unsigned long pgoff;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		return -EFAULT;
+
+	if ((long)a.offset & ~PAGE_MASK)
+		return -EINVAL;
+
+	pgoff = a.offset >> PAGE_SHIFT;
+	if ((a.offset >> PAGE_SHIFT) != pgoff)
+		return -EINVAL;
+
+	if (!(a.flags & MAP_ANONYMOUS)) {
+		error = -EBADF;
+		file = fget(a.fd);
+		if (!file)
+			goto out;
+	}
+	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+	down_write(&current->mm->mmap_sem);
+	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
+	up_write(&current->mm->mmap_sem);
+	if (file)
+		fput(file);
+out:
+	return error;
+}
+#endif
+
+struct sel_arg_struct {
+	unsigned long n;
+	fd_set *inp, *outp, *exp;
+	struct timeval *tvp;
+};
+
+asmlinkage int old_select(struct sel_arg_struct *arg)
+{
+	struct sel_arg_struct a;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		return -EFAULT;
+	/* sys_select() does the appropriate kernel locking */
+	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+ *
+ * This is really horribly ugly.
+ */
+asmlinkage int sys_ipc (uint call, int first, int second,
+			int third, void *ptr, long fifth)
+{
+	int version, ret;
+
+	version = call >> 16; /* hack for backward compatibility */
+	call &= 0xffff;
+
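+	/* the call numbers are grouped: semaphore ops first, then message
+	 * queues, then shared memory */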
+	if (call <= SEMCTL)
+		switch (call) {
+		case SEMOP:
+			return sys_semop (first, (struct sembuf *)ptr, second);
+		case SEMGET:
+			return sys_semget (first, second, third);
+		case SEMCTL: {
+			union semun fourth;
+			if (!ptr)
+				return -EINVAL;
+			if (get_user(fourth.__pad, (void **) ptr))
+				return -EFAULT;
+			return sys_semctl (first, second, third, fourth);
+			}
+		default:
+			return -ENOSYS;
+		}
+	if (call <= MSGCTL)
+		switch (call) {
+		case MSGSND:
+			return sys_msgsnd (first, (struct msgbuf *) ptr,
+					  second, third);
+		case MSGRCV:
+			switch (version) {
+			case 0: {
+				struct ipc_kludge tmp;
+				if (!ptr)
+					return -EINVAL;
+				if (copy_from_user (&tmp,
+						    (struct ipc_kludge *)ptr,
+						    sizeof (tmp)))
+					return -EFAULT;
+				return sys_msgrcv (first, tmp.msgp, second,
+						   tmp.msgtyp, third);
+				}
+			default:
+				return sys_msgrcv (first,
+						   (struct msgbuf *) ptr,
+						   second, fifth, third);
+			}
+		case MSGGET:
+			return sys_msgget ((key_t) first, second);
+		case MSGCTL:
+			return sys_msgctl (first, second,
+					   (struct msqid_ds *) ptr);
+		default:
+			return -ENOSYS;
+		}
+	if (call <= SHMCTL)
+		switch (call) {
+		case SHMAT:
+			switch (version) {
+			default: {
+				ulong raddr;
+				ret = do_shmat (first, (char *) ptr,
+						 second, &raddr);
+				if (ret)
+					return ret;
+				return put_user (raddr, (ulong *) third);
+			}
+			}
+		case SHMDT:
+			return sys_shmdt ((char *)ptr);
+		case SHMGET:
+			return sys_shmget (first, second, third);
+		case SHMCTL:
+			return sys_shmctl (first, second,
+					   (struct shmid_ds *) ptr);
+		default:
+			return -ENOSYS;
+		}
+
+	return -EINVAL;
+}
+
+/* Convert virtual (user) address VADDR to physical address PADDR */
+#define virt_to_phys_040(vaddr)						\
+({									\
+  unsigned long _mmusr, _paddr;						\
+									\
+  __asm__ __volatile__ (".chip 68040\n\t"				\
+			"ptestr (%1)\n\t"				\
+			"movec %%mmusr,%0\n\t"				\
+			".chip 68k"					\
+			: "=r" (_mmusr)					\
+			: "a" (vaddr));					\
+  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;		\
+  _paddr;								\
+})
+
+static inline int
+cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
+{
+  unsigned long paddr, i;
+
+  switch (scope)
+    {
+    case FLUSH_SCOPE_ALL:
+      switch (cache)
+	{
+	case FLUSH_CACHE_DATA:
+	  /* This nop is needed for some broken versions of the 68040.  */
+	  __asm__ __volatile__ ("nop\n\t"
+				".chip 68040\n\t"
+				"cpusha %dc\n\t"
+				".chip 68k");
+	  break;
+	case FLUSH_CACHE_INSN:
+	  __asm__ __volatile__ ("nop\n\t"
+				".chip 68040\n\t"
+				"cpusha %ic\n\t"
+				".chip 68k");
+	  break;
+	default:
+	case FLUSH_CACHE_BOTH:
+	  __asm__ __volatile__ ("nop\n\t"
+				".chip 68040\n\t"
+				"cpusha %bc\n\t"
+				".chip 68k");
+	  break;
+	}
+      break;
+
+    case FLUSH_SCOPE_LINE:
+      /* Find the physical address of the first mapped page in the
+	 address range.  */
+      if ((paddr = virt_to_phys_040(addr))) {
+        paddr += addr & ~(PAGE_MASK | 15);
+        len = (len + (addr & 15) + 15) >> 4;
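+        /* len is now a count of 16-byte cache lines */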
+      } else {
+	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
+
+	if (len <= tmp)
+	  return 0;
+	addr += tmp;
+	len -= tmp;
+	tmp = PAGE_SIZE;
+	for (;;)
+	  {
+	    if ((paddr = virt_to_phys_040(addr)))
+	      break;
+	    if (len <= tmp)
+	      return 0;
+	    addr += tmp;
+	    len -= tmp;
+	  }
+	len = (len + 15) >> 4;
+      }
+      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
+      while (len--)
+	{
+	  switch (cache)
+	    {
+	    case FLUSH_CACHE_DATA:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushl %%dc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    case FLUSH_CACHE_INSN:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushl %%ic,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    default:
+	    case FLUSH_CACHE_BOTH:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushl %%bc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    }
+	  if (!--i && len)
+	    {
+	      /*
+	       * No need to page align here since it is done by
+	       * virt_to_phys_040().
+	       */
+	      addr += PAGE_SIZE;
+	      i = PAGE_SIZE / 16;
+	      /* Recompute physical address when crossing a page
+	         boundary. */
+	      for (;;)
+		{
+		  if ((paddr = virt_to_phys_040(addr)))
+		    break;
+		  if (len <= i)
+		    return 0;
+		  len -= i;
+		  addr += PAGE_SIZE;
+		}
+	    }
+	  else
+	    paddr += 16;
+	}
+      break;
+
+    default:
+    case FLUSH_SCOPE_PAGE:
+      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
+      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
+	{
+	  if (!(paddr = virt_to_phys_040(addr)))
+	    continue;
+	  switch (cache)
+	    {
+	    case FLUSH_CACHE_DATA:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushp %%dc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    case FLUSH_CACHE_INSN:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushp %%ic,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    default:
+	    case FLUSH_CACHE_BOTH:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushp %%bc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    }
+	}
+      break;
+    }
+  return 0;
+}
+
+#define virt_to_phys_060(vaddr)				\
+({							\
+  unsigned long paddr;					\
+  __asm__ __volatile__ (".chip 68060\n\t"		\
+			"plpar (%0)\n\t"		\
+			".chip 68k"			\
+			: "=a" (paddr)			\
+			: "0" (vaddr));			\
+  (paddr); /* XXX */					\
+})
+
+static inline int
+cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
+{
+  unsigned long paddr, i;
+
+  /*
+   * 68060 manual says:
+   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
+   *  cpush %ic : invalidate IC
+   *  cpush %bc : flush DC + invalidate IC
+   */
+  switch (scope)
+    {
+    case FLUSH_SCOPE_ALL:
+      switch (cache)
+	{
+	case FLUSH_CACHE_DATA:
+	  __asm__ __volatile__ (".chip 68060\n\t"
+				"cpusha %dc\n\t"
+				".chip 68k");
+	  break;
+	case FLUSH_CACHE_INSN:
+	  __asm__ __volatile__ (".chip 68060\n\t"
+				"cpusha %ic\n\t"
+				".chip 68k");
+	  break;
+	default:
+	case FLUSH_CACHE_BOTH:
+	  __asm__ __volatile__ (".chip 68060\n\t"
+				"cpusha %bc\n\t"
+				".chip 68k");
+	  break;
+	}
+      break;
+
+    case FLUSH_SCOPE_LINE:
+      /* Find the physical address of the first mapped page in the
+	 address range.  */
+      len += addr & 15;
+      addr &= -16;
+      if (!(paddr = virt_to_phys_060(addr))) {
+	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
+
+	if (len <= tmp)
+	  return 0;
+	addr += tmp;
+	len -= tmp;
+	tmp = PAGE_SIZE;
+	for (;;)
+	  {
+	    if ((paddr = virt_to_phys_060(addr)))
+	      break;
+	    if (len <= tmp)
+	      return 0;
+	    addr += tmp;
+	    len -= tmp;
+	  }
+      }
+      len = (len + 15) >> 4;
+      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
+      while (len--)
+	{
+	  switch (cache)
+	    {
+	    case FLUSH_CACHE_DATA:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushl %%dc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    case FLUSH_CACHE_INSN:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushl %%ic,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    default:
+	    case FLUSH_CACHE_BOTH:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushl %%bc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    }
+	  if (!--i && len)
+	    {
+
+	      /*
+	       * We just want to jump to the first cache line
+	       * in the next page.
+	       */
+	      addr += PAGE_SIZE;
+	      addr &= PAGE_MASK;
+
+	      i = PAGE_SIZE / 16;
+	      /* Recompute physical address when crossing a page
+	         boundary. */
+	      for (;;)
+	        {
+	          if ((paddr = virt_to_phys_060(addr)))
+	            break;
+	          if (len <= i)
+	            return 0;
+	          len -= i;
+	          addr += PAGE_SIZE;
+	        }
+	    }
+	  else
+	    paddr += 16;
+	}
+      break;
+
+    default:
+    case FLUSH_SCOPE_PAGE:
+      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
+      addr &= PAGE_MASK;	/* Workaround for bug in some
+				   revisions of the 68060 */
+      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
+	{
+	  if (!(paddr = virt_to_phys_060(addr)))
+	    continue;
+	  switch (cache)
+	    {
+	    case FLUSH_CACHE_DATA:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushp %%dc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    case FLUSH_CACHE_INSN:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushp %%ic,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    default:
+	    case FLUSH_CACHE_BOTH:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushp %%bc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    }
+	}
+      break;
+    }
+  return 0;
+}
+
+/* sys_cacheflush -- flush (part of) the processor cache.  */
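+/*
+ * Illustrative use: after generating code at runtime, a user program can
+ * call cacheflush(addr, FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, len) so the
+ * freshly written instructions become visible to the instruction cache.
+ */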
+asmlinkage int
+sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
+{
+	struct vm_area_struct *vma;
+	int ret = -EINVAL;
+
+	lock_kernel();
+	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
+	    cache & ~FLUSH_CACHE_BOTH)
+		goto out;
+
+	if (scope == FLUSH_SCOPE_ALL) {
+		/* Only the superuser may explicitly flush the whole cache. */
+		ret = -EPERM;
+		if (!capable(CAP_SYS_ADMIN))
+			goto out;
+	} else {
+		/*
+		 * Verify that the specified address region actually belongs
+		 * to this process.
+		 */
+		vma = find_vma (current->mm, addr);
+		ret = -EINVAL;
+		/* Check for overflow.  */
+		if (addr + len < addr)
+			goto out;
+		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
+			goto out;
+	}
+
+	if (CPU_IS_020_OR_030) {
+		if (scope == FLUSH_SCOPE_LINE && len < 256) {
+			unsigned long cacr;
+			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
+			if (cache & FLUSH_CACHE_INSN)
+				cacr |= 4;
+			if (cache & FLUSH_CACHE_DATA)
+				cacr |= 0x400;
+			len >>= 2;
+			while (len--) {
+				__asm__ __volatile__ ("movec %1, %%caar\n\t"
+						      "movec %0, %%cacr"
+						      : /* no outputs */
+						      : "r" (cacr), "r" (addr));
+				addr += 4;
+			}
+		} else {
+			/* Flush the whole cache, even if page granularity requested. */
+			unsigned long cacr;
+			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
+			if (cache & FLUSH_CACHE_INSN)
+				cacr |= 8;
+			if (cache & FLUSH_CACHE_DATA)
+				cacr |= 0x800;
+			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
+		}
+		ret = 0;
+		goto out;
+	} else {
+	    /*
+	     * 040 or 060: don't blindly trust 'scope', someone could
+	     * try to flush a few megs of memory.
+	     */
+
+	    if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
+	        scope=FLUSH_SCOPE_PAGE;
+	    if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
+	        scope=FLUSH_SCOPE_ALL;
+	    if (CPU_IS_040) {
+		ret = cache_flush_040 (addr, scope, cache, len);
+	    } else if (CPU_IS_060) {
+		ret = cache_flush_060 (addr, scope, cache, len);
+	    }
+	}
+out:
+	unlock_kernel();
+	return ret;
+}
+
+asmlinkage int sys_getpagesize(void)
+{
+	return PAGE_SIZE;
+}
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
new file mode 100644
index 0000000..e47e195
--- /dev/null
+++ b/arch/m68k/kernel/time.c
@@ -0,0 +1,187 @@
+/*
+ *  linux/arch/m68k/kernel/time.c
+ *
+ *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
+ *
+ * This file contains the m68k-specific time handling details.
+ * Most of the stuff is located in the machine specific files.
+ *
+ * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
+ *		"A Kernel Model for Precision Timekeeping" by Dave Mills
+ */
+
+#include <linux/config.h> /* CONFIG_HEARTBEAT */
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <asm/io.h>
+
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/profile.h>
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+static inline int set_rtc_mmss(unsigned long nowtime)
+{
+  if (mach_set_clock_mmss)
+    return mach_set_clock_mmss (nowtime);
+  return -1;
+}
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clock tick
+ */
+static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
+{
+	do_timer(regs);
+#ifndef CONFIG_SMP
+	update_process_times(user_mode(regs));
+#endif
+	profile_tick(CPU_PROFILING, regs);
+
+#ifdef CONFIG_HEARTBEAT
+	/* use power LED as a heartbeat instead -- much more useful
+	   for debugging -- based on the version for PReP by Cort */
+	/* acts like an actual heartbeat -- i.e. thump-thump-pause... */
+	if (mach_heartbeat) {
+	    static unsigned cnt = 0, period = 0, dist = 0;
+
+	    if (cnt == 0 || cnt == dist)
+		mach_heartbeat( 1 );
+	    else if (cnt == 7 || cnt == dist+7)
+		mach_heartbeat( 0 );
+
+	    if (++cnt > period) {
+		cnt = 0;
+		/* The hyperbolic function below modifies the heartbeat period
+		 * length depending on the current (5 min) load. It goes
+		 * through the points f(0)=126, f(1)=86, f(5)=51,
+		 * f(inf)->30. */
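+		/* e.g. a 5 min load of 1.0 (avenrun[0] == 1<<FSHIFT) gives
+		 * period = 672/(5+7) + 30 = 86 ticks, matching f(1) above */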
+		period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30;
+		dist = period / 4;
+	    }
+	}
+#endif /* CONFIG_HEARTBEAT */
+	return IRQ_HANDLED;
+}
+
+void time_init(void)
+{
+	struct rtc_time time;
+
+	if (mach_hwclk) {
+		mach_hwclk(0, &time);
+
+		if ((time.tm_year += 1900) < 1970)
+			time.tm_year += 100;
+		xtime.tv_sec = mktime(time.tm_year, time.tm_mon, time.tm_mday,
+				      time.tm_hour, time.tm_min, time.tm_sec);
+		xtime.tv_nsec = 0;
+	}
+	wall_to_monotonic.tv_sec = -xtime.tv_sec;
+
+	mach_sched_init(timer_interrupt);
+}
+
+/*
+ * This version of gettimeofday has near microsecond resolution.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+	unsigned long flags;
+	extern unsigned long wall_jiffies;
+	unsigned long seq;
+	unsigned long usec, sec, lost;
+	unsigned long max_ntp_tick = tick_usec - tickadj;
+
+	do {
+		seq = read_seqbegin_irqsave(&xtime_lock, flags);
+
+		usec = mach_gettimeoffset();
+		lost = jiffies - wall_jiffies;
+
+		/*
+		 * If time_adjust is negative then NTP is slowing the clock,
+		 * so make sure not to go into the next possible interval.
+		 * Better to lose some accuracy than have time go backwards.
+		 */
+		if (unlikely(time_adjust < 0)) {
+			usec = min(usec, max_ntp_tick);
+
+			if (lost)
+				usec += lost * max_ntp_tick;
+		}
+		else if (unlikely(lost))
+			usec += lost * tick_usec;
+
+		sec = xtime.tv_sec;
+		usec += xtime.tv_nsec/1000;
+	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+
+
+	while (usec >= 1000000) {
+		usec -= 1000000;
+		sec++;
+	}
+
+	tv->tv_sec = sec;
+	tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
+int do_settimeofday(struct timespec *tv)
+{
+	time_t wtm_sec, sec = tv->tv_sec;
+	long wtm_nsec, nsec = tv->tv_nsec;
+	extern unsigned long wall_jiffies;
+
+	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+		return -EINVAL;
+
+	write_seqlock_irq(&xtime_lock);
+	/* This is revolting. We need to set the xtime.tv_nsec
+	 * correctly. However, the value in this location is
+	 * the value at the last tick.
+	 * Discover what correction gettimeofday
+	 * would have done, and then undo it!
+	 */
+	nsec -= 1000 * (mach_gettimeoffset() +
+			(jiffies - wall_jiffies) * (1000000 / HZ));
+
+	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+	set_normalized_timespec(&xtime, sec, nsec);
+	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+	time_adjust = 0;		/* stop active adjtime() */
+	time_status |= STA_UNSYNC;
+	time_maxerror = NTP_PHASE_LIMIT;
+	time_esterror = NTP_PHASE_LIMIT;
+	write_sequnlock_irq(&xtime_lock);
+	clock_was_set();
+	return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+/*
+ * Scheduler clock - returns current time in ns units.
+ */
+unsigned long long sched_clock(void)
+{
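+       /* jiffy granularity only: e.g. with HZ == 100 this advances in 10 ms steps */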
+       return (unsigned long long)jiffies*(1000000000/HZ);
+}
+
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
new file mode 100644
index 0000000..deb36e8
--- /dev/null
+++ b/arch/m68k/kernel/traps.c
@@ -0,0 +1,1227 @@
+/*
+ *  linux/arch/m68k/kernel/traps.c
+ *
+ *  Copyright (C) 1993, 1994 by Hamish Macdonald
+ *
+ *  68040 fixes by Michael Rausch
+ *  68040 fixes by Martin Apel
+ *  68040 fixes and writeback by Richard Zidlicky
+ *  68060 fixes by Roman Hodek
+ *  68060 fixes by Jesper Skov
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Sets up all exception vectors
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/a.out.h>
+#include <linux/user.h>
+#include <linux/string.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/kallsyms.h>
+
+#include <asm/setup.h>
+#include <asm/fpu.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/traps.h>
+#include <asm/pgalloc.h>
+#include <asm/machdep.h>
+#include <asm/siginfo.h>
+
+/* assembler routines */
+asmlinkage void system_call(void);
+asmlinkage void buserr(void);
+asmlinkage void trap(void);
+asmlinkage void inthandler(void);
+asmlinkage void nmihandler(void);
+#ifdef CONFIG_M68KFPU_EMU
+asmlinkage void fpu_emu(void);
+#endif
+
+e_vector vectors[256] = {
+	[VEC_BUSERR]	= buserr,
+	[VEC_ADDRERR]	= trap,
+	[VEC_ILLEGAL]	= trap,
+	[VEC_ZERODIV]	= trap,
+	[VEC_CHK]	= trap,
+	[VEC_TRAP]	= trap,
+	[VEC_PRIV]	= trap,
+	[VEC_TRACE]	= trap,
+	[VEC_LINE10]	= trap,
+	[VEC_LINE11]	= trap,
+	[VEC_RESV12]	= trap,
+	[VEC_COPROC]	= trap,
+	[VEC_FORMAT]	= trap,
+	[VEC_UNINT]	= trap,
+	[VEC_RESV16]	= trap,
+	[VEC_RESV17]	= trap,
+	[VEC_RESV18]	= trap,
+	[VEC_RESV19]	= trap,
+	[VEC_RESV20]	= trap,
+	[VEC_RESV21]	= trap,
+	[VEC_RESV22]	= trap,
+	[VEC_RESV23]	= trap,
+	[VEC_SPUR]	= inthandler,
+	[VEC_INT1]	= inthandler,
+	[VEC_INT2]	= inthandler,
+	[VEC_INT3]	= inthandler,
+	[VEC_INT4]	= inthandler,
+	[VEC_INT5]	= inthandler,
+	[VEC_INT6]	= inthandler,
+	[VEC_INT7]	= inthandler,
+	[VEC_SYS]	= system_call,
+	[VEC_TRAP1]	= trap,
+	[VEC_TRAP2]	= trap,
+	[VEC_TRAP3]	= trap,
+	[VEC_TRAP4]	= trap,
+	[VEC_TRAP5]	= trap,
+	[VEC_TRAP6]	= trap,
+	[VEC_TRAP7]	= trap,
+	[VEC_TRAP8]	= trap,
+	[VEC_TRAP9]	= trap,
+	[VEC_TRAP10]	= trap,
+	[VEC_TRAP11]	= trap,
+	[VEC_TRAP12]	= trap,
+	[VEC_TRAP13]	= trap,
+	[VEC_TRAP14]	= trap,
+	[VEC_TRAP15]	= trap,
+};
+
+/* nmi handler for the Amiga */
+asm(".text\n"
+    __ALIGN_STR "\n"
+    "nmihandler: rte");
+
+/*
+ * this must be called very early as the kernel might
+ * use some instructions that are emulated on the 060
+ */
+void __init base_trap_init(void)
+{
+	if(MACH_IS_SUN3X) {
+		extern e_vector *sun3x_prom_vbr;
+
+		__asm__ volatile ("movec %%vbr, %0" : "=r" ((void*)sun3x_prom_vbr));
+	}
+
+	/* setup the exception vector table */
+	__asm__ volatile ("movec %0,%%vbr" : : "r" ((void*)vectors));
+
+	if (CPU_IS_060) {
+		/* set up ISP entry points */
+		asmlinkage void unimp_vec(void) asm ("_060_isp_unimp");
+
+		vectors[VEC_UNIMPII] = unimp_vec;
+	}
+}
+
+void __init trap_init (void)
+{
+	int i;
+
+	for (i = 48; i < 64; i++)
+		if (!vectors[i])
+			vectors[i] = trap;
+
+	for (i = 64; i < 256; i++)
+		vectors[i] = inthandler;
+
+#ifdef CONFIG_M68KFPU_EMU
+	if (FPU_IS_EMU)
+		vectors[VEC_LINE11] = fpu_emu;
+#endif
+
+	if (CPU_IS_040 && !FPU_IS_EMU) {
+		/* set up FPSP entry points */
+		asmlinkage void dz_vec(void) asm ("dz");
+		asmlinkage void inex_vec(void) asm ("inex");
+		asmlinkage void ovfl_vec(void) asm ("ovfl");
+		asmlinkage void unfl_vec(void) asm ("unfl");
+		asmlinkage void snan_vec(void) asm ("snan");
+		asmlinkage void operr_vec(void) asm ("operr");
+		asmlinkage void bsun_vec(void) asm ("bsun");
+		asmlinkage void fline_vec(void) asm ("fline");
+		asmlinkage void unsupp_vec(void) asm ("unsupp");
+
+		vectors[VEC_FPDIVZ] = dz_vec;
+		vectors[VEC_FPIR] = inex_vec;
+		vectors[VEC_FPOVER] = ovfl_vec;
+		vectors[VEC_FPUNDER] = unfl_vec;
+		vectors[VEC_FPNAN] = snan_vec;
+		vectors[VEC_FPOE] = operr_vec;
+		vectors[VEC_FPBRUC] = bsun_vec;
+		vectors[VEC_LINE11] = fline_vec;
+		vectors[VEC_FPUNSUP] = unsupp_vec;
+	}
+
+	if (CPU_IS_060 && !FPU_IS_EMU) {
+		/* set up IFPSP entry points */
+		asmlinkage void snan_vec(void) asm ("_060_fpsp_snan");
+		asmlinkage void operr_vec(void) asm ("_060_fpsp_operr");
+		asmlinkage void ovfl_vec(void) asm ("_060_fpsp_ovfl");
+		asmlinkage void unfl_vec(void) asm ("_060_fpsp_unfl");
+		asmlinkage void dz_vec(void) asm ("_060_fpsp_dz");
+		asmlinkage void inex_vec(void) asm ("_060_fpsp_inex");
+		asmlinkage void fline_vec(void) asm ("_060_fpsp_fline");
+		asmlinkage void unsupp_vec(void) asm ("_060_fpsp_unsupp");
+		asmlinkage void effadd_vec(void) asm ("_060_fpsp_effadd");
+
+		vectors[VEC_FPNAN] = snan_vec;
+		vectors[VEC_FPOE] = operr_vec;
+		vectors[VEC_FPOVER] = ovfl_vec;
+		vectors[VEC_FPUNDER] = unfl_vec;
+		vectors[VEC_FPDIVZ] = dz_vec;
+		vectors[VEC_FPIR] = inex_vec;
+		vectors[VEC_LINE11] = fline_vec;
+		vectors[VEC_FPUNSUP] = unsupp_vec;
+		vectors[VEC_UNIMPEA] = effadd_vec;
+	}
+
+        /* if running on an amiga, make the NMI interrupt do nothing */
+	if (MACH_IS_AMIGA) {
+		vectors[VEC_INT7] = nmihandler;
+	}
+}
+
+
+static const char *vec_names[] = {
+	[VEC_RESETSP]	= "RESET SP",
+	[VEC_RESETPC]	= "RESET PC",
+	[VEC_BUSERR]	= "BUS ERROR",
+	[VEC_ADDRERR]	= "ADDRESS ERROR",
+	[VEC_ILLEGAL]	= "ILLEGAL INSTRUCTION",
+	[VEC_ZERODIV]	= "ZERO DIVIDE",
+	[VEC_CHK]	= "CHK",
+	[VEC_TRAP]	= "TRAPcc",
+	[VEC_PRIV]	= "PRIVILEGE VIOLATION",
+	[VEC_TRACE]	= "TRACE",
+	[VEC_LINE10]	= "LINE 1010",
+	[VEC_LINE11]	= "LINE 1111",
+	[VEC_RESV12]	= "UNASSIGNED RESERVED 12",
+	[VEC_COPROC]	= "COPROCESSOR PROTOCOL VIOLATION",
+	[VEC_FORMAT]	= "FORMAT ERROR",
+	[VEC_UNINT]	= "UNINITIALIZED INTERRUPT",
+	[VEC_RESV16]	= "UNASSIGNED RESERVED 16",
+	[VEC_RESV17]	= "UNASSIGNED RESERVED 17",
+	[VEC_RESV18]	= "UNASSIGNED RESERVED 18",
+	[VEC_RESV19]	= "UNASSIGNED RESERVED 19",
+	[VEC_RESV20]	= "UNASSIGNED RESERVED 20",
+	[VEC_RESV21]	= "UNASSIGNED RESERVED 21",
+	[VEC_RESV22]	= "UNASSIGNED RESERVED 22",
+	[VEC_RESV23]	= "UNASSIGNED RESERVED 23",
+	[VEC_SPUR]	= "SPURIOUS INTERRUPT",
+	[VEC_INT1]	= "LEVEL 1 INT",
+	[VEC_INT2]	= "LEVEL 2 INT",
+	[VEC_INT3]	= "LEVEL 3 INT",
+	[VEC_INT4]	= "LEVEL 4 INT",
+	[VEC_INT5]	= "LEVEL 5 INT",
+	[VEC_INT6]	= "LEVEL 6 INT",
+	[VEC_INT7]	= "LEVEL 7 INT",
+	[VEC_SYS]	= "SYSCALL",
+	[VEC_TRAP1]	= "TRAP #1",
+	[VEC_TRAP2]	= "TRAP #2",
+	[VEC_TRAP3]	= "TRAP #3",
+	[VEC_TRAP4]	= "TRAP #4",
+	[VEC_TRAP5]	= "TRAP #5",
+	[VEC_TRAP6]	= "TRAP #6",
+	[VEC_TRAP7]	= "TRAP #7",
+	[VEC_TRAP8]	= "TRAP #8",
+	[VEC_TRAP9]	= "TRAP #9",
+	[VEC_TRAP10]	= "TRAP #10",
+	[VEC_TRAP11]	= "TRAP #11",
+	[VEC_TRAP12]	= "TRAP #12",
+	[VEC_TRAP13]	= "TRAP #13",
+	[VEC_TRAP14]	= "TRAP #14",
+	[VEC_TRAP15]	= "TRAP #15",
+	[VEC_FPBRUC]	= "FPCP BSUN",
+	[VEC_FPIR]	= "FPCP INEXACT",
+	[VEC_FPDIVZ]	= "FPCP DIV BY 0",
+	[VEC_FPUNDER]	= "FPCP UNDERFLOW",
+	[VEC_FPOE]	= "FPCP OPERAND ERROR",
+	[VEC_FPOVER]	= "FPCP OVERFLOW",
+	[VEC_FPNAN]	= "FPCP SNAN",
+	[VEC_FPUNSUP]	= "FPCP UNSUPPORTED OPERATION",
+	[VEC_MMUCFG]	= "MMU CONFIGURATION ERROR",
+	[VEC_MMUILL]	= "MMU ILLEGAL OPERATION ERROR",
+	[VEC_MMUACC]	= "MMU ACCESS LEVEL VIOLATION ERROR",
+	[VEC_RESV59]	= "UNASSIGNED RESERVED 59",
+	[VEC_UNIMPEA]	= "UNASSIGNED RESERVED 60",
+	[VEC_UNIMPII]	= "UNASSIGNED RESERVED 61",
+	[VEC_RESV62]	= "UNASSIGNED RESERVED 62",
+	[VEC_RESV63]	= "UNASSIGNED RESERVED 63",
+};
+
+static const char *space_names[] = {
+	[0]		= "Space 0",
+	[USER_DATA]	= "User Data",
+	[USER_PROGRAM]	= "User Program",
+#ifndef CONFIG_SUN3
+	[3]		= "Space 3",
+#else
+	[FC_CONTROL]	= "Control",
+#endif
+	[4]		= "Space 4",
+	[SUPER_DATA]	= "Super Data",
+	[SUPER_PROGRAM]	= "Super Program",
+	[CPU_SPACE]	= "CPU"
+};
+
+void die_if_kernel(char *,struct pt_regs *,int);
+asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
+                             unsigned long error_code);
+int send_fault_sig(struct pt_regs *regs);
+
+asmlinkage void trap_c(struct frame *fp);
+
+#if defined (CONFIG_M68060)
+static inline void access_error060 (struct frame *fp)
+{
+	unsigned long fslw = fp->un.fmt4.pc; /* on an access error this is really the FSLW */
+
+#ifdef DEBUG
+	printk("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);
+#endif
+
+	if (fslw & MMU060_BPE) {
+		/* branch prediction error -> clear branch cache */
+		__asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
+				      "orl   #0x00400000,%/d0\n\t"
+				      "movec %/d0,%/cacr"
+				      : : : "d0" );
+		/* return if there's no other error */
+		if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE))
+			return;
+	}
+
+	if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) {
+		unsigned long errorcode;
+		unsigned long addr = fp->un.fmt4.effaddr;
+
+		if (fslw & MMU060_MA)
+			addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;
+
+		errorcode = 1;
+		if (fslw & MMU060_DESC_ERR) {
+			__flush_tlb040_one(addr);
+			errorcode = 0;
+		}
+		if (fslw & MMU060_W)
+			errorcode |= 2;
+#ifdef DEBUG
+		printk("errorcode = %d\n", errorcode );
+#endif
+		do_page_fault(&fp->ptregs, addr, errorcode);
+	} else if (fslw & (MMU060_SEE)){
+		/* Software Emulation Error.
+		 * fault during mem_read/mem_write in ifpsp060/os.S
+		 */
+		send_fault_sig(&fp->ptregs);
+	} else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
+		   send_fault_sig(&fp->ptregs) > 0) {
+		printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr);
+		printk( "68060 access error, fslw=%lx\n", fslw );
+		trap_c( fp );
+	}
+}
+#endif /* CONFIG_M68060 */
+
+#if defined (CONFIG_M68040)
+static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
+{
+	unsigned long mmusr;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(MAKE_MM_SEG(wbs));
+
+	if (iswrite)
+		asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
+	else
+		asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr));
+
+	asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
+
+	set_fs(old_fs);
+
+	return mmusr;
+}
+
+static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
+				   unsigned long wbd)
+{
+	int res = 0;
+	mm_segment_t old_fs = get_fs();
+
+	/* set_fs cannot be moved, otherwise put_user() may oops */
+	set_fs(MAKE_MM_SEG(wbs));
+
+	switch (wbs & WBSIZ_040) {
+	case BA_SIZE_BYTE:
+		res = put_user(wbd & 0xff, (char *)wba);
+		break;
+	case BA_SIZE_WORD:
+		res = put_user(wbd & 0xffff, (short *)wba);
+		break;
+	case BA_SIZE_LONG:
+		res = put_user(wbd, (int *)wba);
+		break;
+	}
+
+	/* set_fs cannot be moved, otherwise put_user() may oops */
+	set_fs(old_fs);
+
+
+#ifdef DEBUG
+	printk("do_040writeback1, res=%d\n",res);
+#endif
+
+	return res;
+}
+
+/* After an exception in a writeback, the stack frame corresponding
+ * to that exception is discarded; set a few bits in the old frame
+ * to simulate what it should look like.
+ */
+static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs)
+{
+	fp->un.fmt7.faddr = wba;
+	fp->un.fmt7.ssw = wbs & 0xff;
+	if (wba != current->thread.faddr)
+	    fp->un.fmt7.ssw |= MA_040;
+}
+
+static inline void do_040writebacks(struct frame *fp)
+{
+	int res = 0;
+#if 0
+	if (fp->un.fmt7.wb1s & WBV_040)
+		printk("access_error040: cannot handle 1st writeback. oops.\n");
+#endif
+
+	if ((fp->un.fmt7.wb2s & WBV_040) &&
+	    !(fp->un.fmt7.wb2s & WBTT_040)) {
+		res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a,
+				       fp->un.fmt7.wb2d);
+		if (res)
+			fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s);
+		else
+			fp->un.fmt7.wb2s = 0;
+	}
+
+	/* do the 2nd wb only if the first one was successful (except for a kernel wb) */
+	if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) {
+		res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a,
+				       fp->un.fmt7.wb3d);
+		if (res)
+		    {
+			fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s);
+
+			fp->un.fmt7.wb2s = fp->un.fmt7.wb3s;
+			fp->un.fmt7.wb3s &= (~WBV_040);
+			fp->un.fmt7.wb2a = fp->un.fmt7.wb3a;
+			fp->un.fmt7.wb2d = fp->un.fmt7.wb3d;
+		    }
+		else
+			fp->un.fmt7.wb3s = 0;
+	}
+
+	if (res)
+		send_fault_sig(&fp->ptregs);
+}
+
+/*
+ * Called from sigreturn(); we must ensure that userspace code didn't
+ * manipulate the exception frame to circumvent protection, then
+ * complete the pending writebacks.
+ * We just clear TM2 to turn them into userspace accesses.
+ */
+asmlinkage void berr_040cleanup(struct frame *fp)
+{
+	fp->un.fmt7.wb2s &= ~4;
+	fp->un.fmt7.wb3s &= ~4;
+
+	do_040writebacks(fp);
+}
+
+static inline void access_error040(struct frame *fp)
+{
+	unsigned short ssw = fp->un.fmt7.ssw;
+	unsigned long mmusr;
+
+#ifdef DEBUG
+	printk("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
+        printk("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
+		fp->un.fmt7.wb2s, fp->un.fmt7.wb3s);
+	printk ("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
+		fp->un.fmt7.wb2a, fp->un.fmt7.wb3a,
+		fp->un.fmt7.wb2d, fp->un.fmt7.wb3d);
+#endif
+
+	if (ssw & ATC_040) {
+		unsigned long addr = fp->un.fmt7.faddr;
+		unsigned long errorcode;
+
+		/*
+		 * The MMU status has to be determined AFTER the address
+		 * has been corrected if there was a misaligned access (MA).
+		 */
+		if (ssw & MA_040)
+			addr = (addr + 7) & -8;
+
+		/* MMU error, get the MMUSR info for this access */
+		mmusr = probe040(!(ssw & RW_040), addr, ssw);
+#ifdef DEBUG
+		printk("mmusr = %lx\n", mmusr);
+#endif
+		errorcode = 1;
+		if (!(mmusr & MMU_R_040)) {
+			/* clear the invalid atc entry */
+			__flush_tlb040_one(addr);
+			errorcode = 0;
+		}
+
+		/* despite what documentation seems to say, RMW
+		 * accesses always have both the LK and RW bits set */
+		if (!(ssw & RW_040) || (ssw & LK_040))
+			errorcode |= 2;
+
+		if (do_page_fault(&fp->ptregs, addr, errorcode)) {
+#ifdef DEBUG
+		        printk("do_page_fault() !=0 \n");
+#endif
+			if (user_mode(&fp->ptregs)){
+				/* delay writebacks after signal delivery */
+#ifdef DEBUG
+			        printk(".. was usermode - return\n");
+#endif
+				return;
+			}
+			/* disable writeback into user space from the kernel
+			 * (if do_page_fault didn't fix the mapping,
+			 * the writeback won't do any good)
+			 */
+#ifdef DEBUG
+			printk(".. disabling wb2\n");
+#endif
+			if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
+				fp->un.fmt7.wb2s &= ~WBV_040;
+		}
+	} else if (send_fault_sig(&fp->ptregs) > 0) {
+		printk("68040 access error, ssw=%x\n", ssw);
+		trap_c(fp);
+	}
+
+	do_040writebacks(fp);
+}
+#endif /* CONFIG_M68040 */
+
+#if defined(CONFIG_SUN3)
+#include <asm/sun3mmu.h>
+
+extern int mmu_emu_handle_fault (unsigned long, int, int);
+
+/* sun3 version of bus_error030 */
+
+static inline void bus_error030 (struct frame *fp)
+{
+	unsigned char buserr_type = sun3_get_buserr ();
+	unsigned long addr, errorcode;
+	unsigned short ssw = fp->un.fmtb.ssw;
+	extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
+
+#ifdef DEBUG
+	if (ssw & (FC | FB))
+		printk ("Instruction fault at %#010lx\n",
+			ssw & FC ?
+			fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
+			:
+			fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
+	if (ssw & DF)
+		printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+			ssw & RW ? "read" : "write",
+			fp->un.fmtb.daddr,
+			space_names[ssw & DFC], fp->ptregs.pc);
+#endif
+
+	/*
+	 * Check if this page should be demand-mapped. This needs to go before
+	 * the testing for a bad kernel-space access (demand-mapping applies
+	 * to kernel accesses too).
+	 */
+
+	if ((ssw & DF)
+	    && (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) {
+		if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0))
+			return;
+	}
+
+	/* Check for kernel-space pagefault (BAD). */
+	if (fp->ptregs.sr & PS_S) {
+		/* kernel fault must be a data fault to user space */
+		if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) {
+		     // try checking the kernel mappings before giving up
+		     if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1))
+			  return;
+			/* instruction fault or kernel data fault! */
+			if (ssw & (FC | FB))
+				printk ("Instruction fault at %#010lx\n",
+					fp->ptregs.pc);
+			if (ssw & DF) {
+				/* was this fault incurred testing bus mappings? */
+				if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) &&
+				   (fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) {
+					send_fault_sig(&fp->ptregs);
+					return;
+				}
+
+				printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+					ssw & RW ? "read" : "write",
+					fp->un.fmtb.daddr,
+					space_names[ssw & DFC], fp->ptregs.pc);
+			}
+			printk ("BAD KERNEL BUSERR\n");
+
+			die_if_kernel("Oops", &fp->ptregs,0);
+			force_sig(SIGKILL, current);
+			return;
+		}
+	} else {
+		/* user fault */
+		if (!(ssw & (FC | FB)) && !(ssw & DF))
+			/* not an instruction fault or data fault! BAD */
+			panic ("USER BUSERR w/o instruction or data fault");
+	}
+
+
+	/* First handle the data fault, if any.  */
+	if (ssw & DF) {
+		addr = fp->un.fmtb.daddr;
+
+// errorcode bit 0:	0 -> no page		1 -> protection fault
+// errorcode bit 1:	0 -> read fault		1 -> write fault
+
+// (buserr_type & SUN3_BUSERR_PROTERR)	-> protection fault
+// (buserr_type & SUN3_BUSERR_INVALID)	-> invalid page fault
+
+		if (buserr_type & SUN3_BUSERR_PROTERR)
+			errorcode = 0x01;
+		else if (buserr_type & SUN3_BUSERR_INVALID)
+			errorcode = 0x00;
+		else {
+#ifdef DEBUG
+			printk ("*** unexpected busfault type=%#04x\n", buserr_type);
+			printk ("invalid %s access at %#lx from pc %#lx\n",
+				!(ssw & RW) ? "write" : "read", addr,
+				fp->ptregs.pc);
+#endif
+			die_if_kernel ("Oops", &fp->ptregs, buserr_type);
+			force_sig (SIGBUS, current);
+			return;
+		}
+
+//todo: wtf is RM bit? --m
+		if (!(ssw & RW) || ssw & RM)
+			errorcode |= 0x02;
+
+		/* Handle page fault. */
+		do_page_fault (&fp->ptregs, addr, errorcode);
+
+		/* Retry the data fault now. */
+		return;
+	}
+
+	/* Now handle the instruction fault. */
+
+	/* Get the fault address. */
+	if (fp->ptregs.format == 0xA)
+		addr = fp->ptregs.pc + 4;
+	else
+		addr = fp->un.fmtb.baddr;
+	if (ssw & FC)
+		addr -= 2;
+
+	if (buserr_type & SUN3_BUSERR_INVALID) {
+		if (!mmu_emu_handle_fault (fp->un.fmtb.daddr, 1, 0))
+			do_page_fault (&fp->ptregs, addr, 0);
+       } else {
+#ifdef DEBUG
+		printk ("protection fault on insn access (segv).\n");
+#endif
+		force_sig (SIGSEGV, current);
+       }
+}
+#else
+#if defined(CPU_M68020_OR_M68030)
+static inline void bus_error030 (struct frame *fp)
+{
+	volatile unsigned short temp;
+	unsigned short mmusr;
+	unsigned long addr, errorcode;
+	unsigned short ssw = fp->un.fmtb.ssw;
+#ifdef DEBUG
+	unsigned long desc;
+
+	printk ("pid = %x  ", current->pid);
+	printk ("SSW=%#06x  ", ssw);
+
+	if (ssw & (FC | FB))
+		printk ("Instruction fault at %#010lx\n",
+			ssw & FC ?
+			fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
+			:
+			fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
+	if (ssw & DF)
+		printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+			ssw & RW ? "read" : "write",
+			fp->un.fmtb.daddr,
+			space_names[ssw & DFC], fp->ptregs.pc);
+#endif
+
+	/* ++andreas: If a data fault and an instruction fault happen
+	   at the same time map in both pages.  */
+
+	/* First handle the data fault, if any.  */
+	if (ssw & DF) {
+		addr = fp->un.fmtb.daddr;
+
+#ifdef DEBUG
+		asm volatile ("ptestr %3,%2@,#7,%0\n\t"
+			      "pmove %%psr,%1@"
+			      : "=a&" (desc)
+			      : "a" (&temp), "a" (addr), "d" (ssw));
+#else
+		asm volatile ("ptestr %2,%1@,#7\n\t"
+			      "pmove %%psr,%0@"
+			      : : "a" (&temp), "a" (addr), "d" (ssw));
+#endif
+		mmusr = temp;
+
+#ifdef DEBUG
+		printk("mmusr is %#x for addr %#lx in task %p\n",
+		       mmusr, addr, current);
+		printk("descriptor address is %#lx, contents %#lx\n",
+		       __va(desc), *(unsigned long *)__va(desc));
+#endif
+
+		errorcode = (mmusr & MMU_I) ? 0 : 1;
+		if (!(ssw & RW) || (ssw & RM))
+			errorcode |= 2;
+
+		if (mmusr & (MMU_I | MMU_WP)) {
+			if (ssw & 4) {
+				printk("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+				       ssw & RW ? "read" : "write",
+				       fp->un.fmtb.daddr,
+				       space_names[ssw & DFC], fp->ptregs.pc);
+				goto buserr;
+			}
+			/* Don't try to do anything further if an exception was
+			   handled. */
+			if (do_page_fault (&fp->ptregs, addr, errorcode) < 0)
+				return;
+		} else if (!(mmusr & MMU_I)) {
+			/* probably a 020 cas fault */
+			if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
+				printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr);
+		} else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
+			printk("invalid %s access at %#lx from pc %#lx\n",
+			       !(ssw & RW) ? "write" : "read", addr,
+			       fp->ptregs.pc);
+			die_if_kernel("Oops",&fp->ptregs,mmusr);
+			force_sig(SIGSEGV, current);
+			return;
+		} else {
+#if 0
+			static volatile long tlong;
+#endif
+
+			printk("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
+			       !(ssw & RW) ? "write" : "read", addr,
+			       fp->ptregs.pc, ssw);
+			asm volatile ("ptestr #1,%1@,#0\n\t"
+				      "pmove %%psr,%0@"
+				      : /* no outputs */
+				      : "a" (&temp), "a" (addr));
+			mmusr = temp;
+
+			printk ("level 0 mmusr is %#x\n", mmusr);
+#if 0
+			asm volatile ("pmove %%tt0,%0@"
+				      : /* no outputs */
+				      : "a" (&tlong));
+			printk("tt0 is %#lx, ", tlong);
+			asm volatile ("pmove %%tt1,%0@"
+				      : /* no outputs */
+				      : "a" (&tlong));
+			printk("tt1 is %#lx\n", tlong);
+#endif
+#ifdef DEBUG
+			printk("Unknown SIGSEGV - 1\n");
+#endif
+			die_if_kernel("Oops",&fp->ptregs,mmusr);
+			force_sig(SIGSEGV, current);
+			return;
+		}
+
+		/* setup an ATC entry for the access about to be retried */
+		if (!(ssw & RW) || (ssw & RM))
+			asm volatile ("ploadw %1,%0@" : /* no outputs */
+				      : "a" (addr), "d" (ssw));
+		else
+			asm volatile ("ploadr %1,%0@" : /* no outputs */
+				      : "a" (addr), "d" (ssw));
+	}
+
+	/* Now handle the instruction fault. */
+
+	if (!(ssw & (FC|FB)))
+		return;
+
+	if (fp->ptregs.sr & PS_S) {
+		printk("Instruction fault at %#010lx\n",
+			fp->ptregs.pc);
+	buserr:
+		printk ("BAD KERNEL BUSERR\n");
+		die_if_kernel("Oops",&fp->ptregs,0);
+		force_sig(SIGKILL, current);
+		return;
+	}
+
+	/* get the fault address */
+	if (fp->ptregs.format == 10)
+		addr = fp->ptregs.pc + 4;
+	else
+		addr = fp->un.fmtb.baddr;
+	if (ssw & FC)
+		addr -= 2;
+
+	if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0)
+		/* Insn fault on same page as data fault.  But we
+		   should still create the ATC entry.  */
+		goto create_atc_entry;
+
+#ifdef DEBUG
+	asm volatile ("ptestr #1,%2@,#7,%0\n\t"
+		      "pmove %%psr,%1@"
+		      : "=a&" (desc)
+		      : "a" (&temp), "a" (addr));
+#else
+	asm volatile ("ptestr #1,%1@,#7\n\t"
+		      "pmove %%psr,%0@"
+		      : : "a" (&temp), "a" (addr));
+#endif
+	mmusr = temp;
+
+#ifdef DEBUG
+	printk ("mmusr is %#x for addr %#lx in task %p\n",
+		mmusr, addr, current);
+	printk ("descriptor address is %#lx, contents %#lx\n",
+		__va(desc), *(unsigned long *)__va(desc));
+#endif
+
+	if (mmusr & MMU_I)
+		do_page_fault (&fp->ptregs, addr, 0);
+	else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
+		printk ("invalid insn access at %#lx from pc %#lx\n",
+			addr, fp->ptregs.pc);
+#ifdef DEBUG
+		printk("Unknown SIGSEGV - 2\n");
+#endif
+		die_if_kernel("Oops",&fp->ptregs,mmusr);
+		force_sig(SIGSEGV, current);
+		return;
+	}
+
+create_atc_entry:
+	/* setup an ATC entry for the access about to be retried */
+	asm volatile ("ploadr #2,%0@" : /* no outputs */
+		      : "a" (addr));
+}
+#endif /* CPU_M68020_OR_M68030 */
+#endif /* !CONFIG_SUN3 */
+
+asmlinkage void buserr_c(struct frame *fp)
+{
+	/* Only set esp0 if coming from user mode */
+	if (user_mode(&fp->ptregs))
+		current->thread.esp0 = (unsigned long) fp;
+
+#ifdef DEBUG
+	printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
+#endif
+
+	switch (fp->ptregs.format) {
+#if defined (CONFIG_M68060)
+	case 4:				/* 68060 access error */
+	  access_error060 (fp);
+	  break;
+#endif
+#if defined (CONFIG_M68040)
+	case 0x7:			/* 68040 access error */
+	  access_error040 (fp);
+	  break;
+#endif
+#if defined (CPU_M68020_OR_M68030)
+	case 0xa:
+	case 0xb:
+	  bus_error030 (fp);
+	  break;
+#endif
+	default:
+	  die_if_kernel("bad frame format",&fp->ptregs,0);
+#ifdef DEBUG
+	  printk("Unknown SIGSEGV - 4\n");
+#endif
+	  force_sig(SIGSEGV, current);
+	}
+}
+
+
+static int kstack_depth_to_print = 48;
+
+void show_trace(unsigned long *stack)
+{
+	unsigned long *endstack;
+	unsigned long addr;
+	int i;
+
+	printk("Call Trace:");
+	addr = (unsigned long)stack + THREAD_SIZE - 1;
+	endstack = (unsigned long *)(addr & -THREAD_SIZE);
+	i = 0;
+	while (stack + 1 <= endstack) {
+		addr = *stack++;
+		/*
+		 * If the address is either in the text segment of the
+		 * kernel, or in the region which contains vmalloc'ed
+		 * memory, it *may* be the address of a calling
+		 * routine; if so, print it so that someone tracing
+		 * down the cause of the crash will be able to figure
+		 * out the call path that was taken.
+		 */
+		if (__kernel_text_address(addr)) {
+#ifndef CONFIG_KALLSYMS
+			if (i % 5 == 0)
+				printk("\n       ");
+#endif
+			printk(" [<%08lx>]", addr);
+			print_symbol(" %s\n", addr);
+			i++;
+		}
+	}
+	printk("\n");
+}
+
+void show_registers(struct pt_regs *regs)
+{
+	struct frame *fp = (struct frame *)regs;
+	unsigned long addr;
+	int i;
+
+	addr = (unsigned long)&fp->un;
+	printk("Frame format=%X ", fp->ptregs.format);
+	switch (fp->ptregs.format) {
+	case 0x2:
+	    printk("instr addr=%08lx\n", fp->un.fmt2.iaddr);
+	    addr += sizeof(fp->un.fmt2);
+	    break;
+	case 0x3:
+	    printk("eff addr=%08lx\n", fp->un.fmt3.effaddr);
+	    addr += sizeof(fp->un.fmt3);
+	    break;
+	case 0x4:
+	    printk((CPU_IS_060 ? "fault addr=%08lx fslw=%08lx\n"
+		    : "eff addr=%08lx pc=%08lx\n"),
+		   fp->un.fmt4.effaddr, fp->un.fmt4.pc);
+	    addr += sizeof(fp->un.fmt4);
+	    break;
+	case 0x7:
+	    printk("eff addr=%08lx ssw=%04x faddr=%08lx\n",
+		   fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
+	    printk("wb 1 stat/addr/data: %04x %08lx %08lx\n",
+		   fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
+	    printk("wb 2 stat/addr/data: %04x %08lx %08lx\n",
+		   fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
+	    printk("wb 3 stat/addr/data: %04x %08lx %08lx\n",
+		   fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
+	    printk("push data: %08lx %08lx %08lx %08lx\n",
+		   fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
+		   fp->un.fmt7.pd3);
+	    addr += sizeof(fp->un.fmt7);
+	    break;
+	case 0x9:
+	    printk("instr addr=%08lx\n", fp->un.fmt9.iaddr);
+	    addr += sizeof(fp->un.fmt9);
+	    break;
+	case 0xa:
+	    printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
+		   fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
+		   fp->un.fmta.daddr, fp->un.fmta.dobuf);
+	    addr += sizeof(fp->un.fmta);
+	    break;
+	case 0xb:
+	    printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
+		   fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
+		   fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
+	    printk("baddr=%08lx dibuf=%08lx ver=%x\n",
+		   fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
+	    addr += sizeof(fp->un.fmtb);
+	    break;
+	default:
+	    printk("\n");
+	}
+	show_stack(NULL, (unsigned long *)addr);
+
+	printk("Code: ");
+	for (i = 0; i < 10; i++)
+		printk("%04x ", 0xffff & ((short *) fp->ptregs.pc)[i]);
+	printk ("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+	unsigned long *endstack;
+	int i;
+
+	if (!stack) {
+		if (task)
+			stack = (unsigned long *)task->thread.esp0;
+		else
+			stack = (unsigned long *)&stack;
+	}
+	endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
+
+	printk("Stack from %08lx:", (unsigned long)stack);
+	for (i = 0; i < kstack_depth_to_print; i++) {
+		if (stack + 1 > endstack)
+			break;
+		if (i % 8 == 0)
+			printk("\n       ");
+		printk(" %08lx", *stack++);
+	}
+	printk("\n");
+	show_trace(stack);
+}
+
+/*
+ * The architecture-independent backtrace generator
+ */
+void dump_stack(void)
+{
+	unsigned long stack;
+
+	show_trace(&stack);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+void bad_super_trap (struct frame *fp)
+{
+	console_verbose();
+	if (fp->ptregs.vector < 4*sizeof(vec_names)/sizeof(vec_names[0]))
+		printk ("*** %s ***   FORMAT=%X\n",
+			vec_names[(fp->ptregs.vector) >> 2],
+			fp->ptregs.format);
+	else
+		printk ("*** Exception %d ***   FORMAT=%X\n",
+			(fp->ptregs.vector) >> 2,
+			fp->ptregs.format);
+	if (fp->ptregs.vector >> 2 == VEC_ADDRERR && CPU_IS_020_OR_030) {
+		unsigned short ssw = fp->un.fmtb.ssw;
+
+		printk ("SSW=%#06x  ", ssw);
+
+		if (ssw & RC)
+			printk ("Pipe stage C instruction fault at %#010lx\n",
+				(fp->ptregs.format) == 0xA ?
+				fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2);
+		if (ssw & RB)
+			printk ("Pipe stage B instruction fault at %#010lx\n",
+				(fp->ptregs.format) == 0xA ?
+				fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
+		if (ssw & DF)
+			printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+				ssw & RW ? "read" : "write",
+				fp->un.fmtb.daddr, space_names[ssw & DFC],
+				fp->ptregs.pc);
+	}
+	printk ("Current process id is %d\n", current->pid);
+	die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
+}
+
+asmlinkage void trap_c(struct frame *fp)
+{
+	int sig;
+	siginfo_t info;
+
+	if (fp->ptregs.sr & PS_S) {
+		if ((fp->ptregs.vector >> 2) == VEC_TRACE) {
+			/* traced a trapping instruction */
+			current->ptrace |= PT_DTRACE;
+		} else
+			bad_super_trap(fp);
+		return;
+	}
+
+	/* send the appropriate signal to the user program */
+	switch ((fp->ptregs.vector) >> 2) {
+	    case VEC_ADDRERR:
+		info.si_code = BUS_ADRALN;
+		sig = SIGBUS;
+		break;
+	    case VEC_ILLEGAL:
+	    case VEC_LINE10:
+	    case VEC_LINE11:
+		info.si_code = ILL_ILLOPC;
+		sig = SIGILL;
+		break;
+	    case VEC_PRIV:
+		info.si_code = ILL_PRVOPC;
+		sig = SIGILL;
+		break;
+	    case VEC_COPROC:
+		info.si_code = ILL_COPROC;
+		sig = SIGILL;
+		break;
+	    case VEC_TRAP1:
+	    case VEC_TRAP2:
+	    case VEC_TRAP3:
+	    case VEC_TRAP4:
+	    case VEC_TRAP5:
+	    case VEC_TRAP6:
+	    case VEC_TRAP7:
+	    case VEC_TRAP8:
+	    case VEC_TRAP9:
+	    case VEC_TRAP10:
+	    case VEC_TRAP11:
+	    case VEC_TRAP12:
+	    case VEC_TRAP13:
+	    case VEC_TRAP14:
+		info.si_code = ILL_ILLTRP;
+		sig = SIGILL;
+		break;
+	    case VEC_FPBRUC:
+	    case VEC_FPOE:
+	    case VEC_FPNAN:
+		info.si_code = FPE_FLTINV;
+		sig = SIGFPE;
+		break;
+	    case VEC_FPIR:
+		info.si_code = FPE_FLTRES;
+		sig = SIGFPE;
+		break;
+	    case VEC_FPDIVZ:
+		info.si_code = FPE_FLTDIV;
+		sig = SIGFPE;
+		break;
+	    case VEC_FPUNDER:
+		info.si_code = FPE_FLTUND;
+		sig = SIGFPE;
+		break;
+	    case VEC_FPOVER:
+		info.si_code = FPE_FLTOVF;
+		sig = SIGFPE;
+		break;
+	    case VEC_ZERODIV:
+		info.si_code = FPE_INTDIV;
+		sig = SIGFPE;
+		break;
+	    case VEC_CHK:
+	    case VEC_TRAP:
+		info.si_code = FPE_INTOVF;
+		sig = SIGFPE;
+		break;
+	    case VEC_TRACE:		/* ptrace single step */
+		info.si_code = TRAP_TRACE;
+		sig = SIGTRAP;
+		break;
+	    case VEC_TRAP15:		/* breakpoint */
+		info.si_code = TRAP_BRKPT;
+		sig = SIGTRAP;
+		break;
+	    default:
+		info.si_code = ILL_ILLOPC;
+		sig = SIGILL;
+		break;
+	}
+	info.si_signo = sig;
+	info.si_errno = 0;
+	switch (fp->ptregs.format) {
+	    default:
+		info.si_addr = (void *) fp->ptregs.pc;
+		break;
+	    case 2:
+		info.si_addr = (void *) fp->un.fmt2.iaddr;
+		break;
+	    case 7:
+		info.si_addr = (void *) fp->un.fmt7.effaddr;
+		break;
+	    case 9:
+		info.si_addr = (void *) fp->un.fmt9.iaddr;
+		break;
+	    case 10:
+		info.si_addr = (void *) fp->un.fmta.daddr;
+		break;
+	    case 11:
+		info.si_addr = (void *) fp->un.fmtb.daddr;
+		break;
+	}
+	force_sig_info (sig, &info, current);
+}
+
+void die_if_kernel (char *str, struct pt_regs *fp, int nr)
+{
+	if (!(fp->sr & PS_S))
+		return;
+
+	console_verbose();
+	printk("%s: %08x\n",str,nr);
+	print_modules();
+	printk("PC: [<%08lx>]",fp->pc);
+	print_symbol(" %s\n", fp->pc);
+	printk("\nSR: %04x  SP: %p  a2: %08lx\n",
+	       fp->sr, fp, fp->a2);
+	printk("d0: %08lx    d1: %08lx    d2: %08lx    d3: %08lx\n",
+	       fp->d0, fp->d1, fp->d2, fp->d3);
+	printk("d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
+	       fp->d4, fp->d5, fp->a0, fp->a1);
+
+	printk("Process %s (pid: %d, stackpage=%08lx)\n",
+		current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
+	show_stack(NULL, (unsigned long *)fp);
+	do_exit(SIGSEGV);
+}
+
+/*
+ * This function is called if an error occurs while accessing
+ * user-space from the fpsp040 code.
+ */
+asmlinkage void fpsp040_die(void)
+{
+	do_exit(SIGSEGV);
+}
+
+#ifdef CONFIG_M68KFPU_EMU
+asmlinkage void fpemu_signal(int signal, int code, void *addr)
+{
+	siginfo_t info;
+
+	info.si_signo = signal;
+	info.si_errno = 0;
+	info.si_code = code;
+	info.si_addr = addr;
+	force_sig_info(signal, &info, current);
+}
+#endif
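
The stack dumpers above (show_trace()/show_stack()) find the top of the current kernel stack purely by address arithmetic: kernel stacks live in THREAD_SIZE-aligned blocks, so rounding any pointer inside the stack up to the next THREAD_SIZE boundary gives its upper bound. A minimal user-space sketch of that rounding trick, not part of the patch; the 8192-byte size is only an assumed example value:

/* Sketch of the stack-end rounding used by show_stack(); THREAD_SIZE
 * must be a power of two for the mask trick to work. */
#include <stdio.h>

#define THREAD_SIZE 8192UL		/* assumed value for the example */

static unsigned long stack_end(unsigned long sp)
{
	/* add (size - 1), then clear the low bits: -size is the
	 * alignment mask of a power-of-two size */
	return (sp + THREAD_SIZE - 1) & -THREAD_SIZE;
}

int main(void)
{
	unsigned long sp = 0x00c04a5cUL;	/* some address inside a stack */

	printf("sp=%#lx  end=%#lx\n", sp, stack_end(sp));
	return 0;
}
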
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
new file mode 100644
index 0000000..e58654f
--- /dev/null
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -0,0 +1,95 @@
+/* ld script to make m68k Linux kernel */
+
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf32-m68k", "elf32-m68k", "elf32-m68k")
+OUTPUT_ARCH(m68k)
+ENTRY(_start)
+jiffies = jiffies_64 + 4;
+SECTIONS
+{
+  . = 0x1000;
+  _text = .;			/* Text and read-only data */
+  .text : {
+	*(.text)
+	SCHED_TEXT
+	*(.fixup)
+	*(.gnu.warning)
+	} :text = 0x4e75
+
+  . = ALIGN(16);		/* Exception table */
+  __start___ex_table = .;
+  __ex_table : { *(__ex_table) }
+  __stop___ex_table = .;
+
+  RODATA
+
+  _etext = .;			/* End of text section */
+
+  .data : {			/* Data */
+	*(.data)
+	CONSTRUCTORS
+	}
+
+  .bss : { *(.bss) }		/* BSS */
+
+  . = ALIGN(16);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) } :data
+
+  _edata = .;			/* End of data section */
+
+  /* will be freed after init */
+  . = ALIGN(4096);		/* Init code and data */
+  __init_begin = .;
+  .init.text : {
+	_sinittext = .;
+	*(.init.text)
+	_einittext = .;
+  }
+  .init.data : { *(.init.data) }
+  . = ALIGN(16);
+  __setup_start = .;
+  .init.setup : { *(.init.setup) }
+  __setup_end = .;
+  __initcall_start = .;
+  .initcall.init : {
+	*(.initcall1.init)
+	*(.initcall2.init)
+	*(.initcall3.init)
+	*(.initcall4.init)
+	*(.initcall5.init)
+	*(.initcall6.init)
+	*(.initcall7.init)
+  }
+  __initcall_end = .;
+  __con_initcall_start = .;
+  .con_initcall.init : { *(.con_initcall.init) }
+  __con_initcall_end = .;
+  SECURITY_INIT
+  . = ALIGN(8192);
+  __initramfs_start = .;
+  .init.ramfs : { *(.init.ramfs) }
+  __initramfs_end = .;
+  . = ALIGN(8192);
+  __init_end = .;
+
+  .data.init_task : { *(.data.init_task) }	/* The initial task and kernel stack */
+
+  _end = . ;
+
+  /* Sections to be discarded */
+  /DISCARD/ : {
+	*(.exit.text)
+	*(.exit.data)
+	*(.exitcall.exit)
+	}
+
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+}
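
The .initcallN.init sections collected between __initcall_start and __initcall_end above hold arrays of function pointers that generic boot code walks in order, which is what gives subsystems their relative initialization order. A stand-alone sketch of that linker-section technique, not taken from the kernel: section and symbol names are invented for the example, and it assumes GCC plus GNU ld's automatic __start_/__stop_ symbols for C-identifier section names.

#include <stdio.h>

typedef int (*initcall_t)(void);

/* Emit a function pointer into a named section. */
#define example_initcall(fn) \
	static initcall_t __initcall_##fn \
	__attribute__((used, section("example_initcalls"))) = fn

/* GNU ld provides these automatically for the section above. */
extern initcall_t __start_example_initcalls[];
extern initcall_t __stop_example_initcalls[];

static int hello_init(void)  { puts("hello_init");  return 0; }
static int second_init(void) { puts("second_init"); return 0; }

example_initcall(hello_init);
example_initcall(second_init);

int main(void)
{
	initcall_t *call;

	/* Walk the pointer array the linker assembled for us. */
	for (call = __start_example_initcalls; call < __stop_example_initcalls; call++)
		(*call)();
	return 0;
}
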
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
new file mode 100644
index 0000000..cc37e8d
--- /dev/null
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -0,0 +1,95 @@
+/* ld script to make m68k Linux kernel */
+
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf32-m68k", "elf32-m68k", "elf32-m68k")
+OUTPUT_ARCH(m68k)
+ENTRY(_start)
+jiffies = jiffies_64 + 4;
+SECTIONS
+{
+  . = 0xE004000;
+  _text = .;			/* Text and read-only data */
+  .text : {
+	*(.head)
+	*(.text)
+	SCHED_TEXT
+	*(.fixup)
+	*(.gnu.warning)
+	} :text = 0x4e75
+	RODATA
+
+  _etext = .;			/* End of text section */
+
+  .data : {			/* Data */
+	*(.data)
+	CONSTRUCTORS
+	. = ALIGN(16);		/* Exception table */
+	__start___ex_table = .;
+	*(__ex_table)
+	__stop___ex_table = .;
+	} :data
+  /* End of data goes *here* so that freeing init code works properly. */
+  _edata = .;
+
+  /* will be freed after init */
+  . = ALIGN(8192);	/* Init code and data */
+__init_begin = .;
+	.init.text : {
+		_sinittext = .;
+		*(.init.text)
+		_einittext = .;
+	}
+	.init.data : { *(.init.data) }
+	. = ALIGN(16);
+	__setup_start = .;
+	.init.setup : { *(.init.setup) }
+	__setup_end = .;
+	__initcall_start = .;
+	.initcall.init : {
+		*(.initcall1.init)
+		*(.initcall2.init)
+		*(.initcall3.init)
+		*(.initcall4.init)
+		*(.initcall5.init)
+		*(.initcall6.init)
+		*(.initcall7.init)
+	}
+	__initcall_end = .;
+	__con_initcall_start = .;
+	.con_initcall.init : { *(.con_initcall.init) }
+	__con_initcall_end = .;
+	SECURITY_INIT
+	. = ALIGN(8192);
+	__initramfs_start = .;
+	.init.ramfs : { *(.init.ramfs) }
+	__initramfs_end = .;
+	. = ALIGN(8192);
+	__init_end = .;
+	.init.task : { *(init_task) }
+
+
+  .bss : { *(.bss) }		/* BSS */
+
+  _end = . ;
+
+  /* Sections to be discarded */
+  /DISCARD/ : {
+	*(.exit.text)
+	*(.exit.data)
+	*(.exitcall.exit)
+	}
+
+  .crap : {
+	/* Stabs debugging sections.  */
+	*(.stab)
+	*(.stabstr)
+	*(.stab.excl)
+	*(.stab.exclstr)
+	*(.stab.index)
+	*(.stab.indexstr)
+	*(.comment)
+	*(.note)
+  }
+
+}
diff --git a/arch/m68k/kernel/vmlinux.lds.S b/arch/m68k/kernel/vmlinux.lds.S
new file mode 100644
index 0000000..497b924
--- /dev/null
+++ b/arch/m68k/kernel/vmlinux.lds.S
@@ -0,0 +1,11 @@
+#include <linux/config.h>
+PHDRS
+{
+  text PT_LOAD FILEHDR PHDRS FLAGS (7);
+  data PT_LOAD FLAGS (7);
+}
+#ifdef CONFIG_SUN3
+#include "vmlinux-sun3.lds"
+#else
+#include "vmlinux-std.lds"
+#endif
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
new file mode 100644
index 0000000..34b6dbc
--- /dev/null
+++ b/arch/m68k/lib/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for m68k-specific library files..
+#
+
+EXTRA_AFLAGS := -traditional
+
+lib-y		:= ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
+			checksum.o memcmp.o memcpy.o memset.o semaphore.o
diff --git a/arch/m68k/lib/ashldi3.c b/arch/m68k/lib/ashldi3.c
new file mode 100644
index 0000000..7729f33
--- /dev/null
+++ b/arch/m68k/lib/ashldi3.c
@@ -0,0 +1,62 @@
+/* ashldi3.c extracted from gcc-2.95.2/libgcc2.c which is: */
+/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING.  If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.  */
+
+#define BITS_PER_UNIT 8
+
+typedef		 int SItype	__attribute__ ((mode (SI)));
+typedef unsigned int USItype	__attribute__ ((mode (SI)));
+typedef		 int DItype	__attribute__ ((mode (DI)));
+typedef int word_type __attribute__ ((mode (__word__)));
+
+struct DIstruct {SItype high, low;};
+
+typedef union
+{
+  struct DIstruct s;
+  DItype ll;
+} DIunion;
+
+DItype
+__ashldi3 (DItype u, word_type b)
+{
+  DIunion w;
+  word_type bm;
+  DIunion uu;
+
+  if (b == 0)
+    return u;
+
+  uu.ll = u;
+
+  bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
+  if (bm <= 0)
+    {
+      w.s.low = 0;
+      w.s.high = (USItype)uu.s.low << -bm;
+    }
+  else
+    {
+      USItype carries = (USItype)uu.s.low >> bm;
+      w.s.low = (USItype)uu.s.low << b;
+      w.s.high = ((USItype)uu.s.high << b) | carries;
+    }
+
+  return w.ll;
+}
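
A small self-check of the split-shift logic above, rewritten with explicit 32-bit halves and compared against a native 64-bit shift. Illustrative only, not part of the patch; it covers shift counts 1..63, which is what __ashldi3 is asked to handle.

#include <stdio.h>
#include <stdint.h>

static uint64_t shl64_by_parts(uint32_t hi, uint32_t lo, unsigned b)
{
	uint32_t w_hi, w_lo;

	if (b >= 32) {			/* the "bm <= 0" branch above */
		w_lo = 0;
		w_hi = lo << (b - 32);
	} else {			/* carries cross from low into high */
		w_lo = lo << b;
		w_hi = (hi << b) | (lo >> (32 - b));
	}
	return ((uint64_t)w_hi << 32) | w_lo;
}

int main(void)
{
	uint64_t u = 0x123456789abcdef0ULL;
	unsigned b;

	for (b = 1; b < 64; b++)
		if (shl64_by_parts(u >> 32, (uint32_t)u, b) != (u << b))
			printf("mismatch at b=%u\n", b);
	puts("done");
	return 0;
}
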
diff --git a/arch/m68k/lib/ashrdi3.c b/arch/m68k/lib/ashrdi3.c
new file mode 100644
index 0000000..18ea5f7
--- /dev/null
+++ b/arch/m68k/lib/ashrdi3.c
@@ -0,0 +1,63 @@
+/* ashrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */
+/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING.  If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.  */
+
+#define BITS_PER_UNIT 8
+
+typedef		 int SItype	__attribute__ ((mode (SI)));
+typedef unsigned int USItype	__attribute__ ((mode (SI)));
+typedef		 int DItype	__attribute__ ((mode (DI)));
+typedef int word_type __attribute__ ((mode (__word__)));
+
+struct DIstruct {SItype high, low;};
+
+typedef union
+{
+  struct DIstruct s;
+  DItype ll;
+} DIunion;
+
+DItype
+__ashrdi3 (DItype u, word_type b)
+{
+  DIunion w;
+  word_type bm;
+  DIunion uu;
+
+  if (b == 0)
+    return u;
+
+  uu.ll = u;
+
+  bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
+  if (bm <= 0)
+    {
+      /* w.s.high = 1..1 or 0..0 */
+      w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1);
+      w.s.low = uu.s.high >> -bm;
+    }
+  else
+    {
+      USItype carries = (USItype)uu.s.high << bm;
+      w.s.high = uu.s.high >> b;
+      w.s.low = ((USItype)uu.s.low >> b) | carries;
+    }
+
+  return w.ll;
+}
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
new file mode 100644
index 0000000..4a5c544
--- /dev/null
+++ b/arch/m68k/lib/checksum.c
@@ -0,0 +1,422 @@
+/*
+ * INET		An implementation of the TCP/IP protocol suite for the LINUX
+ *		operating system.  INET is implemented using the  BSD Socket
+ *		interface as the means of communication with the user level.
+ *
+ *		IP/TCP/UDP checksumming routines
+ *
+ * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
+ *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ *		Tom May, <ftom@netcom.com>
+ *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
+ *		Lots of code moved from tcp.c and ip.c; see those files
+ *		for more names.
+ *
+ * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
+ *		Fixed some nasty bugs, causing some horrible crashes.
+ *		A: At some points, the sum (%0) was used as
+ *		length-counter instead of the length counter
+ *		(%1). Thanks to Roman Hodek for pointing this out.
+ *		B: GCC seems to mess up if one uses too many
+ *		data-registers to hold input values and one tries to
+ *		specify d0 and d1 as scratch registers. Letting gcc
+ *		choose these registers itself solves the problem.
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ *
+ * 1998/8/31	Andreas Schwab:
+ *		Zero out rest of buffer on exception in
+ *		csum_partial_copy_from_user.
+ */
+
+#include <linux/module.h>
+#include <net/checksum.h>
+
+/*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+
+unsigned int
+csum_partial (const unsigned char *buff, int len, unsigned int sum)
+{
+	unsigned long tmp1, tmp2;
+	  /*
+	   * Experiments with ethernet and slip connections show that buff
+	   * is aligned on either a 2-byte or 4-byte boundary.
+	   */
+	__asm__("movel %2,%3\n\t"
+		"btst #1,%3\n\t"	/* Check alignment */
+		"jeq 2f\n\t"
+		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
+		"jgt 1f\n\t"
+		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
+		"jra 4f\n"
+	     "1:\t"
+		"addw %2@+,%0\n\t"	/* add first word to sum */
+		"clrl %3\n\t"
+		"addxl %3,%0\n"		/* add X bit */
+	     "2:\t"
+		/* unrolled loop for the main part: do 8 longs at once */
+		"movel %1,%3\n\t"	/* save len in tmp1 */
+		"lsrl #5,%1\n\t"	/* len/32 */
+		"jeq 2f\n\t"		/* not enough... */
+		"subql #1,%1\n"
+	     "1:\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"dbra %1,1b\n\t"
+		"clrl %4\n\t"
+		"addxl %4,%0\n\t"	/* add X bit */
+		"clrw %1\n\t"
+		"subql #1,%1\n\t"
+		"jcc 1b\n"
+	     "2:\t"
+		"movel %3,%1\n\t"	/* restore len from tmp1 */
+		"andw #0x1c,%3\n\t"	/* number of rest longs */
+		"jeq 4f\n\t"
+		"lsrw #2,%3\n\t"
+		"subqw #1,%3\n"
+	     "3:\t"
+		/* loop for rest longs */
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"dbra %3,3b\n\t"
+		"clrl %4\n\t"
+		"addxl %4,%0\n"		/* add X bit */
+	     "4:\t"
+		/* now check for rest bytes that do not fit into longs */
+		"andw #3,%1\n\t"
+		"jeq 7f\n\t"
+		"clrl %4\n\t"		/* clear tmp2 for rest bytes */
+		"subqw #2,%1\n\t"
+		"jlt 5f\n\t"
+		"movew %2@+,%4\n\t"	/* have rest >= 2: get word */
+		"swap %4\n\t"		/* into bits 16..31 */
+		"tstw %1\n\t"		/* another byte? */
+		"jeq 6f\n"
+	     "5:\t"
+		"moveb %2@,%4\n\t"	/* have odd rest: get byte */
+		"lslw #8,%4\n\t"	/* into bits 8..15; 16..31 untouched */
+	     "6:\t"
+		"addl %4,%0\n\t"	/* now add rest long to sum */
+		"clrl %4\n\t"
+		"addxl %4,%0\n"		/* add X bit */
+	     "7:\t"
+		: "=d" (sum), "=d" (len), "=a" (buff),
+		  "=&d" (tmp1), "=&d" (tmp2)
+		: "0" (sum), "1" (len), "2" (buff)
+	    );
+	return(sum);
+}
+
+EXPORT_SYMBOL(csum_partial);
+
+
+/*
+ * copy from user space while checksumming, with exception handling.
+ */
+
+unsigned int
+csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst,
+			    int len, int sum, int *csum_err)
+{
+	/*
+	 * GCC doesn't like more than 10 operands for the asm
+	 * statements so we have to use tmp2 for the error
+	 * code.
+	 */
+	unsigned long tmp1, tmp2;
+
+	__asm__("movel %2,%4\n\t"
+		"btst #1,%4\n\t"	/* Check alignment */
+		"jeq 2f\n\t"
+		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
+		"jgt 1f\n\t"
+		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
+		"jra 4f\n"
+	     "1:\n"
+	     "10:\t"
+		"movesw %2@+,%4\n\t"	/* add first word to sum */
+		"addw %4,%0\n\t"
+		"movew %4,%3@+\n\t"
+		"clrl %4\n\t"
+		"addxl %4,%0\n"		/* add X bit */
+	     "2:\t"
+		/* unrolled loop for the main part: do 8 longs at once */
+		"movel %1,%4\n\t"	/* save len in tmp1 */
+		"lsrl #5,%1\n\t"	/* len/32 */
+		"jeq 2f\n\t"		/* not enough... */
+		"subql #1,%1\n"
+	     "1:\n"
+	     "11:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	     "12:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	     "13:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	     "14:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	     "15:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	     "16:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	     "17:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	     "18:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"dbra %1,1b\n\t"
+		"clrl %5\n\t"
+		"addxl %5,%0\n\t"	/* add X bit */
+		"clrw %1\n\t"
+		"subql #1,%1\n\t"
+		"jcc 1b\n"
+	     "2:\t"
+		"movel %4,%1\n\t"	/* restore len from tmp1 */
+		"andw #0x1c,%4\n\t"	/* number of rest longs */
+		"jeq 4f\n\t"
+		"lsrw #2,%4\n\t"
+		"subqw #1,%4\n"
+	     "3:\n"
+		/* loop for rest longs */
+	     "19:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"dbra %4,3b\n\t"
+		"clrl %5\n\t"
+		"addxl %5,%0\n"		/* add X bit */
+	     "4:\t"
+		/* now check for rest bytes that do not fit into longs */
+		"andw #3,%1\n\t"
+		"jeq 7f\n\t"
+		"clrl %5\n\t"		/* clear tmp2 for rest bytes */
+		"subqw #2,%1\n\t"
+		"jlt 5f\n\t"
+	     "20:\t"
+		"movesw %2@+,%5\n\t"	/* have rest >= 2: get word */
+		"movew %5,%3@+\n\t"
+		"swap %5\n\t"		/* into bits 16..31 */
+		"tstw %1\n\t"		/* another byte? */
+		"jeq 6f\n"
+	     "5:\n"
+	     "21:\t"
+		"movesb %2@,%5\n\t"	/* have odd rest: get byte */
+		"moveb %5,%3@+\n\t"
+		"lslw #8,%5\n\t"	/* into bits 8..15; 16..31 untouched */
+	     "6:\t"
+		"addl %5,%0\n\t"	/* now add rest long to sum */
+		"clrl %5\n\t"
+		"addxl %5,%0\n\t"	/* add X bit */
+	     "7:\t"
+		"clrl %5\n"		/* no error - clear return value */
+	     "8:\n"
+		".section .fixup,\"ax\"\n"
+		".even\n"
+		/* If any exception occurs, zero out the rest.
+		   Similarities with the code above are intentional :-) */
+	     "90:\t"
+		"clrw %3@+\n\t"
+		"movel %1,%4\n\t"
+		"lsrl #5,%1\n\t"
+		"jeq 1f\n\t"
+		"subql #1,%1\n"
+	     "91:\t"
+		"clrl %3@+\n"
+	     "92:\t"
+		"clrl %3@+\n"
+	     "93:\t"
+		"clrl %3@+\n"
+	     "94:\t"
+		"clrl %3@+\n"
+	     "95:\t"
+		"clrl %3@+\n"
+	     "96:\t"
+		"clrl %3@+\n"
+	     "97:\t"
+		"clrl %3@+\n"
+	     "98:\t"
+		"clrl %3@+\n\t"
+		"dbra %1,91b\n\t"
+		"clrw %1\n\t"
+		"subql #1,%1\n\t"
+		"jcc 91b\n"
+	     "1:\t"
+		"movel %4,%1\n\t"
+		"andw #0x1c,%4\n\t"
+		"jeq 1f\n\t"
+		"lsrw #2,%4\n\t"
+		"subqw #1,%4\n"
+	     "99:\t"
+		"clrl %3@+\n\t"
+		"dbra %4,99b\n\t"
+	     "1:\t"
+		"andw #3,%1\n\t"
+		"jeq 9f\n"
+	     "100:\t"
+		"clrw %3@+\n\t"
+		"tstw %1\n\t"
+		"jeq 9f\n"
+	     "101:\t"
+		"clrb %3@+\n"
+	     "9:\t"
+#define STR(X) STR1(X)
+#define STR1(X) #X
+		"moveq #-" STR(EFAULT) ",%5\n\t"
+		"jra 8b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		".long 10b,90b\n"
+		".long 11b,91b\n"
+		".long 12b,92b\n"
+		".long 13b,93b\n"
+		".long 14b,94b\n"
+		".long 15b,95b\n"
+		".long 16b,96b\n"
+		".long 17b,97b\n"
+		".long 18b,98b\n"
+		".long 19b,99b\n"
+		".long 20b,100b\n"
+		".long 21b,101b\n"
+		".previous"
+		: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
+		  "=&d" (tmp1), "=d" (tmp2)
+		: "0" (sum), "1" (len), "2" (src), "3" (dst)
+	    );
+
+	*csum_err = tmp2;
+
+	return(sum);
+}
+
+/*
+ * copy from kernel space while checksumming, otherwise like csum_partial
+ */
+
+unsigned int
+csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, int sum)
+{
+	unsigned long tmp1, tmp2;
+	__asm__("movel %2,%4\n\t"
+		"btst #1,%4\n\t"	/* Check alignment */
+		"jeq 2f\n\t"
+		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
+		"jgt 1f\n\t"
+		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
+		"jra 4f\n"
+	     "1:\t"
+		"movew %2@+,%4\n\t"	/* add first word to sum */
+		"addw %4,%0\n\t"
+		"movew %4,%3@+\n\t"
+		"clrl %4\n\t"
+		"addxl %4,%0\n"		/* add X bit */
+	     "2:\t"
+		/* unrolled loop for the main part: do 8 longs at once */
+		"movel %1,%4\n\t"	/* save len in tmp1 */
+		"lsrl #5,%1\n\t"	/* len/32 */
+		"jeq 2f\n\t"		/* not enough... */
+		"subql #1,%1\n"
+	     "1:\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"dbra %1,1b\n\t"
+		"clrl %5\n\t"
+		"addxl %5,%0\n\t"	/* add X bit */
+		"clrw %1\n\t"
+		"subql #1,%1\n\t"
+		"jcc 1b\n"
+	     "2:\t"
+		"movel %4,%1\n\t"	/* restore len from tmp1 */
+		"andw #0x1c,%4\n\t"	/* number of rest longs */
+		"jeq 4f\n\t"
+		"lsrw #2,%4\n\t"
+		"subqw #1,%4\n"
+	     "3:\t"
+		/* loop for rest longs */
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"dbra %4,3b\n\t"
+		"clrl %5\n\t"
+		"addxl %5,%0\n"		/* add X bit */
+	     "4:\t"
+		/* now check for rest bytes that do not fit into longs */
+		"andw #3,%1\n\t"
+		"jeq 7f\n\t"
+		"clrl %5\n\t"		/* clear tmp2 for rest bytes */
+		"subqw #2,%1\n\t"
+		"jlt 5f\n\t"
+		"movew %2@+,%5\n\t"	/* have rest >= 2: get word */
+		"movew %5,%3@+\n\t"
+		"swap %5\n\t"		/* into bits 16..31 */
+		"tstw %1\n\t"		/* another byte? */
+		"jeq 6f\n"
+	     "5:\t"
+		"moveb %2@,%5\n\t"	/* have odd rest: get byte */
+		"moveb %5,%3@+\n\t"
+		"lslw #8,%5\n"		/* into bits 8..15; 16..31 untouched */
+	     "6:\t"
+		"addl %5,%0\n\t"	/* now add rest long to sum */
+		"clrl %5\n\t"
+		"addxl %5,%0\n"		/* add X bit */
+	     "7:\t"
+		: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
+		  "=&d" (tmp1), "=&d" (tmp2)
+		: "0" (sum), "1" (len), "2" (src), "3" (dst)
+	    );
+    return(sum);
+}
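
The assembler above accumulates 32-bit words with end-around carry (the addxl of the X bit) into a running 32-bit sum, which callers later fold down to the final 16-bit Internet checksum. A portable sketch of that accumulation, not the kernel routine: alignment and odd-length handling are deliberately omitted, and the buffer is assumed to hold whole 32-bit words in native byte order, as the asm reads them.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t csum_partial_sketch(const unsigned char *buff, int len,
				    uint32_t sum)
{
	while (len >= 4) {
		uint32_t w;
		uint64_t t;

		memcpy(&w, buff, 4);			/* one "movel" worth of data */
		t = (uint64_t)sum + w;
		sum = (uint32_t)t + (uint32_t)(t >> 32);	/* end-around carry */
		buff += 4;
		len -= 4;
	}
	return sum;
}

int main(void)
{
	unsigned char data[8] = { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01 };

	printf("%#x\n", (unsigned)csum_partial_sketch(data, sizeof data, 0));
	return 0;
}
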
diff --git a/arch/m68k/lib/lshrdi3.c b/arch/m68k/lib/lshrdi3.c
new file mode 100644
index 0000000..d06442d
--- /dev/null
+++ b/arch/m68k/lib/lshrdi3.c
@@ -0,0 +1,62 @@
+/* lshrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */
+/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING.  If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.  */
+
+#define BITS_PER_UNIT 8
+
+typedef		 int SItype	__attribute__ ((mode (SI)));
+typedef unsigned int USItype	__attribute__ ((mode (SI)));
+typedef		 int DItype	__attribute__ ((mode (DI)));
+typedef int word_type __attribute__ ((mode (__word__)));
+
+struct DIstruct {SItype high, low;};
+
+typedef union
+{
+  struct DIstruct s;
+  DItype ll;
+} DIunion;
+
+DItype
+__lshrdi3 (DItype u, word_type b)
+{
+  DIunion w;
+  word_type bm;
+  DIunion uu;
+
+  if (b == 0)
+    return u;
+
+  uu.ll = u;
+
+  bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
+  if (bm <= 0)
+    {
+      w.s.high = 0;
+      w.s.low = (USItype)uu.s.high >> -bm;
+    }
+  else
+    {
+      USItype carries = (USItype)uu.s.high << bm;
+      w.s.high = (USItype)uu.s.high >> b;
+      w.s.low = ((USItype)uu.s.low >> b) | carries;
+    }
+
+  return w.ll;
+}
diff --git a/arch/m68k/lib/memcmp.c b/arch/m68k/lib/memcmp.c
new file mode 100644
index 0000000..f4796fe
--- /dev/null
+++ b/arch/m68k/lib/memcmp.c
@@ -0,0 +1,11 @@
+#include <linux/types.h>
+
+int memcmp(const void * cs,const void * ct,size_t count)
+{
+  const unsigned char *su1, *su2;
+
+  for( su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
+    if (*su1 != *su2)
+      return((*su1 < *su2) ? -1 : +1);
+  return(0);
+}
diff --git a/arch/m68k/lib/memcpy.c b/arch/m68k/lib/memcpy.c
new file mode 100644
index 0000000..73e1818
--- /dev/null
+++ b/arch/m68k/lib/memcpy.c
@@ -0,0 +1,75 @@
+#include <linux/types.h>
+
+void * memcpy(void * to, const void * from, size_t n)
+{
+  void *xto = to;
+  size_t temp, temp1;
+
+  if (!n)
+    return xto;
+  if ((long) to & 1)
+    {
+      char *cto = to;
+      const char *cfrom = from;
+      *cto++ = *cfrom++;
+      to = cto;
+      from = cfrom;
+      n--;
+    }
+  if (n > 2 && (long) to & 2)
+    {
+      short *sto = to;
+      const short *sfrom = from;
+      *sto++ = *sfrom++;
+      to = sto;
+      from = sfrom;
+      n -= 2;
+    }
+  temp = n >> 2;
+  if (temp)
+    {
+      long *lto = to;
+      const long *lfrom = from;
+
+      __asm__ __volatile__("movel %2,%3\n\t"
+			   "andw  #7,%3\n\t"
+			   "lsrl  #3,%2\n\t"
+			   "negw  %3\n\t"
+			   "jmp   %%pc@(1f,%3:w:2)\n\t"
+			   "4:\t"
+			   "movel %0@+,%1@+\n\t"
+			   "movel %0@+,%1@+\n\t"
+			   "movel %0@+,%1@+\n\t"
+			   "movel %0@+,%1@+\n\t"
+			   "movel %0@+,%1@+\n\t"
+			   "movel %0@+,%1@+\n\t"
+			   "movel %0@+,%1@+\n\t"
+			   "movel %0@+,%1@+\n\t"
+			   "1:\t"
+			   "dbra  %2,4b\n\t"
+			   "clrw  %2\n\t"
+			   "subql #1,%2\n\t"
+			   "jpl   4b\n\t"
+			   : "=a" (lfrom), "=a" (lto), "=d" (temp),
+			   "=&d" (temp1)
+			   : "0" (lfrom), "1" (lto), "2" (temp)
+			   );
+      to = lto;
+      from = lfrom;
+    }
+  if (n & 2)
+    {
+      short *sto = to;
+      const short *sfrom = from;
+      *sto++ = *sfrom++;
+      to = sto;
+      from = sfrom;
+    }
+  if (n & 1)
+    {
+      char *cto = to;
+      const char *cfrom = from;
+      *cto = *cfrom;
+    }
+  return xto;
+}
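
The routine above follows a classic shape: copy a byte and/or a 16-bit word until the destination is 4-byte aligned, move the bulk as 32-bit words (eight per iteration, entered through a computed jump), then mop up the tail. A portable sketch of that strategy, illustrative only and without the unrolling:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void *memcpy_sketch(void *to, const void *from, size_t n)
{
	unsigned char *d = to;
	const unsigned char *s = from;

	if (n && ((uintptr_t)d & 1)) {		/* odd destination: one byte first */
		*d++ = *s++;
		n--;
	}
	if (n > 2 && ((uintptr_t)d & 2)) {	/* align to 4 with one 16-bit step */
		d[0] = s[0]; d[1] = s[1];
		d += 2; s += 2;
		n -= 2;
	}
	while (n >= 4) {			/* bulk: 32 bits at a time */
		memcpy(d, s, 4);		/* stands in for one "movel" */
		d += 4; s += 4;
		n -= 4;
	}
	while (n--)				/* trailing bytes */
		*d++ = *s++;
	return to;
}

int main(void)
{
	char src[] = "alignment test string";
	char dst[sizeof src];

	memcpy_sketch(dst, src, sizeof src);
	puts(dst);
	return 0;
}
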
diff --git a/arch/m68k/lib/memset.c b/arch/m68k/lib/memset.c
new file mode 100644
index 0000000..d55fdb2
--- /dev/null
+++ b/arch/m68k/lib/memset.c
@@ -0,0 +1,68 @@
+#include <linux/types.h>
+
+void * memset(void * s, int c, size_t count)
+{
+  void *xs = s;
+  size_t temp, temp1;
+
+  if (!count)
+    return xs;
+  c &= 0xff;
+  c |= c << 8;
+  c |= c << 16;
+  if ((long) s & 1)
+    {
+      char *cs = s;
+      *cs++ = c;
+      s = cs;
+      count--;
+    }
+  if (count > 2 && (long) s & 2)
+    {
+      short *ss = s;
+      *ss++ = c;
+      s = ss;
+      count -= 2;
+    }
+  temp = count >> 2;
+  if (temp)
+    {
+      long *ls = s;
+
+      __asm__ __volatile__("movel %1,%2\n\t"
+			   "andw  #7,%2\n\t"
+			   "lsrl  #3,%1\n\t"
+			   "negw  %2\n\t"
+			   "jmp   %%pc@(2f,%2:w:2)\n\t"
+			   "1:\t"
+			   "movel %3,%0@+\n\t"
+			   "movel %3,%0@+\n\t"
+			   "movel %3,%0@+\n\t"
+			   "movel %3,%0@+\n\t"
+			   "movel %3,%0@+\n\t"
+			   "movel %3,%0@+\n\t"
+			   "movel %3,%0@+\n\t"
+			   "movel %3,%0@+\n\t"
+			   "2:\t"
+			   "dbra  %1,1b\n\t"
+			   "clrw  %1\n\t"
+			   "subql #1,%1\n\t"
+			   "jpl   1b\n\t"
+			   : "=a" (ls), "=d" (temp), "=&d" (temp1)
+			   : "d" (c), "0" (ls), "1" (temp)
+			   );
+      s = ls;
+    }
+  if (count & 2)
+    {
+      short *ss = s;
+      *ss++ = c;
+      s = ss;
+    }
+  if (count & 1)
+    {
+      char *cs = s;
+      *cs = c;
+    }
+  return xs;
+}
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
new file mode 100644
index 0000000..be4f275
--- /dev/null
+++ b/arch/m68k/lib/muldi3.c
@@ -0,0 +1,63 @@
+/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
+			   gcc-2.7.2.3/longlong.h which is: */
+/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING.  If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.  */
+
+#define BITS_PER_UNIT 8
+
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("mulu%.l %3,%1:%0"						\
+           : "=d" ((USItype)(w0)),					\
+             "=d" ((USItype)(w1))					\
+           : "%0" ((USItype)(u)),					\
+             "dmi" ((USItype)(v)))
+
+#define __umulsidi3(u, v) \
+  ({DIunion __w;							\
+    umul_ppmm (__w.s.high, __w.s.low, u, v);				\
+    __w.ll; })
+
+typedef		 int SItype	__attribute__ ((mode (SI)));
+typedef unsigned int USItype	__attribute__ ((mode (SI)));
+typedef		 int DItype	__attribute__ ((mode (DI)));
+typedef int word_type __attribute__ ((mode (__word__)));
+
+struct DIstruct {SItype high, low;};
+
+typedef union
+{
+  struct DIstruct s;
+  DItype ll;
+} DIunion;
+
+DItype
+__muldi3 (DItype u, DItype v)
+{
+  DIunion w;
+  DIunion uu, vv;
+
+  uu.ll = u,
+  vv.ll = v;
+
+  w.ll = __umulsidi3 (uu.s.low, vv.s.low);
+  w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
+	       + (USItype) uu.s.high * (USItype) vv.s.low);
+
+  return w.ll;
+}
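
__muldi3 above assembles a 64-bit product from 32-bit pieces: the low-by-low product from umul_ppmm(), plus the two cross terms added into the high word; the high-by-high term is dropped because it only contributes above bit 63. A quick self-check of that identity, illustrative and not part of the patch:

#include <stdio.h>
#include <stdint.h>

static uint64_t muldi3_sketch(uint64_t u, uint64_t v)
{
	uint32_t ul = (uint32_t)u, uh = (uint32_t)(u >> 32);
	uint32_t vl = (uint32_t)v, vh = (uint32_t)(v >> 32);
	uint64_t w = (uint64_t)ul * vl;		/* low x low: the umul_ppmm() step */
	uint32_t cross = ul * vh + uh * vl;	/* cross terms, truncated to 32 bits */

	return w + ((uint64_t)cross << 32);	/* high x high would land above bit 63 */
}

int main(void)
{
	uint64_t a = 0x123456789abcdef0ULL, b = 0x0fedcba987654321ULL;

	printf("%d\n", muldi3_sketch(a, b) == a * b);	/* prints 1 */
	return 0;
}
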
diff --git a/arch/m68k/lib/semaphore.S b/arch/m68k/lib/semaphore.S
new file mode 100644
index 0000000..0215624c
--- /dev/null
+++ b/arch/m68k/lib/semaphore.S
@@ -0,0 +1,53 @@
+/*
+ *  linux/arch/m68k/lib/semaphore.S
+ *
+ *  Copyright (C) 1996  Linus Torvalds
+ *
+ *  m68k version by Andreas Schwab
+ */
+
+#include <linux/linkage.h>
+#include <asm/semaphore.h>
+
+/*
+ * The semaphore operations have a special calling sequence that
+ * allows us to do a simpler in-line version of them. These routines
+ * need to convert that sequence back into the C sequence when
+ * there is contention on the semaphore.
+ */
+ENTRY(__down_failed)
+	moveml %a0/%d0/%d1,-(%sp)
+	movel %a1,-(%sp)
+	jbsr __down
+	movel (%sp)+,%a1
+	moveml (%sp)+,%a0/%d0/%d1
+	rts
+
+ENTRY(__down_failed_interruptible)
+	movel %a0,-(%sp)
+	movel %d1,-(%sp)
+	movel %a1,-(%sp)
+	jbsr __down_interruptible
+	movel (%sp)+,%a1
+	movel (%sp)+,%d1
+	movel (%sp)+,%a0
+	rts
+
+ENTRY(__down_failed_trylock)
+	movel %a0,-(%sp)
+	movel %d1,-(%sp)
+	movel %a1,-(%sp)
+	jbsr __down_trylock
+	movel (%sp)+,%a1
+	movel (%sp)+,%d1
+	movel (%sp)+,%a0
+	rts
+
+ENTRY(__up_wakeup)
+	moveml %a0/%d0/%d1,-(%sp)
+	movel %a1,-(%sp)
+	jbsr __up
+	movel (%sp)+,%a1
+	moveml (%sp)+,%a0/%d0/%d1
+	rts
+
diff --git a/arch/m68k/mac/Makefile b/arch/m68k/mac/Makefile
new file mode 100644
index 0000000..995a09d9
--- /dev/null
+++ b/arch/m68k/mac/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Linux arch/m68k/mac source directory
+#
+
+obj-y		:= config.o bootparse.o macints.o iop.o via.o oss.o psc.o \
+			baboon.o macboing.o debug.o misc.o mac_ksyms.o
diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c
new file mode 100644
index 0000000..b19b7dd
--- /dev/null
+++ b/arch/m68k/mac/baboon.c
@@ -0,0 +1,126 @@
+/*
+ * Baboon Custom IC Management
+ *
+ * The Baboon custom IC controls the IDE, PCMCIA and media bay on the
+ * PowerBook 190. It multiplexes multiple interrupt sources onto the
+ * Nubus slot $C interrupt.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ide.h>
+
+#include <asm/traps.h>
+#include <asm/bootinfo.h>
+#include <asm/macintosh.h>
+#include <asm/macints.h>
+#include <asm/mac_baboon.h>
+
+/* #define DEBUG_BABOON */
+/* #define DEBUG_IRQS */
+
+int baboon_present,baboon_active;
+volatile struct baboon *baboon;
+
+irqreturn_t baboon_irq(int, void *, struct pt_regs *);
+
+#if 0
+extern int macide_ack_intr(struct ata_channel *);
+#endif
+
+/*
+ * Baboon initialization.
+ */
+
+void __init baboon_init(void)
+{
+	if (macintosh_config->ident != MAC_MODEL_PB190) {
+		baboon = NULL;
+		baboon_present = 0;
+		return;
+	}
+
+	baboon = (struct baboon *) BABOON_BASE;
+	baboon_present = 1;
+	baboon_active = 0;
+
+	printk("Baboon detected at %p\n", baboon);
+}
+
+/*
+ * Register the Baboon interrupt dispatcher on nubus slot $C.
+ */
+
+void __init baboon_register_interrupts(void)
+{
+	request_irq(IRQ_NUBUS_C, baboon_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
+		    "baboon", (void *) baboon);
+}
+
+/*
+ * Baboon interrupt handler. This works a lot like a VIA.
+ */
+
+irqreturn_t baboon_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	int irq_bit,i;
+	unsigned char events;
+
+#ifdef DEBUG_IRQS
+	printk("baboon_irq: mb_control %02X mb_ifr %02X mb_status %02X active %02X\n",
+		(uint) baboon->mb_control, (uint) baboon->mb_ifr,
+		(uint) baboon->mb_status,  baboon_active);
+#endif
+
+	if (!(events = baboon->mb_ifr & 0x07))
+		return IRQ_NONE;
+
+	for (i = 0, irq_bit = 1 ; i < 3 ; i++, irq_bit <<= 1) {
+	        if (events & irq_bit/* & baboon_active*/) {
+			baboon_active &= ~irq_bit;
+			mac_do_irq_list(IRQ_BABOON_0 + i, regs);
+			baboon_active |= irq_bit;
+			baboon->mb_ifr &= ~irq_bit;
+		}
+	}
+#if 0
+	if (baboon->mb_ifr & 0x02) macide_ack_intr(NULL);
+	/* for now we need to smash all interrupts */
+	baboon->mb_ifr &= ~events;
+#endif
+	return IRQ_HANDLED;
+}
+
+void baboon_irq_enable(int irq) {
+	int irq_idx	= IRQ_IDX(irq);
+
+#ifdef DEBUG_IRQUSE
+	printk("baboon_irq_enable(%d)\n", irq);
+#endif
+	baboon_active |= (1 << irq_idx);
+}
+
+void baboon_irq_disable(int irq) {
+	int irq_idx	= IRQ_IDX(irq);
+
+#ifdef DEBUG_IRQUSE
+	printk("baboon_irq_disable(%d)\n", irq);
+#endif
+	baboon_active &= ~(1 << irq_idx);
+}
+
+void baboon_irq_clear(int irq) {
+	int irq_idx	= IRQ_IDX(irq);
+
+	baboon->mb_ifr &= ~(1 << irq_idx);
+}
+
+int baboon_irq_pending(int irq)
+{
+	int irq_idx	= IRQ_IDX(irq);
+
+	return baboon->mb_ifr & (1 << irq_idx);
+}
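
baboon_irq() above is a typical interrupt demultiplexer: read the flag register, dispatch one sub-handler per set bit, and acknowledge each source. A stand-alone sketch of that pattern with a simulated flag register; the handler table and the choice of bit 1 as the IDE source are invented for the example, loosely following the #if 0 hint in the code above.

#include <stdio.h>

#define NR_SUB_IRQS 3

static unsigned char fake_ifr;			/* stands in for baboon->mb_ifr */
static void (*sub_handler[NR_SUB_IRQS])(int);

static void demux_irq(void)
{
	unsigned char events = fake_ifr & 0x07;
	int i, bit;

	if (!events)
		return;				/* not ours: IRQ_NONE in the driver */

	for (i = 0, bit = 1; i < NR_SUB_IRQS; i++, bit <<= 1) {
		if (events & bit) {
			if (sub_handler[i])
				sub_handler[i](i);
			fake_ifr &= ~bit;	/* acknowledge this source */
		}
	}
}

static void ide_handler(int n) { printf("sub-irq %d (IDE)\n", n); }

int main(void)
{
	sub_handler[1] = ide_handler;		/* pretend bit 1 is the IDE interrupt */
	fake_ifr = 0x02;
	demux_irq();
	return 0;
}
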
diff --git a/arch/m68k/mac/bootparse.c b/arch/m68k/mac/bootparse.c
new file mode 100644
index 0000000..36d2236
--- /dev/null
+++ b/arch/m68k/mac/bootparse.c
@@ -0,0 +1,122 @@
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/irq.h>
+#include <asm/setup.h>
+#include <asm/bootinfo.h>
+#include <asm/macintosh.h>
+
+/*
+ *	Booter vars
+ */
+
+int boothowto;
+int _boothowto;
+
+/*
+ *	Called early to parse the environment (passed to us from the booter)
+ *	into a bootinfo struct. Will die as soon as we have our own booter
+ */
+
+#define atol(x)	simple_strtoul(x,NULL,0)
+
+void parse_booter(char *env)
+{
+	char *name;
+	char *value;
+#if 0
+	while(0 && *env)
+#else
+	while(*env)
+#endif
+	{
+		name=env;
+		value=name;
+		while(*value!='='&&*value)
+			value++;
+		if(*value=='=')
+			*value++=0;
+		env=value;
+		while(*env)
+			env++;
+		env++;
+#if 0
+		if(strcmp(name,"VIDEO_ADDR")==0)
+			mac_mch.videoaddr=atol(value);
+		if(strcmp(name,"ROW_BYTES")==0)
+			mac_mch.videorow=atol(value);
+		if(strcmp(name,"SCREEN_DEPTH")==0)
+			mac_mch.videodepth=atol(value);
+		if(strcmp(name,"DIMENSIONS")==0)
+			mac_mch.dimensions=atol(value);
+#endif
+		if(strcmp(name,"BOOTTIME")==0)
+			mac_bi_data.boottime=atol(value);
+		if(strcmp(name,"GMTBIAS")==0)
+			mac_bi_data.gmtbias=atol(value);
+		if(strcmp(name,"BOOTERVER")==0)
+			mac_bi_data.bootver=atol(value);
+		if(strcmp(name,"MACOS_VIDEO")==0)
+			mac_bi_data.videological=atol(value);
+		if(strcmp(name,"MACOS_SCC")==0)
+			mac_bi_data.sccbase=atol(value);
+		if(strcmp(name,"MACHINEID")==0)
+			mac_bi_data.id=atol(value);
+		if(strcmp(name,"MEMSIZE")==0)
+			mac_bi_data.memsize=atol(value);
+		if(strcmp(name,"SERIAL_MODEM_FLAGS")==0)
+			mac_bi_data.serialmf=atol(value);
+		if(strcmp(name,"SERIAL_MODEM_HSKICLK")==0)
+			mac_bi_data.serialhsk=atol(value);
+		if(strcmp(name,"SERIAL_MODEM_GPICLK")==0)
+			mac_bi_data.serialgpi=atol(value);
+		if(strcmp(name,"SERIAL_PRINT_FLAGS")==0)
+			mac_bi_data.printmf=atol(value);
+		if(strcmp(name,"SERIAL_PRINT_HSKICLK")==0)
+			mac_bi_data.printhsk=atol(value);
+		if(strcmp(name,"SERIAL_PRINT_GPICLK")==0)
+			mac_bi_data.printgpi=atol(value);
+		if(strcmp(name,"PROCESSOR")==0)
+			mac_bi_data.cpuid=atol(value);
+		if(strcmp(name,"ROMBASE")==0)
+			mac_bi_data.rombase=atol(value);
+		if(strcmp(name,"TIMEDBRA")==0)
+			mac_bi_data.timedbra=atol(value);
+		if(strcmp(name,"ADBDELAY")==0)
+			mac_bi_data.adbdelay=atol(value);
+	}
+#if 0	/* XXX: TODO with m68k_mach_* */
+	/* Fill in the base stuff */
+	boot_info.machtype=MACH_MAC;
+	/* Read this from the macinfo we got ! */
+/*	boot_info.cputype=CPU_68020|FPUB_68881;*/
+/*	boot_info.memory[0].addr=0;*/
+/*	boot_info.memory[0].size=((mac_bi_data.id>>7)&31)<<20;*/
+	boot_info.num_memory=1;		/* On a MacII */
+	boot_info.ramdisk_size=0;	/* For now */
+	*boot_info.command_line=0;
+#endif
+ }
+
+
+void print_booter(char *env)
+{
+	char *name;
+	char *value;
+	while(*env)
+	{
+		name=env;
+		value=name;
+		while(*value!='='&&*value)
+			value++;
+		if(*value=='=')
+			*value++=0;
+		env=value;
+		while(*env)
+			env++;
+		env++;
+		printk("%s=%s\n", name,value);
+	}
+ }
+
+
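
parse_booter()/print_booter() above walk an environment block laid out as consecutive NUL-terminated "NAME=VALUE" strings ending with an empty string. A stand-alone sketch of that tokenizing loop; the variable values are made up, only the layout matches the code above.

#include <stdio.h>
#include <string.h>

static void walk_booter_env(char *env)
{
	while (*env) {
		char *name = env;
		char *value = strchr(env, '=');

		env += strlen(env) + 1;		/* step past this entry's NUL */
		if (value)
			*value++ = '\0';	/* split NAME from VALUE */
		printf("%s = %s\n", name, value ? value : "");
	}
}

int main(void)
{
	/* embedded NULs; the literal's trailing NUL gives the empty
	 * string that terminates the block */
	char env[] = "BOOTTIME=1113690036\0GMTBIAS=-480\0MACHINEID=6\0";

	walk_booter_env(env);
	return 0;
}
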
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
new file mode 100644
index 0000000..cd19cbb
--- /dev/null
+++ b/arch/m68k/mac/config.c
@@ -0,0 +1,902 @@
+/*
+ *  linux/arch/m68k/mac/config.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Miscellaneous linux stuff
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+/* keyb */
+#include <linux/random.h>
+#include <linux/delay.h>
+/* keyb */
+#include <linux/init.h>
+#include <linux/vt_kern.h>
+
+#define BOOTINFO_COMPAT_1_0
+#include <asm/setup.h>
+#include <asm/bootinfo.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/pgtable.h>
+#include <asm/rtc.h>
+#include <asm/machdep.h>
+
+#include <asm/macintosh.h>
+#include <asm/macints.h>
+#include <asm/machw.h>
+
+#include <asm/mac_iop.h>
+#include <asm/mac_via.h>
+#include <asm/mac_oss.h>
+#include <asm/mac_psc.h>
+
+/* Mac bootinfo struct */
+
+struct mac_booter_data mac_bi_data;
+int mac_bisize = sizeof mac_bi_data;
+
+struct mac_hw_present mac_hw_present;
+
+/* New m68k bootinfo stuff and videobase */
+
+extern int m68k_num_memory;
+extern struct mem_info m68k_memory[NUM_MEMINFO];
+
+extern struct mem_info m68k_ramdisk;
+
+extern char m68k_command_line[CL_SIZE];
+
+void *mac_env;		/* Loaded by the boot asm */
+
+/* The phys. video addr. - might be bogus on some machines */
+unsigned long mac_orig_videoaddr;
+
+/* Mac specific timer functions */
+extern unsigned long mac_gettimeoffset (void);
+extern int mac_hwclk (int, struct rtc_time *);
+extern int mac_set_clock_mmss (unsigned long);
+extern int show_mac_interrupts(struct seq_file *, void *);
+extern void iop_preinit(void);
+extern void iop_init(void);
+extern void via_init(void);
+extern void via_init_clock(irqreturn_t (*func)(int, void *, struct pt_regs *));
+extern void via_flush_cache(void);
+extern void oss_init(void);
+extern void psc_init(void);
+extern void baboon_init(void);
+
+extern void mac_mksound(unsigned int, unsigned int);
+
+extern void nubus_sweep_video(void);
+
+/* Mac specific debug functions (in debug.c) */
+extern void mac_debug_init(void);
+extern void mac_debugging_long(int, long);
+
+static void mac_get_model(char *str);
+
+void mac_bang(int irq, void *vector, struct pt_regs *p)
+{
+	printk(KERN_INFO "Resetting ...\n");
+	mac_reset();
+}
+
+static void mac_sched_init(irqreturn_t (*vector)(int, void *, struct pt_regs *))
+{
+	via_init_clock(vector);
+}
+
+#if 0
+void mac_waitbut (void)
+{
+	;
+}
+#endif
+
+extern irqreturn_t mac_default_handler(int, void *, struct pt_regs *);
+
+irqreturn_t (*mac_handlers[8])(int, void *, struct pt_regs *)=
+{
+	mac_default_handler,
+	mac_default_handler,
+	mac_default_handler,
+	mac_default_handler,
+	mac_default_handler,
+	mac_default_handler,
+	mac_default_handler,
+	mac_default_handler
+};
+
+/*
+ * Parse a Macintosh-specific record in the bootinfo
+ */
+
+int __init mac_parse_bootinfo(const struct bi_record *record)
+{
+    int unknown = 0;
+    const u_long *data = record->data;
+
+    switch (record->tag) {
+	case BI_MAC_MODEL:
+	    mac_bi_data.id = *data;
+	    break;
+	case BI_MAC_VADDR:
+	    mac_bi_data.videoaddr = *data;
+	    break;
+	case BI_MAC_VDEPTH:
+	    mac_bi_data.videodepth = *data;
+	    break;
+	case BI_MAC_VROW:
+	    mac_bi_data.videorow = *data;
+	    break;
+	case BI_MAC_VDIM:
+	    mac_bi_data.dimensions = *data;
+	    break;
+	case BI_MAC_VLOGICAL:
+	    mac_bi_data.videological = VIDEOMEMBASE + (*data & ~VIDEOMEMMASK);
+	    mac_orig_videoaddr = *data;
+	    break;
+	case BI_MAC_SCCBASE:
+	    mac_bi_data.sccbase = *data;
+	    break;
+	case BI_MAC_BTIME:
+	    mac_bi_data.boottime = *data;
+	    break;
+	case BI_MAC_GMTBIAS:
+	    mac_bi_data.gmtbias = *data;
+	    break;
+	case BI_MAC_MEMSIZE:
+	    mac_bi_data.memsize = *data;
+	    break;
+	case BI_MAC_CPUID:
+	    mac_bi_data.cpuid = *data;
+	    break;
+        case BI_MAC_ROMBASE:
+	    mac_bi_data.rombase = *data;
+	    break;
+	default:
+	    unknown = 1;
+    }
+    return(unknown);
+}
+
+/*
+ * Flip into 24bit mode for an instant - flushes the L2 cache card. We
+ * have to disable interrupts for this. Our IRQ handlers will crap
+ * themselves if they take an IRQ in 24bit mode!
+ */
+
+static void mac_cache_card_flush(int writeback)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	via_flush_cache();
+	local_irq_restore(flags);
+}
+
+void __init config_mac(void)
+{
+	if (!MACH_IS_MAC) {
+	  printk(KERN_ERR "ERROR: no Mac, but config_mac() called!! \n");
+	}
+
+	mach_sched_init      = mac_sched_init;
+	mach_init_IRQ        = mac_init_IRQ;
+	mach_request_irq     = mac_request_irq;
+	mach_free_irq        = mac_free_irq;
+	enable_irq           = mac_enable_irq;
+	disable_irq          = mac_disable_irq;
+	mach_get_model	 = mac_get_model;
+	mach_default_handler = &mac_handlers;
+	mach_get_irq_list    = show_mac_interrupts;
+	mach_gettimeoffset   = mac_gettimeoffset;
+#warning move to adb/via init
+#if 0
+	mach_hwclk           = mac_hwclk;
+#endif
+	mach_set_clock_mmss	 = mac_set_clock_mmss;
+	mach_reset           = mac_reset;
+	mach_halt            = mac_poweroff;
+	mach_power_off       = mac_poweroff;
+#ifdef CONFIG_DUMMY_CONSOLE
+	conswitchp	         = &dummy_con;
+#endif
+	mach_max_dma_address = 0xffffffff;
+#if 0
+	mach_debug_init	 = mac_debug_init;
+#endif
+#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
+        mach_beep            = mac_mksound;
+#endif
+#ifdef CONFIG_HEARTBEAT
+#if 0
+	mach_heartbeat = mac_heartbeat;
+	mach_heartbeat_irq = IRQ_MAC_TIMER;
+#endif
+#endif
+
+	/*
+	 * Determine hardware present
+	 */
+
+	mac_identify();
+	mac_report_hardware();
+
+	/* AFAIK only the IIci takes a cache card.  The IIfx has onboard
+	   cache ... someone needs to figure out how to tell if it's on or
+	   not. */
+
+	if (macintosh_config->ident == MAC_MODEL_IICI
+	    || macintosh_config->ident == MAC_MODEL_IIFX) {
+		mach_l2_flush = mac_cache_card_flush;
+	}
+
+	/*
+	 * Check for machine specific fixups.
+	 */
+
+#ifdef OLD_NUBUS_CODE
+	 nubus_sweep_video();
+#endif
+}
+
+
+/*
+ *	Macintosh Table: hardcoded model configuration data.
+ *
+ *	Much of this was defined by Alan, based on who knows what docs.
+ *	I've added a lot more, and some of that was pure guesswork based
+ *	on hardware pages present on the Mac web site. Possibly wildly
+ *	inaccurate, so look here if a new Mac model won't run. Example: if
+ *	a Mac crashes immediately after the VIA1 registers have been dumped
+ *	to the screen, it probably died attempting to read DirB on a RBV.
+ *	Meaning it should have MAC_VIA_IIci here :-)
+ */
+
+struct mac_model *macintosh_config;
+EXPORT_SYMBOL(macintosh_config);
+
+static struct mac_model mac_data_table[]=
+{
+	/*
+	 *	We'll pretend to be a Macintosh II, that's pretty safe.
+	 */
+
+	{
+		.ident		= MAC_MODEL_II,
+		.name		= "Unknown",
+		.adb_type	= MAC_ADB_II,
+		.via_type	= MAC_VIA_II,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	},
+
+	/*
+	 *	Original MacII hardware
+	 *
+	 */
+
+	{
+		.ident		= MAC_MODEL_II,
+		.name		= "II",
+		.adb_type	= MAC_ADB_II,
+		.via_type	= MAC_VIA_II,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_IIX,
+		.name		= "IIx",
+		.adb_type	= MAC_ADB_II,
+		.via_type	= MAC_VIA_II,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_IICX,
+		.name		= "IIcx",
+		.adb_type	= MAC_ADB_II,
+		.via_type	= MAC_VIA_II,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_SE30,
+		.name		= "SE/30",
+		.adb_type	= MAC_ADB_II,
+		.via_type	= MAC_VIA_II,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	},
+
+	/*
+	 *	Weirdified MacII hardware - all subtly different. Gee thanks
+	 *	Apple. All these boxes seem to have VIA2 in a different place to
+	 *	the MacII (+1A000 rather than +4000)
+	 * CSA: see http://developer.apple.com/technotes/hw/hw_09.html
+	 */
+
+	{
+		.ident		= MAC_MODEL_IICI,
+		.name		= "IIci",
+		.adb_type	= MAC_ADB_II,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_IIFX,
+		.name		= "IIfx",
+		.adb_type	= MAC_ADB_IOP,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_IOP,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_IISI,
+		.name		= "IIsi",
+		.adb_type	= MAC_ADB_IISI,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_IIVI,
+		.name		= "IIvi",
+		.adb_type	= MAC_ADB_IISI,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_IIVX,
+		.name		= "IIvx",
+		.adb_type	= MAC_ADB_IISI,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	},
+
+	/*
+	 *	Classic models (guessing: similar to SE/30 ?? Nope, similar to LC ...)
+	 */
+
+	{
+		.ident		= MAC_MODEL_CLII,
+		.name		= "Classic II",
+		.adb_type	= MAC_ADB_IISI,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_CCL,
+		.name		= "Color Classic",
+		.adb_type	= MAC_ADB_CUDA,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS},
+
+	/*
+	 *	Some Mac LC machines. Basically the same as the IIci, ADB like IIsi
+	 */
+
+	{
+		.ident		= MAC_MODEL_LC,
+		.name		= "LC",
+		.adb_type	= MAC_ADB_IISI,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_LCII,
+		.name		= "LC II",
+		.adb_type	= MAC_ADB_IISI,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_LCIII,
+		.name		= "LC III",
+		.adb_type	= MAC_ADB_IISI,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	},
+
+	/*
+	 *	Quadra. Video is at 0xF9000000, via is like a MacII. We label it differently
+	 *	as some of the stuff connected to VIA2 seems different. Better SCSI chip and
+	 *	onboard ethernet using a NatSemi SONIC, except the 660AV and 840AV which use an
+	 *	AMD 79C940 (MACE).
+	 *	The 700, 900 and 950 have some I/O chips in the wrong place to
+	 *	confuse us. The 840AV has a SCSI location of its own (same as
+	 *	the 660AV).
+	 */
+
+	{
+		.ident		= MAC_MODEL_Q605,
+		.name		= "Quadra 605",
+		.adb_type	= MAC_ADB_CUDA,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_Q605_ACC,
+		.name		= "Quadra 605",
+		.adb_type	= MAC_ADB_CUDA,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_Q610,
+		.name		= "Quadra 610",
+		.adb_type	= MAC_ADB_II,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA,
+		.scc_type	= MAC_SCC_QUADRA,
+		.ether_type	= MAC_ETHER_SONIC,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_Q630,
+		.name		= "Quadra 630",
+		.adb_type	= MAC_ADB_CUDA,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA,
+		.ide_type	= MAC_IDE_QUADRA,
+		.scc_type	= MAC_SCC_QUADRA,
+		.ether_type	= MAC_ETHER_SONIC,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_Q650,
+		.name		= "Quadra 650",
+		.adb_type	= MAC_ADB_II,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA,
+		.scc_type	= MAC_SCC_QUADRA,
+		.ether_type	= MAC_ETHER_SONIC,
+		.nubus_type	= MAC_NUBUS
+	},
+	/*	The Q700 does have a NS Sonic */
+	{
+		.ident		= MAC_MODEL_Q700,
+		.name		= "Quadra 700",
+		.adb_type	= MAC_ADB_II,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA2,
+		.scc_type	= MAC_SCC_QUADRA,
+		.ether_type	= MAC_ETHER_SONIC,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_Q800,
+		.name		= "Quadra 800",
+		.adb_type	= MAC_ADB_II,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA,
+		.scc_type	= MAC_SCC_QUADRA,
+		.ether_type	= MAC_ETHER_SONIC,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_Q840,
+		.name		= "Quadra 840AV",
+		.adb_type	= MAC_ADB_CUDA,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA3,
+		.scc_type	= MAC_SCC_PSC,
+		.ether_type	= MAC_ETHER_MACE,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_Q900,
+		.name		= "Quadra 900",
+		.adb_type	= MAC_ADB_IOP,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA2,
+		.scc_type	= MAC_SCC_IOP,
+		.ether_type	= MAC_ETHER_SONIC,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_Q950,
+		.name		= "Quadra 950",
+		.adb_type	= MAC_ADB_IOP,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA2,
+		.scc_type	= MAC_SCC_IOP,
+		.ether_type	= MAC_ETHER_SONIC,
+		.nubus_type	= MAC_NUBUS
+	},
+
+	/*
+	 *	Performa - more LC type machines
+	 */
+
+	{
+		.ident		= MAC_MODEL_P460,
+		.name		=  "Performa 460",
+		.adb_type	= MAC_ADB_IISI,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_P475,
+		.name		=  "Performa 475",
+		.adb_type	= MAC_ADB_CUDA,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_P475F,
+		.name		=  "Performa 475",
+		.adb_type	= MAC_ADB_CUDA,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_P520,
+		.name		=  "Performa 520",
+		.adb_type	= MAC_ADB_CUDA,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_P550,
+		.name		=  "Performa 550",
+		.adb_type	= MAC_ADB_CUDA,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	},
+	/* These have the comm slot, and therefore the possibility of SONIC ethernet */
+	{
+		.ident		= MAC_MODEL_P575,
+		.name		= "Performa 575",
+		.adb_type	= MAC_ADB_CUDA,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA,
+		.scc_type	= MAC_SCC_II,
+		.ether_type	= MAC_ETHER_SONIC,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_P588,
+		.name		= "Performa 588",
+		.adb_type	= MAC_ADB_CUDA,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA,
+		.ide_type	= MAC_IDE_QUADRA,
+		.scc_type	= MAC_SCC_II,
+		.ether_type	= MAC_ETHER_SONIC,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_TV,
+		.name		= "TV",
+		.adb_type	= MAC_ADB_CUDA,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_P600,
+		.name		= "Performa 600",
+		.adb_type	= MAC_ADB_IISI,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_II,
+		.nubus_type	= MAC_NUBUS
+	},
+
+	/*
+	 *	Centris - just guessing again; maybe like Quadra
+	 */
+
+	/* The C610 may or may not have SONIC.  We probe to make sure */
+	{
+		.ident		= MAC_MODEL_C610,
+		.name		= "Centris 610",
+		.adb_type	= MAC_ADB_II,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA,
+		.scc_type	= MAC_SCC_QUADRA,
+		.ether_type	= MAC_ETHER_SONIC,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_C650,
+		.name		= "Centris 650",
+		.adb_type	= MAC_ADB_II,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA,
+		.scc_type	= MAC_SCC_QUADRA,
+		.ether_type	= MAC_ETHER_SONIC,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_C660,
+		.name		= "Centris 660AV",
+		.adb_type	= MAC_ADB_CUDA,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_QUADRA3,
+		.scc_type	= MAC_SCC_PSC,
+		.ether_type	= MAC_ETHER_MACE,
+		.nubus_type	= MAC_NUBUS
+	},
+
+	/*
+	 * The PowerBooks all use the same "Combo" custom IC for SCSI and SCC,
+	 * and a PMU (in two variations?) for ADB. Most of them use the
+	 * Quadra-style VIAs. A few models also have IDE from hell.
+	 */
+
+	{
+		.ident		= MAC_MODEL_PB140,
+		.name		= "PowerBook 140",
+		.adb_type	= MAC_ADB_PB1,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB145,
+		.name		= "PowerBook 145",
+		.adb_type	= MAC_ADB_PB1,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB150,
+		.name		= "PowerBook 150",
+		.adb_type	= MAC_ADB_PB1,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.ide_type	= MAC_IDE_PB,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB160,
+		.name		= "PowerBook 160",
+		.adb_type	= MAC_ADB_PB1,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB165,
+		.name		= "PowerBook 165",
+		.adb_type	= MAC_ADB_PB1,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB165C,
+		.name		= "PowerBook 165c",
+		.adb_type	= MAC_ADB_PB1,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB170,
+		.name		= "PowerBook 170",
+		.adb_type	= MAC_ADB_PB1,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB180,
+		.name		= "PowerBook 180",
+		.adb_type	= MAC_ADB_PB1,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB180C,
+		.name		= "PowerBook 180c",
+		.adb_type	= MAC_ADB_PB1,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB190,
+		.name		= "PowerBook 190",
+		.adb_type	= MAC_ADB_PB2,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_OLD,
+		.ide_type	= MAC_IDE_BABOON,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB520,
+		.name		= "PowerBook 520",
+		.adb_type	= MAC_ADB_PB2,
+		.via_type	= MAC_VIA_QUADRA,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.ether_type	= MAC_ETHER_SONIC,
+		.nubus_type	= MAC_NUBUS
+	},
+
+	/*
+	 * PowerBook Duos are pretty much like normal PowerBooks.
+	 * All of these probably have onboard SONIC in the Dock, which
+	 * means we'll have to probe for it eventually.
+	 *
+	 * Are these really MAC_VIA_IIci? The developer notes for the
+	 * Duos show pretty much the same custom parts as in most of
+	 * the other PowerBooks which would imply MAC_VIA_QUADRA.
+	 */
+
+	{
+		.ident		= MAC_MODEL_PB210,
+		.name		= "PowerBook Duo 210",
+		.adb_type	= MAC_ADB_PB2,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB230,
+		.name		= "PowerBook Duo 230",
+		.adb_type	= MAC_ADB_PB2,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB250,
+		.name		= "PowerBook Duo 250",
+		.adb_type	= MAC_ADB_PB2,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB270C,
+		.name		= "PowerBook Duo 270c",
+		.adb_type	= MAC_ADB_PB2,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB280,
+		.name		= "PowerBook Duo 280",
+		.adb_type	= MAC_ADB_PB2,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	}, {
+		.ident		= MAC_MODEL_PB280C,
+		.name		= "PowerBook Duo 280c",
+		.adb_type	= MAC_ADB_PB2,
+		.via_type	= MAC_VIA_IIci,
+		.scsi_type	= MAC_SCSI_OLD,
+		.scc_type	= MAC_SCC_QUADRA,
+		.nubus_type	= MAC_NUBUS
+	},
+
+	/*
+	 *	Other stuff ??
+	 */
+	{
+		.ident		= -1
+	}
+};
+
+void mac_identify(void)
+{
+	struct mac_model *m;
+
+	/* Penguin data useful? */
+	int model = mac_bi_data.id;
+	if (!model) {
+		/* no bootinfo model id -> NetBSD booter was used! */
+		/* XXX FIXME: breaks for model > 31 */
+		model = (mac_bi_data.cpuid >> 2) & 63;
+		printk (KERN_WARNING "No bootinfo model ID, using cpuid instead (hey, use Penguin!)\n");
+	}
+
+	macintosh_config = mac_data_table;
+	for (m = macintosh_config ; m->ident != -1 ; m++) {
+		if (m->ident == model) {
+			macintosh_config = m;
+			break;
+		}
+	}
+
+	/* We need to pre-init the IOPs, if any. Otherwise */
+	/* the serial console won't work if the user had   */
+	/* the serial ports set to "Faster" mode in MacOS. */
+
+	iop_preinit();
+	mac_debug_init();
+
+	printk (KERN_INFO "Detected Macintosh model: %d \n", model);
+
+	/*
+	 * Report booter data:
+	 */
+	printk (KERN_DEBUG " Penguin bootinfo data:\n");
+	printk (KERN_DEBUG " Video: addr 0x%lx row 0x%lx depth %lx dimensions %ld x %ld\n",
+		mac_bi_data.videoaddr, mac_bi_data.videorow,
+		mac_bi_data.videodepth, mac_bi_data.dimensions & 0xFFFF,
+		mac_bi_data.dimensions >> 16);
+	printk (KERN_DEBUG " Videological 0x%lx phys. 0x%lx, SCC at 0x%lx \n",
+		mac_bi_data.videological, mac_orig_videoaddr,
+		mac_bi_data.sccbase);
+	printk (KERN_DEBUG " Boottime: 0x%lx GMTBias: 0x%lx \n",
+		mac_bi_data.boottime, mac_bi_data.gmtbias);
+	printk (KERN_DEBUG " Machine ID: %ld CPUid: 0x%lx memory size: 0x%lx \n",
+		mac_bi_data.id, mac_bi_data.cpuid, mac_bi_data.memsize);
+#if 0
+	printk ("Ramdisk: addr 0x%lx size 0x%lx\n",
+		m68k_ramdisk.addr, m68k_ramdisk.size);
+#endif
+
+	/*
+	 * TODO: set the various fields in macintosh_config->hw_present here!
+	 */
+	switch (macintosh_config->scsi_type) {
+	case MAC_SCSI_OLD:
+		MACHW_SET(MAC_SCSI_80);
+		break;
+	case MAC_SCSI_QUADRA:
+	case MAC_SCSI_QUADRA2:
+	case MAC_SCSI_QUADRA3:
+		MACHW_SET(MAC_SCSI_96);
+		if ((macintosh_config->ident == MAC_MODEL_Q900) ||
+		    (macintosh_config->ident == MAC_MODEL_Q950))
+			MACHW_SET(MAC_SCSI_96_2);
+		break;
+	default:
+		printk(KERN_WARNING "config.c: wtf: unknown scsi, using 53c80\n");
+		MACHW_SET(MAC_SCSI_80);
+		break;
+	}
+	iop_init();
+	via_init();
+	oss_init();
+	psc_init();
+	baboon_init();
+}
+
+void mac_report_hardware(void)
+{
+	printk(KERN_INFO "Apple Macintosh %s\n", macintosh_config->name);
+}
+
+static void mac_get_model(char *str)
+{
+	strcpy(str,"Macintosh ");
+	strcat(str, macintosh_config->name);
+}
diff --git a/arch/m68k/mac/debug.c b/arch/m68k/mac/debug.c
new file mode 100644
index 0000000..cc62ed6
--- /dev/null
+++ b/arch/m68k/mac/debug.c
@@ -0,0 +1,398 @@
+/*
+ * linux/arch/m68k/mac/debug.c
+ *
+ * Shamelessly stolen (SCC code and general framework) from:
+ *
+ * linux/arch/m68k/atari/debug.c
+ *
+ * Atari debugging and serial console stuff
+ *
+ * Assembled of parts of former atari/config.c 97-12-18 by Roman Hodek
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#define BOOTINFO_COMPAT_1_0
+#include <asm/setup.h>
+#include <asm/bootinfo.h>
+#include <asm/machw.h>
+#include <asm/macints.h>
+
+extern char m68k_debug_device[];
+
+extern struct compat_bootinfo compat_boot_info;
+
+extern unsigned long mac_videobase;
+extern unsigned long mac_videodepth;
+extern unsigned long mac_rowbytes;
+
+extern void mac_serial_print(const char *);
+
+#define DEBUG_HEADS
+#undef DEBUG_SCREEN
+#define DEBUG_SERIAL
+
+/*
+ * These two auxiliary debug functions should go away ASAP. Their only use is
+ * before the console output is up (after head.S, some other crucial setup
+ * routines still have to run :-): they permit writing 'data' to the screen as
+ * bit patterns (good luck reading those). They helped to figure out that the
+ * bootinfo contained garbage data on the amount and size of memory chunks ...
+ *
+ * The 'pos' argument now simply means 'linefeed after print' ...
+ */
+
+#ifdef DEBUG_SCREEN
+static int peng=0, line=0;
+#endif
+
+void mac_debugging_short(int pos, short num)
+{
+#ifdef DEBUG_SCREEN
+	unsigned char *pengoffset;
+	unsigned char *pptr;
+	int i;
+#endif
+
+#ifdef DEBUG_SERIAL
+	printk("debug: %d !\n", num);
+#endif
+
+#ifdef DEBUG_SCREEN
+	if (!MACH_IS_MAC) {
+		/* printk("debug: %d !\n", num); */
+		return;
+	}
+
+	/* calculate current offset */
+	pengoffset=(unsigned char *)(mac_videobase+(150+line*2)*mac_rowbytes)
+		    +80*peng;
+
+	pptr=pengoffset;
+
+	for(i=0;i<8*sizeof(short);i++) /* # of bits */
+	{
+		/*        value        mask for bit i, reverse order */
+		*pptr++ = (num & ( 1 << (8*sizeof(short)-i-1) ) ? 0xFF : 0x00);
+	}
+
+	peng++;
+
+	if (pos) {
+		line++;
+		peng = 0;
+	}
+#endif
+}
+
+void mac_debugging_long(int pos, long addr)
+{
+#ifdef DEBUG_SCREEN
+	unsigned char *pengoffset;
+	unsigned char *pptr;
+	int i;
+#endif
+
+#ifdef DEBUG_SERIAL
+	printk("debug: #%ld !\n", addr);
+#endif
+
+#ifdef DEBUG_SCREEN
+	if (!MACH_IS_MAC) {
+		/* printk("debug: #%ld !\n", addr); */
+		return;
+	}
+
+	pengoffset=(unsigned char *)(mac_videobase+(150+line*2)*mac_rowbytes)
+		    +80*peng;
+
+	pptr=pengoffset;
+
+	for(i=0;i<8*sizeof(long);i++) /* # of bits */
+	{
+		*pptr++ = (addr & ( 1 << (8*sizeof(long)-i-1) ) ? 0xFF : 0x00);
+	}
+
+	peng++;
+
+	if (pos) {
+		line++;
+		peng = 0;
+	}
+#endif
+}
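+
+/*
+ * Hypothetical usage of the two helpers above, for illustration only
+ * ("chunk_count" and "chunk_addr" are invented names):
+ *
+ *	mac_debugging_short(0, chunk_count);	stay on the same line
+ *	mac_debugging_long(1, chunk_addr);	linefeed after printing
+ */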
+
+#ifdef DEBUG_SERIAL
+/*
+ * TODO: serial debug code
+ */
+
+struct mac_SCC
+ {
+  u_char cha_b_ctrl;
+  u_char char_dummy1;
+  u_char cha_a_ctrl;
+  u_char char_dummy2;
+  u_char cha_b_data;
+  u_char char_dummy3;
+  u_char cha_a_data;
+ };
+
+# define scc (*((volatile struct mac_SCC*)mac_bi_data.sccbase))
+
+/* Flag that serial port is already initialized and used */
+int mac_SCC_init_done;
+/* Can be set somewhere, if an SCC master reset has already been done and
+ * should not be repeated; used by kgdb */
+int mac_SCC_reset_done;
+
+static int scc_port = -1;
+
+static struct console mac_console_driver = {
+	.name =		"debug",
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+/*
+ * Crude hack to get console output to the screen before the framebuffer
+ * is initialized (happens a lot later in 2.1!).
+ * We just use the console routines declared in head.S; this will interfere
+ * with regular framebuffer console output and should be used exclusively
+ * to debug kernel problems manifesting before framebuffer init (aka WSOD).
+ *
+ * To keep this hack from interfering with the regular console driver, either
+ * deregister this driver before/on framebuffer console init, or silence this
+ * function after the fbcon driver is running (will lose console messages!?).
+ * To debug really early bugs, we would need to write a 'mac_register_console_hack()'
+ * that is called from start_kernel() before setup_arch() and just registers
+ * this driver on Macs.
+ */
+
+void mac_debug_console_write (struct console *co, const char *str,
+			      unsigned int count)
+{
+	mac_serial_print(str);
+}
+
+
+
+/* Mac: loops_per_jiffy min. 19000 ^= .5 us; MFPDELAY was 0.6 us*/
+
+#define uSEC 1
+
+static inline void mac_sccb_out (char c)
+{
+    int i;
+    do {
+	for( i = uSEC; i > 0; --i )
+		barrier();
+    } while (!(scc.cha_b_ctrl & 0x04)); /* wait for tx buf empty */
+    for( i = uSEC; i > 0; --i )
+	barrier();
+    scc.cha_b_data = c;
+}
+
+static inline void mac_scca_out (char c)
+{
+    int i;
+    do {
+	for( i = uSEC; i > 0; --i )
+		barrier();
+    } while (!(scc.cha_a_ctrl & 0x04)); /* wait for tx buf empty */
+    for( i = uSEC; i > 0; --i )
+	barrier();
+    scc.cha_a_data = c;
+}
+
+void mac_sccb_console_write (struct console *co, const char *str,
+			      unsigned int count)
+{
+    while (count--) {
+	if (*str == '\n')
+	    mac_sccb_out( '\r' );
+	mac_sccb_out( *str++ );
+    }
+}
+
+void mac_scca_console_write (struct console *co, const char *str,
+			      unsigned int count)
+{
+    while (count--) {
+	if (*str == '\n')
+	    mac_scca_out( '\r' );
+	mac_scca_out( *str++ );
+    }
+}
+
+
+/* The following does a quick'n'dirty initialization of the SCC serial ports.
+ * It's used by the debugging interface, kgdb, and the serial console code. */
+#define SCCB_WRITE(reg,val)				\
+    do {						\
+	int i;						\
+	scc.cha_b_ctrl = (reg);				\
+	for( i = uSEC; i > 0; --i )			\
+		barrier();				\
+	scc.cha_b_ctrl = (val);				\
+	for( i = uSEC; i > 0; --i )			\
+		barrier();				\
+    } while(0)
+
+#define SCCA_WRITE(reg,val)				\
+    do {						\
+	int i;						\
+	scc.cha_a_ctrl = (reg);				\
+	for( i = uSEC; i > 0; --i )			\
+		barrier();				\
+	scc.cha_a_ctrl = (val);				\
+	for( i = uSEC; i > 0; --i )			\
+		barrier();				\
+    } while(0)
+
+/* loops_per_jiffy isn't initialized yet, so we can't use udelay(). This does a
+ * delay of ~ 60us. */
+/* Mac: loops_per_jiffy min. 19000 ^= .5 us; MFPDELAY was 0.6 us*/
+#define LONG_DELAY()				\
+    do {					\
+	int i;					\
+	for( i = 60*uSEC; i > 0; --i )		\
+	    barrier();				\
+    } while(0)
+
+#ifndef CONFIG_SERIAL_CONSOLE
+static void __init mac_init_scc_port( int cflag, int port )
+#else
+void mac_init_scc_port( int cflag, int port )
+#endif
+{
+	extern int mac_SCC_reset_done;
+
+	/*
+	 * baud rates: 1200, 1800, 2400, 4800, 9600, 19.2k, 38.4k, 57.6k, 115.2k
+	 */
+
+	static int clksrc_table[9] =
+		/* reg 11: 0x50 = BRG, 0x00 = RTxC, 0x28 = TRxC */
+		{ 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x00, 0x00 };
+	static int clkmode_table[9] =
+		/* reg 4: 0x40 = x16, 0x80 = x32, 0xc0 = x64 */
+		{ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0xc0, 0x80 };
+	static int div_table[9] =
+		/* reg12 (BRG low) */
+		{ 94, 62, 46, 22, 10, 4, 1, 0, 0 };
+
+    int baud = cflag & CBAUD;
+    int clksrc, clkmode, div, reg3, reg5;
+
+    if (cflag & CBAUDEX)
+	baud += B38400;
+    if (baud < B1200 || baud > B38400+2)
+	baud = B9600; /* use default 9600bps for non-implemented rates */
+    baud -= B1200; /* tables start at 1200bps */
+
+    clksrc  = clksrc_table[baud];
+    clkmode = clkmode_table[baud];
+    div     = div_table[baud];
+
+    reg3 = (((cflag & CSIZE) == CS8) ? 0xc0 : 0x40);
+    reg5 = (((cflag & CSIZE) == CS8) ? 0x60 : 0x20) | 0x82 /* assert DTR/RTS */;
+
+    if (port == 1) {
+	    (void)scc.cha_b_ctrl;	/* reset reg pointer */
+	    SCCB_WRITE( 9, 0xc0 );	/* reset */
+	    LONG_DELAY();		/* extra delay after WR9 access */
+	    SCCB_WRITE( 4, ((cflag & PARENB) ? ((cflag & PARODD) ? 0x01 : 0x03) : 0) |
+			  0x04 /* 1 stopbit */ |
+			  clkmode );
+	    SCCB_WRITE( 3, reg3 );
+	    SCCB_WRITE( 5, reg5 );
+	    SCCB_WRITE( 9, 0 );		/* no interrupts */
+	    LONG_DELAY();		/* extra delay after WR9 access */
+	    SCCB_WRITE( 10, 0 );	/* NRZ mode */
+	    SCCB_WRITE( 11, clksrc );	/* main clock source */
+	    SCCB_WRITE( 12, div );	/* BRG value */
+	    SCCB_WRITE( 13, 0 );		/* BRG high byte */
+	    SCCB_WRITE( 14, 1 );
+	    SCCB_WRITE( 3, reg3 | 1 );
+	    SCCB_WRITE( 5, reg5 | 8 );
+    } else if (port == 0) {
+	    (void)scc.cha_a_ctrl;	/* reset reg pointer */
+	    SCCA_WRITE( 9, 0xc0 );	/* reset */
+	    LONG_DELAY();		/* extra delay after WR9 access */
+	    SCCA_WRITE( 4, ((cflag & PARENB) ? ((cflag & PARODD) ? 0x01 : 0x03) : 0) |
+			  0x04 /* 1 stopbit */ |
+			  clkmode );
+	    SCCA_WRITE( 3, reg3 );
+	    SCCA_WRITE( 5, reg5 );
+	    SCCA_WRITE( 9, 0 );		/* no interrupts */
+	    LONG_DELAY();		/* extra delay after WR9 access */
+	    SCCA_WRITE( 10, 0 );	/* NRZ mode */
+	    SCCA_WRITE( 11, clksrc );	/* main clock source */
+	    SCCA_WRITE( 12, div );	/* BRG value */
+	    SCCA_WRITE( 13, 0 );		/* BRG high byte */
+	    SCCA_WRITE( 14, 1 );
+	    SCCA_WRITE( 3, reg3 | 1 );
+	    SCCA_WRITE( 5, reg5 | 8 );
+    }
+
+    mac_SCC_reset_done = 1;
+    mac_SCC_init_done = 1;
+}
+#endif /* DEBUG_SERIAL */
+
+void mac_init_scca_port( int cflag )
+{
+	mac_init_scc_port(cflag, 0);
+}
+
+void mac_init_sccb_port( int cflag )
+{
+	mac_init_scc_port(cflag, 1);
+}
+
+void __init mac_debug_init(void)
+{
+#ifdef DEBUG_SERIAL
+    if (   !strcmp( m68k_debug_device, "ser"  )
+        || !strcmp( m68k_debug_device, "ser1" )) {
+	/* Mac modem port */
+	mac_init_scc_port( B9600|CS8, 0 );
+	mac_console_driver.write = mac_scca_console_write;
+	scc_port = 0;
+    }
+    else if (!strcmp( m68k_debug_device, "ser2" )) {
+	/* Mac printer port */
+	mac_init_scc_port( B9600|CS8, 1 );
+	mac_console_driver.write = mac_sccb_console_write;
+	scc_port = 1;
+    }
+#endif
+#ifdef DEBUG_HEADS
+    if (   !strcmp( m68k_debug_device, "scn"  )
+        || !strcmp( m68k_debug_device, "con" )) {
+	/* display, using head.S console routines */
+	mac_console_driver.write = mac_debug_console_write;
+    }
+#endif
+    if (mac_console_driver.write)
+	register_console(&mac_console_driver);
+}
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  tab-width: 8
+ * End:
+ */
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
new file mode 100644
index 0000000..d889ba8
--- /dev/null
+++ b/arch/m68k/mac/iop.c
@@ -0,0 +1,714 @@
+/*
+ * I/O Processor (IOP) management
+ * Written and (C) 1999 by Joshua M. Thompson (funaho@jurai.org)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice and this list of conditions.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice and this list of conditions in the documentation and/or other
+ *    materials provided with the distribution.
+ */
+
+/*
+ * The IOP chips are used in the IIfx and some Quadras (900, 950) to manage
+ * serial and ADB. They are actually a 6502 processor and some glue logic.
+ *
+ * 990429 (jmt) - Initial implementation, just enough to knock the SCC IOP
+ *		  into compatible mode so nobody has to fiddle with the
+ *		  Serial Switch control panel anymore.
+ * 990603 (jmt) - Added code to grab the correct ISM IOP interrupt for OSS
+ *		  and non-OSS machines (at least I hope it's correct on a
+ *		  non-OSS machine -- someone with a Q900 or Q950 needs to
+ *		  check this.)
+ * 990605 (jmt) - Rearranged things a bit wrt IOP detection; iop_present is
+ *		  gone, IOP base addresses are now in an array and the
+ *		  globally-visible functions take an IOP number instead of an
+ *		  actual base address.
+ * 990610 (jmt) - Finished the message passing framework and it seems to work.
+ *		  Sending _definitely_ works; my adb-bus.c mods can send
+ *		  messages and receive the MSG_COMPLETED status back from the
+ *		  IOP. The trick now is figuring out the message formats.
+ * 990611 (jmt) - More cleanups. Fixed problem where unclaimed messages on a
+ *		  receive channel were never properly acknowledged. Bracketed
+ *		  the remaining debug printk's with #ifdef's and disabled
+ *		  debugging. I can now type on the console.
+ * 990612 (jmt) - Copyright notice added. Reworked the way replies are handled.
+ *		  It turns out that replies are placed back in the send buffer
+ *		  for that channel; messages on the receive channels are always
+ *		  unsolicited messages from the IOP (and our replies to them
+ *		  should go back in the receive channel.) Also added tracking
+ *		  of device names to the listener functions ala the interrupt
+ *		  handlers.
+ * 990729 (jmt) - Added passing of pt_regs structure to IOP handlers. This is
+ *		  used by the new unified ADB driver.
+ *
+ * TODO:
+ *
+ * o Something should be periodically checking iop_alive() to make sure the
+ *   IOP hasn't died.
+ * o Some of the IOP manager routines need better error checking and
+ *   return codes. Nothing major, just prettying up.
+ */
+
+/*
+ * -----------------------
+ * IOP Message Passing 101
+ * -----------------------
+ *
+ * The host talks to the IOPs using a rather simple message-passing scheme via
+ * a shared memory area in the IOP RAM. Each IOP has seven "channels"; each
+ * channel is connected to a specific software driver on the IOP. For example
+ * on the SCC IOP there is one channel for each serial port. Each channel has
+ * an incoming and an outgoing message queue with a depth of one.
+ *
+ * A message is 32 bytes plus a state byte for the channel (MSG_IDLE, MSG_NEW,
+ * MSG_RCVD, MSG_COMPLETE). To send a message you copy the message into the
+ * buffer, set the state to MSG_NEW and signal the IOP by setting the IRQ flag
+ * in the IOP control to 1. The IOP will move the state to MSG_RCVD when it
+ * receives the message and then to MSG_COMPLETE when the message processing
+ * has completed. It is the host's responsibility at that point to read the
+ * reply back out of the send channel buffer and reset the channel state back
+ * to MSG_IDLE.
+ *
+ * To receive a message from the IOP the same procedure is used, except that the
+ * roles are reversed. That is, the IOP puts a message in the channel with a state
+ * of MSG_NEW, and the host receives the message and moves its state to MSG_RCVD
+ * and then to MSG_COMPLETE when processing is completed and the reply (if any)
+ * has been placed back in the receive channel. The IOP will then reset the
+ * channel state to MSG_IDLE.
+ *
+ * Two sets of host interrupts are provided, INT0 and INT1. Both appear on one
+ * interrupt level; they are distinguished by a pair of bits in the IOP status
+ * register. The IOP will raise INT0 when one or more messages in the send
+ * channels have gone to the MSG_COMPLETE state and it will raise INT1 when one
+ * or more messages on the receive channels have gone to the MSG_NEW state.
+ *
+ * Since each channel handles only one message we have to implement a small
+ * interrupt-driven queue on our end. Messages to be sent are placed on the
+ * queue for sending and contain a pointer to an optional callback function.
+ * The handler for a message is called when the message state goes to
+ * MSG_COMPLETE.
+ *
+ * For receiving messages we maintain a list of handler functions to call when
+ * a message is received on that IOP/channel combination. The handlers are
+ * called much like an interrupt handler and are passed a copy of the message
+ * from the IOP. The message state will be in MSG_RCVD while the handler runs;
+ * it is the handler's responsibility to call iop_complete_message() when
+ * finished; this function moves the message state to MSG_COMPLETE and signals
+ * the IOP. This two-step process is provided to allow the handler to defer
+ * message processing to a bottom-half handler if the processing will take
+ * a significant amount of time (handlers are called at interrupt time so they
+ * should execute quickly.)
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/interrupt.h>
+
+#include <asm/bootinfo.h>
+#include <asm/macintosh.h>
+#include <asm/macints.h>
+#include <asm/mac_iop.h>
+#include <asm/mac_oss.h>
+
+/*#define DEBUG_IOP*/
+
+/* Set to nonzero if the IOPs are present. Set by iop_preinit(). */
+
+int iop_scc_present,iop_ism_present;
+
+#ifdef CONFIG_PROC_FS
+static int iop_get_proc_info(char *, char **, off_t, int);
+#endif /* CONFIG_PROC_FS */
+
+/* structure for tracking channel listeners */
+
+struct listener {
+	const char *devname;
+	void (*handler)(struct iop_msg *, struct pt_regs *);
+};
+
+/*
+ * IOP structures for the two IOPs
+ *
+ * The SCC IOP controls both serial ports (A and B) as its two functions.
+ * The ISM IOP controls the SWIM (floppy drive) and ADB.
+ */
+
+static volatile struct mac_iop *iop_base[NUM_IOPS];
+
+/*
+ * IOP message queues
+ */
+
+static struct iop_msg iop_msg_pool[NUM_IOP_MSGS];
+static struct iop_msg *iop_send_queue[NUM_IOPS][NUM_IOP_CHAN];
+static struct listener iop_listeners[NUM_IOPS][NUM_IOP_CHAN];
+
+irqreturn_t iop_ism_irq(int, void *, struct pt_regs *);
+
+extern void oss_irq_enable(int);
+
+/*
+ * Private access functions
+ */
+
+static __inline__ void iop_loadaddr(volatile struct mac_iop *iop, __u16 addr)
+{
+	iop->ram_addr_lo = addr;
+	iop->ram_addr_hi = addr >> 8;
+}
+
+static __inline__ __u8 iop_readb(volatile struct mac_iop *iop, __u16 addr)
+{
+	iop->ram_addr_lo = addr;
+	iop->ram_addr_hi = addr >> 8;
+	return iop->ram_data;
+}
+
+static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8 data)
+{
+	iop->ram_addr_lo = addr;
+	iop->ram_addr_hi = addr >> 8;
+	iop->ram_data = data;
+}
+
+static __inline__ void iop_stop(volatile struct mac_iop *iop)
+{
+	iop->status_ctrl &= ~IOP_RUN;
+}
+
+static __inline__ void iop_start(volatile struct mac_iop *iop)
+{
+	iop->status_ctrl = IOP_RUN | IOP_AUTOINC;
+}
+
+static __inline__ void iop_bypass(volatile struct mac_iop *iop)
+{
+	iop->status_ctrl |= IOP_BYPASS;
+}
+
+static __inline__ void iop_interrupt(volatile struct mac_iop *iop)
+{
+	iop->status_ctrl |= IOP_IRQ;
+}
+
+static int iop_alive(volatile struct mac_iop *iop)
+{
+	int retval;
+
+	retval = (iop_readb(iop, IOP_ADDR_ALIVE) == 0xFF);
+	iop_writeb(iop, IOP_ADDR_ALIVE, 0);
+	return retval;
+}
+
+static struct iop_msg *iop_alloc_msg(void)
+{
+	int i;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	for (i = 0 ; i < NUM_IOP_MSGS ; i++) {
+		if (iop_msg_pool[i].status == IOP_MSGSTATUS_UNUSED) {
+			iop_msg_pool[i].status = IOP_MSGSTATUS_WAITING;
+			local_irq_restore(flags);
+			return &iop_msg_pool[i];
+		}
+	}
+
+	local_irq_restore(flags);
+	return NULL;
+}
+
+static void iop_free_msg(struct iop_msg *msg)
+{
+	msg->status = IOP_MSGSTATUS_UNUSED;
+}
+
+/*
+ * This is called by the startup code before anything else. Its purpose
+ * is to find and initialize the IOPs early in the boot sequence, so that
+ * the serial IOP can be placed into bypass mode _before_ we try to
+ * initialize the serial console.
+ */
+
+void __init iop_preinit(void)
+{
+	if (macintosh_config->scc_type == MAC_SCC_IOP) {
+		if (macintosh_config->ident == MAC_MODEL_IIFX) {
+			iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_IIFX;
+		} else {
+			iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_QUADRA;
+		}
+		iop_base[IOP_NUM_SCC]->status_ctrl = 0x87;
+		iop_scc_present = 1;
+	} else {
+		iop_base[IOP_NUM_SCC] = NULL;
+		iop_scc_present = 0;
+	}
+	if (macintosh_config->adb_type == MAC_ADB_IOP) {
+		if (macintosh_config->ident == MAC_MODEL_IIFX) {
+			iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_IIFX;
+		} else {
+			iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA;
+		}
+		iop_base[IOP_NUM_ISM]->status_ctrl = 0;
+		iop_ism_present = 1;
+	} else {
+		iop_base[IOP_NUM_ISM] = NULL;
+		iop_ism_present = 0;
+	}
+}
+
+/*
+ * Initialize the IOPs, if present.
+ */
+
+void __init iop_init(void)
+{
+	int i;
+
+	if (iop_scc_present) {
+		printk("IOP: detected SCC IOP at %p\n", iop_base[IOP_NUM_SCC]);
+	}
+	if (iop_ism_present) {
+		printk("IOP: detected ISM IOP at %p\n", iop_base[IOP_NUM_ISM]);
+		iop_start(iop_base[IOP_NUM_ISM]);
+		iop_alive(iop_base[IOP_NUM_ISM]); /* clears the alive flag */
+	}
+
+	/* Make the whole pool available and empty the queues */
+
+	for (i = 0 ; i < NUM_IOP_MSGS ; i++) {
+		iop_msg_pool[i].status = IOP_MSGSTATUS_UNUSED;
+	}
+
+	for (i = 0 ; i < NUM_IOP_CHAN ; i++) {
+		iop_send_queue[IOP_NUM_SCC][i] = 0;
+		iop_send_queue[IOP_NUM_ISM][i] = 0;
+		iop_listeners[IOP_NUM_SCC][i].devname = NULL;
+		iop_listeners[IOP_NUM_SCC][i].handler = NULL;
+		iop_listeners[IOP_NUM_ISM][i].devname = NULL;
+		iop_listeners[IOP_NUM_ISM][i].handler = NULL;
+	}
+
+#if 0	/* Crashing in 2.4 now, not yet sure why.   --jmt */
+#ifdef CONFIG_PROC_FS
+	create_proc_info_entry("mac_iop", 0, &proc_root, iop_get_proc_info);
+#endif
+#endif
+}
+
+/*
+ * Register the interrupt handler for the IOPs.
+ * TODO: might be wrong for non-OSS machines. Anyone?
+ */
+
+void __init iop_register_interrupts(void)
+{
+	if (iop_ism_present) {
+		if (oss_present) {
+			cpu_request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq,
+					IRQ_FLG_LOCK, "ISM IOP",
+					(void *) IOP_NUM_ISM);
+			oss_irq_enable(IRQ_MAC_ADB);
+		} else {
+			request_irq(IRQ_VIA2_0, iop_ism_irq,
+					IRQ_FLG_LOCK|IRQ_FLG_FAST, "ISM IOP",
+					(void *) IOP_NUM_ISM);
+		}
+		if (!iop_alive(iop_base[IOP_NUM_ISM])) {
+			printk("IOP: oh my god, they killed the ISM IOP!\n");
+		} else {
+			printk("IOP: the ISM IOP seems to be alive.\n");
+		}
+	}
+}
+
+/*
+ * Register or unregister a listener for a specific IOP and channel
+ *
+ * If the handler pointer is NULL the current listener (if any) is
+ * unregistered. Otherwise the new listener is registered provided
+ * there is no existing listener registered.
+ */
+
+int iop_listen(uint iop_num, uint chan,
+		void (*handler)(struct iop_msg *, struct pt_regs *),
+		const char *devname)
+{
+	if ((iop_num >= NUM_IOPS) || !iop_base[iop_num]) return -EINVAL;
+	if (chan >= NUM_IOP_CHAN) return -EINVAL;
+	if (iop_listeners[iop_num][chan].handler && handler) return -EINVAL;
+	iop_listeners[iop_num][chan].devname = devname;
+	iop_listeners[iop_num][chan].handler = handler;
+	return 0;
+}
+
+/*
+ * Complete reception of a message, which just means copying the reply
+ * into the buffer, setting the channel state to MSG_COMPLETE and
+ * notifying the IOP.
+ */
+
+void iop_complete_message(struct iop_msg *msg)
+{
+	int iop_num = msg->iop_num;
+	int chan = msg->channel;
+	int i,offset;
+
+#ifdef DEBUG_IOP
+	printk("iop_complete(%p): iop %d chan %d\n", msg, msg->iop_num, msg->channel);
+#endif
+
+	offset = IOP_ADDR_RECV_MSG + (msg->channel * IOP_MSG_LEN);
+
+	for (i = 0 ; i < IOP_MSG_LEN ; i++, offset++) {
+		iop_writeb(iop_base[iop_num], offset, msg->reply[i]);
+	}
+
+	iop_writeb(iop_base[iop_num],
+		   IOP_ADDR_RECV_STATE + chan, IOP_MSG_COMPLETE);
+	iop_interrupt(iop_base[msg->iop_num]);
+
+	iop_free_msg(msg);
+}
+
+/*
+ * Actually put a message into a send channel buffer
+ */
+
+static void iop_do_send(struct iop_msg *msg)
+{
+	volatile struct mac_iop *iop = iop_base[msg->iop_num];
+	int i,offset;
+
+	offset = IOP_ADDR_SEND_MSG + (msg->channel * IOP_MSG_LEN);
+
+	for (i = 0 ; i < IOP_MSG_LEN ; i++, offset++) {
+		iop_writeb(iop, offset, msg->message[i]);
+	}
+
+	iop_writeb(iop, IOP_ADDR_SEND_STATE + msg->channel, IOP_MSG_NEW);
+
+	iop_interrupt(iop);
+}
+
+/*
+ * Handle sending a message on a channel that
+ * has gone into the IOP_MSG_COMPLETE state.
+ */
+
+static void iop_handle_send(uint iop_num, uint chan, struct pt_regs *regs)
+{
+	volatile struct mac_iop *iop = iop_base[iop_num];
+	struct iop_msg *msg,*msg2;
+	int i,offset;
+
+#ifdef DEBUG_IOP
+	printk("iop_handle_send: iop %d channel %d\n", iop_num, chan);
+#endif
+
+	iop_writeb(iop, IOP_ADDR_SEND_STATE + chan, IOP_MSG_IDLE);
+
+	if (!(msg = iop_send_queue[iop_num][chan])) return;
+
+	msg->status = IOP_MSGSTATUS_COMPLETE;
+	offset = IOP_ADDR_SEND_MSG + (chan * IOP_MSG_LEN);
+	for (i = 0 ; i < IOP_MSG_LEN ; i++, offset++) {
+		msg->reply[i] = iop_readb(iop, offset);
+	}
+	if (msg->handler) (*msg->handler)(msg, regs);
+	msg2 = msg;
+	msg = msg->next;
+	iop_free_msg(msg2);
+
+	iop_send_queue[iop_num][chan] = msg;
+	if (msg) iop_do_send(msg);
+}
+
+/*
+ * Handle reception of a message on a channel that has
+ * gone into the IOP_MSG_NEW state.
+ */
+
+static void iop_handle_recv(uint iop_num, uint chan, struct pt_regs *regs)
+{
+	volatile struct mac_iop *iop = iop_base[iop_num];
+	int i,offset;
+	struct iop_msg *msg;
+
+#ifdef DEBUG_IOP
+	printk("iop_handle_recv: iop %d channel %d\n", iop_num, chan);
+#endif
+
+	msg = iop_alloc_msg();
+	msg->iop_num = iop_num;
+	msg->channel = chan;
+	msg->status = IOP_MSGSTATUS_UNSOL;
+	msg->handler = iop_listeners[iop_num][chan].handler;
+
+	offset = IOP_ADDR_RECV_MSG + (chan * IOP_MSG_LEN);
+
+	for (i = 0 ; i < IOP_MSG_LEN ; i++, offset++) {
+		msg->message[i] = iop_readb(iop, offset);
+	}
+
+	iop_writeb(iop, IOP_ADDR_RECV_STATE + chan, IOP_MSG_RCVD);
+
+	/* If there is a listener, call it now. Otherwise complete */
+	/* the message ourselves to avoid possible stalls.         */
+
+	if (msg->handler) {
+		(*msg->handler)(msg, regs);
+	} else {
+#ifdef DEBUG_IOP
+		printk("iop_handle_recv: unclaimed message on iop %d channel %d\n", iop_num, chan);
+		printk("iop_handle_recv:");
+		for (i = 0 ; i < IOP_MSG_LEN ; i++) {
+			printk(" %02X", (uint) msg->message[i]);
+		}
+		printk("\n");
+#endif
+		iop_complete_message(msg);
+	}
+}
+
+/*
+ * Send a message
+ *
+ * The message is placed at the end of the send queue. Afterwards if the
+ * channel is idle we force an immediate send of the next message in the
+ * queue.
+ */
+
+int iop_send_message(uint iop_num, uint chan, void *privdata,
+		      uint msg_len, __u8 *msg_data,
+		      void (*handler)(struct iop_msg *, struct pt_regs *))
+{
+	struct iop_msg *msg, *q;
+
+	if ((iop_num >= NUM_IOPS) || !iop_base[iop_num]) return -EINVAL;
+	if (chan >= NUM_IOP_CHAN) return -EINVAL;
+	if (msg_len > IOP_MSG_LEN) return -EINVAL;
+
+	msg = iop_alloc_msg();
+	if (!msg) return -ENOMEM;
+
+	msg->next = NULL;
+	msg->status = IOP_MSGSTATUS_WAITING;
+	msg->iop_num = iop_num;
+	msg->channel = chan;
+	msg->caller_priv = privdata;
+	memcpy(msg->message, msg_data, msg_len);
+	msg->handler = handler;
+
+	if (!(q = iop_send_queue[iop_num][chan])) {
+		iop_send_queue[iop_num][chan] = msg;
+	} else {
+		while (q->next) q = q->next;
+		q->next = msg;
+	}
+
+	if (iop_readb(iop_base[iop_num],
+	    IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) {
+		iop_do_send(msg);
+	}
+
+	return 0;
+}
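+
+/*
+ * Illustrative sketch only (not part of this driver): how a client such as
+ * an ADB or serial driver might use iop_listen() and iop_send_message()
+ * above. The channel number, command bytes and "example" names are invented.
+ */
+#if 0
+static void example_reply_handler(struct iop_msg *msg, struct pt_regs *regs)
+{
+	/* msg->reply[] holds the IOP's reply; the message is freed on return */
+}
+
+static void example_unsolicited_handler(struct iop_msg *msg, struct pt_regs *regs)
+{
+	/* unsolicited messages must be handed back when we are done */
+	iop_complete_message(msg);
+}
+
+static int example_client_init(void)
+{
+	__u8 cmd[2] = { 0x01, 0x00 };	/* hypothetical command bytes */
+	int err;
+
+	err = iop_listen(IOP_NUM_ISM, 2, example_unsolicited_handler, "example");
+	if (err)
+		return err;
+	return iop_send_message(IOP_NUM_ISM, 2, NULL, sizeof(cmd), cmd,
+				example_reply_handler);
+}
+#endif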
+
+/*
+ * Upload code to the shared RAM of an IOP.
+ */
+
+void iop_upload_code(uint iop_num, __u8 *code_start,
+		     uint code_len, __u16 shared_ram_start)
+{
+	if ((iop_num >= NUM_IOPS) || !iop_base[iop_num]) return;
+
+	iop_loadaddr(iop_base[iop_num], shared_ram_start);
+
+	while (code_len--) {
+		iop_base[iop_num]->ram_data = *code_start++;
+	}
+}
+
+/*
+ * Download code from the shared RAM of an IOP.
+ */
+
+void iop_download_code(uint iop_num, __u8 *code_start,
+		       uint code_len, __u16 shared_ram_start)
+{
+	if ((iop_num >= NUM_IOPS) || !iop_base[iop_num]) return;
+
+	iop_loadaddr(iop_base[iop_num], shared_ram_start);
+
+	while (code_len--) {
+		*code_start++ = iop_base[iop_num]->ram_data;
+	}
+}
+
+/*
+ * Compare the code in the shared RAM of an IOP with a copy in system memory
+ * and return 0 on match or the first nonmatching system memory address on
+ * failure.
+ */
+
+__u8 *iop_compare_code(uint iop_num, __u8 *code_start,
+		       uint code_len, __u16 shared_ram_start)
+{
+	if ((iop_num >= NUM_IOPS) || !iop_base[iop_num]) return code_start;
+
+	iop_loadaddr(iop_base[iop_num], shared_ram_start);
+
+	while (code_len--) {
+		if (*code_start != iop_base[iop_num]->ram_data) {
+			return code_start;
+		}
+		code_start++;
+	}
+	return (__u8 *) 0;
+}
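+
+/*
+ * Illustrative sketch only: uploading a small code fragment into IOP shared
+ * RAM and verifying it with iop_compare_code(). The target address and the
+ * payload (6502 NOP opcodes) are invented for the example.
+ */
+#if 0
+static __u8 example_fragment[] = { 0xEA, 0xEA, 0xEA };	/* 6502 NOPs */
+
+static void example_upload(void)
+{
+	iop_upload_code(IOP_NUM_SCC, example_fragment,
+			sizeof(example_fragment), 0x0300);
+	if (iop_compare_code(IOP_NUM_SCC, example_fragment,
+			     sizeof(example_fragment), 0x0300))
+		printk("IOP: example upload did not verify\n");
+}
+#endif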
+
+/*
+ * Handle an ISM IOP interrupt
+ */
+
+irqreturn_t iop_ism_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	uint iop_num = (uint) dev_id;
+	volatile struct mac_iop *iop = iop_base[iop_num];
+	int i,state;
+
+#ifdef DEBUG_IOP
+	printk("iop_ism_irq: status = %02X\n", (uint) iop->status_ctrl);
+#endif
+
+	/* INT0 indicates a state change on an outgoing message channel */
+
+	if (iop->status_ctrl & IOP_INT0) {
+		iop->status_ctrl = IOP_INT0 | IOP_RUN | IOP_AUTOINC;
+#ifdef DEBUG_IOP
+		printk("iop_ism_irq: new status = %02X, send states",
+			(uint) iop->status_ctrl);
+#endif
+		for (i = 0 ; i < NUM_IOP_CHAN  ; i++) {
+			state = iop_readb(iop, IOP_ADDR_SEND_STATE + i);
+#ifdef DEBUG_IOP
+			printk(" %02X", state);
+#endif
+			if (state == IOP_MSG_COMPLETE) {
+				iop_handle_send(iop_num, i, regs);
+			}
+		}
+#ifdef DEBUG_IOP
+		printk("\n");
+#endif
+	}
+
+	if (iop->status_ctrl & IOP_INT1) {	/* INT1 for incoming msgs */
+		iop->status_ctrl = IOP_INT1 | IOP_RUN | IOP_AUTOINC;
+#ifdef DEBUG_IOP
+		printk("iop_ism_irq: new status = %02X, recv states",
+			(uint) iop->status_ctrl);
+#endif
+		for (i = 0 ; i < NUM_IOP_CHAN ; i++) {
+			state = iop_readb(iop, IOP_ADDR_RECV_STATE + i);
+#ifdef DEBUG_IOP
+			printk(" %02X", state);
+#endif
+			if (state == IOP_MSG_NEW) {
+				iop_handle_recv(iop_num, i, regs);
+			}
+		}
+#ifdef DEBUG_IOP
+		printk("\n");
+#endif
+	}
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PROC_FS
+
+char *iop_chan_state(int state)
+{
+	switch(state) {
+		case IOP_MSG_IDLE	: return "idle      ";
+		case IOP_MSG_NEW	: return "new       ";
+		case IOP_MSG_RCVD	: return "received  ";
+		case IOP_MSG_COMPLETE	: return "completed ";
+		default			: return "unknown   ";
+	}
+}
+
+int iop_dump_one_iop(char *buf, int iop_num, char *iop_name)
+{
+	int i,len = 0;
+	volatile struct mac_iop *iop = iop_base[iop_num];
+
+	len += sprintf(buf+len, "%s IOP channel states:\n\n", iop_name);
+	len += sprintf(buf+len, "##  send_state  recv_state  device\n");
+	len += sprintf(buf+len, "------------------------------------------------\n");
+	for (i = 0 ; i < NUM_IOP_CHAN ; i++) {
+		len += sprintf(buf+len, "%2d  %10s  %10s  %s\n", i,
+			iop_chan_state(iop_readb(iop, IOP_ADDR_SEND_STATE+i)),
+			iop_chan_state(iop_readb(iop, IOP_ADDR_RECV_STATE+i)),
+			iop_listeners[iop_num][i].handler?
+				      iop_listeners[iop_num][i].devname : "");
+
+	}
+	len += sprintf(buf+len, "\n");
+	return len;
+}
+
+static int iop_get_proc_info(char *buf, char **start, off_t pos, int count)
+{
+	int len, cnt;
+
+	cnt = 0;
+	len =  sprintf(buf, "IOPs detected:\n\n");
+
+	if (iop_scc_present) {
+		len += sprintf(buf+len, "SCC IOP (%p): status %02X\n",
+				iop_base[IOP_NUM_SCC],
+				(uint) iop_base[IOP_NUM_SCC]->status_ctrl);
+	}
+	if (iop_ism_present) {
+		len += sprintf(buf+len, "ISM IOP (%p): status %02X\n\n",
+				iop_base[IOP_NUM_ISM],
+				(uint) iop_base[IOP_NUM_ISM]->status_ctrl);
+	}
+
+	if (iop_scc_present) {
+		len += iop_dump_one_iop(buf+len, IOP_NUM_SCC, "SCC");
+
+	}
+
+	if (iop_ism_present) {
+		len += iop_dump_one_iop(buf+len, IOP_NUM_ISM, "ISM");
+
+	}
+
+	if (len >= pos) {
+		if (!*start) {
+			*start = buf + pos;
+			cnt = len - pos;
+		} else {
+			cnt += len;
+		}
+	}
+	return (count > cnt) ? cnt : count;
+}
+
+#endif /* CONFIG_PROC_FS */
diff --git a/arch/m68k/mac/mac_ksyms.c b/arch/m68k/mac/mac_ksyms.c
new file mode 100644
index 0000000..6e37ceb
--- /dev/null
+++ b/arch/m68k/mac/mac_ksyms.c
@@ -0,0 +1,8 @@
+#include <linux/module.h>
+#include <asm/ptrace.h>
+#include <asm/traps.h>
+
+/* Says whether we're using A/UX interrupts or not */
+extern int via_alt_mapping;
+
+EXPORT_SYMBOL(via_alt_mapping);
diff --git a/arch/m68k/mac/mac_penguin.S b/arch/m68k/mac/mac_penguin.S
new file mode 100644
index 0000000..b3ce30b
--- /dev/null
+++ b/arch/m68k/mac/mac_penguin.S
@@ -0,0 +1,75 @@
+.byte \
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0x0F,0xFF,0xFF,0xF0,0x00,0x0F,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0xFF,0xF0,0xFF,0xFF,0x0F,0xF0,0xF0,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0xFF,0x00,0xFF,0xFF,0x0F,0xFF,0x00,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0xFF,0xF0,0x0F,0xFF,0x0F,0xFF,0xF0,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x0F,0xFF,0x00,0x0F,0x0F,0xFF,0xF0,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x0F,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,0x00,0x00,0x00,0x00,0xFF,0x00,0xFF,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x0F,0xF0,0x00,0x00,0xFF,0xF0,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x0F,0xF0,0xFF,0xFF,0x00,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0xFF,0xF0,0x00,0x0F,0xFF,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x0F,0xFF,0x00,0xFF,0xF0,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,0x00,0x0F,0xFF,0xF0,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x0F,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0xF0,0x00,0x00,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0xF0,0x00,0x00,0x00,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x0F,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0xF0,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x0F,0xFF,0xFF,0xFF,0x00,0x00,0xF0,0x00,0x00,\
+0x00,0x00,0x00,0xFF,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0xF0,0x00,0x00,\
+0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0x00,0x00,\
+0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,\
+0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,\
+0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0x00,\
+0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,\
+0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,\
+0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
+0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x0F,0xF0,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,\
+0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0xFF,0xF0,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,\
+0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x00,0x00,\
+0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0xFF,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x00,0x00,0x00,\
+0x0F,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0xFF,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0xFF,0xF0,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,0x00,0x00,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0x00,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,\
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00
diff --git a/arch/m68k/mac/macboing.c b/arch/m68k/mac/macboing.c
new file mode 100644
index 0000000..44c5cd2
--- /dev/null
+++ b/arch/m68k/mac/macboing.c
@@ -0,0 +1,309 @@
+/*
+ *	Mac bong noise generator. Note - we ought to put a boingy noise
+ *	here 8)
+ *
+ *	----------------------------------------------------------------------
+ *	16.11.98:
+ *	rewrote some functions, added support for Enhanced ASC (Quadras)
+ *	after the NetBSD asc.c console bell patch by Colin Wood/Frederick Bruck
+ *	Juergen Mellinger (juergen.mellinger@t-online.de)
+ */
+
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+#include <asm/macintosh.h>
+#include <asm/mac_asc.h>
+
+static int mac_asc_inited;
+/*
+ * dumb triangular wave table
+ */
+static __u8 mac_asc_wave_tab[ 0x800 ];
+
+/*
+ * Alan's original sine table; needs interpolating to 0x800
+ * (hint: interpolate or hardwire [0 -> Pi/2[, it's symmetric)
+ */
+static const signed char sine_data[] = {
+	0,  39,  75,  103,  121,  127,  121,  103,  75,  39,
+	0, -39, -75, -103, -121, -127, -121, -103, -75, -39
+};
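+
+/*
+ * Illustrative sketch only (not wired up): one way the 20-entry sine table
+ * above could be linearly interpolated into a 0x800-entry wave table, as the
+ * comment suggests. The helper name is invented.
+ */
+#if 0
+static void example_fill_sine_table(__u8 *tab)
+{
+	int i;
+
+	for (i = 0; i < 0x800; i++) {
+		/* position within the 20-entry table, in 1/0x800 steps */
+		unsigned int pos  = i * 20;
+		unsigned int idx  = pos / 0x800;
+		unsigned int frac = pos % 0x800;
+		int a = sine_data[idx];
+		int b = sine_data[(idx + 1) % 20];
+
+		/* interpolate, then shift from signed to the unsigned range */
+		tab[i] = 128 + (a * (int)(0x800 - frac) + b * (int)frac) / 0x800;
+	}
+}
+#endif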
+
+/*
+ * where the ASC hides ...
+ */
+static volatile __u8* mac_asc_regs = ( void* )0x50F14000;
+
+/*
+ * sample rate; is this a good default value?
+ */
+static unsigned long mac_asc_samplespersec = 11050;
+static int mac_bell_duration;
+static unsigned long mac_bell_phase; /* 0..2*Pi -> 0..0x800 (wavetable size) */
+static unsigned long mac_bell_phasepersample;
+
+/*
+ * some function protos
+ */
+static void mac_init_asc( void );
+static void mac_nosound( unsigned long );
+static void mac_quadra_start_bell( unsigned int, unsigned int, unsigned int );
+static void mac_quadra_ring_bell( unsigned long );
+static void mac_av_start_bell( unsigned int, unsigned int, unsigned int );
+static void ( *mac_special_bell )( unsigned int, unsigned int, unsigned int );
+
+/*
+ * our timer to start/continue/stop the bell
+ */
+static struct timer_list mac_sound_timer =
+		TIMER_INITIALIZER(mac_nosound, 0, 0);
+
+/*
+ * Sort of initialize the sound chip (called from mac_mksound on the first
+ * beep).
+ */
+static void mac_init_asc( void )
+{
+	int i;
+
+	/*
+	 * do some machine specific initialization
+	 * BTW:
+	 * the NetBSD Quadra patch identifies the Enhanced Apple Sound Chip via
+	 *	mac_asc_regs[ 0x800 ] & 0xF0 != 0
+	 * this makes no sense here, because we have to set the default sample
+	 * rate anyway if we want correct frequencies
+	 */
+	switch ( macintosh_config->ident )
+	{
+		case MAC_MODEL_IIFX:
+			/*
+			 * The IIfx is always special ...
+			 */
+			mac_asc_regs = ( void* )0x50010000;
+			break;
+			/*
+			 * not sure how correct this list is:
+			 * machines with the EASC (Enhanced Apple Sound Chip)
+			 */
+		case MAC_MODEL_Q630:
+		case MAC_MODEL_P475:
+			mac_special_bell = mac_quadra_start_bell;
+			mac_asc_samplespersec = 22150;
+			break;
+		case MAC_MODEL_C660:
+		case MAC_MODEL_Q840:
+			/*
+			 * The Quadra 660AV and 840AV use the "Singer" custom ASIC for sound I/O.
+			 * It appears to be similar to the "AWACS" custom ASIC in the Power Mac
+			 * [678]100.  Because Singer and AWACS may have a similar hardware
+			 * interface, this would imply that the code in drivers/sound/dmasound.c
+			 * for AWACS could be used as a basis for Singer support.  All we have to
+			 * do is figure out how to do DMA on the 660AV/840AV through the PSC and
+			 * figure out where the Singer hardware sits in memory. (I'd look in the
+			 * vicinity of the AWACS location in a Power Mac [678]100 first, or the
+			 * current location of the Apple Sound Chip--ASC--in other Macs.)  The
+			 * Power Mac [678]100 info can be found in MkLinux Mach kernel sources.
+			 *
+			 * Quoted from Apple's Tech Info Library, article number 16405:
+			 *   "Among desktop Macintosh computers, only the 660AV, 840AV, and Power
+			 *   Macintosh models have 16-bit audio input and output capability
+			 *   because of the AT&T DSP3210 hardware circuitry and the 16-bit Singer
+			 *   codec circuitry in the AVs.  The Audio Waveform Amplifier and
+			 *   Converter (AWAC) chip in the Power Macintosh performs the same
+			 *   16-bit I/O functionality.  The PowerBook 500 series computers
+			 *   support 16-bit stereo output, but only mono input."
+			 *
+			 *   http://til.info.apple.com/techinfo.nsf/artnum/n16405
+			 *
+			 * --David Kilzer
+			 */
+			mac_special_bell = mac_av_start_bell;
+			break;
+		case MAC_MODEL_Q650:
+		case MAC_MODEL_Q700:
+		case MAC_MODEL_Q800:
+		case MAC_MODEL_Q900:
+		case MAC_MODEL_Q950:
+			/*
+			 * Currently not implemented!
+			 */
+			mac_special_bell = NULL;
+			break;
+		default:
+			/*
+			 * Every switch needs a default
+			 */
+			mac_special_bell = NULL;
+			break;
+	}
+
+	/*
+	 * init the wave table with a simple triangular wave
+	 * A sine wave would sure be nicer here ...
+	 */
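+	/* entries 0x000-0x3FF ramp from 0x00 up to 0xFF, 0x400-0x7FF ramp back down */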
+	for ( i = 0; i < 0x400; i++ )
+	{
+		mac_asc_wave_tab[ i ] = i / 4;
+		mac_asc_wave_tab[ i + 0x400 ] = 0xFF - i / 4;
+	}
+	mac_asc_inited = 1;
+}
+
+/*
+ * Called to make noise; current single entry to the boing driver.
+ * Does the job for simple ASC, calls other routines else.
+ * XXX Fixme:
+ * Should be split into asc_mksound, easc_mksound, av_mksound and
+ * function pointer set in mac_init_asc which would be called at
+ * init time.
+ * _This_ is rather ugly ...
+ */
+void mac_mksound( unsigned int freq, unsigned int length )
+{
+	__u32 cfreq = ( freq << 5 ) / 468;
+	__u32 flags;
+	int i;
+
+	if ( mac_special_bell == NULL )
+	{
+		/* Do nothing */
+		return;
+	}
+
+	if ( !mac_asc_inited )
+		mac_init_asc();
+
+	if ( mac_special_bell )
+	{
+		mac_special_bell( freq, length, 128 );
+		return;
+	}
+
+	if ( freq < 20 || freq > 20000 || length == 0 )
+	{
+		mac_nosound( 0 );
+		return;
+	}
+
+	local_irq_save(flags);
+
+	del_timer( &mac_sound_timer );
+
+	for ( i = 0; i < 0x800; i++ )
+		mac_asc_regs[ i ] = 0;
+	for ( i = 0; i < 0x800; i++ )
+		mac_asc_regs[ i ] = mac_asc_wave_tab[ i ];
+
+	for ( i = 0; i < 8; i++ )
+		*( __u32* )( ( __u32 )mac_asc_regs + ASC_CONTROL + 0x814 + 8 * i ) = cfreq;
+
+	mac_asc_regs[ 0x807 ] = 0;
+	mac_asc_regs[ ASC_VOLUME ] = 128;
+	mac_asc_regs[ 0x805 ] = 0;
+	mac_asc_regs[ 0x80F ] = 0;
+	mac_asc_regs[ ASC_MODE ] = ASC_MODE_SAMPLE;
+	mac_asc_regs[ ASC_ENABLE ] = ASC_ENABLE_SAMPLE;
+
+	mac_sound_timer.expires = jiffies + length;
+	add_timer( &mac_sound_timer );
+
+	local_irq_restore(flags);
+}
+
+/*
+ * regular ASC: stop whining ..
+ */
+static void mac_nosound( unsigned long ignored )
+{
+	mac_asc_regs[ ASC_ENABLE ] = 0;
+}
+
+/*
+ * EASC entry; init EASC, don't load wavetable, schedule 'start whining'.
+ */
+static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume )
+{
+	__u32 flags;
+
+	/* if the bell is already ringing, ring longer */
+	if ( mac_bell_duration > 0 )
+	{
+		mac_bell_duration += length;
+		return;
+	}
+
+	mac_bell_duration = length;
+	mac_bell_phase = 0;
+	mac_bell_phasepersample = ( freq * sizeof( mac_asc_wave_tab ) ) / mac_asc_samplespersec;
+	/* this is reasonably big for small frequencies */
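+	/* e.g. freq = 440 at the 22150 samples/sec EASC rate set in mac_init_asc()
+	 * gives 440 * 0x800 / 22150 ~= 40 wavetable steps per sample */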
+
+	local_irq_save(flags);
+
+	/* set the volume */
+	mac_asc_regs[ 0x806 ] = volume;
+
+	/* set up the ASC registers */
+	if ( mac_asc_regs[ 0x801 ] != 1 )
+	{
+		/* select mono mode */
+		mac_asc_regs[ 0x807 ] = 0;
+		/* select sampled sound mode */
+		mac_asc_regs[ 0x802 ] = 0;
+		/* ??? */
+		mac_asc_regs[ 0x801 ] = 1;
+		mac_asc_regs[ 0x803 ] |= 0x80;
+		mac_asc_regs[ 0x803 ] &= 0x7F;
+	}
+
+	mac_sound_timer.function = mac_quadra_ring_bell;
+	mac_sound_timer.expires = jiffies + 1;
+	add_timer( &mac_sound_timer );
+
+	local_irq_restore(flags);
+}
+
+/*
+ * EASC 'start/continue whining'; I'm not sure why the above function didn't
+ * already load the wave table, or at least call this one...
+ * This piece keeps reloading the wave table until done.
+ */
+static void mac_quadra_ring_bell( unsigned long ignored )
+{
+	int	i, count = mac_asc_samplespersec / HZ;
+	__u32 flags;
+
+	/*
+	 * we want neither a sound buffer overflow nor an underflow, so we need to match
+	 * the number of samples per timer interrupt as exactly as possible.
+	 * using the asc interrupt will give better results in the future
+	 * ...and the possibility to use a real sample (a boingy noise, maybe...)
+	 */
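+	/* e.g. at 22150 samples/sec and HZ == 100 this pushes roughly
+	 * 221 samples into the ASC per tick */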
+
+	local_irq_save(flags);
+
+	del_timer( &mac_sound_timer );
+
+	if ( mac_bell_duration-- > 0 )
+	{
+		for ( i = 0; i < count; i++ )
+		{
+			mac_bell_phase += mac_bell_phasepersample;
+			mac_asc_regs[ 0 ] = mac_asc_wave_tab[ mac_bell_phase & ( sizeof( mac_asc_wave_tab ) - 1 ) ];
+		}
+		mac_sound_timer.expires = jiffies + 1;
+		add_timer( &mac_sound_timer );
+	}
+	else
+		mac_asc_regs[ 0x801 ] = 0;
+
+	local_irq_restore(flags);
+}
+
+/*
+ * AV code - please fill in.
+ */
+static void mac_av_start_bell( unsigned int freq, unsigned int length, unsigned int volume )
+{
+}
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
new file mode 100644
index 0000000..1809601
--- /dev/null
+++ b/arch/m68k/mac/macints.c
@@ -0,0 +1,760 @@
+/*
+ *	Macintosh interrupts
+ *
+ * General design:
+ * In contrast to the Amiga and Atari platforms, the Mac hardware seems to
+ * exclusively use the autovector interrupts (the 'generic level0-level7'
+ * interrupts with exception vectors 0x19-0x1f). The following interrupt levels
+ * are used:
+ *	1	- VIA1
+ *		  - slot 0: one second interrupt (CA2)
+ *		  - slot 1: VBlank (CA1)
+ *		  - slot 2: ADB data ready (SR full)
+ *		  - slot 3: ADB data  (CB2)
+ *		  - slot 4: ADB clock (CB1)
+ *		  - slot 5: timer 2
+ *		  - slot 6: timer 1
+ *		  - slot 7: status of IRQ; signals 'any enabled int.'
+ *
+ *	2	- VIA2 or RBV
+ *		  - slot 0: SCSI DRQ (CA2)
+ *		  - slot 1: NUBUS IRQ (CA1) need to read port A to find which
+ *		  - slot 2: /EXP IRQ (only on IIci)
+ *		  - slot 3: SCSI IRQ (CB2)
+ *		  - slot 4: ASC IRQ (CB1)
+ *		  - slot 5: timer 2 (not on IIci)
+ *		  - slot 6: timer 1 (not on IIci)
+ *		  - slot 7: status of IRQ; signals 'any enabled int.'
+ *
+ *	2	- OSS (IIfx only?)
+ *		  - slot 0: SCSI interrupt
+ *		  - slot 1: Sound interrupt
+ *
+ * Levels 3-6 vary by machine type. For VIA or RBV Macintoshes:
+ *
+ *	3	- unused (?)
+ *
+ *	4	- SCC (slot number determined by reading RR3 on the SCC itself)
+ *		  - slot 1: SCC channel A
+ *		  - slot 2: SCC channel B
+ *
+ *	5	- unused (?)
+ *		  [serial errors or special conditions seem to raise level 6
+ *		  interrupts on some models (LC4xx?)]
+ *
+ *	6	- off switch (?)
+ *
+ * For OSS Macintoshes (IIfx only at this point):
+ *
+ *	3	- Nubus interrupt
+ *		  - slot 0: Slot $9
+ *		  - slot 1: Slot $A
+ *		  - slot 2: Slot $B
+ *		  - slot 3: Slot $C
+ *		  - slot 4: Slot $D
+ *		  - slot 5: Slot $E
+ *
+ *	4	- SCC IOP
+ *		  - slot 1: SCC channel A
+ *		  - slot 2: SCC channel B
+ *
+ *	5	- ISM IOP (ADB?)
+ *
+ *	6	- unused
+ *
+ * For PSC Macintoshes (660AV, 840AV):
+ *
+ *	3	- PSC level 3
+ *		  - slot 0: MACE
+ *
+ *	4	- PSC level 4
+ *		  - slot 1: SCC channel A interrupt
+ *		  - slot 2: SCC channel B interrupt
+ *		  - slot 3: MACE DMA
+ *
+ *	5	- PSC level 5
+ *
+ *	6	- PSC level 6
+ *
+ * Finally we have good 'ole level 7, the non-maskable interrupt:
+ *
+ *	7	- NMI (programmer's switch on the back of some Macs)
+ *		  Also RAM parity error on models which support it (IIc, IIfx?)
+ *
+ * The current interrupt logic looks something like this:
+ *
+ * - We install dispatchers for the autovector interrupts (1-7). These
+ *   dispatchers are responsible for querying the hardware (the
+ *   VIA/RBV/OSS/PSC chips) to determine the actual interrupt source. Using
+ *   this information a machspec interrupt number is generated by placing the
+ *   index of the interrupt hardware into the low three bits and the original
+ *   autovector interrupt number in the upper 5 bits. The handlers for the
+ *   resulting machspec interrupt are then called.
+ *
+ * - Nubus is a special case because its interrupts are hidden behind two
+ *   layers of hardware. Nubus interrupts come in as index 1 on VIA #2,
+ *   which translates to IRQ number 17. In this spot we install _another_
+ *   dispatcher. This dispatcher finds the interrupting slot number (9-F) and
+ *   then forms a new machspec interrupt number as above with the slot number
+ *   minus 9 in the low three bits and the pseudo-level 7 in the upper five
+ *   bits.  The handlers for this new machspec interrupt number are then
+ *   called. This puts Nubus interrupts into the range 56-62.
+ *
+ * - The Baboon interrupts (used on some PowerBooks) are an even more special
+ *   case. They're hidden behind the Nubus slot $C interrupt thus adding a
+ *   third layer of indirection. Why oh why did the Apple engineers do that?
+ *
+ * - We support "fast" and "slow" handlers, just like the Amiga port. The
+ *   fast handlers are called first and with all interrupts disabled. They
+ *   are expected to execute quickly (hence the name). The slow handlers are
+ *   called last with interrupts enabled and the interrupt level restored.
+ *   They must therefore be reentrant.
+ *
+ *   TODO:
+ *
+ */
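+
+/*
+ * In other words, the machspec number described above works out to
+ * (autovector level << 3) | hardware index: VIA2 index 1 (the Nubus CA1
+ * input) becomes (2 << 3) | 1 = 17, and a card in Nubus slot $A becomes
+ * (7 << 3) | ($A - 9) = 57. The IRQ_SRC() and IRQ_IDX() macros used below
+ * recover the two halves of this encoding.
+ */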
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h> /* for intr_count */
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/bootinfo.h>
+#include <asm/machw.h>
+#include <asm/macintosh.h>
+#include <asm/mac_via.h>
+#include <asm/mac_psc.h>
+#include <asm/hwtest.h>
+#include <asm/errno.h>
+#include <asm/macints.h>
+
+#define DEBUG_SPURIOUS
+#define SHUTUP_SONIC
+
+/*
+ * The mac_irq_list array is an array of linked lists of irq_node_t nodes.
+ * Each node contains one handler to be called whenever the interrupt
+ * occurs, with fast handlers listed before slow handlers.
+ */
+
+irq_node_t *mac_irq_list[NUM_MAC_SOURCES];
+
+/* SCC interrupt mask */
+
+static int scc_mask;
+
+/*
+ * VIA/RBV hooks
+ */
+
+extern void via_init(void);
+extern void via_register_interrupts(void);
+extern void via_irq_enable(int);
+extern void via_irq_disable(int);
+extern void via_irq_clear(int);
+extern int  via_irq_pending(int);
+
+/*
+ * OSS hooks
+ */
+
+extern int oss_present;
+
+extern void oss_init(void);
+extern void oss_register_interrupts(void);
+extern void oss_irq_enable(int);
+extern void oss_irq_disable(int);
+extern void oss_irq_clear(int);
+extern int  oss_irq_pending(int);
+
+/*
+ * PSC hooks
+ */
+
+extern int psc_present;
+
+extern void psc_init(void);
+extern void psc_register_interrupts(void);
+extern void psc_irq_enable(int);
+extern void psc_irq_disable(int);
+extern void psc_irq_clear(int);
+extern int  psc_irq_pending(int);
+
+/*
+ * IOP hooks
+ */
+
+extern void iop_register_interrupts(void);
+
+/*
+ * Baboon hooks
+ */
+
+extern int baboon_present;
+
+extern void baboon_init(void);
+extern void baboon_register_interrupts(void);
+extern void baboon_irq_enable(int);
+extern void baboon_irq_disable(int);
+extern void baboon_irq_clear(int);
+extern int  baboon_irq_pending(int);
+
+/*
+ * SCC interrupt routines
+ */
+
+static void scc_irq_enable(int);
+static void scc_irq_disable(int);
+
+/*
+ * console_loglevel determines NMI handler function
+ */
+
+extern irqreturn_t mac_bang(int, void *, struct pt_regs *);
+irqreturn_t mac_nmi_handler(int, void *, struct pt_regs *);
+irqreturn_t mac_debug_handler(int, void *, struct pt_regs *);
+
+/* #define DEBUG_MACINTS */
+
+void mac_init_IRQ(void)
+{
+	int i;
+
+#ifdef DEBUG_MACINTS
+	printk("mac_init_IRQ(): Setting things up...\n");
+#endif
+	/* Initialize the IRQ handler lists. Initially each list is empty. */
+
+	for (i = 0; i < NUM_MAC_SOURCES; i++) {
+		mac_irq_list[i] = NULL;
+	}
+
+	scc_mask = 0;
+
+	/* Make sure the SONIC interrupt is cleared or things get ugly */
+#ifdef SHUTUP_SONIC
+	printk("Killing onboard sonic... ");
+	/* This address should hopefully be mapped already */
+	if (hwreg_present((void*)(0x50f0a000))) {
+		*(long *)(0x50f0a014) = 0x7fffL;
+		*(long *)(0x50f0a010) = 0L;
+	}
+	printk("Done.\n");
+#endif /* SHUTUP_SONIC */
+
+	/*
+	 * Now register the handlers for the master IRQ handlers
+	 * at levels 1-7. Most of the work is done elsewhere.
+	 */
+
+	if (oss_present) {
+		oss_register_interrupts();
+	} else {
+		via_register_interrupts();
+	}
+	if (psc_present) psc_register_interrupts();
+	if (baboon_present) baboon_register_interrupts();
+	iop_register_interrupts();
+	cpu_request_irq(7, mac_nmi_handler, IRQ_FLG_LOCK, "NMI",
+			mac_nmi_handler);
+#ifdef DEBUG_MACINTS
+	printk("mac_init_IRQ(): Done!\n");
+#endif
+}
+
+/*
+ * Routines to work with irq_node_t's on linked lists lifted from
+ * the Amiga code written by Roman Zippel.
+ */
+
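+/*
+ * Insertion keeps fast handlers at the head of the list, then normal
+ * handlers, then slow handlers at the tail, which is the order in which
+ * mac_do_irq_list() serves them.
+ */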
+static inline void mac_insert_irq(irq_node_t **list, irq_node_t *node)
+{
+	unsigned long flags;
+	irq_node_t *cur;
+
+	if (!node->dev_id)
+		printk("%s: Warning: dev_id of %s is zero\n",
+		       __FUNCTION__, node->devname);
+
+	local_irq_save(flags);
+
+	cur = *list;
+
+	if (node->flags & IRQ_FLG_FAST) {
+		node->flags &= ~IRQ_FLG_SLOW;
+		while (cur && cur->flags & IRQ_FLG_FAST) {
+			list = &cur->next;
+			cur = cur->next;
+		}
+	} else if (node->flags & IRQ_FLG_SLOW) {
+		while (cur) {
+			list = &cur->next;
+			cur = cur->next;
+		}
+	} else {
+		while (cur && !(cur->flags & IRQ_FLG_SLOW)) {
+			list = &cur->next;
+			cur = cur->next;
+		}
+	}
+
+	node->next = cur;
+	*list = node;
+
+	local_irq_restore(flags);
+}
+
+static inline void mac_delete_irq(irq_node_t **list, void *dev_id)
+{
+	unsigned long flags;
+	irq_node_t *node;
+
+	local_irq_save(flags);
+
+	for (node = *list; node; list = &node->next, node = *list) {
+		if (node->dev_id == dev_id) {
+			*list = node->next;
+			/* Mark it as free. */
+			node->handler = NULL;
+			local_irq_restore(flags);
+			return;
+		}
+	}
+	local_irq_restore(flags);
+	printk ("%s: tried to remove invalid irq\n", __FUNCTION__);
+}
+
+/*
+ * Call all the handlers for a given interrupt. Fast handlers are called
+ * first followed by slow handlers.
+ *
+ * This code taken from the original Amiga code written by Roman Zippel.
+ */
+
+void mac_do_irq_list(int irq, struct pt_regs *fp)
+{
+	irq_node_t *node, *slow_nodes;
+	unsigned long flags;
+
+	kstat_cpu(0).irqs[irq]++;
+
+#ifdef DEBUG_SPURIOUS
+	if (!mac_irq_list[irq] && (console_loglevel > 7)) {
+		printk("mac_do_irq_list: spurious interrupt %d!\n", irq);
+		return;
+	}
+#endif
+
+	/* serve first fast and normal handlers */
+	for (node = mac_irq_list[irq];
+	     node && (!(node->flags & IRQ_FLG_SLOW));
+	     node = node->next)
+		node->handler(irq, node->dev_id, fp);
+	if (!node) return;
+	local_save_flags(flags);
+	local_irq_restore((flags & ~0x0700) | (fp->sr & 0x0700));
+	/* if slow handlers exist, serve them now */
+	slow_nodes = node;
+	for (; node; node = node->next) {
+		node->handler(irq, node->dev_id, fp);
+	}
+}
+
+/*
+ *  mac_enable_irq - enable an interrupt source
+ * mac_disable_irq - disable an interrupt source
+ *   mac_clear_irq - clears a pending interrupt
+ * mac_pending_irq - Returns the pending status of an IRQ (nonzero = pending)
+ *
+ * These routines are just dispatchers to the VIA/OSS/PSC routines.
+ */
+
+void mac_enable_irq (unsigned int irq)
+{
+	int irq_src	= IRQ_SRC(irq);
+
+	switch(irq_src) {
+		case 1: via_irq_enable(irq);
+			break;
+		case 2:
+		case 7: if (oss_present) {
+				oss_irq_enable(irq);
+			} else {
+				via_irq_enable(irq);
+			}
+			break;
+		case 3:
+		case 4:
+		case 5:
+		case 6: if (psc_present) {
+				psc_irq_enable(irq);
+			} else if (oss_present) {
+				oss_irq_enable(irq);
+			} else if (irq_src == 4) {
+				scc_irq_enable(irq);
+			}
+			break;
+		case 8: if (baboon_present) {
+				baboon_irq_enable(irq);
+			}
+			break;
+	}
+}
+
+void mac_disable_irq (unsigned int irq)
+{
+	int irq_src	= IRQ_SRC(irq);
+
+	switch(irq_src) {
+		case 1: via_irq_disable(irq);
+			break;
+		case 2:
+		case 7: if (oss_present) {
+				oss_irq_disable(irq);
+			} else {
+				via_irq_disable(irq);
+			}
+			break;
+		case 3:
+		case 4:
+		case 5:
+		case 6: if (psc_present) {
+				psc_irq_disable(irq);
+			} else if (oss_present) {
+				oss_irq_disable(irq);
+			} else if (irq_src == 4) {
+				scc_irq_disable(irq);
+			}
+			break;
+		case 8: if (baboon_present) {
+				baboon_irq_disable(irq);
+			}
+			break;
+	}
+}
+
+void mac_clear_irq( unsigned int irq )
+{
+	switch(IRQ_SRC(irq)) {
+		case 1: via_irq_clear(irq);
+			break;
+		case 2:
+		case 7: if (oss_present) {
+				oss_irq_clear(irq);
+			} else {
+				via_irq_clear(irq);
+			}
+			break;
+		case 3:
+		case 4:
+		case 5:
+		case 6: if (psc_present) {
+				psc_irq_clear(irq);
+			} else if (oss_present) {
+				oss_irq_clear(irq);
+			}
+			break;
+		case 8: if (baboon_present) {
+				baboon_irq_clear(irq);
+			}
+			break;
+	}
+}
+
+int mac_irq_pending( unsigned int irq )
+{
+	switch(IRQ_SRC(irq)) {
+		case 1: return via_irq_pending(irq);
+		case 2:
+		case 7: if (oss_present) {
+				return oss_irq_pending(irq);
+			} else {
+				return via_irq_pending(irq);
+			}
+		case 3:
+		case 4:
+		case 5:
+		case 6: if (psc_present) {
+				return psc_irq_pending(irq);
+			} else if (oss_present) {
+				return oss_irq_pending(irq);
+			}
+	}
+	return 0;
+}
+
+/*
+ * Add an interrupt service routine to an interrupt source.
+ * Returns 0 on success.
+ *
+ * FIXME: You can register interrupts on nonexistent source (ie PSC4 on a
+ *        non-PSC machine). We should return -EINVAL in those cases.
+ */
+
+int mac_request_irq(unsigned int irq,
+		    irqreturn_t (*handler)(int, void *, struct pt_regs *),
+		    unsigned long flags, const char *devname, void *dev_id)
+{
+	irq_node_t *node;
+
+#ifdef DEBUG_MACINTS
+	printk ("%s: irq %d requested for %s\n", __FUNCTION__, irq, devname);
+#endif
+
+	if (irq < VIA1_SOURCE_BASE) {
+		return cpu_request_irq(irq, handler, flags, devname, dev_id);
+	}
+
+	if (irq >= NUM_MAC_SOURCES) {
+		printk ("%s: unknown irq %d requested by %s\n",
+		        __FUNCTION__, irq, devname);
+	}
+
+	/* Get a node and stick it onto the right list */
+
+	if (!(node = new_irq_node())) return -ENOMEM;
+
+	node->handler	= handler;
+	node->flags	= flags;
+	node->dev_id	= dev_id;
+	node->devname	= devname;
+	node->next	= NULL;
+	mac_insert_irq(&mac_irq_list[irq], node);
+
+	/* Now enable the IRQ source */
+
+	mac_enable_irq(irq);
+
+	return 0;
+}
+
+/*
+ * Removes an interrupt service routine from an interrupt source.
+ */
+
+void mac_free_irq(unsigned int irq, void *dev_id)
+{
+#ifdef DEBUG_MACINTS
+	printk ("%s: irq %d freed by %p\n", __FUNCTION__, irq, dev_id);
+#endif
+
+	if (irq < VIA1_SOURCE_BASE) {
+		cpu_free_irq(irq, dev_id);
+		return;
+	}
+
+	if (irq >= NUM_MAC_SOURCES) {
+		printk ("%s: unknown irq %d freed\n",
+		        __FUNCTION__, irq);
+		return;
+	}
+
+	mac_delete_irq(&mac_irq_list[irq], dev_id);
+
+	/* If the list for this interrupt is */
+	/* empty then disable the source.    */
+
+	if (!mac_irq_list[irq]) {
+		mac_disable_irq(irq);
+	}
+}
+
+/*
+ * Generate a pretty listing for /proc/interrupts
+ *
+ * By the time we're called the autovector interrupt list has already been
+ * generated, so we just need to do the machspec interrupts.
+ *
+ * 990506 (jmt) - rewritten to handle chained machspec interrupt handlers.
+ *                Also removed display of num_spurious as it is already
+ *		  displayed for us as autovector irq 0.
+ */
+
+int show_mac_interrupts(struct seq_file *p, void *v)
+{
+	int i;
+	irq_node_t *node;
+	char *base;
+
+	/* Don't do Nubus interrupts in this loop; we do them separately  */
+	/* below so that we can print slot numbers instead of IRQ numbers */
+
+	for (i = VIA1_SOURCE_BASE ; i < NUM_MAC_SOURCES ; ++i) {
+
+		/* Nonexistent interrupt or nothing registered; skip it. */
+
+		if ((node = mac_irq_list[i]) == NULL) continue;
+		if (node->flags & IRQ_FLG_STD) continue;
+
+		base = "";
+		switch(IRQ_SRC(i)) {
+			case 1: base = "via1";
+				break;
+			case 2: if (oss_present) {
+					base = "oss";
+				} else {
+					base = "via2";
+				}
+				break;
+			case 3:
+			case 4:
+			case 5:
+			case 6: if (psc_present) {
+					base = "psc";
+				} else if (oss_present) {
+					base = "oss";
+				} else {
+					if (IRQ_SRC(i) == 4) base = "scc";
+				}
+				break;
+			case 7: base = "nbus";
+				break;
+			case 8: base = "bbn";
+				break;
+		}
+		seq_printf(p, "%4s %2d: %10u ", base, i, kstat_cpu(0).irqs[i]);
+
+		do {
+			if (node->flags & IRQ_FLG_FAST) {
+				seq_puts(p, "F ");
+			} else if (node->flags & IRQ_FLG_SLOW) {
+				seq_puts(p, "S ");
+			} else {
+				seq_puts(p, "  ");
+			}
+			seq_printf(p, "%s\n", node->devname);
+			if ((node = node->next)) {
+				seq_puts(p, "                    ");
+			}
+		} while(node);
+
+	}
+	return 0;
+}
+
+void mac_default_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+#ifdef DEBUG_SPURIOUS
+	printk("Unexpected IRQ %d on device %p\n", irq, dev_id);
+#endif
+}
+
+static int num_debug[8];
+
+irqreturn_t mac_debug_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	if (num_debug[irq] < 10) {
+		printk("DEBUG: Unexpected IRQ %d\n", irq);
+		num_debug[irq]++;
+	}
+	return IRQ_HANDLED;
+}
+
+static int in_nmi;
+static volatile int nmi_hold;
+
+irqreturn_t mac_nmi_handler(int irq, void *dev_id, struct pt_regs *fp)
+{
+	int i;
+	/*
+	 * generate debug output on NMI switch if 'debug' kernel option given
+	 * (only works with Penguin!)
+	 */
+
+	in_nmi++;
+	for (i=0; i<100; i++)
+		udelay(1000);
+
+	if (in_nmi == 1) {
+		nmi_hold = 1;
+		printk("... pausing, press NMI to resume ...");
+	} else {
+		printk(" ok!\n");
+		nmi_hold = 0;
+	}
+
+	barrier();
+
+	while (nmi_hold == 1)
+		udelay(1000);
+
+	if ( console_loglevel >= 8 ) {
+#if 0
+		show_state();
+		printk("PC: %08lx\nSR: %04x  SP: %p\n", fp->pc, fp->sr, fp);
+		printk("d0: %08lx    d1: %08lx    d2: %08lx    d3: %08lx\n",
+		       fp->d0, fp->d1, fp->d2, fp->d3);
+		printk("d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
+		       fp->d4, fp->d5, fp->a0, fp->a1);
+
+		if (STACK_MAGIC != *(unsigned long *)current->kernel_stack_page)
+			printk("Corrupted stack page\n");
+		printk("Process %s (pid: %d, stackpage=%08lx)\n",
+			current->comm, current->pid, current->kernel_stack_page);
+		if (intr_count == 1)
+			dump_stack((struct frame *)fp);
+#else
+		/* printk("NMI "); */
+#endif
+	}
+	in_nmi--;
+	return IRQ_HANDLED;
+}
+
+/*
+ * Simple routines for masking and unmasking
+ * SCC interrupts in cases where this can't be
+ * done in hardware (only the PSC can do that.)
+ */
+
+static void scc_irq_enable(int irq) {
+	int irq_idx     = IRQ_IDX(irq);
+
+	scc_mask |= (1 << irq_idx);
+}
+
+static void scc_irq_disable(int irq) {
+	int irq_idx     = IRQ_IDX(irq);
+
+	scc_mask &= ~(1 << irq_idx);
+}
+
+/*
+ * SCC master interrupt handler. We have to do a bit of magic here
+ * to figure out what channel gave us the interrupt; putting this
+ * here is cleaner than hacking it into drivers/char/macserial.c.
+ */
+
+void mac_scc_dispatch(int irq, void *dev_id, struct pt_regs *regs)
+{
+	volatile unsigned char *scc = (unsigned char *) mac_bi_data.sccbase + 2;
+	unsigned char reg;
+	unsigned long flags;
+
+	/* Read RR3 from the chip. Always do this on channel A */
+	/* This must be an atomic operation so disable irqs.   */
+
+	local_irq_save(flags);
+	*scc = 3;
+	reg = *scc;
+	local_irq_restore(flags);
+
+	/* Now dispatch. Bits 0-2 are for channel B and */
+	/* bits 3-5 are for channel A. We can safely    */
+	/* ignore the remaining bits here.              */
+	/*                                              */
+	/* Note that we're ignoring scc_mask for now.   */
+	/* If we actually mask the ints then we tend to */
+	/* get hammered by very persistent SCC irqs,    */
+	/* and since they're autovector interrupts they */
+	/* pretty much kill the system.                 */
+
+	if (reg & 0x38) mac_do_irq_list(IRQ_SCCA, regs);
+	if (reg & 0x07) mac_do_irq_list(IRQ_SCCB, regs);
+}
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
new file mode 100644
index 0000000..5b80d7c
--- /dev/null
+++ b/arch/m68k/mac/misc.c
@@ -0,0 +1,651 @@
+/*
+ * Miscellaneous Mac68K-specific stuff
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/rtc.h>
+#include <linux/mm.h>
+
+#include <linux/adb.h>
+#include <linux/cuda.h>
+#include <linux/pmu.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/rtc.h>
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/setup.h>
+#include <asm/macintosh.h>
+#include <asm/mac_via.h>
+#include <asm/mac_oss.h>
+
+#define BOOTINFO_COMPAT_1_0
+#include <asm/bootinfo.h>
+#include <asm/machdep.h>
+
+/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+
+#define RTC_OFFSET 2082844800
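+/* (66 years from 1904 to 1970, including 17 leap days: 24107 days * 86400 seconds) */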
+
+extern struct mac_booter_data mac_bi_data;
+static void (*rom_reset)(void);
+
+#ifdef CONFIG_ADB
+/*
+ * Return the current time as the number of seconds since January 1, 1904.
+ */
+
+static long adb_read_time(void)
+{
+	volatile struct adb_request req;
+	long time;
+
+	adb_request((struct adb_request *) &req, NULL,
+			ADBREQ_RAW|ADBREQ_SYNC,
+			2, CUDA_PACKET, CUDA_GET_TIME);
+
+	time = (req.reply[3] << 24) | (req.reply[4] << 16)
+		| (req.reply[5] << 8) | req.reply[6];
+	return time - RTC_OFFSET;
+}
+
+/*
+ * Set the current system time
+ */
+
+static void adb_write_time(long data)
+{
+	volatile struct adb_request req;
+
+	data += RTC_OFFSET;
+
+	adb_request((struct adb_request *) &req, NULL,
+			ADBREQ_RAW|ADBREQ_SYNC,
+			6, CUDA_PACKET, CUDA_SET_TIME,
+			(data >> 24) & 0xFF, (data >> 16) & 0xFF,
+			(data >> 8) & 0xFF, data & 0xFF);
+}
+
+/*
+ * Get a byte from the NVRAM
+ */
+
+static __u8 adb_read_pram(int offset)
+{
+	volatile struct adb_request req;
+
+	adb_request((struct adb_request *) &req, NULL,
+			ADBREQ_RAW|ADBREQ_SYNC,
+			4, CUDA_PACKET, CUDA_GET_PRAM,
+			(offset >> 8) & 0xFF, offset & 0xFF);
+	return req.reply[3];
+}
+
+/*
+ * Write a byte to the NVRAM
+ */
+
+static void adb_write_pram(int offset, __u8 data)
+{
+	volatile struct adb_request req;
+
+	adb_request((struct adb_request *) &req, NULL,
+			ADBREQ_RAW|ADBREQ_SYNC,
+			5, CUDA_PACKET, CUDA_SET_PRAM,
+			(offset >> 8) & 0xFF, offset & 0xFF,
+			data);
+}
+#endif /* CONFIG_ADB */
+
+/*
+ * VIA PRAM/RTC access routines
+ *
+ * Must be called with interrupts disabled and
+ * the RTC should be enabled.
+ */
+
+static __u8 via_pram_readbyte(void)
+{
+	int	i,reg;
+	__u8	data;
+
+	reg = via1[vBufB] & ~VIA1B_vRTCClk;
+
+	/* Set the RTC data line to be an input. */
+
+	via1[vDirB] &= ~VIA1B_vRTCData;
+
+	/* The bits of the byte come out in MSB order */
+
+	data = 0;
+	for (i = 0 ; i < 8 ; i++) {
+		via1[vBufB] = reg;
+		via1[vBufB] = reg | VIA1B_vRTCClk;
+		data = (data << 1) | (via1[vBufB] & VIA1B_vRTCData);
+	}
+
+	/* Return RTC data line to output state */
+
+	via1[vDirB] |= VIA1B_vRTCData;
+
+	return data;
+}
+
+static void via_pram_writebyte(__u8 data)
+{
+	int	i,reg,bit;
+
+	reg = via1[vBufB] & ~(VIA1B_vRTCClk | VIA1B_vRTCData);
+
+	/* The bits of the byte go in in MSB order */
+
+	for (i = 0 ; i < 8 ; i++) {
+		bit = data & 0x80? 1 : 0;
+		data <<= 1;
+		via1[vBufB] = reg | bit;
+		via1[vBufB] = reg | bit | VIA1B_vRTCClk;
+	}
+}
+
+/*
+ * Execute a VIA PRAM/RTC command. For read commands
+ * data should point to a one-byte buffer for the
+ * resulting data. For write commands it should point
+ * to the data byte for the command.
+ *
+ * This function disables all interrupts while running.
+ */
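+/*
+ * For example, via_read_time() below fetches the four clock bytes with the
+ * one-byte read commands 0x81/0x85/0x89/0x8D, and via_write_time() stores
+ * them with 0x01/0x05/0x09/0x0D after first clearing the write-protect bit
+ * with command 0x35.
+ */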
+
+static void via_pram_command(int command, __u8 *data)
+{
+	unsigned long flags;
+	int	is_read;
+
+	local_irq_save(flags);
+
+	/* Enable the RTC and make sure the strobe line is high */
+
+	via1[vBufB] = (via1[vBufB] | VIA1B_vRTCClk) & ~VIA1B_vRTCEnb;
+
+	if (command & 0xFF00) {		/* extended (two-byte) command */
+		via_pram_writebyte((command & 0xFF00) >> 8);
+		via_pram_writebyte(command & 0xFF);
+		is_read = command & 0x8000;
+	} else {			/* one-byte command */
+		via_pram_writebyte(command);
+		is_read = command & 0x80;
+	}
+	if (is_read) {
+		*data = via_pram_readbyte();
+	} else {
+		via_pram_writebyte(*data);
+	}
+
+	/* All done, disable the RTC */
+
+	via1[vBufB] |= VIA1B_vRTCEnb;
+
+	local_irq_restore(flags);
+}
+
+static __u8 via_read_pram(int offset)
+{
+	return 0;
+}
+
+static void via_write_pram(int offset, __u8 data)
+{
+}
+
+/*
+ * Return the current time in seconds since January 1, 1904.
+ *
+ * This only works on machines with the VIA-based PRAM/RTC, which
+ * is basically any machine with Mac II-style ADB.
+ */
+
+static long via_read_time(void)
+{
+	union {
+		__u8  cdata[4];
+		long  idata;
+	} result, last_result;
+	int	ct;
+
+	/*
+	 * The NetBSD guys say to loop until you get the same reading
+	 * twice in a row.
+	 */
+
+	ct = 0;
+	do {
+		if (++ct > 10) {
+			printk("via_read_time: couldn't get valid time, "
+			       "last read = 0x%08lx and 0x%08lx\n",
+			       last_result.idata, result.idata);
+			break;
+		}
+
+		last_result.idata = result.idata;
+		result.idata = 0;
+
+		via_pram_command(0x81, &result.cdata[3]);
+		via_pram_command(0x85, &result.cdata[2]);
+		via_pram_command(0x89, &result.cdata[1]);
+		via_pram_command(0x8D, &result.cdata[0]);
+	} while (result.idata != last_result.idata);
+
+	return result.idata - RTC_OFFSET;
+}
+
+/*
+ * Set the current time to a number of seconds since January 1, 1904.
+ *
+ * This only works on machines with the VIA-based PRAM/RTC, which
+ * is basically any machine with Mac II-style ADB.
+ */
+
+static void via_write_time(long time)
+{
+	union {
+		__u8  cdata[4];
+		long  idata;
+	} data;
+	__u8	temp;
+
+	/* Clear the write protect bit */
+
+	temp = 0x55;
+	via_pram_command(0x35, &temp);
+
+	data.idata = time + RTC_OFFSET;
+	via_pram_command(0x01, &data.cdata[3]);
+	via_pram_command(0x05, &data.cdata[2]);
+	via_pram_command(0x09, &data.cdata[1]);
+	via_pram_command(0x0D, &data.cdata[0]);
+
+	/* Set the write protect bit */
+
+	temp = 0xD5;
+	via_pram_command(0x35, &temp);
+}
+
+static void via_shutdown(void)
+{
+	if (rbv_present) {
+		via2[rBufB] &= ~0x04;
+	} else {
+		/* Direction of vDirB is output */
+		via2[vDirB] |= 0x04;
+		/* Send a value of 0 on that line */
+		via2[vBufB] &= ~0x04;
+		mdelay(1000);
+	}
+}
+
+/*
+ * FIXME: not sure how this is supposed to work exactly...
+ */
+
+static void oss_shutdown(void)
+{
+	oss->rom_ctrl = OSS_POWEROFF;
+}
+
+#ifdef CONFIG_ADB_CUDA
+
+static void cuda_restart(void)
+{
+	adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
+			2, CUDA_PACKET, CUDA_RESET_SYSTEM);
+}
+
+static void cuda_shutdown(void)
+{
+	adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
+			2, CUDA_PACKET, CUDA_POWERDOWN);
+}
+
+#endif /* CONFIG_ADB_CUDA */
+
+#ifdef CONFIG_ADB_PMU
+
+void pmu_restart(void)
+{
+	adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
+			3, PMU_PACKET, PMU_SET_INTR_MASK,
+			PMU_INT_ADB|PMU_INT_TICK);
+
+	adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
+			2, PMU_PACKET, PMU_RESET);
+}
+
+void pmu_shutdown(void)
+{
+	adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
+			3, PMU_PACKET, PMU_SET_INTR_MASK,
+			PMU_INT_ADB|PMU_INT_TICK);
+
+	adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
+			6, PMU_PACKET, PMU_SHUTDOWN,
+			'M', 'A', 'T', 'T');
+}
+
+#endif /* CONFIG_ADB_PMU */
+
+/*
+ *-------------------------------------------------------------------
+ * Below this point are the generic routines; they'll dispatch to the
+ * correct routine for the hardware on which we're running.
+ *-------------------------------------------------------------------
+ */
+
+void mac_pram_read(int offset, __u8 *buffer, int len)
+{
+	__u8 (*func)(int) = NULL;
+	int i;
+
+	if (macintosh_config->adb_type == MAC_ADB_IISI ||
+	    macintosh_config->adb_type == MAC_ADB_PB1 ||
+	    macintosh_config->adb_type == MAC_ADB_PB2 ||
+	    macintosh_config->adb_type == MAC_ADB_CUDA) {
+#ifdef CONFIG_ADB
+		func = adb_read_pram;
+#else
+		return;
+#endif
+	} else {
+		func = via_read_pram;
+	}
+	for (i = 0 ; i < len ; i++) {
+		buffer[i] = (*func)(offset++);
+	}
+}
+
+void mac_pram_write(int offset, __u8 *buffer, int len)
+{
+	void (*func)(int, __u8) = NULL;
+	int i;
+
+	if (macintosh_config->adb_type == MAC_ADB_IISI ||
+	    macintosh_config->adb_type == MAC_ADB_PB1 ||
+	    macintosh_config->adb_type == MAC_ADB_PB2 ||
+	    macintosh_config->adb_type == MAC_ADB_CUDA) {
+#ifdef CONFIG_ADB
+		func = adb_write_pram;
+#else
+		return;
+#endif
+	} else {
+		func = via_write_pram;
+	}
+	for (i = 0 ; i < len ; i++) {
+		(*func)(offset++, buffer[i]);
+	}
+}
+
+void mac_poweroff(void)
+{
+	/*
+	 * MAC_ADB_IISI may need to be moved up here if it doesn't actually
+	 * work using the ADB packet method.  --David Kilzer
+	 */
+
+	if (oss_present) {
+		oss_shutdown();
+	} else if (macintosh_config->adb_type == MAC_ADB_II) {
+		via_shutdown();
+#ifdef CONFIG_ADB_CUDA
+	} else if (macintosh_config->adb_type == MAC_ADB_CUDA) {
+		cuda_shutdown();
+#endif
+#ifdef CONFIG_ADB_PMU
+	} else if (macintosh_config->adb_type == MAC_ADB_PB1
+		|| macintosh_config->adb_type == MAC_ADB_PB2) {
+		pmu_shutdown();
+#endif
+	}
+	local_irq_enable();
+	printk("It is now safe to turn off your Macintosh.\n");
+	while(1);
+}
+
+void mac_reset(void)
+{
+	if (macintosh_config->adb_type == MAC_ADB_II) {
+		unsigned long flags;
+
+		/* need ROMBASE in booter */
+		/* indeed, plus need to MAP THE ROM !! */
+
+		if (mac_bi_data.rombase == 0)
+			mac_bi_data.rombase = 0x40800000;
+
+		/* works on some */
+		rom_reset = (void *) (mac_bi_data.rombase + 0xa);
+
+		if (macintosh_config->ident == MAC_MODEL_SE30) {
+			/*
+			 * MSch: Machines known to crash on ROM reset ...
+			 */
+		} else {
+			local_irq_save(flags);
+
+			rom_reset();
+
+			local_irq_restore(flags);
+		}
+#ifdef CONFIG_ADB_CUDA
+	} else if (macintosh_config->adb_type == MAC_ADB_CUDA) {
+		cuda_restart();
+#endif
+#ifdef CONFIG_ADB_PMU
+	} else if (macintosh_config->adb_type == MAC_ADB_PB1
+		|| macintosh_config->adb_type == MAC_ADB_PB2) {
+		pmu_restart();
+#endif
+	} else if (CPU_IS_030) {
+
+		/* 030-specific reset routine.  The idea is general, but the
+		 * specific registers to reset are '030-specific.  Until I
+		 * have a non-030 machine, I can't test anything else.
+		 *  -- C. Scott Ananian <cananian@alumni.princeton.edu>
+		 */
+
+		unsigned long rombase = 0x40000000;
+
+		/* make a 1-to-1 mapping, using the transparent tran. reg. */
+		unsigned long virt = (unsigned long) mac_reset;
+		unsigned long phys = virt_to_phys(mac_reset);
+		unsigned long offset = phys-virt;
+		local_irq_disable(); /* lets not screw this up, ok? */
+		__asm__ __volatile__(".chip 68030\n\t"
+				     "pmove %0,%/tt0\n\t"
+				     ".chip 68k"
+				     : : "m" ((phys&0xFF000000)|0x8777));
+		/* Now jump to physical address so we can disable MMU */
+		__asm__ __volatile__(
+                    ".chip 68030\n\t"
+		    "lea %/pc@(1f),%/a0\n\t"
+		    "addl %0,%/a0\n\t"/* fixup target address and stack ptr */
+		    "addl %0,%/sp\n\t"
+		    "pflusha\n\t"
+		    "jmp %/a0@\n\t" /* jump into physical memory */
+		    "0:.long 0\n\t" /* a constant zero. */
+		    /* OK.  Now reset everything and jump to reset vector. */
+		    "1:\n\t"
+		    "lea %/pc@(0b),%/a0\n\t"
+		    "pmove %/a0@, %/tc\n\t" /* disable mmu */
+		    "pmove %/a0@, %/tt0\n\t" /* disable tt0 */
+		    "pmove %/a0@, %/tt1\n\t" /* disable tt1 */
+		    "movel #0, %/a0\n\t"
+		    "movec %/a0, %/vbr\n\t" /* clear vector base register */
+		    "movec %/a0, %/cacr\n\t" /* disable caches */
+		    "movel #0x0808,%/a0\n\t"
+		    "movec %/a0, %/cacr\n\t" /* flush i&d caches */
+		    "movew #0x2700,%/sr\n\t" /* set up status register */
+		    "movel %1@(0x0),%/a0\n\t"/* load interrupt stack pointer */
+		    "movec %/a0, %/isp\n\t"
+		    "movel %1@(0x4),%/a0\n\t" /* load reset vector */
+		    "reset\n\t" /* reset external devices */
+		    "jmp %/a0@\n\t" /* jump to the reset vector */
+		    ".chip 68k"
+		    : : "r" (offset), "a" (rombase) : "a0");
+	}
+
+	/* should never get here */
+	local_irq_enable();
+	printk ("Restart failed.  Please restart manually.\n");
+	while(1);
+}
+
+/*
+ * This function translates seconds since 1970 into a proper date.
+ *
+ * Algorithm cribbed from glibc2.1, __offtime().
+ */
+#define SECS_PER_MINUTE (60)
+#define SECS_PER_HOUR  (SECS_PER_MINUTE * 60)
+#define SECS_PER_DAY   (SECS_PER_HOUR * 24)
+
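+/*
+ * For example, unmktime(0, 0, ...) yields year 70, month 0, day 1 at
+ * 00:00:00, i.e. 1970-01-01 with the year offset by 1900 and a zero-based
+ * month, the same convention mac_hwclk() adjusts for when printing below.
+ */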
+static void unmktime(unsigned long time, long offset,
+		     int *yearp, int *monp, int *dayp,
+		     int *hourp, int *minp, int *secp)
+{
+        /* How many days come before each month (0-12).  */
+	static const unsigned short int __mon_yday[2][13] =
+	{
+		/* Normal years.  */
+		{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
+		/* Leap years.  */
+		{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
+	};
+	long int days, rem, y, wday, yday;
+	const unsigned short int *ip;
+
+	days = time / SECS_PER_DAY;
+	rem = time % SECS_PER_DAY;
+	rem += offset;
+	while (rem < 0) {
+		rem += SECS_PER_DAY;
+		--days;
+	}
+	while (rem >= SECS_PER_DAY) {
+		rem -= SECS_PER_DAY;
+		++days;
+	}
+	*hourp = rem / SECS_PER_HOUR;
+	rem %= SECS_PER_HOUR;
+	*minp = rem / SECS_PER_MINUTE;
+	*secp = rem % SECS_PER_MINUTE;
+	/* January 1, 1970 was a Thursday. */
+	wday = (4 + days) % 7; /* Day in the week. Not currently used */
+	if (wday < 0) wday += 7;
+	y = 1970;
+
+#define DIV(a, b) ((a) / (b) - ((a) % (b) < 0))
+#define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
+#define __isleap(year)	\
+  ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
+
+	while (days < 0 || days >= (__isleap (y) ? 366 : 365))
+	{
+		/* Guess a corrected year, assuming 365 days per year.  */
+		long int yg = y + days / 365 - (days % 365 < 0);
+
+		/* Adjust DAYS and Y to match the guessed year.  */
+		days -= ((yg - y) * 365
+			 + LEAPS_THRU_END_OF (yg - 1)
+			 - LEAPS_THRU_END_OF (y - 1));
+		y = yg;
+	}
+	*yearp = y - 1900;
+	yday = days; /* day in the year.  Not currently used. */
+	ip = __mon_yday[__isleap(y)];
+	for (y = 11; days < (long int) ip[y]; --y)
+		continue;
+	days -= ip[y];
+	*monp = y;
+	*dayp = days + 1; /* day in the month */
+	return;
+}
+
+/*
+ * Read/write the hardware clock.
+ */
+
+int mac_hwclk(int op, struct rtc_time *t)
+{
+	unsigned long now;
+
+	if (!op) { /* read */
+		if (macintosh_config->adb_type == MAC_ADB_II) {
+			now = via_read_time();
+		} else
+#ifdef CONFIG_ADB
+		if ((macintosh_config->adb_type == MAC_ADB_IISI) ||
+			   (macintosh_config->adb_type == MAC_ADB_PB1) ||
+			   (macintosh_config->adb_type == MAC_ADB_PB2) ||
+			   (macintosh_config->adb_type == MAC_ADB_CUDA)) {
+			now = adb_read_time();
+		} else
+#endif
+		if (macintosh_config->adb_type == MAC_ADB_IOP) {
+			now = via_read_time();
+		} else {
+			now = 0;
+		}
+
+		t->tm_wday = 0;
+		unmktime(now, 0,
+			 &t->tm_year, &t->tm_mon, &t->tm_mday,
+			 &t->tm_hour, &t->tm_min, &t->tm_sec);
+		printk("mac_hwclk: read %04d-%02d-%-2d %02d:%02d:%02d\n",
+			t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, t->tm_sec);
+	} else { /* write */
+		printk("mac_hwclk: tried to write %04d-%02d-%-2d %02d:%02d:%02d\n",
+			t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, t->tm_sec);
+
+#if 0	/* it trashes my rtc */
+		now = mktime(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
+			     t->tm_hour, t->tm_min, t->tm_sec);
+
+		if (macintosh_config->adb_type == MAC_ADB_II) {
+			via_write_time(now);
+		} else if ((macintosh_config->adb_type == MAC_ADB_IISI) ||
+			   (macintosh_config->adb_type == MAC_ADB_PB1) ||
+			   (macintosh_config->adb_type == MAC_ADB_PB2) ||
+			   (macintosh_config->adb_type == MAC_ADB_CUDA)) {
+			adb_write_time(now);
+		} else if (macintosh_config->adb_type == MAC_ADB_IOP) {
+			via_write_time(now);
+		}
+#endif
+	}
+	return 0;
+}
+
+/*
+ * Set minutes/seconds in the hardware clock
+ */
+
+int mac_set_clock_mmss (unsigned long nowtime)
+{
+	struct rtc_time now;
+
+	mac_hwclk(0, &now);
+	now.tm_sec = nowtime % 60;
+	now.tm_min = (nowtime / 60) % 60;
+	mac_hwclk(1, &now);
+
+	return 0;
+}
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
new file mode 100644
index 0000000..3335476
--- /dev/null
+++ b/arch/m68k/mac/oss.c
@@ -0,0 +1,301 @@
+/*
+ *	OSS handling
+ *	Written by Joshua M. Thompson (funaho@jurai.org)
+ *
+ *
+ *	This chip is used in the IIfx in place of VIA #2. It acts like a fancy
+ *	VIA chip with programmable interrupt levels.
+ *
+ * 990502 (jmt) - Major rewrite for new interrupt architecture as well as some
+ *		  recent insights into OSS operational details.
+ * 990610 (jmt) - Now taking full advantage of the OSS. Interrupts are mapped
+ *		  to mostly match the A/UX interrupt scheme supported on the
+ *		  VIA side. Also added support for enabling the ISM irq again
+ *		  since we now have a functional IOP manager.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <asm/bootinfo.h>
+#include <asm/machw.h>
+#include <asm/macintosh.h>
+#include <asm/macints.h>
+#include <asm/mac_via.h>
+#include <asm/mac_oss.h>
+
+int oss_present;
+volatile struct mac_oss *oss;
+
+irqreturn_t oss_irq(int, void *, struct pt_regs *);
+irqreturn_t oss_nubus_irq(int, void *, struct pt_regs *);
+
+extern irqreturn_t via1_irq(int, void *, struct pt_regs *);
+extern irqreturn_t mac_scc_dispatch(int, void *, struct pt_regs *);
+
+/*
+ * Initialize the OSS
+ *
+ * The OSS "detection" code is actually in via_init() which is always called
+ * before us. Thus we can count on oss_present being valid on entry.
+ */
+
+void __init oss_init(void)
+{
+	int i;
+
+	if (!oss_present) return;
+
+	oss = (struct mac_oss *) OSS_BASE;
+
+	/* Disable all interrupts. Unlike a VIA it looks like we    */
+	/* do this by setting the source's interrupt level to zero. */
+
+	for (i = 0; i <= OSS_NUM_SOURCES; i++) {
+		oss->irq_level[i] = OSS_IRQLEV_DISABLED;
+	}
+	/* If we disable VIA1 here, we never really handle it... */
+	oss->irq_level[OSS_VIA1] = OSS_IRQLEV_VIA1;
+}
+
+/*
+ * Register the OSS and NuBus interrupt dispatchers.
+ */
+
+void __init oss_register_interrupts(void)
+{
+	cpu_request_irq(OSS_IRQLEV_SCSI, oss_irq, IRQ_FLG_LOCK,
+			"scsi", (void *) oss);
+	cpu_request_irq(OSS_IRQLEV_IOPSCC, mac_scc_dispatch, IRQ_FLG_LOCK,
+			"scc", mac_scc_dispatch);
+	cpu_request_irq(OSS_IRQLEV_NUBUS, oss_nubus_irq, IRQ_FLG_LOCK,
+			"nubus", (void *) oss);
+	cpu_request_irq(OSS_IRQLEV_SOUND, oss_irq, IRQ_FLG_LOCK,
+			"sound", (void *) oss);
+	cpu_request_irq(OSS_IRQLEV_VIA1, via1_irq, IRQ_FLG_LOCK,
+			"via1", (void *) via1);
+}
+
+/*
+ * Initialize OSS for Nubus access
+ */
+
+void __init oss_nubus_init(void)
+{
+}
+
+/*
+ * Handle miscellaneous OSS interrupts. Right now that's just sound
+ * and SCSI; everything else is routed to its own autovector IRQ.
+ */
+
+irqreturn_t oss_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	int events;
+
+	events = oss->irq_pending & (OSS_IP_SOUND|OSS_IP_SCSI);
+	if (!events)
+		return IRQ_NONE;
+
+#ifdef DEBUG_IRQS
+	if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) {
+		printk("oss_irq: irq %d events = 0x%04X\n", irq,
+			(int) oss->irq_pending);
+	}
+#endif
+	/* FIXME: how do you clear a pending IRQ?    */
+
+	if (events & OSS_IP_SOUND) {
+		/* FIXME: call sound handler */
+		oss->irq_pending &= ~OSS_IP_SOUND;
+	} else if (events & OSS_IP_SCSI) {
+		oss->irq_level[OSS_SCSI] = OSS_IRQLEV_DISABLED;
+		mac_do_irq_list(IRQ_MAC_SCSI, regs);
+		oss->irq_pending &= ~OSS_IP_SCSI;
+		oss->irq_level[OSS_SCSI] = OSS_IRQLEV_SCSI;
+	} else {
+		/* FIXME: error check here? */
+	}
+	return IRQ_HANDLED;
+}
+
+/*
+ * Nubus IRQ handler, OSS style
+ *
+ * Unlike the VIA/RBV this is on its own autovector interrupt level.
+ */
+
+irqreturn_t oss_nubus_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	int events, irq_bit, i;
+
+	events = oss->irq_pending & OSS_IP_NUBUS;
+	if (!events)
+		return IRQ_NONE;
+
+#ifdef DEBUG_NUBUS_INT
+	if (console_loglevel > 7) {
+		printk("oss_nubus_irq: events = 0x%04X\n", events);
+	}
+#endif
+	/* There are only six slots on the OSS, not seven */
+
+	for (i = 0, irq_bit = 1 ; i < 6 ; i++, irq_bit <<= 1) {
+		if (events & irq_bit) {
+			oss->irq_level[i] = OSS_IRQLEV_DISABLED;
+			mac_do_irq_list(NUBUS_SOURCE_BASE + i, regs);
+			oss->irq_pending &= ~irq_bit;
+			oss->irq_level[i] = OSS_IRQLEV_NUBUS;
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+/*
+ * Enable an OSS interrupt
+ *
+ * It looks messy but it's rather straightforward. The switch() statement
+ * just maps the machspec interrupt numbers to the right OSS interrupt
+ * source (if the OSS handles that interrupt) and then sets the interrupt
+ * level for that source to nonzero, thus enabling the interrupt.
+ */
+
+void oss_irq_enable(int irq) {
+#ifdef DEBUG_IRQUSE
+	printk("oss_irq_enable(%d)\n", irq);
+#endif
+	switch(irq) {
+		case IRQ_SCC:
+		case IRQ_SCCA:
+		case IRQ_SCCB:
+			oss->irq_level[OSS_IOPSCC] = OSS_IRQLEV_IOPSCC;
+			break;
+		case IRQ_MAC_ADB:
+			oss->irq_level[OSS_IOPISM] = OSS_IRQLEV_IOPISM;
+			break;
+		case IRQ_MAC_SCSI:
+			oss->irq_level[OSS_SCSI] = OSS_IRQLEV_SCSI;
+			break;
+		case IRQ_NUBUS_9:
+		case IRQ_NUBUS_A:
+		case IRQ_NUBUS_B:
+		case IRQ_NUBUS_C:
+		case IRQ_NUBUS_D:
+		case IRQ_NUBUS_E:
+			irq -= NUBUS_SOURCE_BASE;
+			oss->irq_level[irq] = OSS_IRQLEV_NUBUS;
+			break;
+#ifdef DEBUG_IRQUSE
+		default:
+			printk("%s unknown irq %d\n",__FUNCTION__, irq);
+			break;
+#endif
+	}
+}
+
+/*
+ * Disable an OSS interrupt
+ *
+ * Same as above except we set the source's interrupt level to zero,
+ * to disable the interrupt.
+ */
+
+void oss_irq_disable(int irq) {
+#ifdef DEBUG_IRQUSE
+	printk("oss_irq_disable(%d)\n", irq);
+#endif
+	switch(irq) {
+		case IRQ_SCC:
+		case IRQ_SCCA:
+		case IRQ_SCCB:
+			oss->irq_level[OSS_IOPSCC] = OSS_IRQLEV_DISABLED;
+			break;
+		case IRQ_MAC_ADB:
+			oss->irq_level[OSS_IOPISM] = OSS_IRQLEV_DISABLED;
+			break;
+		case IRQ_MAC_SCSI:
+			oss->irq_level[OSS_SCSI] = OSS_IRQLEV_DISABLED;
+			break;
+		case IRQ_NUBUS_9:
+		case IRQ_NUBUS_A:
+		case IRQ_NUBUS_B:
+		case IRQ_NUBUS_C:
+		case IRQ_NUBUS_D:
+		case IRQ_NUBUS_E:
+			irq -= NUBUS_SOURCE_BASE;
+			oss->irq_level[irq] = OSS_IRQLEV_DISABLED;
+			break;
+#ifdef DEBUG_IRQUSE
+		default:
+			printk("%s unknown irq %d\n", __FUNCTION__, irq);
+			break;
+#endif
+	}
+}
+
+/*
+ * Clear an OSS interrupt
+ *
+ * Not sure if this works or not but it's the only method I could
+ * think of based on the contents of the mac_oss structure.
+ */
+
+void oss_irq_clear(int irq) {
+	/* FIXME: how to do this on OSS? */
+	switch(irq) {
+		case IRQ_SCC:
+		case IRQ_SCCA:
+		case IRQ_SCCB:
+			oss->irq_pending &= ~OSS_IP_IOPSCC;
+			break;
+		case IRQ_MAC_ADB:
+			oss->irq_pending &= ~OSS_IP_IOPISM;
+			break;
+		case IRQ_MAC_SCSI:
+			oss->irq_pending &= ~OSS_IP_SCSI;
+			break;
+		case IRQ_NUBUS_9:
+		case IRQ_NUBUS_A:
+		case IRQ_NUBUS_B:
+		case IRQ_NUBUS_C:
+		case IRQ_NUBUS_D:
+		case IRQ_NUBUS_E:
+			irq -= NUBUS_SOURCE_BASE;
+			oss->irq_pending &= ~(1 << irq);
+			break;
+	}
+}
+
+/*
+ * Check to see if a specific OSS interrupt is pending
+ */
+
+int oss_irq_pending(int irq)
+{
+	switch(irq) {
+		case IRQ_SCC:
+		case IRQ_SCCA:
+		case IRQ_SCCB:
+			return oss->irq_pending & OSS_IP_IOPSCC;
+			break;
+		case IRQ_MAC_ADB:
+			return oss->irq_pending & OSS_IP_IOPISM;
+			break;
+		case IRQ_MAC_SCSI:
+			return oss->irq_pending & OSS_IP_SCSI;
+			break;
+		case IRQ_NUBUS_9:
+		case IRQ_NUBUS_A:
+		case IRQ_NUBUS_B:
+		case IRQ_NUBUS_C:
+		case IRQ_NUBUS_D:
+		case IRQ_NUBUS_E:
+			irq -= NUBUS_SOURCE_BASE;
+			return oss->irq_pending & (1 << irq);
+			break;
+	}
+	return 0;
+}
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
new file mode 100644
index 0000000..e72384e
--- /dev/null
+++ b/arch/m68k/mac/psc.c
@@ -0,0 +1,197 @@
+/*
+ *	Apple Peripheral System Controller (PSC)
+ *
+ *	The PSC is used on the AV Macs to control IO functions not handled
+ *	by the VIAs (Ethernet, DSP, SCC).
+ *
+ * TO DO:
+ *
+ * Try to figure out what's going on in pIFR5 and pIFR6. There seem to be
+ * persistent interrupt conditions in those registers and I have no idea what
+ * they are. Granted it doesn't affect anything since we're not enabling any interrupts
+ * on those levels at the moment, but it would be nice to know. I have a feeling
+ * they aren't actually interrupt lines but data lines (to the DSP?)
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <asm/traps.h>
+#include <asm/bootinfo.h>
+#include <asm/macintosh.h>
+#include <asm/macints.h>
+#include <asm/mac_psc.h>
+
+#define DEBUG_PSC
+
+int psc_present;
+volatile __u8 *psc;
+
+irqreturn_t psc_irq(int, void *, struct pt_regs *);
+
+/*
+ * Debugging dump, used in various places to see what's going on.
+ */
+
+void psc_debug_dump(void)
+{
+	int	i;
+
+	if (!psc_present) return;
+	for (i = 0x30 ; i < 0x70 ; i += 0x10) {
+		printk("PSC #%d:  IFR = 0x%02X IER = 0x%02X\n",
+			i >> 4,
+			(int) psc_read_byte(pIFRbase + i),
+			(int) psc_read_byte(pIERbase + i));
+	}
+}
+
+/*
+ * Try to kill all DMA channels on the PSC. Not sure how this is
+ * supposed to work; this is code lifted from macmace.c and then
+ * expanded to cover what I think are the other 7 channels.
+ */
+
+void psc_dma_die_die_die(void)
+{
+	int i;
+
+	printk("Killing all PSC DMA channels...");
+	for (i = 0 ; i < 9 ; i++) {
+		psc_write_word(PSC_CTL_BASE + (i << 4), 0x8800);
+		psc_write_word(PSC_CTL_BASE + (i << 4), 0x1000);
+		psc_write_word(PSC_CMD_BASE + (i << 5), 0x1100);
+		psc_write_word(PSC_CMD_BASE + (i << 5) + 0x10, 0x1100);
+	}
+	printk("done!\n");
+}
+
+/*
+ * Initialize the PSC. For now this just involves shutting down all
+ * interrupt sources using the IERs.
+ */
+
+void __init psc_init(void)
+{
+	int i;
+
+	if (macintosh_config->ident != MAC_MODEL_C660
+	 && macintosh_config->ident != MAC_MODEL_Q840)
+	{
+		psc = NULL;
+		psc_present = 0;
+		return;
+	}
+
+	/*
+	 * The PSC is always at the same spot, but using psc
+	 * keeps things consistent with the psc_xxxx functions.
+	 */
+
+	psc = (void *) PSC_BASE;
+	psc_present = 1;
+
+	printk("PSC detected at %p\n", psc);
+
+	psc_dma_die_die_die();
+
+#ifdef DEBUG_PSC
+	psc_debug_dump();
+#endif
+	/*
+	 * Mask and clear all possible interrupts
+	 */
+
+	for (i = 0x30 ; i < 0x70 ; i += 0x10) {
+		psc_write_byte(pIERbase + i, 0x0F);
+		psc_write_byte(pIFRbase + i, 0x0F);
+	}
+}
+
+/*
+ * Register the PSC interrupt dispatchers for autovector interrupts 3-6.
+ */
+
+void __init psc_register_interrupts(void)
+{
+	cpu_request_irq(3, psc_irq, IRQ_FLG_LOCK, "psc3", (void *) 0x30);
+	cpu_request_irq(4, psc_irq, IRQ_FLG_LOCK, "psc4", (void *) 0x40);
+	cpu_request_irq(5, psc_irq, IRQ_FLG_LOCK, "psc5", (void *) 0x50);
+	cpu_request_irq(6, psc_irq, IRQ_FLG_LOCK, "psc6", (void *) 0x60);
+}
+
+/*
+ * PSC interrupt handler. It's a lot like the VIA interrupt handler.
+ */
+
+irqreturn_t psc_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	int pIFR	= pIFRbase + ((int) dev_id);
+	int pIER	= pIERbase + ((int) dev_id);
+	int base_irq;
+	int irq_bit,i;
+	unsigned char events;
+
+	base_irq = irq << 3;
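+	/* e.g. autovector level 4 gives base_irq 32, so pending bit 1 below
+	 * dispatches machspec IRQ 33 (SCC channel A in the PSC scheme above) */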
+
+#ifdef DEBUG_IRQS
+	printk("psc_irq: irq %d pIFR = 0x%02X pIER = 0x%02X\n",
+		irq, (int) psc_read_byte(pIFR), (int) psc_read_byte(pIER));
+#endif
+
+	events = psc_read_byte(pIFR) & psc_read_byte(pIER) & 0xF;
+	if (!events)
+		return IRQ_NONE;
+
+	for (i = 0, irq_bit = 1 ; i < 4 ; i++, irq_bit <<= 1) {
+		if (events & irq_bit) {
+			psc_write_byte(pIER, irq_bit);
+			mac_do_irq_list(base_irq + i, regs);
+			psc_write_byte(pIFR, irq_bit);
+			psc_write_byte(pIER, irq_bit | 0x80);
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+void psc_irq_enable(int irq) {
+	int irq_src	= IRQ_SRC(irq);
+	int irq_idx	= IRQ_IDX(irq);
+	int pIER	= pIERbase + (irq_src << 4);
+
+#ifdef DEBUG_IRQUSE
+	printk("psc_irq_enable(%d)\n", irq);
+#endif
+	psc_write_byte(pIER, (1 << irq_idx) | 0x80);
+}
+
+void psc_irq_disable(int irq) {
+	int irq_src	= IRQ_SRC(irq);
+	int irq_idx	= IRQ_IDX(irq);
+	int pIER	= pIERbase + (irq_src << 4);
+
+#ifdef DEBUG_IRQUSE
+	printk("psc_irq_disable(%d)\n", irq);
+#endif
+	psc_write_byte(pIER, 1 << irq_idx);
+}
+
+void psc_irq_clear(int irq) {
+	int irq_src	= IRQ_SRC(irq);
+	int irq_idx	= IRQ_IDX(irq);
+	int pIFR	= pIFRbase + (irq_src << 4);
+
+	psc_write_byte(pIFR, 1 << irq_idx);
+}
+
+int psc_irq_pending(int irq)
+{
+	int irq_src	= IRQ_SRC(irq);
+	int irq_idx	= IRQ_IDX(irq);
+	int pIFR	= pIFRbase + (irq_src << 4);
+
+	return psc_read_byte(pIFR) & (1 << irq_idx);
+}
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
new file mode 100644
index 0000000..cd528bf
--- /dev/null
+++ b/arch/m68k/mac/via.c
@@ -0,0 +1,619 @@
+/*
+ *	6522 Versatile Interface Adapter (VIA)
+ *
+ *	There are two of these on the Mac II. Some IRQ's are vectored
+ *	via them as are assorted bits and bobs - eg RTC, ADB.
+ *
+ * CSA: Motorola seems to have removed documentation on the 6522 from
+ * their web site; try
+ *     http://nerini.drf.com/vectrex/other/text/chips/6522/
+ *     http://www.zymurgy.net/classic/vic20/vicdet1.htm
+ * and
+ *     http://193.23.168.87/mikro_laborversuche/via_iobaustein/via6522_1.html
+ * for info.  A full-text web search on 6522 AND VIA will probably also
+ * net some usefulness. <cananian@alumni.princeton.edu> 20apr1999
+ *
+ * PRAM/RTC access algorithms are from the NetBSD RTC toolkit version 1.08b
+ * by Erik Vogan and adapted to Linux by Joshua M. Thompson (funaho@jurai.org)
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ide.h>
+
+#include <asm/traps.h>
+#include <asm/bootinfo.h>
+#include <asm/macintosh.h>
+#include <asm/macints.h>
+#include <asm/machw.h>
+#include <asm/mac_via.h>
+#include <asm/mac_psc.h>
+
+volatile __u8 *via1, *via2;
+#if 0
+/* See note in mac_via.h about how this is possibly not useful */
+volatile long *via_memory_bogon=(long *)&via_memory_bogon;
+#endif
+int  rbv_present,via_alt_mapping;
+__u8 rbv_clear;
+
+/*
+ * Globals for accessing the VIA chip registers without having to
+ * check if we're hitting a real VIA or an RBV. Normally you could
+ * just hit the combined register (ie, vIER|rIER) but that seems to
+ * break on AV Macs...probably because they actually decode more than
+ * eight address bits. Why can't Apple engineers at least be
+ * _consistently_ lazy?                          - 1999-05-21 (jmt)
+ */
+
+static int gIER,gIFR,gBufA,gBufB;
+
+/*
+ * Timer defs.
+ */
+
+#define TICK_SIZE		10000
+#define MAC_CLOCK_TICK		(783300/HZ)		/* ticks per HZ */
+#define MAC_CLOCK_LOW		(MAC_CLOCK_TICK&0xFF)
+#define MAC_CLOCK_HIGH		(MAC_CLOCK_TICK>>8)
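+/* the VIA timers count a ~783.3 kHz clock, so with HZ == 100 this is
+ * 7833 counts per tick */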
+
+static int  nubus_active;
+
+void via_debug_dump(void);
+irqreturn_t via1_irq(int, void *, struct pt_regs *);
+irqreturn_t via2_irq(int, void *, struct pt_regs *);
+irqreturn_t via_nubus_irq(int, void *, struct pt_regs *);
+void via_irq_enable(int irq);
+void via_irq_disable(int irq);
+void via_irq_clear(int irq);
+
+extern irqreturn_t mac_bang(int, void *, struct pt_regs *);
+extern irqreturn_t mac_scc_dispatch(int, void *, struct pt_regs *);
+extern int oss_present;
+
+/*
+ * Initialize the VIAs
+ *
+ * First we figure out where they actually _are_ as well as what type of
+ * VIA we have for VIA2 (it could be a real VIA or an RBV or even an OSS.)
+ * Then we pretty much clear them out and disable all IRQ sources.
+ *
+ * Note: the OSS is actually "detected" here and not in oss_init(). It just
+ *	 seems more logical to do it here since via_init() needs to know
+ *	 these things anyways.
+ */
+
+void __init via_init(void)
+{
+	switch(macintosh_config->via_type) {
+
+		/* IIci, IIsi, IIvx, IIvi (P6xx), LC series */
+
+		case MAC_VIA_IIci:
+			via1 = (void *) VIA1_BASE;
+			if (macintosh_config->ident == MAC_MODEL_IIFX) {
+				via2 = NULL;
+				rbv_present = 0;
+				oss_present = 1;
+			} else {
+				via2 = (void *) RBV_BASE;
+				rbv_present = 1;
+				oss_present = 0;
+			}
+			if (macintosh_config->ident == MAC_MODEL_LCIII) {
+				rbv_clear = 0x00;
+			} else {
+				/* on most RBVs (& unlike the VIAs), you   */
+				/* need to set bit 7 when you write to IFR */
+				/* in order for your clear to occur.       */
+				rbv_clear = 0x80;
+			}
+			gIER = rIER;
+			gIFR = rIFR;
+			gBufA = rSIFR;
+			gBufB = rBufB;
+			break;
+
+		/* Quadra and early MacIIs agree on the VIA locations */
+
+		case MAC_VIA_QUADRA:
+		case MAC_VIA_II:
+			via1 = (void *) VIA1_BASE;
+			via2 = (void *) VIA2_BASE;
+			rbv_present = 0;
+			oss_present = 0;
+			rbv_clear = 0x00;
+			gIER = vIER;
+			gIFR = vIFR;
+			gBufA = vBufA;
+			gBufB = vBufB;
+			break;
+		default:
+			panic("UNKNOWN VIA TYPE");
+	}
+
+	printk(KERN_INFO "VIA1 at %p is a 6522 or clone\n", via1);
+
+	printk(KERN_INFO "VIA2 at %p is ", via2);
+	if (rbv_present) {
+		printk(KERN_INFO "an RBV\n");
+	} else if (oss_present) {
+		printk(KERN_INFO "an OSS\n");
+	} else {
+		printk(KERN_INFO "a 6522 or clone\n");
+	}
+
+#ifdef DEBUG_VIA
+	via_debug_dump();
+#endif
+
+	/*
+	 * Shut down all IRQ sources, reset the timers, and
+	 * kill the timer latch on VIA1.
+	 */
+
+	via1[vIER] = 0x7F;
+	via1[vIFR] = 0x7F;
+	via1[vT1LL] = 0;
+	via1[vT1LH] = 0;
+	via1[vT1CL] = 0;
+	via1[vT1CH] = 0;
+	via1[vT2CL] = 0;
+	via1[vT2CH] = 0;
+	via1[vACR] &= 0x3F;
+
+	/*
+	 * SE/30: disable video IRQ
+	 * XXX: testing for SE/30 VBL
+	 */
+
+	if (macintosh_config->ident == MAC_MODEL_SE30) {
+		via1[vDirB] |= 0x40;
+		via1[vBufB] |= 0x40;
+	}
+
+	/*
+	 * Set the RTC bits to a known state: all lines to outputs and
+	 * RTC disabled (yes that's 0 to enable and 1 to disable).
+	 */
+
+	via1[vDirB] |= (VIA1B_vRTCEnb | VIA1B_vRTCClk | VIA1B_vRTCData);
+	via1[vBufB] |= (VIA1B_vRTCEnb | VIA1B_vRTCClk);
+
+	/* Everything below this point is VIA2/RBV only... */
+
+	if (oss_present) return;
+
+#if 1
+	/* Some machines support an alternate IRQ mapping that spreads  */
+	/* Ethernet and Sound out to their own autolevel IRQs and moves */
+	/* VIA1 to level 6. A/UX uses this mapping and we do too.  Note */
+	/* that the IIfx emulates this alternate mapping using the OSS. */
+
+	switch(macintosh_config->ident) {
+		case MAC_MODEL_C610:
+		case MAC_MODEL_Q610:
+		case MAC_MODEL_C650:
+		case MAC_MODEL_Q650:
+		case MAC_MODEL_Q700:
+		case MAC_MODEL_Q800:
+		case MAC_MODEL_Q900:
+		case MAC_MODEL_Q950:
+			via_alt_mapping = 1;
+			via1[vDirB] |= 0x40;
+			via1[vBufB] &= ~0x40;
+			break;
+		default:
+			via_alt_mapping = 0;
+			break;
+	}
+#else
+	/* The alternate IRQ mapping seems to just not work. Anyone with a  */
+	/* supported machine is welcome to take a stab at fixing it. It     */
+	/* _should_ work on the following Quadras: 610,650,700,800,900,950  */
+	/*                                               - 1999-06-12 (jmt) */
+
+	via_alt_mapping = 0;
+#endif
+
+	/*
+	 * Now initialize VIA2. For RBV we just kill all interrupts;
+	 * for a regular VIA we also reset the timers and stuff.
+	 */
+
+	via2[gIER] = 0x7F;
+	via2[gIFR] = 0x7F | rbv_clear;
+	if (!rbv_present) {
+		via2[vT1LL] = 0;
+		via2[vT1LH] = 0;
+		via2[vT1CL] = 0;
+		via2[vT1CH] = 0;
+		via2[vT2CL] = 0;
+		via2[vT2CH] = 0;
+		via2[vACR] &= 0x3F;
+	}
+}
+
+/*
+ * Start the 100 Hz clock
+ */
+
+void __init via_init_clock(irqreturn_t (*func)(int, void *, struct pt_regs *))
+{
+	via1[vACR] |= 0x40;
+	via1[vT1LL] = MAC_CLOCK_LOW;
+	via1[vT1LH] = MAC_CLOCK_HIGH;
+	via1[vT1CL] = MAC_CLOCK_LOW;
+	via1[vT1CH] = MAC_CLOCK_HIGH;
+
+	request_irq(IRQ_MAC_TIMER_1, func, IRQ_FLG_LOCK, "timer", func);
+}
+
+/*
+ * Register the interrupt dispatchers for VIA or RBV machines only.
+ */
+
+void __init via_register_interrupts(void)
+{
+	if (via_alt_mapping) {
+		cpu_request_irq(IRQ_AUTO_1, via1_irq,
+				IRQ_FLG_LOCK|IRQ_FLG_FAST, "software",
+				(void *) via1);
+		cpu_request_irq(IRQ_AUTO_6, via1_irq,
+				IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
+				(void *) via1);
+	} else {
+		cpu_request_irq(IRQ_AUTO_1, via1_irq,
+				IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
+				(void *) via1);
+#if 0 /* interferes with serial on some machines */
+		if (!psc_present) {
+			cpu_request_irq(IRQ_AUTO_6, mac_bang, IRQ_FLG_LOCK,
+					"Off Switch", mac_bang);
+		}
+#endif
+	}
+	cpu_request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
+			"via2", (void *) via2);
+	if (!psc_present) {
+		cpu_request_irq(IRQ_AUTO_4, mac_scc_dispatch, IRQ_FLG_LOCK,
+				"scc", mac_scc_dispatch);
+	}
+	request_irq(IRQ_MAC_NUBUS, via_nubus_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
+			"nubus", (void *) via2);
+}
+
+/*
+ * Debugging dump, used in various places to see what's going on.
+ */
+
+void via_debug_dump(void)
+{
+	printk(KERN_DEBUG "VIA1: DDRA = 0x%02X DDRB = 0x%02X ACR = 0x%02X\n",
+		(uint) via1[vDirA], (uint) via1[vDirB], (uint) via1[vACR]);
+	printk(KERN_DEBUG "         PCR = 0x%02X  IFR = 0x%02X IER = 0x%02X\n",
+		(uint) via1[vPCR], (uint) via1[vIFR], (uint) via1[vIER]);
+	if (oss_present) {
+		printk(KERN_DEBUG "VIA2: <OSS>\n");
+	} else if (rbv_present) {
+		printk(KERN_DEBUG "VIA2:  IFR = 0x%02X  IER = 0x%02X\n",
+			(uint) via2[rIFR], (uint) via2[rIER]);
+		printk(KERN_DEBUG "      SIFR = 0x%02X SIER = 0x%02X\n",
+			(uint) via2[rSIFR], (uint) via2[rSIER]);
+	} else {
+		printk(KERN_DEBUG "VIA2: DDRA = 0x%02X DDRB = 0x%02X ACR = 0x%02X\n",
+			(uint) via2[vDirA], (uint) via2[vDirB],
+			(uint) via2[vACR]);
+		printk(KERN_DEBUG "         PCR = 0x%02X  IFR = 0x%02X IER = 0x%02X\n",
+			(uint) via2[vPCR],
+			(uint) via2[vIFR], (uint) via2[vIER]);
+	}
+}
+
+/*
+ * This is always executed with interrupts disabled.
+ *
+ * TBI: get time offset between scheduling timer ticks
+ */
+
+unsigned long mac_gettimeoffset (void)
+{
+	unsigned long ticks, offset = 0;
+
+	/* read VIA1 timer 2 current value */
+	ticks = via1[vT1CL] | (via1[vT1CH] << 8);
+	/* The probability of underflow is less than 2% */
+	if (ticks > MAC_CLOCK_TICK - MAC_CLOCK_TICK / 50)
+		/* Check for pending timer interrupt in VIA1 IFR */
+		if (via1[vIFR] & 0x40) offset = TICK_SIZE;
+
+	ticks = MAC_CLOCK_TICK - ticks;
+	ticks = ticks * 10000L / MAC_CLOCK_TICK;
+
+	return ticks + offset;
+}
+
+/*
+ * Flush the L2 cache on Macs that have it by flipping
+ * the system into 24-bit mode for an instant.
+ */
+
+void via_flush_cache(void)
+{
+	via2[gBufB] &= ~VIA2B_vMode32;
+	via2[gBufB] |= VIA2B_vMode32;
+}
+
+/*
+ * Return the status of the L2 cache on a IIci
+ */
+
+int via_get_cache_disable(void)
+{
+	/* Safeguard against being called accidentally */
+	if (!via2) {
+		printk(KERN_ERR "via_get_cache_disable called on a non-VIA machine!\n");
+		return 1;
+	}
+
+	return (int) via2[gBufB] & VIA2B_vCDis;
+}
+
+/*
+ * Initialize VIA2 for Nubus access
+ */
+
+void __init via_nubus_init(void)
+{
+	/* don't set nubus_active = 0 here, it kills the Baboon */
+	/* interrupt that we've already registered.		*/
+
+	/* unlock nubus transactions */
+
+	if (!rbv_present) {
+		/* set the line to be an output on non-RBV machines */
+		if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
+		   (macintosh_config->adb_type != MAC_ADB_PB2)) {
+			via2[vDirB] |= 0x02;
+		}
+	}
+
+	/* this seems to be an ADB bit on PMU machines */
+	/* according to MkLinux.  -- jmt               */
+
+	if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
+	    (macintosh_config->adb_type != MAC_ADB_PB2)) {
+		via2[gBufB] |= 0x02;
+	}
+
+	/* disable nubus slot interrupts. */
+	if (rbv_present) {
+		via2[rSIER] = 0x7F;
+		via2[rSIER] = nubus_active | 0x80;
+	} else {
+		/* These are ADB bits on PMU */
+		if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
+		   (macintosh_config->adb_type != MAC_ADB_PB2)) {
+			switch(macintosh_config->ident)
+			{
+				case MAC_MODEL_II:
+				case MAC_MODEL_IIX:
+				case MAC_MODEL_IICX:
+				case MAC_MODEL_SE30:
+					via2[vBufA] |= 0x3F;
+					via2[vDirA] = ~nubus_active | 0xc0;
+					break;
+				default:
+					via2[vBufA] = 0xFF;
+					via2[vDirA] = ~nubus_active;
+			}
+		}
+	}
+}
+
+/*
+ * The generic VIA interrupt routines (shamelessly stolen from Alan Cox's
+ * via6522.c :-), disable/pending masks added.
+ *
+ * The new interrupt architecture in macints.c takes care of a lot of the
+ * gruntwork for us, including tallying the interrupts and calling the
+ * handlers on the linked list. All we need to do here is basically generate
+ * the machspec interrupt number after clearing the interrupt.
+ */
+
+irqreturn_t via1_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	int irq_bit, i;
+	unsigned char events, mask;
+
+	mask = via1[vIER] & 0x7F;
+	if (!(events = via1[vIFR] & mask))
+		return IRQ_NONE;
+
+	for (i = 0, irq_bit = 1 ; i < 7 ; i++, irq_bit <<= 1)
+		if (events & irq_bit) {
+			via1[vIER] = irq_bit;
+			mac_do_irq_list(VIA1_SOURCE_BASE + i, regs);
+			via1[vIFR] = irq_bit;
+			via1[vIER] = irq_bit | 0x80;
+		}
+
+#if 0 /* freakin' pmu is doing weird stuff */
+	if (!oss_present) {
+		/* This (still) seems to be necessary to get IDE
+		   working.  However, if you enable VBL interrupts,
+		   you're screwed... */
+		/* FIXME: should we check the SLOTIRQ bit before
+                   pulling this stunt? */
+		/* No, it won't be set. That's why we're doing this. */
+		via_irq_disable(IRQ_MAC_NUBUS);
+		via_irq_clear(IRQ_MAC_NUBUS);
+		mac_do_irq_list(IRQ_MAC_NUBUS, regs);
+		via_irq_enable(IRQ_MAC_NUBUS);
+	}
+#endif
+	return IRQ_HANDLED;
+}
+
+irqreturn_t via2_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	int irq_bit, i;
+	unsigned char events, mask;
+
+	mask = via2[gIER] & 0x7F;
+	if (!(events = via2[gIFR] & mask))
+		return IRQ_NONE;
+
+	for (i = 0, irq_bit = 1 ; i < 7 ; i++, irq_bit <<= 1)
+		if (events & irq_bit) {
+			via2[gIER] = irq_bit;
+			mac_do_irq_list(VIA2_SOURCE_BASE + i, regs);
+			via2[gIFR] = irq_bit | rbv_clear;
+			via2[gIER] = irq_bit | 0x80;
+		}
+	return IRQ_HANDLED;
+}
+
+/*
+ * Dispatch Nubus interrupts. We are called as a secondary dispatch by the
+ * VIA2 dispatcher as a fast interrupt handler.
+ */
+
+irqreturn_t via_nubus_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	int irq_bit, i;
+	unsigned char events;
+
+	if (!(events = ~via2[gBufA] & nubus_active))
+		return IRQ_NONE;
+
+	for (i = 0, irq_bit = 1 ; i < 7 ; i++, irq_bit <<= 1) {
+		if (events & irq_bit) {
+			via_irq_disable(NUBUS_SOURCE_BASE + i);
+			mac_do_irq_list(NUBUS_SOURCE_BASE + i, regs);
+			via_irq_enable(NUBUS_SOURCE_BASE + i);
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+void via_irq_enable(int irq) {
+	int irq_src	= IRQ_SRC(irq);
+	int irq_idx	= IRQ_IDX(irq);
+	int irq_bit	= 1 << irq_idx;
+
+#ifdef DEBUG_IRQUSE
+	printk(KERN_DEBUG "via_irq_enable(%d)\n", irq);
+#endif
+
+	if (irq_src == 1) {
+		via1[vIER] = irq_bit | 0x80;
+	} else if (irq_src == 2) {
+		/*
+		 * Set vPCR for SCSI interrupts (but not on RBV)
+		 */
+		if ((irq_idx == 0) && !rbv_present) {
+			if (macintosh_config->scsi_type == MAC_SCSI_OLD) {
+				/* CB2 (IRQ) indep. input, positive edge */
+				/* CA2 (DRQ) indep. input, positive edge */
+				via2[vPCR] = 0x66;
+			} else {
+				/* CB2 (IRQ) indep. input, negative edge */
+				/* CA2 (DRQ) indep. input, negative edge */
+				via2[vPCR] = 0x22;
+			}
+		}
+		via2[gIER] = irq_bit | 0x80;
+	} else if (irq_src == 7) {
+		if (rbv_present) {
+			/* enable the slot interrupt. SIER works like IER. */
+			via2[rSIER] = IER_SET_BIT(irq_idx);
+		} else {
+			/* Make sure the bit is an input, to enable the irq */
+			/* But not on PowerBooks, that's ADB... */
+			if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
+			   (macintosh_config->adb_type != MAC_ADB_PB2)) {
+				switch(macintosh_config->ident)
+				{
+					case MAC_MODEL_II:
+					case MAC_MODEL_IIX:
+					case MAC_MODEL_IICX:
+					case MAC_MODEL_SE30:
+						via2[vDirA] &= (~irq_bit | 0xc0);
+						break;
+					default:
+						via2[vDirA] &= ~irq_bit;
+				}
+			}
+		}
+		nubus_active |= irq_bit;
+	}
+}
+
+void via_irq_disable(int irq) {
+	int irq_src	= IRQ_SRC(irq);
+	int irq_idx	= IRQ_IDX(irq);
+	int irq_bit	= 1 << irq_idx;
+
+#ifdef DEBUG_IRQUSE
+	printk(KERN_DEBUG "via_irq_disable(%d)\n", irq);
+#endif
+
+	if (irq_src == 1) {
+		via1[vIER] = irq_bit;
+	} else if (irq_src == 2) {
+		via2[gIER] = irq_bit;
+	} else if (irq_src == 7) {
+		if (rbv_present) {
+			/* disable the slot interrupt.  SIER works like IER. */
+			via2[rSIER] = IER_CLR_BIT(irq_idx);
+		} else {
+			/* disable the nubus irq by changing dir to output */
+			/* except on PMU */
+			if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
+			   (macintosh_config->adb_type != MAC_ADB_PB2)) {
+				via2[vDirA] |= irq_bit;
+			}
+		}
+		nubus_active &= ~irq_bit;
+	}
+}
+
+void via_irq_clear(int irq) {
+	int irq_src	= IRQ_SRC(irq);
+	int irq_idx	= IRQ_IDX(irq);
+	int irq_bit	= 1 << irq_idx;
+
+	if (irq_src == 1) {
+		via1[vIFR] = irq_bit;
+	} else if (irq_src == 2) {
+		via2[gIFR] = irq_bit | rbv_clear;
+	} else if (irq_src == 7) {
+		/* FIXME: hmm.. */
+	}
+}
+
+/*
+ * Returns nonzero if an interrupt is pending on the given
+ * VIA/IRQ combination.
+ */
+
+int via_irq_pending(int irq)
+{
+	int irq_src	= IRQ_SRC(irq);
+	int irq_idx	= IRQ_IDX(irq);
+	int irq_bit	= 1 << irq_idx;
+
+	if (irq_src == 1) {
+		return via1[vIFR] & irq_bit;
+	} else if (irq_src == 2) {
+		return via2[gIFR] & irq_bit;
+	} else if (irq_src == 7) {
+		return ~via2[gBufA] & irq_bit;
+	}
+	return 0;
+}
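The mac_gettimeoffset() conversion above is plain proportional scaling: with HZ = 100 the timer is reloaded with MAC_CLOCK_TICK = 783300/100 = 7833 counts per tick, and the elapsed fraction of a tick is mapped onto TICK_SIZE = 10000 microseconds. A small user-space sketch of the same arithmetic (constants taken from the defines above, not the kernel code itself):

	#include <stdio.h>

	#define HZ		100
	#define TICK_SIZE	10000			/* microseconds per tick */
	#define MAC_CLOCK_TICK	(783300 / HZ)		/* VIA1 timer counts per tick */

	/* Convert a down-counting timer value into microseconds since the last tick. */
	static unsigned long tick_offset_usec(unsigned long counter)
	{
		unsigned long elapsed = MAC_CLOCK_TICK - counter;

		return elapsed * (unsigned long)TICK_SIZE / MAC_CLOCK_TICK;
	}

	int main(void)
	{
		/* Halfway through the reload interval is about 5000 microseconds. */
		printf("%lu\n", tick_offset_usec(MAC_CLOCK_TICK / 2));
		return 0;
	}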
diff --git a/arch/m68k/math-emu/Makefile b/arch/m68k/math-emu/Makefile
new file mode 100644
index 0000000..5399404
--- /dev/null
+++ b/arch/m68k/math-emu/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the linux kernel.
+#
+
+EXTRA_AFLAGS := -traditional
+
+#EXTRA_AFLAGS += -DFPU_EMU_DEBUG
+#EXTRA_CFLAGS += -DFPU_EMU_DEBUG
+
+obj-y		:= fp_entry.o fp_scan.o fp_util.o fp_move.o fp_movem.o \
+			fp_cond.o fp_arith.o fp_log.o fp_trig.o
diff --git a/arch/m68k/math-emu/fp_arith.c b/arch/m68k/math-emu/fp_arith.c
new file mode 100644
index 0000000..08f286d
--- /dev/null
+++ b/arch/m68k/math-emu/fp_arith.c
@@ -0,0 +1,701 @@
+/*
+
+   fp_arith.c: floating-point math routines for the Linux-m68k
+   floating point emulator.
+
+   Copyright (c) 1998-1999 David Huggins-Daines.
+
+   Somewhat based on the AlphaLinux floating point emulator, by David
+   Mosberger-Tang.
+
+   You may copy, modify, and redistribute this file under the terms of
+   the GNU General Public License, version 2, or any later version, at
+   your convenience.
+ */
+
+#include "fp_emu.h"
+#include "multi_arith.h"
+#include "fp_arith.h"
+
+const struct fp_ext fp_QNaN =
+{
+	.exp = 0x7fff,
+	.mant = { .m64 = ~0 }
+};
+
+const struct fp_ext fp_Inf =
+{
+	.exp = 0x7fff,
+};
+
+/* let's start with the easy ones */
+
+struct fp_ext *
+fp_fabs(struct fp_ext *dest, struct fp_ext *src)
+{
+	dprint(PINSTR, "fabs\n");
+
+	fp_monadic_check(dest, src);
+
+	dest->sign = 0;
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fneg(struct fp_ext *dest, struct fp_ext *src)
+{
+	dprint(PINSTR, "fneg\n");
+
+	fp_monadic_check(dest, src);
+
+	dest->sign = !dest->sign;
+
+	return dest;
+}
+
+/* Now, the slightly harder ones */
+
+/* fp_fadd: Implements the kernel of the FADD, FSADD, FDADD, FSUB,
+   FDSUB, and FCMP instructions. */
+
+struct fp_ext *
+fp_fadd(struct fp_ext *dest, struct fp_ext *src)
+{
+	int diff;
+
+	dprint(PINSTR, "fadd\n");
+
+	fp_dyadic_check(dest, src);
+
+	if (IS_INF(dest)) {
+		/* infinity - infinity == NaN */
+		if (IS_INF(src) && (src->sign != dest->sign))
+			fp_set_nan(dest);
+		return dest;
+	}
+	if (IS_INF(src)) {
+		fp_copy_ext(dest, src);
+		return dest;
+	}
+
+	if (IS_ZERO(dest)) {
+		if (IS_ZERO(src)) {
+			if (src->sign != dest->sign) {
+				if (FPDATA->rnd == FPCR_ROUND_RM)
+					dest->sign = 1;
+				else
+					dest->sign = 0;
+			}
+		} else
+			fp_copy_ext(dest, src);
+		return dest;
+	}
+
+	dest->lowmant = src->lowmant = 0;
+
+	if ((diff = dest->exp - src->exp) > 0)
+		fp_denormalize(src, diff);
+	else if ((diff = -diff) > 0)
+		fp_denormalize(dest, diff);
+
+	if (dest->sign == src->sign) {
+		if (fp_addmant(dest, src))
+			if (!fp_addcarry(dest))
+				return dest;
+	} else {
+		if (dest->mant.m64 < src->mant.m64) {
+			fp_submant(dest, src, dest);
+			dest->sign = !dest->sign;
+		} else
+			fp_submant(dest, dest, src);
+	}
+
+	return dest;
+}
+
+/* fp_fsub: Implements the kernel of the FSUB, FSSUB, and FDSUB
+   instructions.
+
+   Remember that the arguments are in assembler-syntax order! */
+
+struct fp_ext *
+fp_fsub(struct fp_ext *dest, struct fp_ext *src)
+{
+	dprint(PINSTR, "fsub ");
+
+	src->sign = !src->sign;
+	return fp_fadd(dest, src);
+}
+
+
+struct fp_ext *
+fp_fcmp(struct fp_ext *dest, struct fp_ext *src)
+{
+	dprint(PINSTR, "fcmp ");
+
+	FPDATA->temp[1] = *dest;
+	src->sign = !src->sign;
+	return fp_fadd(&FPDATA->temp[1], src);
+}
+
+struct fp_ext *
+fp_ftst(struct fp_ext *dest, struct fp_ext *src)
+{
+	dprint(PINSTR, "ftst\n");
+
+	(void)dest;
+
+	return src;
+}
+
+struct fp_ext *
+fp_fmul(struct fp_ext *dest, struct fp_ext *src)
+{
+	union fp_mant128 temp;
+	int exp;
+
+	dprint(PINSTR, "fmul\n");
+
+	fp_dyadic_check(dest, src);
+
+	/* calculate the correct sign now, as it's necessary for infinities */
+	dest->sign = src->sign ^ dest->sign;
+
+	/* Handle infinities */
+	if (IS_INF(dest)) {
+		if (IS_ZERO(src))
+			fp_set_nan(dest);
+		return dest;
+	}
+	if (IS_INF(src)) {
+		if (IS_ZERO(dest))
+			fp_set_nan(dest);
+		else
+			fp_copy_ext(dest, src);
+		return dest;
+	}
+
+	/* Of course, as we all know, zero * anything = zero.  You may
+	   not have known that it might be a positive or negative
+	   zero... */
+	if (IS_ZERO(dest) || IS_ZERO(src)) {
+		dest->exp = 0;
+		dest->mant.m64 = 0;
+		dest->lowmant = 0;
+
+		return dest;
+	}
+
+	exp = dest->exp + src->exp - 0x3ffe;
+
+	/* shift up the mantissa for denormalized numbers,
+	   so that the highest bit is set, this makes the
+	   shift of the result below easier */
+	if ((long)dest->mant.m32[0] >= 0)
+		exp -= fp_overnormalize(dest);
+	if ((long)src->mant.m32[0] >= 0)
+		exp -= fp_overnormalize(src);
+
+	/* now, do a 64-bit multiply with expansion */
+	fp_multiplymant(&temp, dest, src);
+
+	/* normalize it back to 64 bits and stuff it back into the
+	   destination struct */
+	if ((long)temp.m32[0] > 0) {
+		exp--;
+		fp_putmant128(dest, &temp, 1);
+	} else
+		fp_putmant128(dest, &temp, 0);
+
+	if (exp >= 0x7fff) {
+		fp_set_ovrflw(dest);
+		return dest;
+	}
+	dest->exp = exp;
+	if (exp < 0) {
+		fp_set_sr(FPSR_EXC_UNFL);
+		fp_denormalize(dest, -exp);
+	}
+
+	return dest;
+}
+
+/* fp_fdiv: Implements the "kernel" of the FDIV, FSDIV, FDDIV and
+   FSGLDIV instructions.
+
+   Note that the order of the operands is counter-intuitive: instead
+   of src / dest, the result is actually dest / src. */
+
+struct fp_ext *
+fp_fdiv(struct fp_ext *dest, struct fp_ext *src)
+{
+	union fp_mant128 temp;
+	int exp;
+
+	dprint(PINSTR, "fdiv\n");
+
+	fp_dyadic_check(dest, src);
+
+	/* calculate the correct sign now, as it's necessary for infinities */
+	dest->sign = src->sign ^ dest->sign;
+
+	/* Handle infinities */
+	if (IS_INF(dest)) {
+		/* infinity / infinity = NaN (quiet, as always) */
+		if (IS_INF(src))
+			fp_set_nan(dest);
+		/* infinity / anything else = infinity (with appropriate sign) */
+		return dest;
+	}
+	if (IS_INF(src)) {
+		/* anything / infinity = zero (with appropriate sign) */
+		dest->exp = 0;
+		dest->mant.m64 = 0;
+		dest->lowmant = 0;
+
+		return dest;
+	}
+
+	/* zeroes */
+	if (IS_ZERO(dest)) {
+		/* zero / zero = NaN */
+		if (IS_ZERO(src))
+			fp_set_nan(dest);
+		/* zero / anything else = zero */
+		return dest;
+	}
+	if (IS_ZERO(src)) {
+		/* anything / zero = infinity (with appropriate sign) */
+		fp_set_sr(FPSR_EXC_DZ);
+		dest->exp = 0x7fff;
+		dest->mant.m64 = 0;
+
+		return dest;
+	}
+
+	exp = dest->exp - src->exp + 0x3fff;
+
+	/* shift up the mantissa for denormalized numbers,
+	   so that the highest bit is set, this makes lots
+	   of things below easier */
+	if ((long)dest->mant.m32[0] >= 0)
+		exp -= fp_overnormalize(dest);
+	if ((long)src->mant.m32[0] >= 0)
+		exp -= fp_overnormalize(src);
+
+	/* now, do the 64-bit divide */
+	fp_dividemant(&temp, dest, src);
+
+	/* normalize it back to 64 bits and stuff it back into the
+	   destination struct */
+	if (!temp.m32[0]) {
+		exp--;
+		fp_putmant128(dest, &temp, 32);
+	} else
+		fp_putmant128(dest, &temp, 31);
+
+	if (exp >= 0x7fff) {
+		fp_set_ovrflw(dest);
+		return dest;
+	}
+	dest->exp = exp;
+	if (exp < 0) {
+		fp_set_sr(FPSR_EXC_UNFL);
+		fp_denormalize(dest, -exp);
+	}
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fsglmul(struct fp_ext *dest, struct fp_ext *src)
+{
+	int exp;
+
+	dprint(PINSTR, "fsglmul\n");
+
+	fp_dyadic_check(dest, src);
+
+	/* calculate the correct sign now, as it's necessary for infinities */
+	dest->sign = src->sign ^ dest->sign;
+
+	/* Handle infinities */
+	if (IS_INF(dest)) {
+		if (IS_ZERO(src))
+			fp_set_nan(dest);
+		return dest;
+	}
+	if (IS_INF(src)) {
+		if (IS_ZERO(dest))
+			fp_set_nan(dest);
+		else
+			fp_copy_ext(dest, src);
+		return dest;
+	}
+
+	/* Of course, as we all know, zero * anything = zero.  You may
+	   not have known that it might be a positive or negative
+	   zero... */
+	if (IS_ZERO(dest) || IS_ZERO(src)) {
+		dest->exp = 0;
+		dest->mant.m64 = 0;
+		dest->lowmant = 0;
+
+		return dest;
+	}
+
+	exp = dest->exp + src->exp - 0x3ffe;
+
+	/* do a 32-bit multiply */
+	fp_mul64(dest->mant.m32[0], dest->mant.m32[1],
+		 dest->mant.m32[0] & 0xffffff00,
+		 src->mant.m32[0] & 0xffffff00);
+
+	if (exp >= 0x7fff) {
+		fp_set_ovrflw(dest);
+		return dest;
+	}
+	dest->exp = exp;
+	if (exp < 0) {
+		fp_set_sr(FPSR_EXC_UNFL);
+		fp_denormalize(dest, -exp);
+	}
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fsgldiv(struct fp_ext *dest, struct fp_ext *src)
+{
+	int exp;
+	unsigned long quot, rem;
+
+	dprint(PINSTR, "fsgldiv\n");
+
+	fp_dyadic_check(dest, src);
+
+	/* calculate the correct sign now, as it's necessary for infinities */
+	dest->sign = src->sign ^ dest->sign;
+
+	/* Handle infinities */
+	if (IS_INF(dest)) {
+		/* infinity / infinity = NaN (quiet, as always) */
+		if (IS_INF(src))
+			fp_set_nan(dest);
+		/* infinity / anything else = infinity (with appropriate sign) */
+		return dest;
+	}
+	if (IS_INF(src)) {
+		/* anything / infinity = zero (with appropriate sign) */
+		dest->exp = 0;
+		dest->mant.m64 = 0;
+		dest->lowmant = 0;
+
+		return dest;
+	}
+
+	/* zeroes */
+	if (IS_ZERO(dest)) {
+		/* zero / zero = NaN */
+		if (IS_ZERO(src))
+			fp_set_nan(dest);
+		/* zero / anything else = zero */
+		return dest;
+	}
+	if (IS_ZERO(src)) {
+		/* anything / zero = infinity (with appropriate sign) */
+		fp_set_sr(FPSR_EXC_DZ);
+		dest->exp = 0x7fff;
+		dest->mant.m64 = 0;
+
+		return dest;
+	}
+
+	exp = dest->exp - src->exp + 0x3fff;
+
+	dest->mant.m32[0] &= 0xffffff00;
+	src->mant.m32[0] &= 0xffffff00;
+
+	/* do the 32-bit divide */
+	if (dest->mant.m32[0] >= src->mant.m32[0]) {
+		fp_sub64(dest->mant, src->mant);
+		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
+		dest->mant.m32[0] = 0x80000000 | (quot >> 1);
+		dest->mant.m32[1] = (quot & 1) | rem;	/* only for rounding */
+	} else {
+		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
+		dest->mant.m32[0] = quot;
+		dest->mant.m32[1] = rem;		/* only for rounding */
+		exp--;
+	}
+
+	if (exp >= 0x7fff) {
+		fp_set_ovrflw(dest);
+		return dest;
+	}
+	dest->exp = exp;
+	if (exp < 0) {
+		fp_set_sr(FPSR_EXC_UNFL);
+		fp_denormalize(dest, -exp);
+	}
+
+	return dest;
+}
+
+/* fp_roundint: Internal rounding function for use by several of these
+   emulated instructions.
+
+   This one rounds off the fractional part using the rounding mode
+   specified. */
+
+static void fp_roundint(struct fp_ext *dest, int mode)
+{
+	union fp_mant64 oldmant;
+	unsigned long mask;
+
+	if (!fp_normalize_ext(dest))
+		return;
+
+	/* infinities and zeroes */
+	if (IS_INF(dest) || IS_ZERO(dest))
+		return;
+
+	/* first truncate the lower bits */
+	oldmant = dest->mant;
+	switch (dest->exp) {
+	case 0 ... 0x3ffe:
+		dest->mant.m64 = 0;
+		break;
+	case 0x3fff ... 0x401e:
+		dest->mant.m32[0] &= 0xffffffffU << (0x401e - dest->exp);
+		dest->mant.m32[1] = 0;
+		if (oldmant.m64 == dest->mant.m64)
+			return;
+		break;
+	case 0x401f ... 0x403e:
+		dest->mant.m32[1] &= 0xffffffffU << (0x403e - dest->exp);
+		if (oldmant.m32[1] == dest->mant.m32[1])
+			return;
+		break;
+	default:
+		return;
+	}
+	fp_set_sr(FPSR_EXC_INEX2);
+
+	/* We might want to normalize upwards here... however, since
+	   we know that this is only called on the output of fp_fdiv,
+	   or with the input to fp_fint or fp_fintrz, and the inputs
+	   to all these functions are either normal or denormalized
+	   (no subnormals allowed!), there's really no need.
+
+	   In the case of fp_fdiv, observe that 0x80000000 / 0xffff =
+	   0xffff8000, and the same holds for 128-bit / 64-bit. (i.e. the
+	   smallest possible normal dividend and the largest possible normal
+	   divisor will still produce a normal quotient, therefore, (normal
+	   << 64) / normal is normal in all cases) */
+
+	switch (mode) {
+	case FPCR_ROUND_RN:
+		switch (dest->exp) {
+		case 0 ... 0x3ffd:
+			return;
+		case 0x3ffe:
+			/* As noted above, the input is always normal, so the
+			   guard bit (bit 63) is always set.  therefore, the
+			   only case in which we will NOT round to 1.0 is when
+			   the input is exactly 0.5. */
+			if (oldmant.m64 == (1ULL << 63))
+				return;
+			break;
+		case 0x3fff ... 0x401d:
+			mask = 1 << (0x401d - dest->exp);
+			if (!(oldmant.m32[0] & mask))
+				return;
+			if (oldmant.m32[0] & (mask << 1))
+				break;
+			if (!(oldmant.m32[0] << (dest->exp - 0x3ffd)) &&
+					!oldmant.m32[1])
+				return;
+			break;
+		case 0x401e:
+			if (!(oldmant.m32[1] & 0x80000000))
+				return;
+			if (oldmant.m32[0] & 1)
+				break;
+			if (!(oldmant.m32[1] << 1))
+				return;
+			break;
+		case 0x401f ... 0x403d:
+			mask = 1 << (0x403d - dest->exp);
+			if (!(oldmant.m32[1] & mask))
+				return;
+			if (oldmant.m32[1] & (mask << 1))
+				break;
+			if (!(oldmant.m32[1] << (dest->exp - 0x401d)))
+				return;
+			break;
+		default:
+			return;
+		}
+		break;
+	case FPCR_ROUND_RZ:
+		return;
+	default:
+		if (dest->sign ^ (mode - FPCR_ROUND_RM))
+			break;
+		return;
+	}
+
+	switch (dest->exp) {
+	case 0 ... 0x3ffe:
+		dest->exp = 0x3fff;
+		dest->mant.m64 = 1ULL << 63;
+		break;
+	case 0x3fff ... 0x401e:
+		mask = 1 << (0x401e - dest->exp);
+		if (dest->mant.m32[0] += mask)
+			break;
+		dest->mant.m32[0] = 0x80000000;
+		dest->exp++;
+		break;
+	case 0x401f ... 0x403e:
+		mask = 1 << (0x403e - dest->exp);
+		if (dest->mant.m32[1] += mask)
+			break;
+		if (dest->mant.m32[0] += 1)
+			break;
+		dest->mant.m32[0] = 0x80000000;
+		dest->exp++;
+		break;
+	}
+}
+
+/* modrem_kernel: Implementation of the FREM and FMOD instructions
+   (which are exactly the same, except for the rounding used on the
+   intermediate value) */
+
+static struct fp_ext *
+modrem_kernel(struct fp_ext *dest, struct fp_ext *src, int mode)
+{
+	struct fp_ext tmp;
+
+	fp_dyadic_check(dest, src);
+
+	/* Infinities and zeros */
+	if (IS_INF(dest) || IS_ZERO(src)) {
+		fp_set_nan(dest);
+		return dest;
+	}
+	if (IS_ZERO(dest) || IS_INF(src))
+		return dest;
+
+	/* FIXME: there is almost certainly a smarter way to do this */
+	fp_copy_ext(&tmp, dest);
+	fp_fdiv(&tmp, src);		/* NOTE: src might be modified */
+	fp_roundint(&tmp, mode);
+	fp_fmul(&tmp, src);
+	fp_fsub(dest, &tmp);
+
+	/* set the quotient byte */
+	fp_set_quotient((dest->mant.m64 & 0x7f) | (dest->sign << 7));
+	return dest;
+}
+
+/* fp_fmod: Implements the kernel of the FMOD instruction.
+
+   Again, the argument order is backwards.  The result, as defined in
+   the Motorola manuals, is:
+
+   fmod(src,dest) = (dest - (src * floor(dest / src))) */
+
+struct fp_ext *
+fp_fmod(struct fp_ext *dest, struct fp_ext *src)
+{
+	dprint(PINSTR, "fmod\n");
+	return modrem_kernel(dest, src, FPCR_ROUND_RZ);
+}
+
+/* fp_frem: Implements the kernel of the FREM instruction.
+
+   frem(src,dest) = (dest - (src * round(dest / src)))
+ */
+
+struct fp_ext *
+fp_frem(struct fp_ext *dest, struct fp_ext *src)
+{
+	dprint(PINSTR, "frem\n");
+	return modrem_kernel(dest, src, FPCR_ROUND_RN);
+}
+
+struct fp_ext *
+fp_fint(struct fp_ext *dest, struct fp_ext *src)
+{
+	dprint(PINSTR, "fint\n");
+
+	fp_copy_ext(dest, src);
+
+	fp_roundint(dest, FPDATA->rnd);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fintrz(struct fp_ext *dest, struct fp_ext *src)
+{
+	dprint(PINSTR, "fintrz\n");
+
+	fp_copy_ext(dest, src);
+
+	fp_roundint(dest, FPCR_ROUND_RZ);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fscale(struct fp_ext *dest, struct fp_ext *src)
+{
+	int scale, oldround;
+
+	dprint(PINSTR, "fscale\n");
+
+	fp_dyadic_check(dest, src);
+
+	/* Infinities */
+	if (IS_INF(src)) {
+		fp_set_nan(dest);
+		return dest;
+	}
+	if (IS_INF(dest))
+		return dest;
+
+	/* zeroes */
+	if (IS_ZERO(src) || IS_ZERO(dest))
+		return dest;
+
+	/* Source exponent out of range */
+	if (src->exp >= 0x400c) {
+		fp_set_ovrflw(dest);
+		return dest;
+	}
+
+	/* src must be rounded with round to zero. */
+	oldround = FPDATA->rnd;
+	FPDATA->rnd = FPCR_ROUND_RZ;
+	scale = fp_conv_ext2long(src);
+	FPDATA->rnd = oldround;
+
+	/* new exponent */
+	scale += dest->exp;
+
+	if (scale >= 0x7fff) {
+		fp_set_ovrflw(dest);
+	} else if (scale <= 0) {
+		fp_set_sr(FPSR_EXC_UNFL);
+		fp_denormalize(dest, -scale);
+	} else
+		dest->exp = scale;
+
+	return dest;
+}
+
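The only difference between fp_fmod() and fp_frem() above is the rounding mode handed to modrem_kernel(): round-to-zero yields the FMOD result, round-to-nearest the IEEE-style FREM result. A double-precision sketch of the same relationship (illustration only, not the extended-precision emulation itself):

	#include <math.h>
	#include <stdio.h>

	/* dest - src * round(dest / src), where "round" is the chosen rounding mode. */
	static double modrem(double dest, double src, int to_nearest)
	{
		double q = dest / src;

		q = to_nearest ? nearbyint(q) : trunc(q);
		return dest - src * q;
	}

	int main(void)
	{
		/* 7.5 / 2 = 3.75: truncation gives 3, round-to-nearest gives 4. */
		printf("fmod-style: %g\n", modrem(7.5, 2.0, 0));	/* 1.5 */
		printf("frem-style: %g\n", modrem(7.5, 2.0, 1));	/* -0.5 */
		return 0;
	}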
diff --git a/arch/m68k/math-emu/fp_arith.h b/arch/m68k/math-emu/fp_arith.h
new file mode 100644
index 0000000..2cc3f84
--- /dev/null
+++ b/arch/m68k/math-emu/fp_arith.h
@@ -0,0 +1,52 @@
+/*
+
+   fp_arith.h: floating-point math routines for the Linux-m68k
+   floating point emulator.
+
+   Copyright (c) 1998 David Huggins-Daines.
+
+   Somewhat based on the AlphaLinux floating point emulator, by David
+   Mosberger-Tang.
+
+   You may copy, modify, and redistribute this file under the terms of
+   the GNU General Public License, version 2, or any later version, at
+   your convenience.
+
+ */
+
+#ifndef FP_ARITH_H
+#define FP_ARITH_H
+
+/* easy ones */
+struct fp_ext *
+fp_fabs(struct fp_ext *dest, struct fp_ext *src);
+struct fp_ext *
+fp_fneg(struct fp_ext *dest, struct fp_ext *src);
+
+/* straightforward arithmetic */
+struct fp_ext *
+fp_fadd(struct fp_ext *dest, struct fp_ext *src);
+struct fp_ext *
+fp_fsub(struct fp_ext *dest, struct fp_ext *src);
+struct fp_ext *
+fp_fcmp(struct fp_ext *dest, struct fp_ext *src);
+struct fp_ext *
+fp_ftst(struct fp_ext *dest, struct fp_ext *src);
+struct fp_ext *
+fp_fmul(struct fp_ext *dest, struct fp_ext *src);
+struct fp_ext *
+fp_fdiv(struct fp_ext *dest, struct fp_ext *src);
+
+/* ones that do rounding and integer conversions */
+struct fp_ext *
+fp_fmod(struct fp_ext *dest, struct fp_ext *src);
+struct fp_ext *
+fp_frem(struct fp_ext *dest, struct fp_ext *src);
+struct fp_ext *
+fp_fint(struct fp_ext *dest, struct fp_ext *src);
+struct fp_ext *
+fp_fintrz(struct fp_ext *dest, struct fp_ext *src);
+struct fp_ext *
+fp_fscale(struct fp_ext *dest, struct fp_ext *src);
+
+#endif	/* FP_ARITH_H */
diff --git a/arch/m68k/math-emu/fp_cond.S b/arch/m68k/math-emu/fp_cond.S
new file mode 100644
index 0000000..ddae8b1
--- /dev/null
+++ b/arch/m68k/math-emu/fp_cond.S
@@ -0,0 +1,334 @@
+/*
+ * fp_cond.S
+ *
+ * Copyright Roman Zippel, 1997.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "fp_emu.h"
+#include "fp_decode.h"
+
+	.globl	fp_fscc, fp_fbccw, fp_fbccl
+
+#ifdef FPU_EMU_DEBUG
+fp_fnop:
+	printf	PDECODE,"fnop\n"
+	jra	fp_end
+#else
+#define fp_fnop fp_end
+#endif
+
+fp_fbccw:
+	tst.w	%d2
+	jeq	fp_fnop
+	printf	PDECODE,"fbccw "
+	fp_get_pc %a0
+	lea	(-2,%a0,%d2.w),%a0
+	jra	1f
+
+fp_fbccl:
+	printf	PDECODE,"fbccl "
+	fp_get_pc %a0
+	move.l	%d2,%d0
+	swap	%d0
+	fp_get_instr_word %d0,fp_err_ua1
+	lea	(-2,%a0,%d0.l),%a0
+1:	printf	PDECODE,"%x",1,%a0
+	move.l	%d2,%d0
+	swap	%d0
+	jsr	fp_compute_cond
+	tst.l	%d0
+	jeq	1f
+	fp_put_pc %a0,1
+1:	printf	PDECODE,"\n"
+	jra	fp_end
+
+fp_fdbcc:
+	printf	PDECODE,"fdbcc "
+	fp_get_pc %a1				| calculate new pc
+	fp_get_instr_word %d0,fp_err_ua1
+	add.w	%d0,%a1
+	fp_decode_addr_reg
+	printf	PDECODE,"d%d,%x\n",2,%d0,%a1
+	swap	%d1				| test condition in %d1
+	tst.w	%d1
+	jne	2f
+	move.l	%d0,%d1
+	jsr	fp_get_data_reg
+	subq.w	#1,%d0
+	jcs	1f
+	fp_put_pc %a1,1
+1:	jsr	fp_put_data_reg
+2:	jra	fp_end
+
+| set flags for decode macros for fs<cc>
+do_fscc=1
+do_no_pc_mode=1
+
+fp_fscc:
+	printf	PDECODE,"fscc "
+	move.l	%d2,%d0
+	jsr	fp_compute_cond
+	move.w	%d0,%d1
+	swap	%d1
+
+	| decode addressing mode
+	fp_decode_addr_mode
+
+	.long	fp_data, fp_fdbcc
+	.long	fp_indirect, fp_postinc
+	.long	fp_predecr, fp_disp16
+	.long	fp_extmode0, fp_extmode1
+
+	| addressing mode: data register direct
+fp_data:
+	fp_mode_data_direct
+	move.w	%d0,%d1			| save register nr
+	jsr	fp_get_data_reg
+	swap	%d1
+	move.b	%d1,%d0
+	swap	%d1
+	jsr	fp_put_data_reg
+	printf	PDECODE,"\n"
+	jra	fp_end
+
+fp_indirect:
+	fp_mode_addr_indirect
+	jra	fp_do_scc
+
+fp_postinc:
+	fp_mode_addr_indirect_postinc
+	jra	fp_do_scc
+
+fp_predecr:
+	fp_mode_addr_indirect_predec
+	jra	fp_do_scc
+
+fp_disp16:
+	fp_mode_addr_indirect_disp16
+	jra	fp_do_scc
+
+fp_extmode0:
+	fp_mode_addr_indirect_extmode0
+	jra	fp_do_scc
+
+fp_extmode1:
+	bfextu	%d2{#13,#3},%d0
+	jmp	([0f:w,%pc,%d0*4])
+
+	.align	4
+0:
+	.long	fp_absolute_short, fp_absolute_long
+	.long	fp_ill, fp_ill		| NOTE: jump here to ftrap.x
+	.long	fp_ill, fp_ill
+	.long	fp_ill, fp_ill
+
+fp_absolute_short:
+	fp_mode_abs_short
+	jra	fp_do_scc
+
+fp_absolute_long:
+	fp_mode_abs_long
+|	jra	fp_do_scc
+
+fp_do_scc:
+	swap	%d1
+	putuser.b %d1,(%a0),fp_err_ua1,%a0
+	printf	PDECODE,"\n"
+	jra	fp_end
+
+
+#define tst_NAN	btst #24,%d1
+#define tst_Z	btst #26,%d1
+#define tst_N	btst #27,%d1
+
+fp_compute_cond:
+	move.l	(FPD_FPSR,FPDATA),%d1
+	btst	#4,%d0
+	jeq	1f
+	tst_NAN
+	jeq	1f
+	bset	#15,%d1
+	bset	#7,%d1
+	move.l	%d1,(FPD_FPSR,FPDATA)
+1:	and.w	#0xf,%d0
+	jmp	([0f:w,%pc,%d0.w*4])
+
+	.align	4
+0:
+	.long	fp_f  , fp_eq , fp_ogt, fp_oge
+	.long	fp_olt, fp_ole, fp_ogl, fp_or
+	.long	fp_un , fp_ueq, fp_ugt, fp_uge
+	.long	fp_ult, fp_ule, fp_ne , fp_t
+
+fp_f:
+	moveq	#0,%d0
+	rts
+
+fp_eq:
+	moveq	#0,%d0
+	tst_Z
+	jeq	1f
+	moveq	#-1,%d0
+1:	rts
+
+fp_ogt:
+	moveq	#0,%d0
+	tst_NAN
+	jne	1f
+	tst_Z
+	jne	1f
+	tst_N
+	jne	1f
+	moveq	#-1,%d0
+1:	rts
+
+fp_oge:
+	moveq	#-1,%d0
+	tst_Z
+	jne	2f
+	tst_NAN
+	jne	1f
+	tst_N
+	jeq	2f
+1:	moveq	#0,%d0
+2:	rts
+
+fp_olt:
+	moveq	#0,%d0
+	tst_NAN
+	jne	1f
+	tst_Z
+	jne	1f
+	tst_N
+	jeq	1f
+	moveq	#-1,%d0
+1:	rts
+
+fp_ole:
+	moveq	#-1,%d0
+	tst_Z
+	jne	2f
+	tst_NAN
+	jne	1f
+	tst_N
+	jne	2f
+1:	moveq	#0,%d0
+2:	rts
+
+fp_ogl:
+	moveq	#0,%d0
+	tst_NAN
+	jne	1f
+	tst_Z
+	jne	1f
+	moveq	#-1,%d0
+1:	rts
+
+fp_or:
+	moveq	#0,%d0
+	tst_NAN
+	jne	1f
+	moveq	#-1,%d0
+1:	rts
+
+fp_un:
+	moveq	#0,%d0
+	tst_NAN
+	jeq	1f
+	moveq	#-1,%d0
+1:	rts
+
+fp_ueq:
+	moveq	#-1,%d0
+	tst_NAN
+	jne	1f
+	tst_Z
+	jne	1f
+	moveq	#0,%d0
+1:	rts
+
+fp_ugt:
+	moveq	#-1,%d0
+	tst_NAN
+	jne	2f
+	tst_N
+	jne	1f
+	tst_Z
+	jeq	2f
+1:	moveq	#0,%d0
+2:	rts
+
+fp_uge:
+	moveq	#-1,%d0
+	tst_NAN
+	jne	1f
+	tst_Z
+	jne	1f
+	tst_N
+	jeq	1f
+	moveq	#0,%d0
+1:	rts
+
+fp_ult:
+	moveq	#-1,%d0
+	tst_NAN
+	jne	2f
+	tst_Z
+	jne	1f
+	tst_N
+	jne	2f
+1:	moveq	#0,%d0
+2:	rts
+
+fp_ule:
+	moveq	#-1,%d0
+	tst_NAN
+	jne	1f
+	tst_Z
+	jne	1f
+	tst_N
+	jne	1f
+	moveq	#0,%d0
+1:	rts
+
+fp_ne:
+	moveq	#0,%d0
+	tst_Z
+	jne	1f
+	moveq	#-1,%d0
+1:	rts
+
+fp_t:
+	moveq	#-1,%d0
+	rts
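All of the predicates above test the same three FPSR condition bits - NAN (bit 24), Z (bit 26) and N (bit 27), per the tst_* defines - and combine them according to the 68881 condition encoding. For readability, here is an equivalent C sketch of two of them (bit positions taken from the defines above, illustration only):

	#include <stdio.h>

	#define FPSR_COND_NAN	(1UL << 24)
	#define FPSR_COND_Z	(1UL << 26)
	#define FPSR_COND_N	(1UL << 27)

	/* "ordered greater than": neither NaN, zero nor negative (cf. fp_ogt). */
	static int cond_ogt(unsigned long fpsr)
	{
		return !(fpsr & (FPSR_COND_NAN | FPSR_COND_Z | FPSR_COND_N));
	}

	/* "unordered or equal": NaN or zero (cf. fp_ueq). */
	static int cond_ueq(unsigned long fpsr)
	{
		return (fpsr & (FPSR_COND_NAN | FPSR_COND_Z)) != 0;
	}

	int main(void)
	{
		printf("%d %d\n", cond_ogt(0), cond_ueq(FPSR_COND_Z));	/* 1 1 */
		return 0;
	}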
diff --git a/arch/m68k/math-emu/fp_decode.h b/arch/m68k/math-emu/fp_decode.h
new file mode 100644
index 0000000..759679d
--- /dev/null
+++ b/arch/m68k/math-emu/fp_decode.h
@@ -0,0 +1,417 @@
+/*
+ * fp_decode.h
+ *
+ * Copyright Roman Zippel, 1997.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FP_DECODE_H
+#define _FP_DECODE_H
+
+/* These macros do the dirty work of the instr decoding, several variables
+ * can be defined in the source file to modify the work of these macros,
+ * currently the following variables are used:
+ * ...
+ * The register usage:
+ * d0 - will contain source operand for data direct mode,
+ *	otherwise scratch register
+ * d1 - upper 16bit are reserved for caller
+ *	lower 16bit may contain further arguments,
+ *	is destroyed during decoding
+ * d2 - contains first two instruction words,
+ *	first word will be used for extension word
+ * a0 - will point to source/dest operand for any indirect mode
+ *	otherwise scratch register
+ * a1 - scratch register
+ * a2 - base addr to the task structure
+ *
+ * The current implementation doesn't check for every disallowed
+ * addressing mode (e.g. pc relative modes as destination); as long
+ * as this only means accepting an addressing mode which should not
+ * appear in a program, and it doesn't crash the emulation, I think
+ * it's not a problem to allow these modes.
+ */
+
+do_fmovem=0
+do_fmovem_cr=0
+do_no_pc_mode=0
+do_fscc=0
+
+| first decoding of the instr type
+| this separates the conditional instr
+.macro	fp_decode_cond_instr_type
+	bfextu	%d2{#8,#2},%d0
+	jmp	([0f:w,%pc,%d0*4])
+
+	.align	4
+0:
+|	.long	"f<op>","fscc/fdbcc"
+|	.long	"fbccw","fbccl"
+.endm
+
+| second decoding of the instr type
+| this separates most move instr
+.macro	fp_decode_move_instr_type
+	bfextu	%d2{#16,#3},%d0
+	jmp	([0f:w,%pc,%d0*4])
+
+	.align	4
+0:
+|	.long	"f<op> fpx,fpx","invalid instr"
+|	.long	"f<op> <ea>,fpx","fmove fpx,<ea>"
+|	.long	"fmovem <ea>,fpcr","fmovem <ea>,fpx"
+|	.long	"fmovem fpcr,<ea>","fmovem fpx,<ea>"
+.endm
+
+| extract the source specifier, specifies
+| either source fp register or data format
+.macro	fp_decode_sourcespec
+	bfextu	%d2{#19,#3},%d0
+.endm
+
+| decode destination format for fmove reg,ea
+.macro	fp_decode_dest_format
+	bfextu	%d2{#19,#3},%d0
+.endm
+
+| decode source register for fmove reg,ea
+.macro	fp_decode_src_reg
+	bfextu	%d2{#22,#3},%d0
+.endm
+
+| extract the addressing mode
+| it depends on the instr which of the modes is valid
+.macro	fp_decode_addr_mode
+	bfextu	%d2{#10,#3},%d0
+	jmp	([0f:w,%pc,%d0*4])
+
+	.align	4
+0:
+|	.long	"data register direct","addr register direct"
+|	.long	"addr register indirect"
+|	.long	"addr register indirect postincrement"
+|	.long	"addr register indirect predecrement"
+|	.long	"addr register + index16"
+|	.long	"extension mode1","extension mode2"
+.endm
+
+| extract the register for the addressing mode
+.macro	fp_decode_addr_reg
+	bfextu	%d2{#13,#3},%d0
+.endm
+
+| decode the 8bit displacement from the brief extension word
+.macro	fp_decode_disp8
+	move.b	%d2,%d0
+	ext.w	%d0
+.endm
+
+| decode the index of the brief/full extension word
+.macro	fp_decode_index
+	bfextu	%d2{#17,#3},%d0		| get the register nr
+	btst	#15,%d2			| test for data/addr register
+	jne	1\@f
+	printf	PDECODE,"d%d",1,%d0
+	jsr	fp_get_data_reg
+	jra	2\@f
+1\@:	printf	PDECODE,"a%d",1,%d0
+	jsr	fp_get_addr_reg
+	move.l	%a0,%d0
+2\@:
+debug	lea	"'l'.w,%a0"
+	btst	#11,%d2			| 16/32 bit size?
+	jne	3\@f
+debug	lea	"'w'.w,%a0"
+	ext.l	%d0
+3\@:	printf	PDECODE,":%c",1,%a0
+	move.w	%d2,%d1			| scale factor
+	rol.w	#7,%d1
+	and.w	#3,%d1
+debug	move.l	"%d1,-(%sp)"
+debug	ext.l	"%d1"
+	printf	PDECODE,":%d",1,%d1
+debug	move.l	"(%sp)+,%d1"
+	lsl.l	%d1,%d0
+.endm
+
+| decode the base displacement size
+.macro	fp_decode_basedisp
+	bfextu	%d2{#26,#2},%d0
+	jmp	([0f:w,%pc,%d0*4])
+
+	.align	4
+0:
+|	.long	"reserved","null displacement"
+|	.long	"word displacement","long displacement"
+.endm
+
+.macro	fp_decode_outerdisp
+	bfextu	%d2{#30,#2},%d0
+	jmp	([0f:w,%pc,%d0*4])
+
+	.align	4
+0:
+|	.long	"no memory indirect action/reserved","null outer displacement"
+|	.long	"word outer displacement","long outer displacement"
+.endm
+
+| get the extension word and test for brief or full extension type
+.macro	fp_get_test_extword label
+	fp_get_instr_word %d2,fp_err_ua1
+	btst	#8,%d2
+	jne	\label
+.endm
+
+
+| test if %pc is the base register for the indirect addr mode
+.macro	fp_test_basereg_d16	label
+	btst	#20,%d2
+	jeq	\label
+.endm
+
+| test if %pc is the base register for one of the extended modes
+.macro	fp_test_basereg_ext	label
+	btst	#19,%d2
+	jeq	\label
+.endm
+
+.macro	fp_test_suppr_index label
+	btst	#6,%d2
+	jne	\label
+.endm
+
+
+| addressing mode: data register direct
+.macro	fp_mode_data_direct
+	fp_decode_addr_reg
+	printf	PDECODE,"d%d",1,%d0
+.endm
+
+| addressing mode: address register indirect
+.macro	fp_mode_addr_indirect
+	fp_decode_addr_reg
+	printf	PDECODE,"(a%d)",1,%d0
+	jsr	fp_get_addr_reg
+.endm
+
+| adjust stack for byte moves from/to stack
+.macro	fp_test_sp_byte_move
+	.if	!do_fmovem
+	.if	do_fscc
+	move.w	#6,%d1
+	.endif
+	cmp.w	#7,%d0
+	jne	1\@f
+	.if	!do_fscc
+	cmp.w	#6,%d1
+	jne	1\@f
+	.endif
+	move.w	#4,%d1
+1\@:
+	.endif
+.endm
+
+| addressing mode: address register indirect with postincrement
+.macro	fp_mode_addr_indirect_postinc
+	fp_decode_addr_reg
+	printf	PDECODE,"(a%d)+",1,%d0
+	fp_test_sp_byte_move
+	jsr	fp_get_addr_reg
+	move.l	%a0,%a1			| save addr
+	.if	do_fmovem
+	lea	(%a0,%d1.w*4),%a0
+	.if	!do_fmovem_cr
+	lea	(%a0,%d1.w*8),%a0
+	.endif
+	.else
+	add.w	(fp_datasize,%d1.w*2),%a0
+	.endif
+	jsr	fp_put_addr_reg
+	move.l	%a1,%a0
+.endm
+
+| addressing mode: address register indirect with predecrement
+.macro	fp_mode_addr_indirect_predec
+	fp_decode_addr_reg
+	printf	PDECODE,"-(a%d)",1,%d0
+	fp_test_sp_byte_move
+	jsr	fp_get_addr_reg
+	.if	do_fmovem
+	.if	!do_fmovem_cr
+	lea	(-12,%a0),%a1		| setup to addr of 1st reg to move
+	neg.w	%d1
+	lea	(%a0,%d1.w*4),%a0
+	add.w	%d1,%d1
+	lea	(%a0,%d1.w*4),%a0
+	jsr	fp_put_addr_reg
+	move.l	%a1,%a0
+	.else
+	neg.w	%d1
+	lea	(%a0,%d1.w*4),%a0
+	jsr	fp_put_addr_reg
+	.endif
+	.else
+	sub.w	(fp_datasize,%d1.w*2),%a0
+	jsr	fp_put_addr_reg
+	.endif
+.endm
+
+| addressing mode: address register/program counter indirect
+|		   with 16bit displacement
+.macro	fp_mode_addr_indirect_disp16
+	.if	!do_no_pc_mode
+	fp_test_basereg_d16 1f
+	printf	PDECODE,"pc"
+	fp_get_pc %a0
+	jra	2f
+	.endif
+1:	fp_decode_addr_reg
+	printf	PDECODE,"a%d",1,%d0
+	jsr	fp_get_addr_reg
+2:	fp_get_instr_word %a1,fp_err_ua1
+	printf	PDECODE,"@(%x)",1,%a1
+	add.l	%a1,%a0
+.endm
+
+| perform preindex (if I/IS == 0xx and xx != 00)
+.macro	fp_do_preindex
+	moveq	#3,%d0
+	and.w	%d2,%d0
+	jeq	1f
+	btst	#2,%d2
+	jne	1f
+	printf	PDECODE,")@("
+	getuser.l (%a1),%a1,fp_err_ua1,%a1
+debug	jra	"2f"
+1:	printf	PDECODE,","
+2:
+.endm
+
+| perform postindex (if I/IS == 1xx)
+.macro	fp_do_postindex
+	btst	#2,%d2
+	jeq	1f
+	printf	PDECODE,")@("
+	getuser.l (%a1),%a1,fp_err_ua1,%a1
+debug	jra	"2f"
+1:	printf	PDECODE,","
+2:
+.endm
+
+| all other indirect addressing modes will finally end up here
+.macro	fp_mode_addr_indirect_extmode0
+	.if	!do_no_pc_mode
+	fp_test_basereg_ext 1f
+	printf	PDECODE,"pc"
+	fp_get_pc %a0
+	jra	2f
+	.endif
+1:	fp_decode_addr_reg
+	printf	PDECODE,"a%d",1,%d0
+	jsr	fp_get_addr_reg
+2:	move.l	%a0,%a1
+	swap	%d2
+	fp_get_test_extword 3f
+	| addressing mode: address register/program counter indirect
+	|		   with index and 8bit displacement
+	fp_decode_disp8
+debug	ext.l	"%d0"
+	printf	PDECODE,"@(%x,",1,%d0
+	add.w	%d0,%a1
+	fp_decode_index
+	add.l	%d0,%a1
+	printf	PDECODE,")"
+	jra	9f
+3:	| addressing mode: address register/program counter memory indirect
+	|		   with base and/or outer displacement
+	btst	#7,%d2			| base register suppressed?
+	jeq	1f
+	printf	PDECODE,"!"
+	sub.l	%a1,%a1
+1:	printf	PDECODE,"@("
+	fp_decode_basedisp
+
+	.long	fp_ill,1f
+	.long	2f,3f
+
+#ifdef FPU_EMU_DEBUG
+1:	printf	PDECODE,"0"		| null base displacement
+	jra	1f
+#endif
+2:	fp_get_instr_word %a0,fp_err_ua1 | 16bit base displacement
+	printf	PDECODE,"%x:w",1,%a0
+	jra	4f
+3:	fp_get_instr_long %a0,fp_err_ua1 | 32bit base displacement
+	printf	PDECODE,"%x:l",1,%a0
+4:	add.l	%a0,%a1
+1:
+	fp_do_postindex
+	fp_test_suppr_index 1f
+	fp_decode_index
+	add.l	%d0,%a1
+1:	fp_do_preindex
+
+	fp_decode_outerdisp
+
+	.long	5f,1f
+	.long	2f,3f
+
+#ifdef FPU_EMU_DEBUG
+1:	printf	PDECODE,"0"		| null outer displacement
+	jra	1f
+#endif
+2:	fp_get_instr_word %a0,fp_err_ua1 | 16bit outer displacement
+	printf	PDECODE,"%x:w",1,%a0
+	jra	4f
+3:	fp_get_instr_long %a0,fp_err_ua1 | 32bit outer displacement
+	printf	PDECODE,"%x:l",1,%a0
+4:	add.l	%a0,%a1
+1:
+5:	printf	PDECODE,")"
+9:	move.l	%a1,%a0
+	swap	%d2
+.endm
+
+| get the absolute short address from user space
+.macro	fp_mode_abs_short
+	fp_get_instr_word %a0,fp_err_ua1
+	printf	PDECODE,"%x.w",1,%a0
+.endm
+
+| get the absolute long address from user space
+.macro	fp_mode_abs_long
+	fp_get_instr_long %a0,fp_err_ua1
+	printf	PDECODE,"%x.l",1,%a0
+.endm
+
+#endif /* _FP_DECODE_H */
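The decode macros above lean entirely on bfextu bitfield extracts from the two instruction words held in %d2, first word in the upper half. As orientation, a C sketch of the same extraction for the fields used by fp_decode_addr_mode (offset 10, width 3) and fp_decode_addr_reg (offset 13, width 3); the sample encoding is hypothetical:

	#include <stdio.h>

	/* Extract 'width' bits starting 'offset' bits down from the MSB of a
	 * 32-bit value, mirroring what bfextu does on %d2. */
	static unsigned int bfextu32(unsigned long val, int offset, int width)
	{
		return (val >> (32 - offset - width)) & ((1u << width) - 1);
	}

	int main(void)
	{
		/* Two instruction words packed like %d2: first word in the upper half. */
		unsigned long d2 = 0xf22ed0e1;	/* hypothetical encoding */

		printf("addr mode = %u\n", bfextu32(d2, 10, 3));	/* EA mode field */
		printf("addr reg  = %u\n", bfextu32(d2, 13, 3));	/* EA register field */
		return 0;
	}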
diff --git a/arch/m68k/math-emu/fp_emu.h b/arch/m68k/math-emu/fp_emu.h
new file mode 100644
index 0000000..1d6edc9
--- /dev/null
+++ b/arch/m68k/math-emu/fp_emu.h
@@ -0,0 +1,146 @@
+/*
+ * fp_emu.h
+ *
+ * Copyright Roman Zippel, 1997.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FP_EMU_H
+#define _FP_EMU_H
+
+#ifdef __ASSEMBLY__
+#include <asm/offsets.h>
+#endif
+#include <asm/math-emu.h>
+
+#ifndef __ASSEMBLY__
+
+#define IS_INF(a) ((a)->exp == 0x7fff)
+#define IS_ZERO(a) ((a)->mant.m64 == 0)
+
+
+#define fp_set_sr(bit) ({					\
+	FPDATA->fpsr |= 1 << (bit);				\
+})
+
+#define fp_set_quotient(quotient) ({				\
+	FPDATA->fpsr &= 0xff00ffff;				\
+	FPDATA->fpsr |= ((quotient) & 0xff) << 16;		\
+})
+
+/* linkage for several useful functions */
+
+/* Normalize the extended struct, return 0 for a NaN */
+#define fp_normalize_ext(fpreg) ({				\
+	register struct fp_ext *reg asm ("a0") = fpreg;		\
+	register int res asm ("d0");				\
+								\
+	asm volatile ("jsr fp_conv_ext2ext"			\
+			: "=d" (res) : "a" (reg)		\
+			: "a1", "d1", "d2", "memory");		\
+	res;							\
+})
+
+#define fp_copy_ext(dest, src) ({				\
+	*dest = *src;						\
+})
+
+#define fp_monadic_check(dest, src) ({				\
+	fp_copy_ext(dest, src);					\
+	if (!fp_normalize_ext(dest))				\
+		return dest;					\
+})
+
+#define fp_dyadic_check(dest, src) ({				\
+	if (!fp_normalize_ext(dest))				\
+		return dest;					\
+	if (!fp_normalize_ext(src)) {				\
+		fp_copy_ext(dest, src);				\
+		return dest;					\
+	}							\
+})
+
+extern const struct fp_ext fp_QNaN;
+extern const struct fp_ext fp_Inf;
+
+#define fp_set_nan(dest) ({					\
+	fp_set_sr(FPSR_EXC_OPERR);				\
+	*dest = fp_QNaN;					\
+})
+
+/* TODO check rounding mode? */
+#define fp_set_ovrflw(dest) ({					\
+	fp_set_sr(FPSR_EXC_OVFL);				\
+	dest->exp = 0x7fff;					\
+	dest->mant.m64 = 0;					\
+})
+
+#define fp_conv_ext2long(src) ({				\
+	register struct fp_ext *__src asm ("a0") = src;		\
+	register int __res asm ("d0");				\
+								\
+	asm volatile ("jsr fp_conv_ext2long"			\
+			: "=d" (__res) : "a" (__src)		\
+			: "a1", "d1", "d2", "memory");		\
+	__res;							\
+})
+
+#define fp_conv_long2ext(dest, src) ({				\
+	register struct fp_ext *__dest asm ("a0") = dest;	\
+	register int __src asm ("d0") = src;			\
+								\
+	asm volatile ("jsr fp_conv_ext2long"			\
+			: : "d" (__src), "a" (__dest)		\
+			: "a1", "d1", "d2", "memory");		\
+})
+
+#else /* __ASSEMBLY__ */
+
+/*
+ * set, clear or test a bit in the fp status register
+ */
+.macro	fp_set_sr	bit
+	bset	#(\bit&7),(FPD_FPSR+3-(\bit/8),FPDATA)
+.endm
+
+.macro	fp_clr_sr	bit
+	bclr	#(\bit&7),(FPD_FPSR+3-(\bit/8),FPDATA)
+.endm
+
+.macro	fp_tst_sr	bit
+	btst	#(\bit&7),(FPD_FPSR+3-(\bit/8),FPDATA)
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _FP_EMU_H */
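The assembly fp_set_sr/fp_clr_sr/fp_tst_sr macros above address the 32-bit FPSR one byte at a time: on the big-endian 68k, bit n of the word lives in byte 3 - n/8 at bit position n & 7. A small C sketch of that byte/bit mapping (illustrative only):

	#include <stdio.h>

	/* Set bit 'n' of a big-endian 32-bit word stored as four bytes,
	 * using the same byte/bit arithmetic as the fp_set_sr macro. */
	static void set_sr_bit(unsigned char fpsr[4], int n)
	{
		fpsr[3 - n / 8] |= 1 << (n & 7);
	}

	int main(void)
	{
		unsigned char fpsr[4] = { 0, 0, 0, 0 };

		set_sr_bit(fpsr, 24);	/* bit 24 ends up in the most significant byte */
		printf("%02x %02x %02x %02x\n", fpsr[0], fpsr[1], fpsr[2], fpsr[3]);
		return 0;
	}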
diff --git a/arch/m68k/math-emu/fp_entry.S b/arch/m68k/math-emu/fp_entry.S
new file mode 100644
index 0000000..5ec2d91
--- /dev/null
+++ b/arch/m68k/math-emu/fp_entry.S
@@ -0,0 +1,325 @@
+/*
+ * fp_entry.S
+ *
+ * Copyright Roman Zippel, 1997.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/entry.h>
+
+#include "fp_emu.h"
+
+	.globl	fpu_emu
+	.globl	fp_debugprint
+	.globl	fp_err_ua1,fp_err_ua2
+
+	.text
+fpu_emu:
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+
+#if defined(CPU_M68020_OR_M68030) && defined(CPU_M68040_OR_M68060)
+	tst.l	m68k_is040or060
+	jeq	1f
+#endif
+#if defined(CPU_M68040_OR_M68060)
+	move.l	(FPS_PC2,%sp),(FPS_PC,%sp)
+#endif
+1:
+	| emulate the instruction
+	jsr	fp_scan
+
+#if defined(CONFIG_M68060)
+#if !defined(CPU_M68060_ONLY)
+	btst	#3,m68k_cputype+3
+	jeq	1f
+#endif
+	btst	#7,(FPS_SR,%sp)
+	jne	fp_sendtrace060
+#endif
+1:
+	| emulation successful?
+	tst.l	%d0
+	jeq	ret_from_exception
+
+	| send some signal to program here
+
+	jra	ret_from_exception
+
+	| we jump here after an access error while trying to access
+	| user space, we correct stackpointer and send a SIGSEGV to
+	| the user process
+fp_err_ua2:
+	addq.l	#4,%sp
+fp_err_ua1:
+	addq.l	#4,%sp
+	move.l	%a0,-(%sp)
+	pea	SEGV_MAPERR
+	pea	SIGSEGV
+	jsr	fpemu_signal
+	add.w	#12,%sp
+	jra	ret_from_exception
+
+#if defined(CONFIG_M68060)
+	| send a trace signal if we are debugged
+	| it does not really belong here, but...
+fp_sendtrace060:
+	move.l	(FPS_PC,%sp),-(%sp)
+	pea	TRAP_TRACE
+	pea	SIGTRAP
+	jsr	fpemu_signal
+	add.w	#12,%sp
+	jra	ret_from_exception
+#endif
+
+	.globl	fp_get_data_reg, fp_put_data_reg
+	.globl	fp_get_addr_reg, fp_put_addr_reg
+
+	| Entry points to get/put a register. Some registers can be accessed
+	| directly, others live on the stack. As we read/write the stack
+	| directly here, these functions may only be called from within
+	| instruction decoding, otherwise the stack pointer is incorrect
+	| and the stack gets corrupted.
+fp_get_data_reg:
+	jmp	([0f:w,%pc,%d0.w*4])
+
+	.align	4
+0:
+	.long	fp_get_d0, fp_get_d1
+	.long	fp_get_d2, fp_get_d3
+	.long	fp_get_d4, fp_get_d5
+	.long	fp_get_d6, fp_get_d7
+
+fp_get_d0:
+	move.l	(PT_D0+8,%sp),%d0
+	printf	PREGISTER,"{d0->%08x}",1,%d0
+	rts
+
+fp_get_d1:
+	move.l	(PT_D1+8,%sp),%d0
+	printf	PREGISTER,"{d1->%08x}",1,%d0
+	rts
+
+fp_get_d2:
+	move.l	(PT_D2+8,%sp),%d0
+	printf	PREGISTER,"{d2->%08x}",1,%d0
+	rts
+
+fp_get_d3:
+	move.l	%d3,%d0
+	printf	PREGISTER,"{d3->%08x}",1,%d0
+	rts
+
+fp_get_d4:
+	move.l	%d4,%d0
+	printf	PREGISTER,"{d4->%08x}",1,%d0
+	rts
+
+fp_get_d5:
+	move.l	%d5,%d0
+	printf	PREGISTER,"{d5->%08x}",1,%d0
+	rts
+
+fp_get_d6:
+	move.l	%d6,%d0
+	printf	PREGISTER,"{d6->%08x}",1,%d0
+	rts
+
+fp_get_d7:
+	move.l	%d7,%d0
+	printf	PREGISTER,"{d7->%08x}",1,%d0
+	rts
+
+fp_put_data_reg:
+	jmp	([0f:w,%pc,%d1.w*4])
+
+	.align	4
+0:
+	.long	fp_put_d0, fp_put_d1
+	.long	fp_put_d2, fp_put_d3
+	.long	fp_put_d4, fp_put_d5
+	.long	fp_put_d6, fp_put_d7
+
+fp_put_d0:
+	printf	PREGISTER,"{d0<-%08x}",1,%d0
+	move.l	%d0,(PT_D0+8,%sp)
+	rts
+
+fp_put_d1:
+	printf	PREGISTER,"{d1<-%08x}",1,%d0
+	move.l	%d0,(PT_D1+8,%sp)
+	rts
+
+fp_put_d2:
+	printf	PREGISTER,"{d2<-%08x}",1,%d0
+	move.l	%d0,(PT_D2+8,%sp)
+	rts
+
+fp_put_d3:
+	printf	PREGISTER,"{d3<-%08x}",1,%d0
+|	move.l	%d0,%d3
+	move.l	%d0,(PT_D3+8,%sp)
+	rts
+
+fp_put_d4:
+	printf	PREGISTER,"{d4<-%08x}",1,%d0
+|	move.l	%d0,%d4
+	move.l	%d0,(PT_D4+8,%sp)
+	rts
+
+fp_put_d5:
+	printf	PREGISTER,"{d5<-%08x}",1,%d0
+|	move.l	%d0,%d5
+	move.l	%d0,(PT_D5+8,%sp)
+	rts
+
+fp_put_d6:
+	printf	PREGISTER,"{d6<-%08x}",1,%d0
+	move.l	%d0,%d6
+	rts
+
+fp_put_d7:
+	printf	PREGISTER,"{d7<-%08x}",1,%d0
+	move.l	%d0,%d7
+	rts
+
+fp_get_addr_reg:
+	jmp	([0f:w,%pc,%d0.w*4])
+
+	.align	4
+0:
+	.long	fp_get_a0, fp_get_a1
+	.long	fp_get_a2, fp_get_a3
+	.long	fp_get_a4, fp_get_a5
+	.long	fp_get_a6, fp_get_a7
+
+fp_get_a0:
+	move.l	(PT_A0+8,%sp),%a0
+	printf	PREGISTER,"{a0->%08x}",1,%a0
+	rts
+
+fp_get_a1:
+	move.l	(PT_A1+8,%sp),%a0
+	printf	PREGISTER,"{a1->%08x}",1,%a0
+	rts
+
+fp_get_a2:
+	move.l	(PT_A2+8,%sp),%a0
+	printf	PREGISTER,"{a2->%08x}",1,%a0
+	rts
+
+fp_get_a3:
+	move.l	%a3,%a0
+	printf	PREGISTER,"{a3->%08x}",1,%a0
+	rts
+
+fp_get_a4:
+	move.l	%a4,%a0
+	printf	PREGISTER,"{a4->%08x}",1,%a0
+	rts
+
+fp_get_a5:
+	move.l	%a5,%a0
+	printf	PREGISTER,"{a5->%08x}",1,%a0
+	rts
+
+fp_get_a6:
+	move.l	%a6,%a0
+	printf	PREGISTER,"{a6->%08x}",1,%a0
+	rts
+
+fp_get_a7:
+	move.l	%usp,%a0
+	printf	PREGISTER,"{a7->%08x}",1,%a0
+	rts
+
+fp_put_addr_reg:
+	jmp	([0f:w,%pc,%d0.w*4])
+
+	.align	4
+0:
+	.long	fp_put_a0, fp_put_a1
+	.long	fp_put_a2, fp_put_a3
+	.long	fp_put_a4, fp_put_a5
+	.long	fp_put_a6, fp_put_a7
+
+fp_put_a0:
+	printf	PREGISTER,"{a0<-%08x}",1,%a0
+	move.l	%a0,(PT_A0+8,%sp)
+	rts
+
+fp_put_a1:
+	printf	PREGISTER,"{a1<-%08x}",1,%a0
+	move.l	%a0,(PT_A1+8,%sp)
+	rts
+
+fp_put_a2:
+	printf	PREGISTER,"{a2<-%08x}",1,%a0
+	move.l	%a0,(PT_A2+8,%sp)
+	rts
+
+fp_put_a3:
+	printf	PREGISTER,"{a3<-%08x}",1,%a0
+	move.l	%a0,%a3
+	rts
+
+fp_put_a4:
+	printf	PREGISTER,"{a4<-%08x}",1,%a0
+	move.l	%a0,%a4
+	rts
+
+fp_put_a5:
+	printf	PREGISTER,"{a5<-%08x}",1,%a0
+	move.l	%a0,%a5
+	rts
+
+fp_put_a6:
+	printf	PREGISTER,"{a6<-%08x}",1,%a0
+	move.l	%a0,%a6
+	rts
+
+fp_put_a7:
+	printf	PREGISTER,"{a7<-%08x}",1,%a0
+	move.l	%a0,%usp
+	rts
+
+	.data
+	.align	4
+
+fp_debugprint:
+|	.long	PMDECODE
+	.long	PMINSTR+PMDECODE+PMCONV+PMNORM
+|	.long	PMCONV+PMNORM+PMINSTR
+|	.long	0
diff --git a/arch/m68k/math-emu/fp_log.c b/arch/m68k/math-emu/fp_log.c
new file mode 100644
index 0000000..87b4f01
--- /dev/null
+++ b/arch/m68k/math-emu/fp_log.c
@@ -0,0 +1,223 @@
+/*
+
+  fp_log.c: floating-point math routines for the Linux-m68k
+  floating point emulator.
+
+  Copyright (c) 1998-1999 David Huggins-Daines / Roman Zippel.
+
+  I hereby give permission, free of charge, to copy, modify, and
+  redistribute this software, in source or binary form, provided that
+  the above copyright notice and the following disclaimer are included
+  in all such copies.
+
+  THIS SOFTWARE IS PROVIDED "AS IS", WITH ABSOLUTELY NO WARRANTY, REAL
+  OR IMPLIED.
+
+*/
+
+#include "fp_emu.h"
+
+static const struct fp_ext fp_one =
+{
+	.exp = 0x3fff,
+};
+
+extern struct fp_ext *fp_fadd(struct fp_ext *dest, const struct fp_ext *src);
+extern struct fp_ext *fp_fdiv(struct fp_ext *dest, const struct fp_ext *src);
+extern struct fp_ext *fp_fmul(struct fp_ext *dest, const struct fp_ext *src);
+
+struct fp_ext *
+fp_fsqrt(struct fp_ext *dest, struct fp_ext *src)
+{
+	struct fp_ext tmp, src2;
+	int i, exp;
+
+	dprint(PINSTR, "fsqrt\n");
+
+	fp_monadic_check(dest, src);
+
+	if (IS_ZERO(dest))
+		return dest;
+
+	if (dest->sign) {
+		fp_set_nan(dest);
+		return dest;
+	}
+	if (IS_INF(dest))
+		return dest;
+
+	/*
+	 *		 sqrt(m) * 2^(p)	, if e = 2*p
+	 * sqrt(m*2^e) =
+	 *		 sqrt(2*m) * 2^(p)	, if e = 2*p + 1
+	 *
+	 * So we use the last bit of the exponent to decide whether to
+	 * use m or 2*m.
+	 *
+	 * Since only the fractional part of the mantissa is stored and
+	 * the integer part is assumed to be one, we place a 1 or 2 into
+	 * the fixed point representation.
+	 */
+	exp = dest->exp;
+	dest->exp = 0x3FFF;
+	if (!(exp & 1))		/* lowest bit of the unbiased exponent is set */
+		dest->exp++;
+	fp_copy_ext(&src2, dest);
+
+	/*
+	 * The Taylor series around a for sqrt(x) is:
+	 *	sqrt(x) = sqrt(a) + 1/(2*sqrt(a))*(x-a) + R
+	 * With a=1 this gives:
+	 *	sqrt(x) = 1 + 1/2*(x-1)
+	 *		= 1/2*(1+x)
+	 */
+	fp_fadd(dest, &fp_one);
+	dest->exp--;		/* * 1/2 */
+
+	/*
+	 * We now apply Newton's method to the function
+	 *	f(x) := x^2 - r
+	 * which has a zero at x = sqrt(r).
+	 *
+	 * It gives:
+	 *	x' := x - f(x)/f'(x)
+	 *	    = x - (x^2 -r)/(2*x)
+	 *	    = x - (x - r/x)/2
+	 *          = (2*x - x + r/x)/2
+	 *	    = (x + r/x)/2
+	 */
+	for (i = 0; i < 9; i++) {
+		fp_copy_ext(&tmp, &src2);
+
+		fp_fdiv(&tmp, dest);
+		fp_fadd(dest, &tmp);
+		dest->exp--;
+	}
+
+	dest->exp += (exp - 0x3FFF) / 2;
+
+	return dest;
+}
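+
+/*
+ * Illustrative sketch only (not part of the emulator, hence disabled):
+ * the iteration above, written with plain C doubles, to show how the
+ * initial guess (1+x)/2 and the Newton step (x + r/x)/2 converge to
+ * sqrt(r) for a mantissa r in [1, 4).
+ */
+#if 0
+static double fp_fsqrt_sketch(double r)
+{
+	double x = (1.0 + r) / 2.0;	/* first-order Taylor guess around 1 */
+	int i;
+
+	for (i = 0; i < 9; i++)		/* same iteration count as above */
+		x = (x + r / x) / 2.0;	/* Newton step: x' = (x + r/x)/2 */
+	return x;
+}
+#endif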
+
+struct fp_ext *
+fp_fetoxm1(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fetoxm1\n");
+
+	fp_monadic_check(dest, src);
+
+	if (IS_ZERO(dest))
+		return dest;
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fetox(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fetox\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_ftwotox(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("ftwotox\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_ftentox(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("ftentox\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_flogn(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("flogn\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_flognp1(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("flognp1\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_flog10(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("flog10\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_flog2(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("flog2\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fgetexp(struct fp_ext *dest, struct fp_ext *src)
+{
+	dprint(PINSTR, "fgetexp\n");
+
+	fp_monadic_check(dest, src);
+
+	if (IS_INF(dest)) {
+		fp_set_nan(dest);
+		return dest;
+	}
+	if (IS_ZERO(dest))
+		return dest;
+
+	fp_conv_long2ext(dest, (int)dest->exp - 0x3FFF);
+
+	fp_normalize_ext(dest);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fgetman(struct fp_ext *dest, struct fp_ext *src)
+{
+	dprint(PINSTR, "fgetman\n");
+
+	fp_monadic_check(dest, src);
+
+	if (IS_ZERO(dest))
+		return dest;
+
+	if (IS_INF(dest))
+		return dest;
+
+	dest->exp = 0x3FFF;
+
+	return dest;
+}
+
diff --git a/arch/m68k/math-emu/fp_move.S b/arch/m68k/math-emu/fp_move.S
new file mode 100644
index 0000000..71bdf83
--- /dev/null
+++ b/arch/m68k/math-emu/fp_move.S
@@ -0,0 +1,244 @@
+/*
+ * fp_move.S
+ *
+ * Copyright Roman Zippel, 1997.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "fp_emu.h"
+#include "fp_decode.h"
+
+do_no_pc_mode=1
+
+	.globl	fp_fmove_fp2mem
+
+fp_fmove_fp2mem:
+	clr.b	(2+FPD_FPSR,FPDATA)
+	fp_decode_dest_format
+	move.w	%d0,%d1			| store data size twice in %d1
+	swap	%d1			| one can be trashed below
+	move.w	%d0,%d1
+#ifdef FPU_EMU_DEBUG
+	lea	0f,%a0
+	clr.l	%d0
+	move.b	(%a0,%d1.w),%d0
+	printf	PDECODE,"fmove.%c ",1,%d0
+	fp_decode_src_reg
+	printf	PDECODE,"fp%d,",1,%d0
+
+	.data
+0:	.byte	'l','s','x','p','w','d','b','p'
+	.previous
+#endif
+
+	| encode addressing mode for dest
+	fp_decode_addr_mode
+
+	.long	fp_data, fp_ill
+	.long	fp_indirect, fp_postinc
+	.long	fp_predecr, fp_disp16
+	.long	fp_extmode0, fp_extmode1
+
+	| addressing mode: data register direct
+fp_data:
+	fp_mode_data_direct
+	move.w	%d0,%d1
+	fp_decode_src_reg
+	fp_get_fp_reg
+	lea	(FPD_TEMPFP1,FPDATA),%a1
+	move.l	(%a0)+,(%a1)+
+	move.l	(%a0)+,(%a1)+
+	move.l	(%a0),(%a1)
+	lea	(-8,%a1),%a0
+	swap	%d1
+	move.l	%d1,%d2
+	printf	PDECODE,"\n"
+	jmp	([0f:w,%pc,%d1.w*4])
+
+	.align	4
+0:
+	.long	fp_data_long, fp_data_single
+	.long	fp_ill, fp_ill
+	.long	fp_data_word, fp_ill
+	.long	fp_data_byte, fp_ill
+
+fp_data_byte:
+	jsr	fp_normalize_ext
+	jsr	fp_conv_ext2byte
+	move.l	%d0,%d1
+	swap	%d2
+	move.w	%d2,%d0
+	jsr	fp_get_data_reg
+	move.b	%d1,%d0
+	move.w	%d2,%d1
+	jsr	fp_put_data_reg
+	jra	fp_final
+
+fp_data_word:
+	jsr	fp_normalize_ext
+	jsr	fp_conv_ext2short
+	move.l	%d0,%d1
+	swap	%d2
+	move.w	%d2,%d0
+	jsr	fp_get_data_reg
+	move.w	%d1,%d0
+	move.l	%d2,%d1
+	jsr	fp_put_data_reg
+	jra	fp_final
+
+fp_data_long:
+	jsr	fp_normalize_ext
+	jsr	fp_conv_ext2long
+	swap	%d2
+	move.w	%d2,%d1
+	jsr	fp_put_data_reg
+	jra	fp_final
+
+fp_data_single:
+	jsr	fp_normalize_ext
+	jsr	fp_conv_ext2single
+	swap	%d2
+	move.w	%d2,%d1
+	jsr	fp_put_data_reg
+	jra	fp_final
+
+	| addressing mode: address register indirect
+fp_indirect:
+	fp_mode_addr_indirect
+	jra	fp_putdest
+
+	| addressing mode: address register indirect with postincrement
+fp_postinc:
+	fp_mode_addr_indirect_postinc
+	jra	fp_putdest
+
+	| addressing mode: address register indirect with predecrement
+fp_predecr:
+	fp_mode_addr_indirect_predec
+	jra	fp_putdest
+
+	| addressing mode: address register indirect with 16bit displacement
+fp_disp16:
+	fp_mode_addr_indirect_disp16
+	jra     fp_putdest
+
+fp_extmode0:
+	fp_mode_addr_indirect_extmode0
+	jra	fp_putdest
+
+fp_extmode1:
+	fp_decode_addr_reg
+	jmp	([0f:w,%pc,%d0*4])
+
+	.align	4
+0:
+	.long	fp_abs_short, fp_abs_long
+	.long	fp_ill, fp_ill
+	.long	fp_ill, fp_ill
+	.long	fp_ill, fp_ill
+
+fp_abs_short:
+	fp_mode_abs_short
+	jra	fp_putdest
+
+fp_abs_long:
+	fp_mode_abs_long
+	jra	fp_putdest
+
+fp_putdest:
+	move.l	%a0,%a1
+	fp_decode_src_reg
+	move.l	%d1,%d2			| save size
+	fp_get_fp_reg
+	printf	PDECODE,"\n"
+	addq.l	#8,%a0
+	move.l	(%a0),-(%sp)
+	move.l	-(%a0),-(%sp)
+	move.l	-(%a0),-(%sp)
+	move.l	%sp,%a0
+	jsr	fp_normalize_ext
+
+	swap	%d2
+	jmp	([0f:w,%pc,%d2.w*4])
+
+	.align	4
+0:
+	.long	fp_format_long, fp_format_single
+	.long	fp_format_extended, fp_format_packed
+	.long	fp_format_word, fp_format_double
+	.long	fp_format_byte, fp_format_packed
+
+fp_format_long:
+	jsr	fp_conv_ext2long
+	putuser.l %d0,(%a1),fp_err_ua1,%a1
+	jra	fp_finish_move
+
+fp_format_single:
+	jsr	fp_conv_ext2single
+	putuser.l %d0,(%a1),fp_err_ua1,%a1
+	jra	fp_finish_move
+
+fp_format_extended:
+	move.l	(%a0)+,%d0
+	lsl.w	#1,%d0
+	lsl.l	#7,%d0
+	lsl.l	#8,%d0
+	putuser.l %d0,(%a1)+,fp_err_ua1,%a1
+	move.l	(%a0)+,%d0
+	putuser.l %d0,(%a1)+,fp_err_ua1,%a1
+	move.l	(%a0),%d0
+	putuser.l %d0,(%a1),fp_err_ua1,%a1
+	jra	fp_finish_move
+
+fp_format_packed:
+	/* not supported yet */
+	lea	(12,%sp),%sp
+	jra	fp_ill
+
+fp_format_word:
+	jsr	fp_conv_ext2short
+	putuser.w %d0,(%a1),fp_err_ua1,%a1
+	jra	fp_finish_move
+
+fp_format_double:
+	jsr	fp_conv_ext2double
+	jra	fp_finish_move
+
+fp_format_byte:
+	jsr	fp_conv_ext2byte
+	putuser.b %d0,(%a1),fp_err_ua1,%a1
+|	jra	fp_finish_move
+
+fp_finish_move:
+	lea	(12,%sp),%sp
+	jra	fp_final
diff --git a/arch/m68k/math-emu/fp_movem.S b/arch/m68k/math-emu/fp_movem.S
new file mode 100644
index 0000000..8354d39
--- /dev/null
+++ b/arch/m68k/math-emu/fp_movem.S
@@ -0,0 +1,368 @@
+/*
+ * fp_movem.S
+ *
+ * Copyright Roman Zippel, 1997.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "fp_emu.h"
+#include "fp_decode.h"
+
+| set flags for decode macros for fmovem
+do_fmovem=1
+
+	.globl	fp_fmovem_fp, fp_fmovem_cr
+
+| %d1 contains the mask and count of the register list
+| for other register usage see fp_decode.h
+
+fp_fmovem_fp:
+	printf	PDECODE,"fmovem.x "
+	| get register list and count them
+	btst	#11,%d2
+	jne	1f
+	bfextu	%d2{#24,#8},%d0		| static register list
+	jra	2f
+1:	bfextu	%d2{#25,#3},%d0		| dynamic register list
+	jsr	fp_get_data_reg
+2:	move.l	%d0,%d1
+	swap	%d1
+	jra	2f
+1:	addq.w	#1,%d1			| count the # of registers in
+2:	lsr.b	#1,%d0			| register list and keep it in %d1
+	jcs	1b
+	jne	2b
+	printf	PDECODE,"#%08x",1,%d1
+#ifdef FPU_EMU_DEBUG
+	btst	#12,%d2
+	jne	1f
+	printf	PDECODE,"-"		| decremental move
+	jra	2f
+1:	printf	PDECODE,"+"		| incremental move
+2:	btst	#13,%d2
+	jeq	1f
+	printf	PDECODE,"->"		| fpu -> cpu
+	jra	2f
+1:	printf	PDECODE,"<-"		| fpu <- cpu
+2:
+#endif
+
+	| decode address mode
+	fp_decode_addr_mode
+
+	.long	fp_ill, fp_ill
+	.long	fpr_indirect, fpr_postinc
+	.long	fpr_predecr, fpr_disp16
+	.long	fpr_extmode0, fpr_extmode1
+
+	| addressing mode: address register indirect
+fpr_indirect:
+	fp_mode_addr_indirect
+	jra	fpr_do_movem
+
+	| addressing mode: address register indirect with postincrement
+fpr_postinc:
+	fp_mode_addr_indirect_postinc
+	jra	fpr_do_movem
+
+fpr_predecr:
+	fp_mode_addr_indirect_predec
+	jra	fpr_do_movem
+
+	| addressing mode: address register/program counter indirect
+	|		   with 16bit displacement
+fpr_disp16:
+	fp_mode_addr_indirect_disp16
+	jra	fpr_do_movem
+
+fpr_extmode0:
+	fp_mode_addr_indirect_extmode0
+	jra	fpr_do_movem
+
+fpr_extmode1:
+	fp_decode_addr_reg
+	jmp	([0f:w,%pc,%d0*4])
+
+	.align	4
+0:
+	.long	fpr_absolute_short, fpr_absolute_long
+	.long	fpr_disp16, fpr_extmode0
+	.long	fp_ill, fp_ill
+	.long	fp_ill, fp_ill
+
+fpr_absolute_short:
+	fp_mode_abs_short
+	jra	fpr_do_movem
+
+fpr_absolute_long:
+	fp_mode_abs_long
+|	jra	fpr_do_movem
+
+fpr_do_movem:
+	swap	%d1			| get fpu register list
+	lea	(FPD_FPREG,FPDATA),%a1
+	moveq	#12,%d0
+	btst	#12,%d2
+	jne	1f
+	lea	(-12,%a1,%d0*8),%a1
+	neg.l	%d0
+1:	btst	#13,%d2
+	jne	4f
+	| move register from memory into fpu
+	jra	3f
+1:	printf	PMOVEM,"(%p>%p)",2,%a0,%a1
+	getuser.l (%a0)+,%d2,fp_err_ua1,%a0
+	lsr.l	#8,%d2
+	lsr.l	#7,%d2
+	lsr.w	#1,%d2
+	move.l	%d2,(%a1)+
+	getuser.l (%a0)+,%d2,fp_err_ua1,%a0
+	move.l	%d2,(%a1)+
+	getuser.l (%a0),%d2,fp_err_ua1,%a0
+	move.l	%d2,(%a1)
+	subq.l	#8,%a0
+	subq.l	#8,%a1
+	add.l	%d0,%a0
+2:	add.l	%d0,%a1
+3:	lsl.b	#1,%d1
+	jcs	1b
+	jne	2b
+	jra	5f
+	| move register from fpu into memory
+1:	printf	PMOVEM,"(%p>%p)",2,%a1,%a0
+	move.l	(%a1)+,%d2
+	lsl.w	#1,%d2
+	lsl.l	#7,%d2
+	lsl.l	#8,%d2
+	putuser.l %d2,(%a0)+,fp_err_ua1,%a0
+	move.l	(%a1)+,%d2
+	putuser.l %d2,(%a0)+,fp_err_ua1,%a0
+	move.l	(%a1),%d2
+	putuser.l %d2,(%a0),fp_err_ua1,%a0
+	subq.l	#8,%a1
+	subq.l	#8,%a0
+	add.l	%d0,%a0
+2:	add.l	%d0,%a1
+4:	lsl.b	#1,%d1
+	jcs	1b
+	jne	2b
+5:
+	printf	PDECODE,"\n"
+#if 0
+	lea	(FPD_FPREG,FPDATA),%a0
+	printf	PMOVEM,"fp:"
+	printx	PMOVEM,%a0@(0)
+	printx	PMOVEM,%a0@(12)
+	printf	PMOVEM,"\n   "
+	printx	PMOVEM,%a0@(24)
+	printx	PMOVEM,%a0@(36)
+	printf	PMOVEM,"\n   "
+	printx	PMOVEM,%a0@(48)
+	printx	PMOVEM,%a0@(60)
+	printf	PMOVEM,"\n   "
+	printx	PMOVEM,%a0@(72)
+	printx	PMOVEM,%a0@(84)
+	printf	PMOVEM,"\n"
+#endif
+	jra	fp_end
+
+| set flags for decode macros for fmovem control register
+do_fmovem=1
+do_fmovem_cr=1
+
+fp_fmovem_cr:
+	printf	PDECODE,"fmovem.cr "
+	| get register list and count them
+	bfextu	%d2{#19,#3},%d0
+	move.l	%d0,%d1
+	swap	%d1
+	jra	2f
+1:	addq.w	#1,%d1
+2:	lsr.l	#1,%d0
+	jcs	1b
+	jne	2b
+	printf	PDECODE,"#%08x",1,%d1
+#ifdef FPU_EMU_DEBUG
+	btst	#13,%d2
+	jeq	1f
+	printf	PDECODE,"->"		| fpu -> cpu
+	jra	2f
+1:	printf	PDECODE,"<-"		| fpu <- cpu
+2:
+#endif
+
+	| decode address mode
+	fp_decode_addr_mode
+
+	.long	fpc_data, fpc_addr
+	.long	fpc_indirect, fpc_postinc
+	.long	fpc_predecr, fpc_disp16
+	.long	fpc_extmode0, fpc_extmode1
+
+fpc_data:
+	fp_mode_data_direct
+	move.w	%d0,%d1
+	bfffo	%d2{#19,#3},%d0
+	sub.w	#19,%d0
+	lea	(FPD_FPCR,FPDATA,%d0.w*4),%a1
+	btst	#13,%d2
+	jne	1f
+	move.w	%d1,%d0
+	jsr	fp_get_data_reg
+	move.l	%d0,(%a1)
+	jra	fpc_movem_fin
+1:	move.l	(%a1),%d0
+	jsr	fp_put_data_reg
+	jra	fpc_movem_fin
+
+fpc_addr:
+	fp_decode_addr_reg
+	printf	PDECODE,"a%d",1,%d0
+	btst	#13,%d2
+	jne	1f
+	jsr	fp_get_addr_reg
+	move.l	%a0,(FPD_FPIAR,FPDATA)
+	jra	fpc_movem_fin
+1:	move.l	(FPD_FPIAR,FPDATA),%a0
+	jsr	fp_put_addr_reg
+	jra	fpc_movem_fin
+
+fpc_indirect:
+	fp_mode_addr_indirect
+	jra	fpc_do_movem
+
+fpc_postinc:
+	fp_mode_addr_indirect_postinc
+	jra	fpc_do_movem
+
+fpc_predecr:
+	fp_mode_addr_indirect_predec
+	jra	fpc_do_movem
+
+fpc_disp16:
+	fp_mode_addr_indirect_disp16
+	jra	fpc_do_movem
+
+fpc_extmode0:
+	fp_mode_addr_indirect_extmode0
+	jra	fpc_do_movem
+
+fpc_extmode1:
+	fp_decode_addr_reg
+	jmp	([0f:w,%pc,%d0*4])
+
+	.align	4
+0:
+	.long	fpc_absolute_short, fpc_absolute_long
+	.long	fpc_disp16, fpc_extmode0
+	.long	fpc_immediate, fp_ill
+	.long	fp_ill, fp_ill
+
+fpc_absolute_short:
+	fp_mode_abs_short
+	jra	fpc_do_movem
+
+fpc_absolute_long:
+	fp_mode_abs_long
+	jra	fpc_do_movem
+
+fpc_immediate:
+	fp_get_pc %a0
+	lea	(%a0,%d1.w*4),%a1
+	fp_put_pc %a1
+	printf	PDECODE,"#imm"
+|	jra	fpc_do_movem
+#if 0
+	swap	%d1
+	lsl.l	#5,%d1
+	lea	(FPD_FPCR,FPDATA),%a0
+	jra	3f
+1:	move.l	%d0,(%a0)
+2:	addq.l	#4,%a0
+3:	lsl.b	#1,%d1
+	jcs	1b
+	jne	2b
+	jra	fpc_movem_fin
+#endif
+
+fpc_do_movem:
+	swap	%d1			| get fpu register list
+	lsl.l	#5,%d1
+	lea	(FPD_FPCR,FPDATA),%a1
+1:	btst	#13,%d2
+	jne	4f
+
+	| move register from memory into fpu
+	jra	3f
+1:	printf	PMOVEM,"(%p>%p)",2,%a0,%a1
+	getuser.l (%a0)+,%d0,fp_err_ua1,%a0
+	move.l	%d0,(%a1)
+2:	addq.l	#4,%a1
+3:	lsl.b	#1,%d1
+	jcs	1b
+	jne	2b
+	jra	fpc_movem_fin
+
+	| move register from fpu into memory
+1:	printf	PMOVEM,"(%p>%p)",2,%a1,%a0
+	move.l	(%a1),%d0
+	putuser.l %d0,(%a0)+,fp_err_ua1,%a0
+2:	addq.l	#4,%a1
+4:	lsl.b	#1,%d1
+	jcs	1b
+	jne	2b
+
+fpc_movem_fin:
+	and.l	#0x0000fff0,(FPD_FPCR,FPDATA)
+	and.l	#0x0ffffff8,(FPD_FPSR,FPDATA)
+	move.l	(FPD_FPCR,FPDATA),%d0
+	lsr.l	#4,%d0
+	moveq	#3,%d1
+	and.l	%d0,%d1
+	move.w	%d1,(FPD_RND,FPDATA)
+	lsr.l	#2,%d0
+	moveq	#3,%d1
+	and.l	%d0,%d1
+	move.w	%d1,(FPD_PREC,FPDATA)
+	printf	PDECODE,"\n"
+#if 0
+	printf	PMOVEM,"fpcr : %08x\n",1,FPDATA@(FPD_FPCR)
+	printf	PMOVEM,"fpsr : %08x\n",1,FPDATA@(FPD_FPSR)
+	printf	PMOVEM,"fpiar: %08x\n",1,FPDATA@(FPD_FPIAR)
+	clr.l	%d0
+	move.w	(FPD_PREC,FPDATA),%d0
+	printf	PMOVEM,"prec : %04x\n",1,%d0
+	move.w	(FPD_RND,FPDATA),%d0
+	printf	PMOVEM,"rnd  : %04x\n",1,%d0
+#endif
+	jra	fp_end
diff --git a/arch/m68k/math-emu/fp_scan.S b/arch/m68k/math-emu/fp_scan.S
new file mode 100644
index 0000000..e4146ed
--- /dev/null
+++ b/arch/m68k/math-emu/fp_scan.S
@@ -0,0 +1,478 @@
+/*
+ * fp_scan.S
+ *
+ * Copyright Roman Zippel, 1997.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "fp_emu.h"
+#include "fp_decode.h"
+
+	.globl	fp_scan, fp_datasize
+
+	.data
+
+| %d2 - first two instr words
+| %d1 - operand size
+
+/* operand formats are:
+
+	Long = 0,		i.e. fmove.l
+	Single,			i.e. fmove.s
+	Extended,		i.e. fmove.x
+	Packed-BCD,		i.e. fmove.p
+	Word,			i.e. fmove.w
+	Double,			i.e. fmove.d
+*/
+
+	.text
+
+| On entry:
+| FPDATA - base of emulated FPU registers
+
+fp_scan:
+| normal fpu instruction? (this excludes fsave/frestore)
+	fp_get_pc %a0
+	printf	PDECODE,"%08x: ",1,%a0
+	getuser.b (%a0),%d0,fp_err_ua1,%a0
+#if 1
+	cmp.b	#0xf2,%d0		| cpid = 1
+#else
+	cmp.b	#0xfc,%d0		| cpid = 6
+#endif
+	jne	fp_nonstd
+| first two instruction words are kept in %d2
+	getuser.l (%a0)+,%d2,fp_err_ua1,%a0
+	fp_put_pc %a0
+fp_decode_cond:				| separate conditional instr
+	fp_decode_cond_instr_type
+
+	.long	fp_decode_move, fp_fscc
+	.long	fp_fbccw, fp_fbccl
+
+fp_decode_move:				| separate move instr
+	fp_decode_move_instr_type
+
+	.long	fp_fgen_fp, fp_ill
+	.long	fp_fgen_ea, fp_fmove_fp2mem
+	.long	fp_fmovem_cr, fp_fmovem_cr
+	.long	fp_fmovem_fp, fp_fmovem_fp
+
+| now all arithmetic instr and a few move instr are left
+fp_fgen_fp:				| source is a fpu register
+	clr.b	(FPD_FPSR+2,FPDATA)	| clear the exception byte
+	fp_decode_sourcespec
+	printf	PDECODE,"f<op>.x fp%d",1,%d0
+	fp_get_fp_reg
+	lea	(FPD_TEMPFP1,FPDATA),%a1 | copy src into a temp location
+	move.l	(%a0)+,(%a1)+
+	move.l	(%a0)+,(%a1)+
+	move.l	(%a0),(%a1)
+	lea	(-8,%a1),%a0
+	jra	fp_getdest
+
+fp_fgen_ea:				| source is <ea>
+	clr.b	(FPD_FPSR+2,FPDATA)	| clear the exception byte
+	| sort out fmovecr, keep data size in %d1
+	fp_decode_sourcespec
+	cmp.w	#7,%d0
+	jeq	fp_fmovecr
+	move.w	%d0,%d1			| store data size twice in %d1
+	swap	%d1			| one can be trashed below
+	move.w	%d0,%d1
+#ifdef FPU_EMU_DEBUG
+	lea	0f,%a0
+	clr.l	%d0
+	move.b	(%a0,%d1.w),%d0
+	printf	PDECODE,"f<op>.%c ",1,%d0
+
+	.data
+0:	.byte	'l','s','x','p','w','d','b',0
+	.previous
+#endif
+
+/*
+	fp_getsource, fp_getdest
+
+	basically, we end up with a pointer to the source operand in
+	%a1, and a pointer to the destination operand in %a0.  both
+	are, of course, 96-bit extended floating point numbers.
+*/
+
+fp_getsource:
+	| decode addressing mode for source
+	fp_decode_addr_mode
+
+	.long	fp_data, fp_ill
+	.long	fp_indirect, fp_postinc
+	.long	fp_predecr, fp_disp16
+	.long	fp_extmode0, fp_extmode1
+
+	| addressing mode: data register direct
+fp_data:
+	fp_mode_data_direct
+	jsr	fp_get_data_reg
+	lea	(FPD_TEMPFP1,FPDATA),%a0
+	jmp	([0f:w,%pc,%d1.w*4])
+
+	.align	4
+0:
+	.long	fp_data_long, fp_data_single
+	.long	fp_ill, fp_ill
+	.long	fp_data_word, fp_ill
+	.long	fp_data_byte, fp_ill
+
+	| data types that fit in an integer data register
+fp_data_byte:
+	extb.l	%d0
+	jra	fp_data_long
+
+fp_data_word:
+	ext.l	%d0
+
+fp_data_long:
+	jsr	fp_conv_long2ext
+	jra	fp_getdest
+
+fp_data_single:
+	jsr	fp_conv_single2ext
+	jra	fp_getdest
+
+	| addressing mode: address register indirect
+fp_indirect:
+	fp_mode_addr_indirect
+	jra	fp_fetchsource
+
+	| addressing mode: address register indirect with postincrement
+fp_postinc:
+	fp_mode_addr_indirect_postinc
+	jra	fp_fetchsource
+
+	| addressing mode: address register indirect with predecrement
+fp_predecr:
+	fp_mode_addr_indirect_predec
+	jra	fp_fetchsource
+
+	| addressing mode: address register/program counter indirect
+	|		   with 16bit displacement
+fp_disp16:
+	fp_mode_addr_indirect_disp16
+	jra	fp_fetchsource
+
+	| all other indirect addressing modes will finally end up here
+fp_extmode0:
+	fp_mode_addr_indirect_extmode0
+	jra	fp_fetchsource
+
+| all pc relative addressing modes and immediate/absolute modes end up here
+| the first ones are sent to fp_extmode0 or fp_disp16
+| and only the latter are handled here
+fp_extmode1:
+	fp_decode_addr_reg
+	jmp	([0f:w,%pc,%d0*4])
+
+	.align	4
+0:
+	.long	fp_abs_short, fp_abs_long
+	.long	fp_disp16, fp_extmode0
+	.long	fp_immediate, fp_ill
+	.long	fp_ill, fp_ill
+
+	| addressing mode: absolute short
+fp_abs_short:
+	fp_mode_abs_short
+	jra	fp_fetchsource
+
+	| addressing mode: absolute long
+fp_abs_long:
+	fp_mode_abs_long
+	jra	fp_fetchsource
+
+	| addressing mode: immediate data
+fp_immediate:
+	printf	PDECODE,"#"
+	fp_get_pc %a0
+	move.w	(fp_datasize,%d1.w*2),%d0
+	addq.w	#1,%d0
+	and.w	#-2,%d0
+#ifdef FPU_EMU_DEBUG
+	movem.l	%d0/%d1,-(%sp)
+	movel	%a0,%a1
+	clr.l	%d1
+	jra	2f
+1:	getuser.b (%a1)+,%d1,fp_err_ua1,%a1
+	printf	PDECODE,"%02x",1,%d1
+2:	dbra	%d0,1b
+	movem.l	(%sp)+,%d0/%d1
+#endif
+	lea	(%a0,%d0.w),%a1
+	fp_put_pc %a1
+|	jra	fp_fetchsource
+
+fp_fetchsource:
+	move.l	%a0,%a1
+	swap	%d1
+	lea	(FPD_TEMPFP1,FPDATA),%a0
+	jmp	([0f:w,%pc,%d1.w*4])
+
+	.align	4
+0:	.long	fp_long, fp_single
+	.long	fp_ext, fp_pack
+	.long	fp_word, fp_double
+	.long	fp_byte, fp_ill
+
+fp_long:
+	getuser.l (%a1),%d0,fp_err_ua1,%a1
+	jsr	fp_conv_long2ext
+	jra	fp_getdest
+
+fp_single:
+	getuser.l (%a1),%d0,fp_err_ua1,%a1
+	jsr	fp_conv_single2ext
+	jra	fp_getdest
+
+fp_ext:
+	getuser.l (%a1)+,%d0,fp_err_ua1,%a1
+	lsr.l	#8,%d0
+	lsr.l	#7,%d0
+	lsr.w	#1,%d0
+	move.l	%d0,(%a0)+
+	getuser.l (%a1)+,%d0,fp_err_ua1,%a1
+	move.l	%d0,(%a0)+
+	getuser.l (%a1),%d0,fp_err_ua1,%a1
+	move.l	%d0,(%a0)
+	subq.l	#8,%a0
+	jra	fp_getdest
+
+fp_pack:
+	/* not supported yet */
+	jra	fp_ill
+
+fp_word:
+	getuser.w (%a1),%d0,fp_err_ua1,%a1
+	ext.l	%d0
+	jsr	fp_conv_long2ext
+	jra	fp_getdest
+
+fp_double:
+	jsr	fp_conv_double2ext
+	jra	fp_getdest
+
+fp_byte:
+	getuser.b (%a1),%d0,fp_err_ua1,%a1
+	extb.l	%d0
+	jsr	fp_conv_long2ext
+|	jra	fp_getdest
+
+fp_getdest:
+	move.l	%a0,%a1
+	bfextu	%d2{#22,#3},%d0
+	printf	PDECODE,",fp%d\n",1,%d0
+	fp_get_fp_reg
+	movem.l	%a0/%a1,-(%sp)
+	pea	fp_finalrounding
+	bfextu	%d2{#25,#7},%d0
+	jmp	([0f:w,%pc,%d0*4])
+
+	.align	4
+0:
+	.long	fp_fmove_mem2fp, fp_fint, fp_fsinh, fp_fintrz
+	.long	fp_fsqrt, fp_ill, fp_flognp1, fp_ill
+	.long	fp_fetoxm1, fp_ftanh, fp_fatan, fp_ill
+	.long	fp_fasin, fp_fatanh, fp_fsin, fp_ftan
+	.long	fp_fetox, fp_ftwotox, fp_ftentox, fp_ill
+	.long	fp_flogn, fp_flog10, fp_flog2, fp_ill
+	.long	fp_fabs, fp_fcosh, fp_fneg, fp_ill
+	.long	fp_facos, fp_fcos, fp_fgetexp, fp_fgetman
+	.long	fp_fdiv, fp_fmod, fp_fadd, fp_fmul
+	.long	fpa_fsgldiv, fp_frem, fp_fscale, fpa_fsglmul
+	.long	fp_fsub, fp_ill, fp_ill, fp_ill
+	.long	fp_ill, fp_ill, fp_ill, fp_ill
+	.long	fp_fsincos0, fp_fsincos1, fp_fsincos2, fp_fsincos3
+	.long	fp_fsincos4, fp_fsincos5, fp_fsincos6, fp_fsincos7
+	.long	fp_fcmp, fp_ill, fp_ftst, fp_ill
+	.long	fp_ill, fp_ill, fp_ill, fp_ill
+	.long	fp_fsmove, fp_fssqrt, fp_ill, fp_ill
+	.long	fp_fdmove, fp_fdsqrt, fp_ill, fp_ill
+	.long	fp_ill, fp_ill, fp_ill, fp_ill
+	.long	fp_ill, fp_ill, fp_ill, fp_ill
+	.long	fp_ill, fp_ill, fp_ill, fp_ill
+	.long	fp_ill, fp_ill, fp_ill, fp_ill
+	.long	fp_fsabs, fp_ill, fp_fsneg, fp_ill
+	.long	fp_fdabs, fp_ill, fp_fdneg, fp_ill
+	.long	fp_fsdiv, fp_ill, fp_fsadd, fp_fsmul
+	.long	fp_fddiv, fp_ill, fp_fdadd, fp_fdmul
+	.long	fp_fssub, fp_ill, fp_ill, fp_ill
+	.long	fp_fdsub, fp_ill, fp_ill, fp_ill
+	.long	fp_ill, fp_ill, fp_ill, fp_ill
+	.long	fp_ill, fp_ill, fp_ill, fp_ill
+	.long	fp_ill, fp_ill, fp_ill, fp_ill
+	.long	fp_ill, fp_ill, fp_ill, fp_ill
+
+	| Instructions follow
+
+	| Move an (emulated) ROM constant
+fp_fmovecr:
+	bfextu	%d2{#27,#5},%d0
+	printf	PINSTR,"fp_fmovecr #%d",1,%d0
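+	| each table entry below is 3 longs (12 bytes), so scale the
+	| ROM offset by 3 here and by 4 in the lea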
+	move.l	%d0,%d1
+	add.l	%d0,%d0
+	add.l	%d1,%d0
+	lea	(fp_constants,%d0*4),%a0
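+	| 0x801cc0ff marks which of the ROM constants are inexact:
+	| shifting it left by (offset+1) moves the bit for this constant
+	| into the carry, and a set carry raises INEX2 below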
+	move.l	#0x801cc0ff,%d0
+	addq.l	#1,%d1
+	lsl.l	%d1,%d0
+	jcc	1f
+	fp_set_sr FPSR_EXC_INEX2			| INEX2 exception
+1:	moveq	#-128,%d0				| continue with fmove
+	and.l	%d0,%d2
+	jra	fp_getdest
+
+	.data
+	.align	4
+fp_constants:
+	.long	0x00004000,0xc90fdaa2,0x2168c235	| pi
+	.extend	0,0,0,0,0,0,0,0,0,0
+	.long	0x00003ffd,0x9a209a84,0xfbcff798	| log10(2)
+	.long	0x00004000,0xadf85458,0xa2bb4a9a	| e
+	.long	0x00003fff,0xb8aa3b29,0x5c17f0bc	| log2(e)
+	.long	0x00003ffd,0xde5bd8a9,0x37287195	| log10(e)
+	.long	0x00000000,0x00000000,0x00000000	| 0.0
+	.long	0x00003ffe,0xb17217f7,0xd1cf79ac	| ln(2)
+	.long	0x00004000,0x935d8ddd,0xaaa8ac17	| ln(10)
+	| read this as "1.0 * 2^0" - note the high bit in the mantissa
+	.long	0x00003fff,0x80000000,0x00000000	| 10^0
+	.long	0x00004002,0xa0000000,0x00000000	| 10^1
+	.long	0x00004005,0xc8000000,0x00000000	| 10^2
+	.long	0x0000400c,0x9c400000,0x00000000	| 10^4
+	.long	0x00004019,0xbebc2000,0x00000000	| 10^8
+	.long	0x00004034,0x8e1bc9bf,0x04000000	| 10^16
+	.long	0x00004069,0x9dc5ada8,0x2b70b59e	| 10^32
+	.long	0x000040d3,0xc2781f49,0xffcfa6d5	| 10^64
+	.long	0x000041a8,0x93ba47c9,0x80e98ce0	| 10^128
+	.long	0x00004351,0xaa7eebfb,0x9df9de8e	| 10^256
+	.long	0x000046a3,0xe319a0ae,0xa60e91c7	| 10^512
+	.long	0x00004d48,0xc9767586,0x81750c17	| 10^1024
+	.long	0x00005a92,0x9e8b3b5d,0xc53d5de5	| 10^2048
+	.long	0x00007525,0xc4605202,0x8a20979b	| 10^4096
+	.previous
+
+fp_fmove_mem2fp:
+	printf	PINSTR,"fmove %p,%p\n",2,%a0,%a1
+	move.l	(%a1)+,(%a0)+
+	move.l	(%a1)+,(%a0)+
+	move.l	(%a1),(%a0)
+	subq.l	#8,%a0
+	rts
+
+fpa_fsglmul:
+	move.l	#fp_finalrounding_single_fast,(%sp)
+	jra	fp_fsglmul
+
+fpa_fsgldiv:
+	move.l	#fp_finalrounding_single_fast,(%sp)
+	jra	fp_fsgldiv
+
+.macro	fp_dosingleprec instr
+	printf	PINSTR,"single "
+	move.l	#fp_finalrounding_single,(%sp)
+	jra	\instr
+.endm
+
+.macro	fp_dodoubleprec instr
+	printf	PINSTR,"double "
+	move.l	#fp_finalrounding_double,(%sp)
+	jra	\instr
+.endm
+
+fp_fsmove:
+	fp_dosingleprec fp_fmove_mem2fp
+
+fp_fssqrt:
+	fp_dosingleprec fp_fsqrt
+
+fp_fdmove:
+	fp_dodoubleprec fp_fmove_mem2fp
+
+fp_fdsqrt:
+	fp_dodoubleprec fp_fsqrt
+
+fp_fsabs:
+	fp_dosingleprec fp_fabs
+
+fp_fsneg:
+	fp_dosingleprec fp_fneg
+
+fp_fdabs:
+	fp_dodoubleprec fp_fabs
+
+fp_fdneg:
+	fp_dodoubleprec fp_fneg
+
+fp_fsdiv:
+	fp_dosingleprec fp_fdiv
+
+fp_fsadd:
+	fp_dosingleprec fp_fadd
+
+fp_fsmul:
+	fp_dosingleprec fp_fmul
+
+fp_fddiv:
+	fp_dodoubleprec fp_fdiv
+
+fp_fdadd:
+	fp_dodoubleprec fp_fadd
+
+fp_fdmul:
+	fp_dodoubleprec fp_fmul
+
+fp_fssub:
+	fp_dosingleprec fp_fsub
+
+fp_fdsub:
+	fp_dodoubleprec fp_fsub
+
+fp_nonstd:
+	fp_get_pc %a0
+	getuser.l (%a0),%d0,fp_err_ua1,%a0
+	printf	,"nonstd ((%08x)=%08x)\n",2,%a0,%d0
+	moveq	#-1,%d0
+	rts
+
+	.data
+	.align	4
+
+	| data sizes corresponding to the operand formats
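+	| (long, single, extended, packed, word, double, byte, unused)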
+fp_datasize:
+	.word	4, 4, 12, 12, 2, 8, 1, 0
diff --git a/arch/m68k/math-emu/fp_trig.c b/arch/m68k/math-emu/fp_trig.c
new file mode 100644
index 0000000..6361d07
--- /dev/null
+++ b/arch/m68k/math-emu/fp_trig.c
@@ -0,0 +1,183 @@
+/*
+
+  fp_trig.c: floating-point math routines for the Linux-m68k
+  floating point emulator.
+
+  Copyright (c) 1998-1999 David Huggins-Daines / Roman Zippel.
+
+  I hereby give permission, free of charge, to copy, modify, and
+  redistribute this software, in source or binary form, provided that
+  the above copyright notice and the following disclaimer are included
+  in all such copies.
+
+  THIS SOFTWARE IS PROVIDED "AS IS", WITH ABSOLUTELY NO WARRANTY, REAL
+  OR IMPLIED.
+
+*/
+
+#include "fp_emu.h"
+#include "fp_trig.h"
+
+struct fp_ext *
+fp_fsin(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fsin\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fcos(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fcos\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_ftan(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("ftan\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fasin(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fasin\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_facos(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("facos\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fatan(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fatan\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fsinh(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fsinh\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fcosh(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fcosh\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_ftanh(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("ftanh\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fatanh(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fatanh\n");
+
+	fp_monadic_check(dest, src);
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fsincos0(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fsincos0\n");
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fsincos1(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fsincos1\n");
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fsincos2(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fsincos2\n");
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fsincos3(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fsincos3\n");
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fsincos4(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fsincos4\n");
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fsincos5(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fsincos5\n");
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fsincos6(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fsincos6\n");
+
+	return dest;
+}
+
+struct fp_ext *
+fp_fsincos7(struct fp_ext *dest, struct fp_ext *src)
+{
+	uprint("fsincos7\n");
+
+	return dest;
+}
diff --git a/arch/m68k/math-emu/fp_trig.h b/arch/m68k/math-emu/fp_trig.h
new file mode 100644
index 0000000..af8b247
--- /dev/null
+++ b/arch/m68k/math-emu/fp_trig.h
@@ -0,0 +1,32 @@
+/*
+
+  fp_trig.h: floating-point math routines for the Linux-m68k
+  floating point emulator.
+
+  Copyright (c) 1998 David Huggins-Daines.
+
+  I hereby give permission, free of charge, to copy, modify, and
+  redistribute this software, in source or binary form, provided that
+  the above copyright notice and the following disclaimer are included
+  in all such copies.
+
+  THIS SOFTWARE IS PROVIDED "AS IS", WITH ABSOLUTELY NO WARRANTY, REAL
+  OR IMPLIED.
+
+*/
+
+#ifndef FP_TRIG_H
+#define FP_TRIG_H
+
+#include "fp_emu.h"
+
+/* floating point trigonometric instructions:
+
+   the arguments to these are in the "internal" extended format, that
+   is, an "exploded" version of the 96-bit extended fp format used by
+   the 68881.
+
+   they return a status code, which should end up in %d0, if all goes
+   well.  */
+
+#endif /* FP_TRIG_H */
diff --git a/arch/m68k/math-emu/fp_util.S b/arch/m68k/math-emu/fp_util.S
new file mode 100644
index 0000000..a9f7f01
--- /dev/null
+++ b/arch/m68k/math-emu/fp_util.S
@@ -0,0 +1,1455 @@
+/*
+ * fp_util.S
+ *
+ * Copyright Roman Zippel, 1997.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/config.h>
+#include "fp_emu.h"
+
+/*
+ * Here are lots of conversion and normalization functions mainly
+ * used by fp_scan.S.
+ * Note that these functions are optimized for "normal" numbers;
+ * these are handled first and exit as fast as possible, which is
+ * especially important for fp_normalize_ext/fp_conv_ext2ext, as
+ * they are called very often.
+ * The register usage is tuned to whichever register fp_scan.S
+ * happens to have free at that point, so be careful if you want to
+ * change something here. %d0 and %d1 are always usable, sometimes
+ * %d2 (or only its lower half). Most functions have to return %a0
+ * unmodified, so that the caller can immediately reuse it.
+ */
+
+	.globl	fp_ill, fp_end
+
+	| exits from fp_scan:
+	| illegal instruction
+fp_ill:
+	printf	,"fp_illegal\n"
+	rts
+	| completed instruction
+fp_end:
+	tst.l	(TASK_MM-8,%a2)
+	jmi	1f
+	tst.l	(TASK_MM-4,%a2)
+	jmi	1f
+	tst.l	(TASK_MM,%a2)
+	jpl	2f
+1:	printf	,"oops:%p,%p,%p\n",3,%a2@(TASK_MM-8),%a2@(TASK_MM-4),%a2@(TASK_MM)
+2:	clr.l	%d0
+	rts
+
+	.globl	fp_conv_long2ext, fp_conv_single2ext
+	.globl	fp_conv_double2ext, fp_conv_ext2ext
+	.globl	fp_normalize_ext, fp_normalize_double
+	.globl	fp_normalize_single, fp_normalize_single_fast
+	.globl	fp_conv_ext2double, fp_conv_ext2single
+	.globl	fp_conv_ext2long, fp_conv_ext2short
+	.globl	fp_conv_ext2byte
+	.globl	fp_finalrounding_single, fp_finalrounding_single_fast
+	.globl	fp_finalrounding_double
+	.globl	fp_finalrounding, fp_finaltest, fp_final
+
+/*
+ * First, several functions that convert a source operand into the
+ * extended format. Note that only fp_conv_ext2ext normalizes the
+ * number; it is always called after the other conversion functions,
+ * which merely move the information into the fp_ext structure.
+ */
+
+	| fp_conv_long2ext:
+	|
+	| args:	%d0 = source (32-bit long)
+	|	%a0 = destination (ptr to struct fp_ext)
+
+fp_conv_long2ext:
+	printf	PCONV,"l2e: %p -> %p(",2,%d0,%a0
+	clr.l	%d1			| sign defaults to zero
+	tst.l	%d0
+	jeq	fp_l2e_zero		| is source zero?
+	jpl	1f			| positive?
+	moveq	#1,%d1
+	neg.l	%d0
+1:	swap	%d1
+	move.w	#0x3fff+31,%d1
+	move.l	%d1,(%a0)+		| set sign / exp
+	move.l	%d0,(%a0)+		| set mantissa
+	clr.l	(%a0)
+	subq.l	#8,%a0			| restore %a0
+	printx	PCONV,%a0@
+	printf	PCONV,")\n"
+	rts
+	| source is zero
+fp_l2e_zero:
+	clr.l	(%a0)+
+	clr.l	(%a0)+
+	clr.l	(%a0)
+	subq.l	#8,%a0
+	printx	PCONV,%a0@
+	printf	PCONV,")\n"
+	rts
+
+	| fp_conv_single2ext
+	| args:	%d0 = source (single-precision fp value)
+	|	%a0 = dest (struct fp_ext *)
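+	|
+	| worked example for a normal number: 1.0f = 0x3f800000
+	|   sign = 0, biased exp = 0x7f, fraction = 0
+	|   extended result: exp = 0x7f + (0x3fff - 0x7f) = 0x3fff,
+	|   mantissa = 0x80000000 00000000 (explicit integer bit set)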
+
+fp_conv_single2ext:
+	printf	PCONV,"s2e: %p -> %p(",2,%d0,%a0
+	move.l	%d0,%d1
+	lsl.l	#8,%d0			| shift mantissa
+	lsr.l	#8,%d1			| exponent / sign
+	lsr.l	#7,%d1
+	lsr.w	#8,%d1
+	jeq	fp_s2e_small		| zero / denormal?
+	cmp.w	#0xff,%d1		| NaN / Inf?
+	jeq	fp_s2e_large
+	bset	#31,%d0			| set explicit bit
+	add.w	#0x3fff-0x7f,%d1	| re-bias the exponent.
+9:	move.l	%d1,(%a0)+		| fp_ext.sign, fp_ext.exp
+	move.l	%d0,(%a0)+		| high lword of fp_ext.mant
+	clr.l	(%a0)			| low lword = 0
+	subq.l	#8,%a0
+	printx	PCONV,%a0@
+	printf	PCONV,")\n"
+	rts
+	| zeros and denormalized
+fp_s2e_small:
+	| exponent is zero, so explicit bit is already zero too
+	tst.l	%d0
+	jeq	9b
+	move.w	#0x4000-0x7f,%d1
+	jra	9b
+	| infinities and NAN
+fp_s2e_large:
+	bclr	#31,%d0			| clear explicit bit
+	move.w	#0x7fff,%d1
+	jra	9b
+
+fp_conv_double2ext:
+#ifdef FPU_EMU_DEBUG
+	getuser.l %a1@(0),%d0,fp_err_ua2,%a1
+	getuser.l %a1@(4),%d1,fp_err_ua2,%a1
+	printf	PCONV,"d2e: %p%p -> %p(",3,%d0,%d1,%a0
+#endif
+	getuser.l (%a1)+,%d0,fp_err_ua2,%a1
+	move.l	%d0,%d1
+	lsl.l	#8,%d0			| shift high mantissa
+	lsl.l	#3,%d0
+	lsr.l	#8,%d1			| exponent / sign
+	lsr.l	#7,%d1
+	lsr.w	#5,%d1
+	jeq	fp_d2e_small		| zero / denormal?
+	cmp.w	#0x7ff,%d1		| NaN / Inf?
+	jeq	fp_d2e_large
+	bset	#31,%d0			| set explicit bit
+	add.w	#0x3fff-0x3ff,%d1	| re-bias the exponent.
+9:	move.l	%d1,(%a0)+		| fp_ext.sign, fp_ext.exp
+	move.l	%d0,(%a0)+
+	getuser.l (%a1)+,%d0,fp_err_ua2,%a1
+	move.l	%d0,%d1
+	lsl.l	#8,%d0
+	lsl.l	#3,%d0
+	move.l	%d0,(%a0)
+	moveq	#21,%d0
+	lsr.l	%d0,%d1
+	or.l	%d1,-(%a0)
+	subq.l	#4,%a0
+	printx	PCONV,%a0@
+	printf	PCONV,")\n"
+	rts
+	| zeros and denormalized
+fp_d2e_small:
+	| exponent is zero, so explicit bit is already zero too
+	tst.l	%d0
+	jeq	9b
+	move.w	#0x4000-0x3ff,%d1
+	jra	9b
+	| infinities and NAN
+fp_d2e_large:
+	bclr	#31,%d0			| clear explicit bit
+	move.w	#0x7fff,%d1
+	jra	9b
+
+	| fp_conv_ext2ext:
+	| originally used to get longdouble from userspace, now it's
+	| called before arithmetic operations to make sure the number
+	| is normalized [maybe rename it?].
+	| args:	%a0 = dest (struct fp_ext *)
+	| returns 0 in %d0 for a NaN, otherwise 1
+
+fp_conv_ext2ext:
+	printf	PCONV,"e2e: %p(",1,%a0
+	printx	PCONV,%a0@
+	printf	PCONV,"), "
+	move.l	(%a0)+,%d0
+	cmp.w	#0x7fff,%d0		| Inf / NaN?
+	jeq	fp_e2e_large
+	move.l	(%a0),%d0
+	jpl	fp_e2e_small		| zero / denorm?
+	| The high bit is set, so normalization is irrelevant.
+fp_e2e_checkround:
+	subq.l	#4,%a0
+#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
+	move.b	(%a0),%d0
+	jne	fp_e2e_round
+#endif
+	printf	PCONV,"%p(",1,%a0
+	printx	PCONV,%a0@
+	printf	PCONV,")\n"
+	moveq	#1,%d0
+	rts
+#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
+fp_e2e_round:
+	fp_set_sr FPSR_EXC_INEX2
+	clr.b	(%a0)
+	move.w	(FPD_RND,FPDATA),%d2
+	jne	fp_e2e_roundother	| %d2 == 0, round to nearest
+	tst.b	%d0			| test guard bit
+	jpl	9f			| zero is closer
+	btst	#0,(11,%a0)		| test lsb bit
+	jne	fp_e2e_doroundup	| round to infinity
+	lsl.b	#1,%d0			| check low bits
+	jeq	9f			| round to zero
+fp_e2e_doroundup:
+	addq.l	#1,(8,%a0)
+	jcc	9f
+	addq.l	#1,(4,%a0)
+	jcc	9f
+	move.w	#0x8000,(4,%a0)
+	addq.w	#1,(2,%a0)
+9:	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+fp_e2e_roundother:
+	subq.w	#2,%d2
+	jcs	9b			| %d2 < 2, round to zero
+	jhi	1f			| %d2 > 2, round to +infinity
+	tst.b	(1,%a0)			| to -inf
+	jne	fp_e2e_doroundup	| negative, round to infinity
+	jra	9b			| positive, round to zero
+1:	tst.b	(1,%a0)			| to +inf
+	jeq	fp_e2e_doroundup	| positive, round to infinity
+	jra	9b			| negative, round to zero
+#endif
+	| zeros and subnormals:
+	| try to normalize these anyway.
+fp_e2e_small:
+	jne	fp_e2e_small1		| high lword zero?
+	move.l	(4,%a0),%d0
+	jne	fp_e2e_small2
+#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
+	clr.l	%d0
+	move.b	(-4,%a0),%d0
+	jne	fp_e2e_small3
+#endif
+	| Genuine zero.
+	clr.w	-(%a0)
+	subq.l	#2,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	moveq	#1,%d0
+	rts
+	| definitely subnormal, need to shift all 64 bits
+fp_e2e_small1:
+	bfffo	%d0{#0,#32},%d1
+	move.w	-(%a0),%d2
+	sub.w	%d1,%d2
+	jcc	1f
+	| Pathologically small, denormalize.
+	add.w	%d2,%d1
+	clr.w	%d2
+1:	move.w	%d2,(%a0)+
+	move.w	%d1,%d2
+	jeq	fp_e2e_checkround
+	| fancy 64-bit double-shift begins here
+	lsl.l	%d2,%d0
+	move.l	%d0,(%a0)+
+	move.l	(%a0),%d0
+	move.l	%d0,%d1
+	lsl.l	%d2,%d0
+	move.l	%d0,(%a0)
+	neg.w	%d2
+	and.w	#0x1f,%d2
+	lsr.l	%d2,%d1
+	or.l	%d1,-(%a0)
+#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
+fp_e2e_extra1:
+	clr.l	%d0
+	move.b	(-4,%a0),%d0
+	neg.w	%d2
+	add.w	#24,%d2
+	jcc	1f
+	clr.b	(-4,%a0)
+	lsl.l	%d2,%d0
+	or.l	%d0,(4,%a0)
+	jra	fp_e2e_checkround
+1:	addq.w	#8,%d2
+	lsl.l	%d2,%d0
+	move.b	%d0,(-4,%a0)
+	lsr.l	#8,%d0
+	or.l	%d0,(4,%a0)
+#endif
+	jra	fp_e2e_checkround
+	| pathologically small subnormal
+fp_e2e_small2:
+	bfffo	%d0{#0,#32},%d1
+	add.w	#32,%d1
+	move.w	-(%a0),%d2
+	sub.w	%d1,%d2
+	jcc	1f
+	| Beyond pathologically small, denormalize.
+	add.w	%d2,%d1
+	clr.w	%d2
+1:	move.w	%d2,(%a0)+
+	ext.l	%d1
+	jeq	fp_e2e_checkround
+	clr.l	(4,%a0)
+	sub.w	#32,%d2
+	jcs	1f
+	lsl.l	%d1,%d0			| lower lword needs only to be shifted
+	move.l	%d0,(%a0)		| into the higher lword
+#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
+	clr.l	%d0
+	move.b	(-4,%a0),%d0
+	clr.b	(-4,%a0)
+	neg.w	%d1
+	add.w	#32,%d1
+	bfins	%d0,(%a0){%d1,#8}
+#endif
+	jra	fp_e2e_checkround
+1:	neg.w	%d1			| lower lword is split between
+	bfins	%d0,(%a0){%d1,#32}	| higher and lower lword
+#ifndef CONFIG_M68KFPU_EMU_EXTRAPREC
+	jra	fp_e2e_checkround
+#else
+	move.w	%d1,%d2
+	jra	fp_e2e_extra1
+	| These are extremely small numbers, that will mostly end up as zero
+	| anyway, so this is only important for correct rounding.
+fp_e2e_small3:
+	bfffo	%d0{#24,#8},%d1
+	add.w	#40,%d1
+	move.w	-(%a0),%d2
+	sub.w	%d1,%d2
+	jcc	1f
+	| Pathologically small, denormalize.
+	add.w	%d2,%d1
+	clr.w	%d2
+1:	move.w	%d2,(%a0)+
+	ext.l	%d1
+	jeq	fp_e2e_checkround
+	cmp.w	#8,%d1
+	jcs	2f
+1:	clr.b	(-4,%a0)
+	sub.w	#64,%d1
+	jcs	1f
+	add.w	#24,%d1
+	lsl.l	%d1,%d0
+	move.l	%d0,(%a0)
+	jra	fp_e2e_checkround
+1:	neg.w	%d1
+	bfins	%d0,(%a0){%d1,#8}
+	jra	fp_e2e_checkround
+2:	lsl.l	%d1,%d0
+	move.b	%d0,(-4,%a0)
+	lsr.l	#8,%d0
+	move.b	%d0,(7,%a0)
+	jra	fp_e2e_checkround
+#endif
+1:	move.l	%d0,%d1			| lower lword is split between
+	lsl.l	%d2,%d0			| higher and lower lword
+	move.l	%d0,(%a0)
+	move.l	%d1,%d0
+	neg.w	%d2
+	add.w	#32,%d2
+	lsr.l	%d2,%d0
+	move.l	%d0,-(%a0)
+	jra	fp_e2e_checkround
+	| Infinities and NaNs
+fp_e2e_large:
+	move.l	(%a0)+,%d0
+	jne	3f
+1:	tst.l	(%a0)
+	jne	4f
+	moveq	#1,%d0
+2:	subq.l	#8,%a0
+	printf	PCONV,"%p(",1,%a0
+	printx	PCONV,%a0@
+	printf	PCONV,")\n"
+	rts
+	| we have maybe a NaN, shift off the highest bit
+3:	lsl.l	#1,%d0
+	jeq	1b
+	| we have a NaN, clear the return value
+4:	clrl	%d0
+	jra	2b
+
+
+/*
+ * Normalization functions.  Call these on the output of general
+ * FP operators, and before any conversion into the destination
+ * formats. fp_normalize_ext has always to be called first, the
+ * following conversion functions expect an already normalized
+ * number.
+ */
+
+	| fp_normalize_ext:
+	| normalize an extended in extended (unpacked) format, basically
+	| it does the same as fp_conv_ext2ext, additionally it also does
+	| the necessary postprocessing checks.
+	| args:	%a0 (struct fp_ext *)
+	| NOTE: it does _not_ modify %a0/%a1 and the upper word of %d2
+
+fp_normalize_ext:
+	printf	PNORM,"ne: %p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,"), "
+	move.l	(%a0)+,%d0
+	cmp.w	#0x7fff,%d0		| Inf / NaN?
+	jeq	fp_ne_large
+	move.l	(%a0),%d0
+	jpl	fp_ne_small		| zero / denorm?
+	| The high bit is set, so normalization is irrelevant.
+fp_ne_checkround:
+	subq.l	#4,%a0
+#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
+	move.b	(%a0),%d0
+	jne	fp_ne_round
+#endif
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
+fp_ne_round:
+	fp_set_sr FPSR_EXC_INEX2
+	clr.b	(%a0)
+	move.w	(FPD_RND,FPDATA),%d2
+	jne	fp_ne_roundother	| %d2 == 0, round to nearest
+	tst.b	%d0			| test guard bit
+	jpl	9f			| zero is closer
+	btst	#0,(11,%a0)		| test lsb bit
+	jne	fp_ne_doroundup		| round to infinity
+	lsl.b	#1,%d0			| check low bits
+	jeq	9f			| round to zero
+fp_ne_doroundup:
+	addq.l	#1,(8,%a0)
+	jcc	9f
+	addq.l	#1,(4,%a0)
+	jcc	9f
+	addq.w	#1,(2,%a0)
+	move.w	#0x8000,(4,%a0)
+9:	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+fp_ne_roundother:
+	subq.w	#2,%d2
+	jcs	9b			| %d2 < 2, round to zero
+	jhi	1f			| %d2 > 2, round to +infinity
+	tst.b	(1,%a0)			| to -inf
+	jne	fp_ne_doroundup		| negative, round to infinity
+	jra	9b			| positive, round to zero
+1:	tst.b	(1,%a0)			| to +inf
+	jeq	fp_ne_doroundup		| positive, round to infinity
+	jra	9b			| negative, round to zero
+#endif
+	| Zeros and subnormal numbers
+	| These are probably merely subnormal, rather than "denormalized"
+	|  numbers, so we will try to make them normal again.
+fp_ne_small:
+	jne	fp_ne_small1		| high lword zero?
+	move.l	(4,%a0),%d0
+	jne	fp_ne_small2
+#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
+	clr.l	%d0
+	move.b	(-4,%a0),%d0
+	jne	fp_ne_small3
+#endif
+	| Genuine zero.
+	clr.w	-(%a0)
+	subq.l	#2,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+	| Subnormal.
+fp_ne_small1:
+	bfffo	%d0{#0,#32},%d1
+	move.w	-(%a0),%d2
+	sub.w	%d1,%d2
+	jcc	1f
+	| Pathologically small, denormalize.
+	add.w	%d2,%d1
+	clr.w	%d2
+	fp_set_sr FPSR_EXC_UNFL
+1:	move.w	%d2,(%a0)+
+	move.w	%d1,%d2
+	jeq	fp_ne_checkround
+	| This is exactly the same 64-bit double shift as seen above.
+	lsl.l	%d2,%d0
+	move.l	%d0,(%a0)+
+	move.l	(%a0),%d0
+	move.l	%d0,%d1
+	lsl.l	%d2,%d0
+	move.l	%d0,(%a0)
+	neg.w	%d2
+	and.w	#0x1f,%d2
+	lsr.l	%d2,%d1
+	or.l	%d1,-(%a0)
+#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
+fp_ne_extra1:
+	clr.l	%d0
+	move.b	(-4,%a0),%d0
+	neg.w	%d2
+	add.w	#24,%d2
+	jcc	1f
+	clr.b	(-4,%a0)
+	lsl.l	%d2,%d0
+	or.l	%d0,(4,%a0)
+	jra	fp_ne_checkround
+1:	addq.w	#8,%d2
+	lsl.l	%d2,%d0
+	move.b	%d0,(-4,%a0)
+	lsr.l	#8,%d0
+	or.l	%d0,(4,%a0)
+#endif
+	jra	fp_ne_checkround
+	| May or may not be subnormal, if so, only 32 bits to shift.
+fp_ne_small2:
+	bfffo	%d0{#0,#32},%d1
+	add.w	#32,%d1
+	move.w	-(%a0),%d2
+	sub.w	%d1,%d2
+	jcc	1f
+	| Beyond pathologically small, denormalize.
+	add.w	%d2,%d1
+	clr.w	%d2
+	fp_set_sr FPSR_EXC_UNFL
+1:	move.w	%d2,(%a0)+
+	ext.l	%d1
+	jeq	fp_ne_checkround
+	clr.l	(4,%a0)
+	sub.w	#32,%d1
+	jcs	1f
+	lsl.l	%d1,%d0			| lower lword needs only to be shifted
+	move.l	%d0,(%a0)		| into the higher lword
+#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
+	clr.l	%d0
+	move.b	(-4,%a0),%d0
+	clr.b	(-4,%a0)
+	neg.w	%d1
+	add.w	#32,%d1
+	bfins	%d0,(%a0){%d1,#8}
+#endif
+	jra	fp_ne_checkround
+1:	neg.w	%d1			| lower lword is split between
+	bfins	%d0,(%a0){%d1,#32}	| higher and lower lword
+#ifndef CONFIG_M68KFPU_EMU_EXTRAPREC
+	jra	fp_ne_checkround
+#else
+	move.w	%d1,%d2
+	jra	fp_ne_extra1
+	| These are extremely small numbers, that will mostly end up as zero
+	| anyway, so this is only important for correct rounding.
+fp_ne_small3:
+	bfffo	%d0{#24,#8},%d1
+	add.w	#40,%d1
+	move.w	-(%a0),%d2
+	sub.w	%d1,%d2
+	jcc	1f
+	| Pathologically small, denormalize.
+	add.w	%d2,%d1
+	clr.w	%d2
+1:	move.w	%d2,(%a0)+
+	ext.l	%d1
+	jeq	fp_ne_checkround
+	cmp.w	#8,%d1
+	jcs	2f
+1:	clr.b	(-4,%a0)
+	sub.w	#64,%d1
+	jcs	1f
+	add.w	#24,%d1
+	lsl.l	%d1,%d0
+	move.l	%d0,(%a0)
+	jra	fp_ne_checkround
+1:	neg.w	%d1
+	bfins	%d0,(%a0){%d1,#8}
+	jra	fp_ne_checkround
+2:	lsl.l	%d1,%d0
+	move.b	%d0,(-4,%a0)
+	lsr.l	#8,%d0
+	move.b	%d0,(7,%a0)
+	jra	fp_ne_checkround
+#endif
+	| Infinities and NaNs, again, same as above.
+fp_ne_large:
+	move.l	(%a0)+,%d0
+	jne	3f
+1:	tst.l	(%a0)
+	jne	4f
+2:	subq.l	#8,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+	| we have maybe a NaN, shift off the highest bit
+3:	move.l	%d0,%d1
+	lsl.l	#1,%d1
+	jne	4f
+	clr.l	(-4,%a0)
+	jra	1b
+	| we have a NaN, test if it is signaling
+4:	bset	#30,%d0
+	jne	2b
+	fp_set_sr FPSR_EXC_SNAN
+	move.l	%d0,(-4,%a0)
+	jra	2b
+
+	| these next two do rounding as per the IEEE standard.
+	| values for the rounding modes appear to be:
+	| 0:	Round to nearest
+	| 1:	Round to zero
+	| 2:	Round to -Infinity
+	| 3:	Round to +Infinity
+	| both functions expect that fp_normalize was already
+	| called (and the extended argument is already normalized
+	| as far as possible); they are used when a different
+	| rounding precision is selected and before converting
+	| into single/double
+
+	| fp_normalize_double:
+	| normalize an extended with double (52-bit) precision
+	| args:	 %a0 (struct fp_ext *)
+
+fp_normalize_double:
+	printf	PNORM,"nd: %p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,"), "
+	move.l	(%a0)+,%d2
+	tst.w	%d2
+	jeq	fp_nd_zero		| zero / denormalized
+	cmp.w	#0x7fff,%d2
+	jeq	fp_nd_huge		| NaN / infinity.
+	sub.w	#0x4000-0x3ff,%d2	| will the exponent fit?
+	jcs	fp_nd_small		| too small.
+	cmp.w	#0x7fe,%d2
+	jcc	fp_nd_large		| too big.
+	addq.l	#4,%a0
+	move.l	(%a0),%d0		| low lword of mantissa
+	| now, round off the low 11 bits.
+fp_nd_round:
+	moveq	#21,%d1
+	lsl.l	%d1,%d0			| keep 11 low bits.
+	jne	fp_nd_checkround	| Are they non-zero?
+	| nothing to do here
+9:	subq.l	#8,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+	| Be careful with the X bit! It contains the lsb
+	| from the shift above, it is needed for round to nearest.
+fp_nd_checkround:
+	fp_set_sr FPSR_EXC_INEX2	| INEX2 bit
+	and.w	#0xf800,(2,%a0)		| clear bits 0-10
+	move.w	(FPD_RND,FPDATA),%d2	| rounding mode
+	jne	2f			| %d2 == 0, round to nearest
+	tst.l	%d0			| test guard bit
+	jpl	9b			| zero is closer
+	| here we test the X bit by adding it to %d2
+	clr.w	%d2			| first set z bit, addx only clears it
+	addx.w	%d2,%d2			| test lsb bit
+	| IEEE754-specified "round to even" behaviour.  If the guard
+	| bit is set, then the number is odd, so rounding works like
+	| in grade-school arithmetic (i.e. 1.5 rounds to 2.0)
+	| Otherwise, an equal distance rounds towards zero, so as not
+	| to produce an odd number.  This is strange, but it is what
+	| the standard says.
+	jne	fp_nd_doroundup		| round to infinity
+	lsl.l	#1,%d0			| check low bits
+	jeq	9b			| round to zero
+fp_nd_doroundup:
+	| round (the mantissa, that is) towards infinity
+	add.l	#0x800,(%a0)
+	jcc	9b			| no overflow, good.
+	addq.l	#1,-(%a0)		| extend to high lword
+	jcc	1f			| no overflow, good.
+	| Yow! we have managed to overflow the mantissa.  Since this
+	| only happens when %d1 was 0xfffff800, it is now zero, so
+	| reset the high bit, and increment the exponent.
+	move.w	#0x8000,(%a0)
+	addq.w	#1,-(%a0)
+	cmp.w	#0x43ff,(%a0)+		| exponent now overflown?
+	jeq	fp_nd_large		| yes, so make it infinity.
+1:	subq.l	#4,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+2:	subq.w	#2,%d2
+	jcs	9b			| %d2 < 2, round to zero
+	jhi	3f			| %d2 > 2, round to +infinity
+	| Round to +Inf or -Inf.  High word of %d2 contains the
+	| sign of the number, by the way.
+	swap	%d2			| to -inf
+	tst.b	%d2
+	jne	fp_nd_doroundup		| negative, round to infinity
+	jra	9b			| positive, round to zero
+3:	swap	%d2			| to +inf
+	tst.b	%d2
+	jeq	fp_nd_doroundup		| positive, round to infinity
+	jra	9b			| negative, round to zero
+	| Exponent underflow.  Try to make a denormal, and set it to
+	| the smallest possible fraction if this fails.
+fp_nd_small:
+	fp_set_sr FPSR_EXC_UNFL		| set UNFL bit
+	move.w	#0x3c01,(-2,%a0)	| 2**-1022
+	neg.w	%d2			| degree of underflow
+	cmp.w	#32,%d2			| single or double shift?
+	jcc	1f
+	| Again, another 64-bit double shift.
+	move.l	(%a0),%d0
+	move.l	%d0,%d1
+	lsr.l	%d2,%d0
+	move.l	%d0,(%a0)+
+	move.l	(%a0),%d0
+	lsr.l	%d2,%d0
+	neg.w	%d2
+	add.w	#32,%d2
+	lsl.l	%d2,%d1
+	or.l	%d1,%d0
+	move.l	(%a0),%d1
+	move.l	%d0,(%a0)
+	| Check to see if we shifted off any significant bits
+	lsl.l	%d2,%d1
+	jeq	fp_nd_round		| Nope, round.
+	bset	#0,%d0			| Yes, so set the "sticky bit".
+	jra	fp_nd_round		| Now, round.
+	| Another 64-bit single shift and store
+1:	sub.w	#32,%d2
+	cmp.w	#32,%d2			| Do we really need to shift?
+	jcc	2f			| No, the number is too small.
+	move.l	(%a0),%d0
+	clr.l	(%a0)+
+	move.l	%d0,%d1
+	lsr.l	%d2,%d0
+	neg.w	%d2
+	add.w	#32,%d2
+	| Again, check to see if we shifted off any significant bits.
+	tst.l	(%a0)
+	jeq	1f
+	bset	#0,%d0			| Sticky bit.
+1:	move.l	%d0,(%a0)
+	lsl.l	%d2,%d1
+	jeq	fp_nd_round
+	bset	#0,%d0
+	jra	fp_nd_round
+	| Sorry, the number is just too small.
+2:	clr.l	(%a0)+
+	clr.l	(%a0)
+	moveq	#1,%d0			| Smallest possible fraction,
+	jra	fp_nd_round		| round as desired.
+	| zero and denormalized
+fp_nd_zero:
+	tst.l	(%a0)+
+	jne	1f
+	tst.l	(%a0)
+	jne	1f
+	subq.l	#8,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts				| zero.  nothing to do.
+	| These are not merely subnormal numbers, but true denormals,
+	| i.e. pathologically small (exponent is 2**-16383) numbers.
+	| It is clearly impossible for even a normal extended number
+	| with that exponent to fit into double precision, so just
+	| write these ones off as "too darn small".
+1:	fp_set_sr FPSR_EXC_UNFL		| Set UNFL bit
+	clr.l	(%a0)
+	clr.l	-(%a0)
+	move.w	#0x3c01,-(%a0)		| i.e. 2**-1022
+	addq.l	#6,%a0
+	moveq	#1,%d0
+	jra	fp_nd_round		| round.
+	| Exponent overflow.  Just call it infinity.
+fp_nd_large:
+	move.w	#0x7ff,%d0
+	and.w	(6,%a0),%d0
+	jeq	1f
+	fp_set_sr FPSR_EXC_INEX2
+1:	fp_set_sr FPSR_EXC_OVFL
+	move.w	(FPD_RND,FPDATA),%d2
+	jne	3f			| %d2 = 0 round to nearest
+1:	move.w	#0x7fff,(-2,%a0)
+	clr.l	(%a0)+
+	clr.l	(%a0)
+2:	subq.l	#8,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+3:	subq.w	#2,%d2
+	jcs	5f			| %d2 < 2, round to zero
+	jhi	4f			| %d2 > 2, round to +infinity
+	tst.b	(-3,%a0)		| to -inf
+	jne	1b
+	jra	5f
+4:	tst.b	(-3,%a0)		| to +inf
+	jeq	1b
+5:	move.w	#0x43fe,(-2,%a0)
+	moveq	#-1,%d0
+	move.l	%d0,(%a0)+
+	move.w	#0xf800,%d0
+	move.l	%d0,(%a0)
+	jra	2b
+	| Infinities or NaNs
+fp_nd_huge:
+	subq.l	#4,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+
+	| fp_normalize_single:
+	| normalize an extended with single (23-bit) precision
+	| args:	 %a0 (struct fp_ext *)
+
+fp_normalize_single:
+	printf	PNORM,"ns: %p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,") "
+	addq.l	#2,%a0
+	move.w	(%a0)+,%d2
+	jeq	fp_ns_zero		| zero / denormalized
+	cmp.w	#0x7fff,%d2
+	jeq	fp_ns_huge		| NaN / infinity.
+	sub.w	#0x4000-0x7f,%d2	| will the exponent fit?
+	jcs	fp_ns_small		| too small.
+	cmp.w	#0xfe,%d2
+	jcc	fp_ns_large		| too big.
+	move.l	(%a0)+,%d0		| get high lword of mantissa
+fp_ns_round:
+	tst.l	(%a0)			| check the low lword
+	jeq	1f
+	| Set a sticky bit if it is non-zero.  This should only
+	| affect the rounding in what would otherwise be equal-
+	| distance situations, which is what we want it to do.
+	bset	#0,%d0
+1:	clr.l	(%a0)			| zap it from memory.
+	| now, round off the low 8 bits of the hi lword.
+	tst.b	%d0			| 8 low bits.
+	jne	fp_ns_checkround	| Are they non-zero?
+	| nothing to do here
+	subq.l	#8,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+fp_ns_checkround:
+	fp_set_sr FPSR_EXC_INEX2	| INEX2 bit
+	clr.b	-(%a0)			| clear low byte of high lword
+	subq.l	#3,%a0
+	move.w	(FPD_RND,FPDATA),%d2	| rounding mode
+	jne	2f			| %d2 == 0, round to nearest
+	tst.b	%d0			| test guard bit
+	jpl	9f			| zero is closer
+	btst	#8,%d0			| test lsb bit
+	| round to even behaviour, see above.
+	jne	fp_ns_doroundup		| round to infinity
+	lsl.b	#1,%d0			| check low bits
+	jeq	9f			| round to zero
+fp_ns_doroundup:
+	| round (the mantissa, that is) towards infinity
+	add.l	#0x100,(%a0)
+	jcc	9f			| no overflow, good.
+	| Overflow.  This means that the %d1 was 0xffffff00, so it
+	| is now zero.  We will set the mantissa to reflect this, and
+	| increment the exponent (checking for overflow there too)
+	move.w	#0x8000,(%a0)
+	addq.w	#1,-(%a0)
+	cmp.w	#0x407f,(%a0)+		| exponent now overflown?
+	jeq	fp_ns_large		| yes, so make it infinity.
+9:	subq.l	#4,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+	| check nondefault rounding modes
+2:	subq.w	#2,%d2
+	jcs	9b			| %d2 < 2, round to zero
+	jhi	3f			| %d2 > 2, round to +infinity
+	tst.b	(-3,%a0)		| to -inf
+	jne	fp_ns_doroundup		| negative, round to infinity
+	jra	9b			| positive, round to zero
+3:	tst.b	(-3,%a0)		| to +inf
+	jeq	fp_ns_doroundup		| positive, round to infinity
+	jra	9b			| negative, round to zero
+	| Exponent underflow.  Try to make a denormal, and set it to
+	| the smallest possible fraction if this fails.
+fp_ns_small:
+	fp_set_sr FPSR_EXC_UNFL		| set UNFL bit
+	move.w	#0x3f81,(-2,%a0)	| 2**-126
+	neg.w	%d2			| degree of underflow
+	cmp.w	#32,%d2			| single or double shift?
+	jcc	2f
+	| a 32-bit shift.
+	move.l	(%a0),%d0
+	move.l	%d0,%d1
+	lsr.l	%d2,%d0
+	move.l	%d0,(%a0)+
+	| Check to see if we shifted off any significant bits.
+	neg.w	%d2
+	add.w	#32,%d2
+	lsl.l	%d2,%d1
+	jeq	1f
+	bset	#0,%d0			| Sticky bit.
+	| Check the lower lword
+1:	tst.l	(%a0)
+	jeq	fp_ns_round
+	clr	(%a0)
+	bset	#0,%d0			| Sticky bit.
+	jra	fp_ns_round
+	| Sorry, the number is just too small.
+2:	clr.l	(%a0)+
+	clr.l	(%a0)
+	moveq	#1,%d0			| Smallest possible fraction,
+	jra	fp_ns_round		| round as desired.
+	| Exponent overflow.  Just call it infinity.
+fp_ns_large:
+	tst.b	(3,%a0)
+	jeq	1f
+	fp_set_sr FPSR_EXC_INEX2
+1:	fp_set_sr FPSR_EXC_OVFL
+	move.w	(FPD_RND,FPDATA),%d2
+	jne	3f			| %d2 = 0 round to nearest
+1:	move.w	#0x7fff,(-2,%a0)
+	clr.l	(%a0)+
+	clr.l	(%a0)
+2:	subq.l	#8,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+3:	subq.w	#2,%d2
+	jcs	5f			| %d2 < 2, round to zero
+	jhi	4f			| %d2 > 2, round to +infinity
+	tst.b	(-3,%a0)		| to -inf
+	jne	1b
+	jra	5f
+4:	tst.b	(-3,%a0)		| to +inf
+	jeq	1b
+5:	move.w	#0x407e,(-2,%a0)
+	move.l	#0xffffff00,(%a0)+
+	clr.l	(%a0)
+	jra	2b
+	| zero and denormalized
+fp_ns_zero:
+	tst.l	(%a0)+
+	jne	1f
+	tst.l	(%a0)
+	jne	1f
+	subq.l	#8,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts				| zero.  nothing to do.
+	| These are not merely subnormal numbers, but true denormals,
+	| i.e. pathologically small (exponent is 2**-16383) numbers.
+	| It is clearly impossible for even a normal extended number
+	| with that exponent to fit into single precision, so just
+	| write these ones off as "too darn small".
+1:	fp_set_sr FPSR_EXC_UNFL		| Set UNFL bit
+	clr.l	(%a0)
+	clr.l	-(%a0)
+	move.w	#0x3f81,-(%a0)		| i.e. 2**-126
+	addq.l	#6,%a0
+	moveq	#1,%d0
+	jra	fp_ns_round		| round.
+	| Infinities or NaNs
+fp_ns_huge:
+	subq.l	#4,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+
+	| fp_normalize_single_fast:
+	| normalize an extended with single (23-bit) precision
+	| this is only used by fsgldiv/fsglmul, where the
+	| operand is not completely normalized.
+	| args:	 %a0 (struct fp_ext *)
+
+fp_normalize_single_fast:
+	printf	PNORM,"nsf: %p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,") "
+	addq.l	#2,%a0
+	move.w	(%a0)+,%d2
+	cmp.w	#0x7fff,%d2
+	jeq	fp_nsf_huge		| NaN / infinity.
+	move.l	(%a0)+,%d0		| get high lword of mantissa
+fp_nsf_round:
+	tst.l	(%a0)			| check the low lword
+	jeq	1f
+	| Set a sticky bit if it is non-zero.  This should only
+	| affect the rounding in what would otherwise be equal-
+	| distance situations, which is what we want it to do.
+	bset	#0,%d0
+1:	clr.l	(%a0)			| zap it from memory.
+	| now, round off the low 8 bits of the hi lword.
+	tst.b	%d0			| 8 low bits.
+	jne	fp_nsf_checkround	| Are they non-zero?
+	| nothing to do here
+	subq.l	#8,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+fp_nsf_checkround:
+	fp_set_sr FPSR_EXC_INEX2	| INEX2 bit
+	clr.b	-(%a0)			| clear low byte of high lword
+	subq.l	#3,%a0
+	move.w	(FPD_RND,FPDATA),%d2	| rounding mode
+	jne	2f			| %d2 == 0, round to nearest
+	tst.b	%d0			| test guard bit
+	jpl	9f			| zero is closer
+	btst	#8,%d0			| test lsb bit
+	| round to even behaviour, see above.
+	jne	fp_nsf_doroundup		| round to infinity
+	lsl.b	#1,%d0			| check low bits
+	jeq	9f			| round to zero
+fp_nsf_doroundup:
+	| round (the mantissa, that is) towards infinity
+	add.l	#0x100,(%a0)
+	jcc	9f			| no overflow, good.
+	| Overflow.  This means that the %d1 was 0xffffff00, so it
+	| is now zero.  We will set the mantissa to reflect this, and
+	| increment the exponent (checking for overflow there too)
+	move.w	#0x8000,(%a0)
+	addq.w	#1,-(%a0)
+	cmp.w	#0x407f,(%a0)+		| exponent now overflown?
+	jeq	fp_nsf_large		| yes, so make it infinity.
+9:	subq.l	#4,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+	| check nondefault rounding modes
+2:	subq.w	#2,%d2
+	jcs	9b			| %d2 < 2, round to zero
+	jhi	3f			| %d2 > 2, round to +infinity
+	tst.b	(-3,%a0)		| to -inf
+	jne	fp_nsf_doroundup	| negative, round to infinity
+	jra	9b			| positive, round to zero
+3:	tst.b	(-3,%a0)		| to +inf
+	jeq	fp_nsf_doroundup		| positive, round to infinity
+	jra	9b			| negative, round to zero
+	| Exponent overflow.  Just call it infinity.
+fp_nsf_large:
+	tst.b	(3,%a0)
+	jeq	1f
+	fp_set_sr FPSR_EXC_INEX2
+1:	fp_set_sr FPSR_EXC_OVFL
+	move.w	(FPD_RND,FPDATA),%d2
+	jne	3f			| %d2 = 0 round to nearest
+1:	move.w	#0x7fff,(-2,%a0)
+	clr.l	(%a0)+
+	clr.l	(%a0)
+2:	subq.l	#8,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+3:	subq.w	#2,%d2
+	jcs	5f			| %d2 < 2, round to zero
+	jhi	4f			| %d2 > 2, round to +infinity
+	tst.b	(-3,%a0)		| to -inf
+	jne	1b
+	jra	5f
+4:	tst.b	(-3,%a0)		| to +inf
+	jeq	1b
+5:	move.w	#0x407e,(-2,%a0)
+	move.l	#0xffffff00,(%a0)+
+	clr.l	(%a0)
+	jra	2b
+	| Infinities or NaNs
+fp_nsf_huge:
+	subq.l	#4,%a0
+	printf	PNORM,"%p(",1,%a0
+	printx	PNORM,%a0@
+	printf	PNORM,")\n"
+	rts
+
+	| conv_ext2int (macro):
+	| Generates a subroutine that converts an extended value to an
+	| integer of a given size, again, with the appropriate type of
+	| rounding.
+
+	| Macro arguments:
+	| s:	size, as given in an assembly instruction.
+	| b:	number of bits in that size.
+
+	| Subroutine arguments:
+	| %a0:	source (struct fp_ext *)
+
+	| Returns the integer in %d0 (like it should)
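+	|
+	| On overflow the operand error (OPERR) bit is set and the result
+	| saturates: positive values return MAXINT and negative values
+	| MININT (e.g. 0x7fffffff and 0x80000000 for b=32).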
+
+.macro conv_ext2int s,b
+	.set	inf,(1<<(\b-1))-1	| i.e. MAXINT
+	printf	PCONV,"e2i%d: %p(",2,#\b,%a0
+	printx	PCONV,%a0@
+	printf	PCONV,") "
+	addq.l	#2,%a0
+	move.w	(%a0)+,%d2		| exponent
+	jeq	fp_e2i_zero\b		| zero / denorm (== 0, here)
+	cmp.w	#0x7fff,%d2
+	jeq	fp_e2i_huge\b		| Inf / NaN
+	sub.w	#0x3ffe,%d2
+	jcs	fp_e2i_small\b
+	cmp.w	#\b,%d2
+	jhi	fp_e2i_large\b
+	move.l	(%a0),%d0
+	move.l	%d0,%d1
+	lsl.l	%d2,%d1
+	jne	fp_e2i_round\b
+	tst.l	(4,%a0)
+	jne	fp_e2i_round\b
+	neg.w	%d2
+	add.w	#32,%d2
+	lsr.l	%d2,%d0
+9:	tst.w	(-4,%a0)
+	jne	1f
+	tst.\s	%d0
+	jmi	fp_e2i_large\b
+	printf	PCONV,"-> %p\n",1,%d0
+	rts
+1:	neg.\s	%d0
+	jeq	1f
+	jpl	fp_e2i_large\b
+1:	printf	PCONV,"-> %p\n",1,%d0
+	rts
+fp_e2i_round\b:
+	fp_set_sr FPSR_EXC_INEX2	| INEX2 bit
+	neg.w	%d2
+	add.w	#32,%d2
+	.if	\b>16
+	jeq	5f
+	.endif
+	lsr.l	%d2,%d0
+	move.w	(FPD_RND,FPDATA),%d2	| rounding mode
+	jne	2f			| %d2 == 0, round to nearest
+	tst.l	%d1			| test guard bit
+	jpl	9b			| zero is closer
+	btst	%d2,%d0			| test lsb bit (%d2 still 0)
+	jne	fp_e2i_doroundup\b
+	lsl.l	#1,%d1			| check low bits
+	jne	fp_e2i_doroundup\b
+	tst.l	(4,%a0)
+	jeq	9b
+fp_e2i_doroundup\b:
+	addq.l	#1,%d0
+	jra	9b
+	| check nondefault rounding modes
+2:	subq.w	#2,%d2
+	jcs	9b			| %d2 < 2, round to zero
+	jhi	3f			| %d2 > 2, round to +infinity
+	tst.w	(-4,%a0)		| to -inf
+	jne	fp_e2i_doroundup\b	| negative, round to infinity
+	jra	9b			| positive, round to zero
+3:	tst.w	(-4,%a0)		| to +inf
+	jeq	fp_e2i_doroundup\b	| positive, round to infinity
+	jra	9b	| negative, round to zero
+	| We only want -2**127 to get rounded correctly here,
+	| since the guard bit is in the lower lword.
+	| Everything else ends up as overflow anyway.
+	.if	\b>16
+5:	move.w	(FPD_RND,FPDATA),%d2	| rounding mode
+	jne	2b			| %d2 == 0, round to nearest
+	move.l	(4,%a0),%d1		| test guard bit
+	jpl	9b			| zero is closer
+	lsl.l	#1,%d1			| check low bits
+	jne	fp_e2i_doroundup\b
+	jra	9b
+	.endif
+fp_e2i_zero\b:
+	clr.l	%d0
+	tst.l	(%a0)+
+	jne	1f
+	tst.l	(%a0)
+	jeq	3f
+1:	subq.l	#4,%a0
+	fp_clr_sr FPSR_EXC_UNFL		| fp_normalize_ext has set this bit
+fp_e2i_small\b:
+	fp_set_sr FPSR_EXC_INEX2
+	clr.l	%d0
+	move.w	(FPD_RND,FPDATA),%d2	| rounding mode
+	subq.w	#2,%d2
+	jcs	3f			| %d2 < 2, round to nearest/zero
+	jhi	2f			| %d2 > 2, round to +infinity
+	tst.w	(-4,%a0)		| to -inf
+	jeq	3f
+	subq.\s	#1,%d0
+	jra	3f
+2:	tst.w	(-4,%a0)		| to +inf
+	jne	3f
+	addq.\s	#1,%d0
+3:	printf	PCONV,"-> %p\n",1,%d0
+	rts
+fp_e2i_large\b:
+	fp_set_sr FPSR_EXC_OPERR
+	move.\s	#inf,%d0
+	tst.w	(-4,%a0)
+	jeq	1f
+	addq.\s	#1,%d0
+1:	printf	PCONV,"-> %p\n",1,%d0
+	rts
+fp_e2i_huge\b:
+	move.\s	(%a0),%d0
+	tst.l	(%a0)
+	jne	1f
+	tst.l	(%a0)
+	jeq	fp_e2i_large\b
+	| fp_normalize_ext has set this bit already
+	| and made the number nonsignaling
+1:	fp_tst_sr FPSR_EXC_SNAN
+	jne	1f
+	fp_set_sr FPSR_EXC_OPERR
+1:	printf	PCONV,"-> %p\n",1,%d0
+	rts
+.endm
+
+fp_conv_ext2long:
+	conv_ext2int l,32
+
+fp_conv_ext2short:
+	conv_ext2int w,16
+
+fp_conv_ext2byte:
+	conv_ext2int b,8
+
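+	| fp_conv_ext2double:
+	| pack an extended value into the 64-bit IEEE double format
+	| (1 sign bit, 11 exponent bits with bias 1023, 52 mantissa
+	| bits, implicit leading one) and store it at the destination
+	| address in %a1.
+	| args:	 %a0 (struct fp_ext *), %a1 (destination address)
+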
+fp_conv_ext2double:
+	jsr	fp_normalize_double
+	printf	PCONV,"e2d: %p(",1,%a0
+	printx	PCONV,%a0@
+	printf	PCONV,"), "
+	move.l	(%a0)+,%d2
+	cmp.w	#0x7fff,%d2
+	jne	1f
+	move.w	#0x7ff,%d2
+	move.l	(%a0)+,%d0
+	jra	2f
+1:	sub.w	#0x3fff-0x3ff,%d2
+	move.l	(%a0)+,%d0
+	jmi	2f
+	clr.w	%d2
+2:	lsl.w	#5,%d2
+	lsl.l	#7,%d2
+	lsl.l	#8,%d2
+	move.l	%d0,%d1
+	lsl.l	#1,%d0
+	lsr.l	#4,%d0
+	lsr.l	#8,%d0
+	or.l	%d2,%d0
+	putuser.l %d0,(%a1)+,fp_err_ua2,%a1
+	moveq	#21,%d0
+	lsl.l	%d0,%d1
+	move.l	(%a0),%d0
+	lsr.l	#4,%d0
+	lsr.l	#7,%d0
+	or.l	%d1,%d0
+	putuser.l %d0,(%a1),fp_err_ua2,%a1
+#ifdef FPU_EMU_DEBUG
+	getuser.l %a1@(-4),%d0,fp_err_ua2,%a1
+	getuser.l %a1@(0),%d1,fp_err_ua2,%a1
+	printf	PCONV,"%p(%08x%08x)\n",3,%a1,%d0,%d1
+#endif
+	rts
+
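+	| fp_conv_ext2single:
+	| pack an extended value into the 32-bit IEEE single format
+	| (1 sign bit, 8 exponent bits with bias 127, 23 mantissa bits,
+	| implicit leading one); the result is returned in %d0.
+	| args:	 %a0 (struct fp_ext *)
+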
+fp_conv_ext2single:
+	jsr	fp_normalize_single
+	printf	PCONV,"e2s: %p(",1,%a0
+	printx	PCONV,%a0@
+	printf	PCONV,"), "
+	move.l	(%a0)+,%d1
+	cmp.w	#0x7fff,%d1
+	jne	1f
+	move.w	#0xff,%d1
+	move.l	(%a0)+,%d0
+	jra	2f
+1:	sub.w	#0x3fff-0x7f,%d1
+	move.l	(%a0)+,%d0
+	jmi	2f
+	clr.w	%d1
+2:	lsl.w	#8,%d1
+	lsl.l	#7,%d1
+	lsl.l	#8,%d1
+	bclr	#31,%d0
+	lsr.l	#8,%d0
+	or.l	%d1,%d0
+	printf	PCONV,"%08x\n",1,%d0
+	rts
+
+	| special return addresses for instructions that
+	| encode the rounding precision in the opcode
+	| (e.g. fsmove, fdmove)
+
+fp_finalrounding_single:
+	addq.l	#8,%sp
+	jsr	fp_normalize_ext
+	jsr	fp_normalize_single
+	jra	fp_finaltest
+
+fp_finalrounding_single_fast:
+	addq.l	#8,%sp
+	jsr	fp_normalize_ext
+	jsr	fp_normalize_single_fast
+	jra	fp_finaltest
+
+fp_finalrounding_double:
+	addq.l	#8,%sp
+	jsr	fp_normalize_ext
+	jsr	fp_normalize_double
+	jra	fp_finaltest
+
+	| fp_finaltest:
+	| set the emulated status register based on the outcome of an
+	| emulated instruction.
+
+fp_finalrounding:
+	addq.l	#8,%sp
+|	printf	,"f: %p\n",1,%a0
+	jsr	fp_normalize_ext
+	move.w	(FPD_PREC,FPDATA),%d0
+	subq.w	#1,%d0
+	jcs	fp_finaltest
+	jne	1f
+	jsr	fp_normalize_single
+	jra	2f
+1:	jsr	fp_normalize_double
+2:|	printf	,"f: %p\n",1,%a0
+fp_finaltest:
+	| First, we do some of the obvious tests for the exception
+	| status byte and condition code bytes of fp_sr here, so that
+	| they do not have to be handled individually by every
+	| emulated instruction.
+	clr.l	%d0
+	addq.l	#1,%a0
+	tst.b	(%a0)+			| sign
+	jeq	1f
+	bset	#FPSR_CC_NEG-24,%d0	| N bit
+1:	cmp.w	#0x7fff,(%a0)+		| exponent
+	jeq	2f
+	| test for zero
+	moveq	#FPSR_CC_Z-24,%d1
+	tst.l	(%a0)+
+	jne	9f
+	tst.l	(%a0)
+	jne	9f
+	jra	8f
+	| infinity and NaN
+2:	moveq	#FPSR_CC_NAN-24,%d1
+	move.l	(%a0)+,%d2
+	lsl.l	#1,%d2			| ignore high bit
+	jne	8f
+	tst.l	(%a0)
+	jne	8f
+	moveq	#FPSR_CC_INF-24,%d1
+8:	bset	%d1,%d0
+9:	move.b	%d0,(FPD_FPSR+0,FPDATA)	| set condition test result
+	| move instructions enter here
+	| Here, we test things in the exception status byte, and set
+	| other things in the accrued exception byte accordingly.
+	| Emulated instructions can set various things in the former,
+	| as defined in fp_emu.h.
+fp_final:
+	move.l	(FPD_FPSR,FPDATA),%d0
+#if 0
+	btst	#FPSR_EXC_SNAN,%d0	| EXC_SNAN
+	jne	1f
+	btst	#FPSR_EXC_OPERR,%d0	| EXC_OPERR
+	jeq	2f
+1:	bset	#FPSR_AEXC_IOP,%d0	| set IOP bit
+2:	btst	#FPSR_EXC_OVFL,%d0	| EXC_OVFL
+	jeq	1f
+	bset	#FPSR_AEXC_OVFL,%d0	| set OVFL bit
+1:	btst	#FPSR_EXC_UNFL,%d0	| EXC_UNFL
+	jeq	1f
+	btst	#FPSR_EXC_INEX2,%d0	| EXC_INEX2
+	jeq	1f
+	bset	#FPSR_AEXC_UNFL,%d0	| set UNFL bit
+1:	btst	#FPSR_EXC_DZ,%d0	| EXC_INEX1
+	jeq	1f
+	bset	#FPSR_AEXC_DZ,%d0	| set DZ bit
+1:	btst	#FPSR_EXC_OVFL,%d0	| EXC_OVFL
+	jne	1f
+	btst	#FPSR_EXC_INEX2,%d0	| EXC_INEX2
+	jne	1f
+	btst	#FPSR_EXC_INEX1,%d0	| EXC_INEX1
+	jeq	2f
+1:	bset	#FPSR_AEXC_INEX,%d0	| set INEX bit
+2:	move.l	%d0,(FPD_FPSR,FPDATA)
+#else
+	| same as above, greatly optimized, but untested (yet)
+	move.l	%d0,%d2
+	lsr.l	#5,%d0
+	move.l	%d0,%d1
+	lsr.l	#4,%d1
+	or.l	%d0,%d1
+	and.b	#0x08,%d1
+	move.l	%d2,%d0
+	lsr.l	#6,%d0
+	or.l	%d1,%d0
+	move.l	%d2,%d1
+	lsr.l	#4,%d1
+	or.b	#0xdf,%d1
+	and.b	%d1,%d0
+	move.l	%d2,%d1
+	lsr.l	#7,%d1
+	and.b	#0x80,%d1
+	or.b	%d1,%d0
+	and.b	#0xf8,%d0
+	or.b	%d0,%d2
+	move.l	%d2,(FPD_FPSR,FPDATA)
+#endif
+	move.b	(FPD_FPSR+2,FPDATA),%d0
+	and.b	(FPD_FPCR+2,FPDATA),%d0
+	jeq	1f
+	printf	,"send signal!!!\n"
+1:	jra	fp_end
diff --git a/arch/m68k/math-emu/multi_arith.h b/arch/m68k/math-emu/multi_arith.h
new file mode 100644
index 0000000..02251e5
--- /dev/null
+++ b/arch/m68k/math-emu/multi_arith.h
@@ -0,0 +1,819 @@
+/* multi_arith.h: multi-precision integer arithmetic functions, needed
+   to do extended-precision floating point.
+
+   (c) 1998 David Huggins-Daines.
+
+   Somewhat based on arch/alpha/math-emu/ieee-math.c, which is (c)
+   David Mosberger-Tang.
+
+   You may copy, modify, and redistribute this file under the terms of
+   the GNU General Public License, version 2, or any later version, at
+   your convenience. */
+
+/* Note:
+
+   These are not general multi-precision math routines.  Rather, they
+   implement the subset of integer arithmetic that we need in order to
+   multiply, divide, and normalize 128-bit unsigned mantissae.  */
+
+#ifndef MULTI_ARITH_H
+#define MULTI_ARITH_H
+
+#if 0	/* old code... */
+
+/* Unsigned only, because we don't need signs to multiply and divide. */
+typedef unsigned int int128[4];
+
+/* Word order */
+enum {
+	MSW128,
+	NMSW128,
+	NLSW128,
+	LSW128
+};
+
+/* big-endian */
+#define LO_WORD(ll) (((unsigned int *) &ll)[1])
+#define HI_WORD(ll) (((unsigned int *) &ll)[0])
+
+/* Convenience functions to stuff various integer values into int128s */
+
+static inline void zero128(int128 a)
+{
+	a[LSW128] = a[NLSW128] = a[NMSW128] = a[MSW128] = 0;
+}
+
+/* Human-readable word order in the arguments */
+static inline void set128(unsigned int i3, unsigned int i2, unsigned int i1,
+			  unsigned int i0, int128 a)
+{
+	a[LSW128] = i0;
+	a[NLSW128] = i1;
+	a[NMSW128] = i2;
+	a[MSW128] = i3;
+}
+
+/* Convenience functions (for testing as well) */
+static inline void int64_to_128(unsigned long long src, int128 dest)
+{
+	dest[LSW128] = (unsigned int) src;
+	dest[NLSW128] = src >> 32;
+	dest[NMSW128] = dest[MSW128] = 0;
+}
+
+static inline void int128_to_64(const int128 src, unsigned long long *dest)
+{
+	*dest = src[LSW128] | (long long) src[NLSW128] << 32;
+}
+
+static inline void put_i128(const int128 a)
+{
+	printk("%08x %08x %08x %08x\n", a[MSW128], a[NMSW128],
+	       a[NLSW128], a[LSW128]);
+}
+
+/* Internal shifters:
+
+   Note that these are only good for 0 < count < 32.
+ */
+
+static inline void _lsl128(unsigned int count, int128 a)
+{
+	a[MSW128] = (a[MSW128] << count) | (a[NMSW128] >> (32 - count));
+	a[NMSW128] = (a[NMSW128] << count) | (a[NLSW128] >> (32 - count));
+	a[NLSW128] = (a[NLSW128] << count) | (a[LSW128] >> (32 - count));
+	a[LSW128] <<= count;
+}
+
+static inline void _lsr128(unsigned int count, int128 a)
+{
+	a[LSW128] = (a[LSW128] >> count) | (a[NLSW128] << (32 - count));
+	a[NLSW128] = (a[NLSW128] >> count) | (a[NMSW128] << (32 - count));
+	a[NMSW128] = (a[NMSW128] >> count) | (a[MSW128] << (32 - count));
+	a[MSW128] >>= count;
+}
+
+/* Should be faster, one would hope */
+
+static inline void lslone128(int128 a)
+{
+	asm volatile ("lsl.l #1,%0\n"
+		      "roxl.l #1,%1\n"
+		      "roxl.l #1,%2\n"
+		      "roxl.l #1,%3\n"
+		      :
+		      "=d" (a[LSW128]),
+		      "=d"(a[NLSW128]),
+		      "=d"(a[NMSW128]),
+		      "=d"(a[MSW128])
+		      :
+		      "0"(a[LSW128]),
+		      "1"(a[NLSW128]),
+		      "2"(a[NMSW128]),
+		      "3"(a[MSW128]));
+}
+
+static inline void lsrone128(int128 a)
+{
+	asm volatile ("lsr.l #1,%0\n"
+		      "roxr.l #1,%1\n"
+		      "roxr.l #1,%2\n"
+		      "roxr.l #1,%3\n"
+		      :
+		      "=d" (a[MSW128]),
+		      "=d"(a[NMSW128]),
+		      "=d"(a[NLSW128]),
+		      "=d"(a[LSW128])
+		      :
+		      "0"(a[MSW128]),
+		      "1"(a[NMSW128]),
+		      "2"(a[NLSW128]),
+		      "3"(a[LSW128]));
+}
+
+/* Generalized 128-bit shifters:
+
+   These bit-shift to a multiple of 32, then move whole longwords.  */
+
+static inline void lsl128(unsigned int count, int128 a)
+{
+	int wordcount, i;
+
+	if (count % 32)
+		_lsl128(count % 32, a);
+
+	if (0 == (wordcount = count / 32))
+		return;
+
+	/* argh, gak, endian-sensitive */
+	for (i = 0; i < 4 - wordcount; i++) {
+		a[i] = a[i + wordcount];
+	}
+	for (i = 3; i >= 4 - wordcount; --i) {
+		a[i] = 0;
+	}
+}
+
+static inline void lsr128(unsigned int count, int128 a)
+{
+	int wordcount, i;
+
+	if (count % 32)
+		_lsr128(count % 32, a);
+
+	if (0 == (wordcount = count / 32))
+		return;
+
+	for (i = 3; i >= wordcount; --i) {
+		a[i] = a[i - wordcount];
+	}
+	for (i = 0; i < wordcount; i++) {
+		a[i] = 0;
+	}
+}
+
+static inline void orl128(int a, int128 b)
+{
+	b[LSW128] |= a;
+}
+
+static inline int btsthi128(const int128 a)
+{
+	return a[MSW128] & 0x80000000;
+}
+
+/* test bits (numbered from 0 = LSB) up to and including "top" */
+static inline int bftestlo128(int top, const int128 a)
+{
+	int r = 0;
+
+	if (top > 31)
+		r |= a[LSW128];
+	if (top > 63)
+		r |= a[NLSW128];
+	if (top > 95)
+		r |= a[NMSW128];
+
+	r |= a[3 - (top / 32)] & ((1 << (top % 32 + 1)) - 1);
+
+	return (r != 0);
+}
+
+/* Aargh.  We need these because GCC is broken */
+/* FIXME: do them in assembly, for goodness' sake! */
+static inline void mask64(int pos, unsigned long long *mask)
+{
+	*mask = 0;
+
+	if (pos < 32) {
+		LO_WORD(*mask) = (1 << pos) - 1;
+		return;
+	}
+	LO_WORD(*mask) = -1;
+	HI_WORD(*mask) = (1 << (pos - 32)) - 1;
+}
+
+static inline void bset64(int pos, unsigned long long *dest)
+{
+	/* This conditional will be optimized away.  Thanks, GCC! */
+	if (pos < 32)
+		asm volatile ("bset %1,%0":"=m"
+			      (LO_WORD(*dest)):"id"(pos));
+	else
+		asm volatile ("bset %1,%0":"=m"
+			      (HI_WORD(*dest)):"id"(pos - 32));
+}
+
+static inline int btst64(int pos, unsigned long long dest)
+{
+	if (pos < 32)
+		return (0 != (LO_WORD(dest) & (1 << pos)));
+	else
+		return (0 != (HI_WORD(dest) & (1 << (pos - 32))));
+}
+
+static inline void lsl64(int count, unsigned long long *dest)
+{
+	if (count < 32) {
+		HI_WORD(*dest) = (HI_WORD(*dest) << count)
+		    | (LO_WORD(*dest) >> count);
+		LO_WORD(*dest) <<= count;
+		return;
+	}
+	count -= 32;
+	HI_WORD(*dest) = LO_WORD(*dest) << count;
+	LO_WORD(*dest) = 0;
+}
+
+static inline void lsr64(int count, unsigned long long *dest)
+{
+	if (count < 32) {
+		LO_WORD(*dest) = (LO_WORD(*dest) >> count)
+		    | (HI_WORD(*dest) << (32 - count));
+		HI_WORD(*dest) >>= count;
+		return;
+	}
+	count -= 32;
+	LO_WORD(*dest) = HI_WORD(*dest) >> count;
+	HI_WORD(*dest) = 0;
+}
+#endif
+
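+/* fp_denormalize(): shift the 64-bit mantissa of *reg right by cnt bits
+ * and raise the exponent by the same amount.  The most significant bits
+ * shifted out are kept in the 8-bit guard byte lowmant; if any further
+ * bits are lost, the lowest bit of lowmant is set as a sticky bit so
+ * that rounding still comes out right.
+ */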
+static inline void fp_denormalize(struct fp_ext *reg, unsigned int cnt)
+{
+	reg->exp += cnt;
+
+	switch (cnt) {
+	case 0 ... 8:
+		reg->lowmant = reg->mant.m32[1] << (8 - cnt);
+		reg->mant.m32[1] = (reg->mant.m32[1] >> cnt) |
+				   (reg->mant.m32[0] << (32 - cnt));
+		reg->mant.m32[0] = reg->mant.m32[0] >> cnt;
+		break;
+	case 9 ... 32:
+		reg->lowmant = reg->mant.m32[1] >> (cnt - 8);
+		if (reg->mant.m32[1] << (40 - cnt))
+			reg->lowmant |= 1;
+		reg->mant.m32[1] = (reg->mant.m32[1] >> cnt) |
+				   (reg->mant.m32[0] << (32 - cnt));
+		reg->mant.m32[0] = reg->mant.m32[0] >> cnt;
+		break;
+	case 33 ... 39:
+		asm volatile ("bfextu %1{%2,#8},%0" : "=d" (reg->lowmant)
+			: "m" (reg->mant.m32[0]), "d" (64 - cnt));
+		if (reg->mant.m32[1] << (40 - cnt))
+			reg->lowmant |= 1;
+		reg->mant.m32[1] = reg->mant.m32[0] >> (cnt - 32);
+		reg->mant.m32[0] = 0;
+		break;
+	case 40 ... 71:
+		reg->lowmant = reg->mant.m32[0] >> (cnt - 40);
+		if ((reg->mant.m32[0] << (72 - cnt)) || reg->mant.m32[1])
+			reg->lowmant |= 1;
+		reg->mant.m32[1] = reg->mant.m32[0] >> (cnt - 32);
+		reg->mant.m32[0] = 0;
+		break;
+	default:
+		reg->lowmant = reg->mant.m32[0] || reg->mant.m32[1];
+		reg->mant.m32[0] = 0;
+		reg->mant.m32[1] = 0;
+		break;
+	}
+}
+
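+/* fp_overnormalize(): shift the mantissa left until its most significant
+ * bit is set again (using bfffo to find the leading one) and return the
+ * number of bits shifted.
+ */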
+static inline int fp_overnormalize(struct fp_ext *reg)
+{
+	int shift;
+
+	if (reg->mant.m32[0]) {
+		asm ("bfffo %1{#0,#32},%0" : "=d" (shift) : "dm" (reg->mant.m32[0]));
+		reg->mant.m32[0] = (reg->mant.m32[0] << shift) | (reg->mant.m32[1] >> (32 - shift));
+		reg->mant.m32[1] = (reg->mant.m32[1] << shift);
+	} else {
+		asm ("bfffo %1{#0,#32},%0" : "=d" (shift) : "dm" (reg->mant.m32[1]));
+		reg->mant.m32[0] = (reg->mant.m32[1] << shift);
+		reg->mant.m32[1] = 0;
+		shift += 32;
+	}
+
+	return shift;
+}
+
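+/* fp_addmant(): add the full mantissa of src (guard byte plus both
+ * 32-bit words) to dest using addx to propagate the carries, and
+ * return the carry out of the most significant bit.
+ */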
+static inline int fp_addmant(struct fp_ext *dest, struct fp_ext *src)
+{
+	int carry;
+
+	/* we assume here that gcc only inserts a move and a clr instruction */
+	asm volatile ("add.b %1,%0" : "=d,g" (dest->lowmant)
+		: "g,d" (src->lowmant), "0,0" (dest->lowmant));
+	asm volatile ("addx.l %1,%0" : "=d" (dest->mant.m32[1])
+		: "d" (src->mant.m32[1]), "0" (dest->mant.m32[1]));
+	asm volatile ("addx.l %1,%0" : "=d" (dest->mant.m32[0])
+		: "d" (src->mant.m32[0]), "0" (dest->mant.m32[0]));
+	asm volatile ("addx.l %0,%0" : "=d" (carry) : "0" (0));
+
+	return carry;
+}
+
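+/* fp_addcarry(): the mantissa addition carried out of the top bit, so
+ * increment the exponent and shift the mantissa right by one, putting
+ * the carry back in as the new leading bit.  Returns 0 if the exponent
+ * overflows to 0x7fff (the mantissa is cleared and OVFL is set), else 1.
+ */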
+static inline int fp_addcarry(struct fp_ext *reg)
+{
+	if (++reg->exp == 0x7fff) {
+		if (reg->mant.m64)
+			fp_set_sr(FPSR_EXC_INEX2);
+		reg->mant.m64 = 0;
+		fp_set_sr(FPSR_EXC_OVFL);
+		return 0;
+	}
+	reg->lowmant = (reg->mant.m32[1] << 7) | (reg->lowmant ? 1 : 0);
+	reg->mant.m32[1] = (reg->mant.m32[1] >> 1) |
+			   (reg->mant.m32[0] << 31);
+	reg->mant.m32[0] = (reg->mant.m32[0] >> 1) | 0x80000000;
+
+	return 1;
+}
+
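+/* fp_submant(): dest = src1 - src2 over the full mantissa (guard byte
+ * plus both 32-bit words), with the borrow propagated via subx.
+ */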
+static inline void fp_submant(struct fp_ext *dest, struct fp_ext *src1,
+			      struct fp_ext *src2)
+{
+	/* we assume here that gcc only inserts a move and a clr instruction */
+	asm volatile ("sub.b %1,%0" : "=d,g" (dest->lowmant)
+		: "g,d" (src2->lowmant), "0,0" (src1->lowmant));
+	asm volatile ("subx.l %1,%0" : "=d" (dest->mant.m32[1])
+		: "d" (src2->mant.m32[1]), "0" (src1->mant.m32[1]));
+	asm volatile ("subx.l %1,%0" : "=d" (dest->mant.m32[0])
+		: "d" (src2->mant.m32[0]), "0" (src1->mant.m32[0]));
+}
+
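+/* Multiply/divide primitives built on the mulu.l/divu.l long forms:
+ * fp_mul64(hi, lo, a, b)     - 32x32 -> 64 bit multiply, result in hi:lo
+ * fp_div64(q, r, hi, lo, d)  - divide the 64-bit value hi:lo by d,
+ *                              32-bit quotient in q, remainder in r
+ */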
+#define fp_mul64(desth, destl, src1, src2) ({				\
+	asm ("mulu.l %2,%1:%0" : "=d" (destl), "=d" (desth)		\
+		: "g" (src1), "0" (src2));				\
+})
+#define fp_div64(quot, rem, srch, srcl, div)				\
+	asm ("divu.l %2,%1:%0" : "=d" (quot), "=d" (rem)		\
+		: "dm" (div), "1" (srch), "0" (srcl))
+#define fp_add64(dest1, dest2, src1, src2) ({				\
+	asm ("add.l %1,%0" : "=d,dm" (dest2)				\
+		: "dm,d" (src2), "0,0" (dest2));			\
+	asm ("addx.l %1,%0" : "=d" (dest1)				\
+		: "d" (src1), "0" (dest1));				\
+})
+#define fp_addx96(dest, src) ({						\
+	/* we assume here that gcc only inserts a move and a clr instr */ \
+	asm volatile ("add.l %1,%0" : "=d,g" (dest->m32[2])		\
+		: "g,d" (temp.m32[1]), "0,0" (dest->m32[2]));		\
+	asm volatile ("addx.l %1,%0" : "=d" (dest->m32[1])		\
+		: "d" (temp.m32[0]), "0" (dest->m32[1]));		\
+	asm volatile ("addx.l %1,%0" : "=d" (dest->m32[0])		\
+		: "d" (0), "0" (dest->m32[0]));				\
+})
+#define fp_sub64(dest, src) ({						\
+	asm ("sub.l %1,%0" : "=d,dm" (dest.m32[1])			\
+		: "dm,d" (src.m32[1]), "0,0" (dest.m32[1]));		\
+	asm ("subx.l %1,%0" : "=d" (dest.m32[0])			\
+		: "d" (src.m32[0]), "0" (dest.m32[0]));			\
+})
+#define fp_sub96c(dest, srch, srcm, srcl) ({				\
+	char carry;							\
+	asm ("sub.l %1,%0" : "=d,dm" (dest.m32[2])			\
+		: "dm,d" (srcl), "0,0" (dest.m32[2]));			\
+	asm ("subx.l %1,%0" : "=d" (dest.m32[1])			\
+		: "d" (srcm), "0" (dest.m32[1]));			\
+	asm ("subx.l %2,%1; scs %0" : "=d" (carry), "=d" (dest.m32[0])	\
+		: "d" (srch), "1" (dest.m32[0]));			\
+	carry;								\
+})
+
+static inline void fp_multiplymant(union fp_mant128 *dest, struct fp_ext *src1,
+				   struct fp_ext *src2)
+{
+	union fp_mant64 temp;
+
+	fp_mul64(dest->m32[0], dest->m32[1], src1->mant.m32[0], src2->mant.m32[0]);
+	fp_mul64(dest->m32[2], dest->m32[3], src1->mant.m32[1], src2->mant.m32[1]);
+
+	fp_mul64(temp.m32[0], temp.m32[1], src1->mant.m32[0], src2->mant.m32[1]);
+	fp_addx96(dest, temp);
+
+	fp_mul64(temp.m32[0], temp.m32[1], src1->mant.m32[1], src2->mant.m32[0]);
+	fp_addx96(dest, temp);
+}
+
+static inline void fp_dividemant(union fp_mant128 *dest, struct fp_ext *src,
+				 struct fp_ext *div)
+{
+	union fp_mant128 tmp;
+	union fp_mant64 tmp64;
+	unsigned long *mantp = dest->m32;
+	unsigned long fix, rem, first, dummy;
+	int i;
+
+	/* the algorithm below requires the dividend (src) to be smaller
+	   than the divisor, but both have the high bit set */
+	if (src->mant.m64 >= div->mant.m64) {
+		fp_sub64(src->mant, div->mant);
+		*mantp = 1;
+	} else
+		*mantp = 0;
+	mantp++;
+
+	/* Basic idea behind this algorithm: we can't divide two 64-bit numbers
+	   (AB/CD) directly, but we can calculate AB/C0, which leaves the
+	   quotient off by a factor of C0/CD.  So we multiply the first result
+	   by that factor to fix it up; after that the result is nearly correct
+	   and only a few corrections are needed. */
+
+	/* C0/CD could be precalculated, but that is again a 64-bit division.
+	   We can make it easier by first dividing through C, which gives
+	   10/1D; after a single shift the value then fits into 32 bits. */
+	fix = 0x80000000;
+	dummy = div->mant.m32[1] / div->mant.m32[0] + 1;
+	dummy = (dummy >> 1) | fix;
+	fp_div64(fix, dummy, fix, 0, dummy);
+	fix--;
+
+	for (i = 0; i < 3; i++, mantp++) {
+		if (src->mant.m32[0] == div->mant.m32[0]) {
+			fp_div64(first, rem, 0, src->mant.m32[1], div->mant.m32[0]);
+
+			fp_mul64(*mantp, dummy, first, fix);
+			*mantp += fix;
+		} else {
+			fp_div64(first, rem, src->mant.m32[0], src->mant.m32[1], div->mant.m32[0]);
+
+			fp_mul64(*mantp, dummy, first, fix);
+		}
+
+		fp_mul64(tmp.m32[0], tmp.m32[1], div->mant.m32[0], first - *mantp);
+		fp_add64(tmp.m32[0], tmp.m32[1], 0, rem);
+		tmp.m32[2] = 0;
+
+		fp_mul64(tmp64.m32[0], tmp64.m32[1], *mantp, div->mant.m32[1]);
+		fp_sub96c(tmp, 0, tmp64.m32[0], tmp64.m32[1]);
+
+		src->mant.m32[0] = tmp.m32[1];
+		src->mant.m32[1] = tmp.m32[2];
+
+		while (!fp_sub96c(tmp, 0, div->mant.m32[0], div->mant.m32[1])) {
+			src->mant.m32[0] = tmp.m32[1];
+			src->mant.m32[1] = tmp.m32[2];
+			*mantp += 1;
+		}
+	}
+}
+
+#if 0
+static inline unsigned int fp_fls128(union fp_mant128 *src)
+{
+	unsigned long data;
+	unsigned int res, off;
+
+	if ((data = src->m32[0]))
+		off = 0;
+	else if ((data = src->m32[1]))
+		off = 32;
+	else if ((data = src->m32[2]))
+		off = 64;
+	else if ((data = src->m32[3]))
+		off = 96;
+	else
+		return 128;
+
+	asm ("bfffo %1{#0,#32},%0" : "=d" (res) : "dm" (data));
+	return res + off;
+}
+
+static inline void fp_shiftmant128(union fp_mant128 *src, int shift)
+{
+	unsigned long sticky;
+
+	switch (shift) {
+	case 0:
+		return;
+	case 1:
+		asm volatile ("lsl.l #1,%0"
+			: "=d" (src->m32[3]) : "0" (src->m32[3]));
+		asm volatile ("roxl.l #1,%0"
+			: "=d" (src->m32[2]) : "0" (src->m32[2]));
+		asm volatile ("roxl.l #1,%0"
+			: "=d" (src->m32[1]) : "0" (src->m32[1]));
+		asm volatile ("roxl.l #1,%0"
+			: "=d" (src->m32[0]) : "0" (src->m32[0]));
+		return;
+	case 2 ... 31:
+		src->m32[0] = (src->m32[0] << shift) | (src->m32[1] >> (32 - shift));
+		src->m32[1] = (src->m32[1] << shift) | (src->m32[2] >> (32 - shift));
+		src->m32[2] = (src->m32[2] << shift) | (src->m32[3] >> (32 - shift));
+		src->m32[3] = (src->m32[3] << shift);
+		return;
+	case 32 ... 63:
+		shift -= 32;
+		src->m32[0] = (src->m32[1] << shift) | (src->m32[2] >> (32 - shift));
+		src->m32[1] = (src->m32[2] << shift) | (src->m32[3] >> (32 - shift));
+		src->m32[2] = (src->m32[3] << shift);
+		src->m32[3] = 0;
+		return;
+	case 64 ... 95:
+		shift -= 64;
+		src->m32[0] = (src->m32[2] << shift) | (src->m32[3] >> (32 - shift));
+		src->m32[1] = (src->m32[3] << shift);
+		src->m32[2] = src->m32[3] = 0;
+		return;
+	case 96 ... 127:
+		shift -= 96;
+		src->m32[0] = (src->m32[3] << shift);
+		src->m32[1] = src->m32[2] = src->m32[3] = 0;
+		return;
+	case -31 ... -1:
+		shift = -shift;
+		sticky = 0;
+		if (src->m32[3] << (32 - shift))
+			sticky = 1;
+		src->m32[3] = (src->m32[3] >> shift) | (src->m32[2] << (32 - shift)) | sticky;
+		src->m32[2] = (src->m32[2] >> shift) | (src->m32[1] << (32 - shift));
+		src->m32[1] = (src->m32[1] >> shift) | (src->m32[0] << (32 - shift));
+		src->m32[0] = (src->m32[0] >> shift);
+		return;
+	case -63 ... -32:
+		shift = -shift - 32;
+		sticky = 0;
+		if ((src->m32[2] << (32 - shift)) || src->m32[3])
+			sticky = 1;
+		src->m32[3] = (src->m32[2] >> shift) | (src->m32[1] << (32 - shift)) | sticky;
+		src->m32[2] = (src->m32[1] >> shift) | (src->m32[0] << (32 - shift));
+		src->m32[1] = (src->m32[0] >> shift);
+		src->m32[0] = 0;
+		return;
+	case -95 ... -64:
+		shift = -shift - 64;
+		sticky = 0;
+		if ((src->m32[1] << (32 - shift)) || src->m32[2] || src->m32[3])
+			sticky = 1;
+		src->m32[3] = (src->m32[1] >> shift) | (src->m32[0] << (32 - shift)) | sticky;
+		src->m32[2] = (src->m32[0] >> shift);
+		src->m32[1] = src->m32[0] = 0;
+		return;
+	case -127 ... -96:
+		shift = -shift - 96;
+		sticky = 0;
+		if ((src->m32[0] << (32 - shift)) || src->m32[1] || src->m32[2] || src->m32[3])
+			sticky = 1;
+		src->m32[3] = (src->m32[0] >> shift) | sticky;
+		src->m32[2] = src->m32[1] = src->m32[0] = 0;
+		return;
+	}
+
+	if (shift < 0 && (src->m32[0] || src->m32[1] || src->m32[2] || src->m32[3]))
+		src->m32[3] = 1;
+	else
+		src->m32[3] = 0;
+	src->m32[2] = 0;
+	src->m32[1] = 0;
+	src->m32[0] = 0;
+}
+#endif
+
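+/* fp_putmant128(): copy a 128-bit intermediate mantissa back into dest,
+ * compensating for a normalization shift of 0, 1, 31 or 32 bits; bits
+ * that do not fit into the 64-bit mantissa plus the guard byte are
+ * folded into the sticky bit of lowmant.
+ */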
+static inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src,
+				 int shift)
+{
+	unsigned long tmp;
+
+	switch (shift) {
+	case 0:
+		dest->mant.m64 = src->m64[0];
+		dest->lowmant = src->m32[2] >> 24;
+		if (src->m32[3] || (src->m32[2] << 8))
+			dest->lowmant |= 1;
+		break;
+	case 1:
+		asm volatile ("lsl.l #1,%0"
+			: "=d" (tmp) : "0" (src->m32[2]));
+		asm volatile ("roxl.l #1,%0"
+			: "=d" (dest->mant.m32[1]) : "0" (src->m32[1]));
+		asm volatile ("roxl.l #1,%0"
+			: "=d" (dest->mant.m32[0]) : "0" (src->m32[0]));
+		dest->lowmant = tmp >> 24;
+		if (src->m32[3] || (tmp << 8))
+			dest->lowmant |= 1;
+		break;
+	case 31:
+		asm volatile ("lsr.l #1,%1; roxr.l #1,%0"
+			: "=d" (dest->mant.m32[0])
+			: "d" (src->m32[0]), "0" (src->m32[1]));
+		asm volatile ("roxr.l #1,%0"
+			: "=d" (dest->mant.m32[1]) : "0" (src->m32[2]));
+		asm volatile ("roxr.l #1,%0"
+			: "=d" (tmp) : "0" (src->m32[3]));
+		dest->lowmant = tmp >> 24;
+		if (src->m32[3] << 7)
+			dest->lowmant |= 1;
+		break;
+	case 32:
+		dest->mant.m32[0] = src->m32[1];
+		dest->mant.m32[1] = src->m32[2];
+		dest->lowmant = src->m32[3] >> 24;
+		if (src->m32[3] << 8)
+			dest->lowmant |= 1;
+		break;
+	}
+}
+
+#if 0 /* old code... */
+static inline int fls(unsigned int a)
+{
+	int r;
+
+	asm volatile ("bfffo %1{#0,#32},%0"
+		      : "=d" (r) : "md" (a));
+	return r;
+}
+
+/* fls = "find last set" (cf. ffs(3)) */
+static inline int fls128(const int128 a)
+{
+	if (a[MSW128])
+		return fls(a[MSW128]);
+	if (a[NMSW128])
+		return fls(a[NMSW128]) + 32;
+	/* XXX: it probably never gets beyond this point in actual
+	   use, but that's indicative of a more general problem in the
+	   algorithm (i.e. as per the actual 68881 implementation, we
+	   really only need at most 67 bits of precision [plus
+	   overflow]) so I'm not going to fix it. */
+	if (a[NLSW128])
+		return fls(a[NLSW128]) + 64;
+	if (a[LSW128])
+		return fls(a[LSW128]) + 96;
+	else
+		return -1;
+}
+
+static inline int zerop128(const int128 a)
+{
+	return !(a[LSW128] | a[NLSW128] | a[NMSW128] | a[MSW128]);
+}
+
+static inline int nonzerop128(const int128 a)
+{
+	return (a[LSW128] | a[NLSW128] | a[NMSW128] | a[MSW128]);
+}
+
+/* Addition and subtraction */
+/* Do these in "pure" assembly, because "extended" asm is unmanageable
+   here */
+static inline void add128(const int128 a, int128 b)
+{
+	/* rotating carry flags */
+	unsigned int carry[2];
+
+	carry[0] = a[LSW128] > (0xffffffff - b[LSW128]);
+	b[LSW128] += a[LSW128];
+
+	carry[1] = a[NLSW128] > (0xffffffff - b[NLSW128] - carry[0]);
+	b[NLSW128] = a[NLSW128] + b[NLSW128] + carry[0];
+
+	carry[0] = a[NMSW128] > (0xffffffff - b[NMSW128] - carry[1]);
+	b[NMSW128] = a[NMSW128] + b[NMSW128] + carry[1];
+
+	b[MSW128] = a[MSW128] + b[MSW128] + carry[0];
+}
+
+/* Note: assembler semantics: "b -= a" */
+static inline void sub128(const int128 a, int128 b)
+{
+	/* rotating borrow flags */
+	unsigned int borrow[2];
+
+	borrow[0] = b[LSW128] < a[LSW128];
+	b[LSW128] -= a[LSW128];
+
+	borrow[1] = b[NLSW128] < a[NLSW128] + borrow[0];
+	b[NLSW128] = b[NLSW128] - a[NLSW128] - borrow[0];
+
+	borrow[0] = b[NMSW128] < a[NMSW128] + borrow[1];
+	b[NMSW128] = b[NMSW128] - a[NMSW128] - borrow[1];
+
+	b[MSW128] = b[MSW128] - a[MSW128] - borrow[0];
+}
+
+/* Poor man's 64-bit expanding multiply */
+static inline void mul64(unsigned long long a, unsigned long long b, int128 c)
+{
+	unsigned long long acc;
+	int128 acc128;
+
+	zero128(acc128);
+	zero128(c);
+
+	/* first the low words */
+	if (LO_WORD(a) && LO_WORD(b)) {
+		acc = (long long) LO_WORD(a) * LO_WORD(b);
+		c[NLSW128] = HI_WORD(acc);
+		c[LSW128] = LO_WORD(acc);
+	}
+	/* Next the high words */
+	if (HI_WORD(a) && HI_WORD(b)) {
+		acc = (long long) HI_WORD(a) * HI_WORD(b);
+		c[MSW128] = HI_WORD(acc);
+		c[NMSW128] = LO_WORD(acc);
+	}
+	/* The middle words */
+	if (LO_WORD(a) && HI_WORD(b)) {
+		acc = (long long) LO_WORD(a) * HI_WORD(b);
+		acc128[NMSW128] = HI_WORD(acc);
+		acc128[NLSW128] = LO_WORD(acc);
+		add128(acc128, c);
+	}
+	/* The first and last words */
+	if (HI_WORD(a) && LO_WORD(b)) {
+		acc = (long long) HI_WORD(a) * LO_WORD(b);
+		acc128[NMSW128] = HI_WORD(acc);
+		acc128[NLSW128] = LO_WORD(acc);
+		add128(acc128, c);
+	}
+}
+
+/* Note: unsigned */
+static inline int cmp128(int128 a, int128 b)
+{
+	if (a[MSW128] < b[MSW128])
+		return -1;
+	if (a[MSW128] > b[MSW128])
+		return 1;
+	if (a[NMSW128] < b[NMSW128])
+		return -1;
+	if (a[NMSW128] > b[NMSW128])
+		return 1;
+	if (a[NLSW128] < b[NLSW128])
+		return -1;
+	if (a[NLSW128] > b[NLSW128])
+		return 1;
+
+	return (signed) a[LSW128] - b[LSW128];
+}
+
+inline void div128(int128 a, int128 b, int128 c)
+{
+	int128 mask;
+
+	/* Algorithm:
+
+	   Shift the divisor until it's at least as big as the
+	   dividend, keeping track of the position to which we've
+	   shifted it, i.e. the power of 2 which we've multiplied it
+	   by.
+
+	   Then, for this power of 2 (the mask), and every one smaller
+	   than it, subtract the mask from the dividend and add it to
+	   the quotient until the dividend is smaller than the raised
+	   divisor.  At this point, divide the dividend and the mask
+	   by 2 (i.e. shift one place to the right).  Lather, rinse,
+	   and repeat, until there are no more powers of 2 left. */
+
+	/* FIXME: needless to say, there's room for improvement here too. */
+
+	/* Shift up */
+	/* XXX: since it just has to be "at least as big", we can
+	   probably eliminate this horribly wasteful loop.  I will
+	   have to prove this first, though */
+	set128(0, 0, 0, 1, mask);
+	while (cmp128(b, a) < 0 && !btsthi128(b)) {
+		lslone128(b);
+		lslone128(mask);
+	}
+
+	/* Shift down */
+	zero128(c);
+	do {
+		if (cmp128(a, b) >= 0) {
+			sub128(b, a);
+			add128(mask, c);
+		}
+		lsrone128(mask);
+		lsrone128(b);
+	} while (nonzerop128(mask));
+
+	/* The remainder is in a... */
+}
+#endif
+
+#endif	/* MULTI_ARITH_H */
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile
new file mode 100644
index 0000000..90f1c73
--- /dev/null
+++ b/arch/m68k/mm/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the linux m68k-specific parts of the memory manager.
+#
+
+obj-y		:= init.o fault.o hwtest.o
+
+obj-$(CONFIG_MMU_MOTOROLA)	+= kmap.o memory.o motorola.o
+obj-$(CONFIG_MMU_SUN3)		+= sun3kmap.o sun3mmu.o
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
new file mode 100644
index 0000000..ac48b6d
--- /dev/null
+++ b/arch/m68k/mm/fault.c
@@ -0,0 +1,219 @@
+/*
+ *  linux/arch/m68k/mm/fault.c
+ *
+ *  Copyright (C) 1995  Hamish Macdonald
+ */
+
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include <asm/setup.h>
+#include <asm/traps.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+extern void die_if_kernel(char *, struct pt_regs *, long);
+extern const int frame_extra_sizes[]; /* in m68k/kernel/signal.c */
+
+int send_fault_sig(struct pt_regs *regs)
+{
+	siginfo_t siginfo = { 0, 0, 0, };
+
+	siginfo.si_signo = current->thread.signo;
+	siginfo.si_code = current->thread.code;
+	siginfo.si_addr = (void *)current->thread.faddr;
+#ifdef DEBUG
+	printk("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, siginfo.si_signo, siginfo.si_code);
+#endif
+
+	if (user_mode(regs)) {
+		force_sig_info(siginfo.si_signo,
+			       &siginfo, current);
+	} else {
+		const struct exception_table_entry *fixup;
+
+		/* Are we prepared to handle this kernel fault? */
+		if ((fixup = search_exception_tables(regs->pc))) {
+			struct pt_regs *tregs;
+			/* Create a new four word stack frame, discarding the old
+			   one.  */
+			regs->stkadj = frame_extra_sizes[regs->format];
+			tregs =	(struct pt_regs *)((ulong)regs + regs->stkadj);
+			tregs->vector = regs->vector;
+			tregs->format = 0;
+			tregs->pc = fixup->fixup;
+			tregs->sr = regs->sr;
+			return -1;
+		}
+
+		//if (siginfo.si_signo == SIGBUS)
+		//	force_sig_info(siginfo.si_signo,
+		//		       &siginfo, current);
+
+		/*
+		 * Oops. The kernel tried to access some bad page. We'll have to
+		 * terminate things with extreme prejudice.
+		 */
+		if ((unsigned long)siginfo.si_addr < PAGE_SIZE)
+			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+		else
+			printk(KERN_ALERT "Unable to handle kernel access");
+		printk(" at virtual address %p\n", siginfo.si_addr);
+		die_if_kernel("Oops", regs, 0 /*error_code*/);
+		do_exit(SIGKILL);
+	}
+
+	return 1;
+}
+
+/*
+ * This routine handles page faults.  It determines the problem, and
+ * then passes it off to one of the appropriate routines.
+ *
+ * error_code:
+ *	bit 0 == 0 means no page found, 1 means protection fault
+ *	bit 1 == 0 means read, 1 means write
+ *
+ * If this routine detects a bad access, it returns 1, otherwise it
+ * returns 0.
+ */
+int do_page_fault(struct pt_regs *regs, unsigned long address,
+			      unsigned long error_code)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct * vma;
+	int write, fault;
+
+#ifdef DEBUG
+	printk ("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
+		regs->sr, regs->pc, address, error_code,
+		current->mm->pgd);
+#endif
+
+	/*
+	 * If we're in an interrupt or have no user
+	 * context, we must not take the fault..
+	 */
+	if (in_interrupt() || !mm)
+		goto no_context;
+
+	down_read(&mm->mmap_sem);
+
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto map_err;
+	if (vma->vm_flags & VM_IO)
+		goto acc_err;
+	if (vma->vm_start <= address)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto map_err;
+	if (user_mode(regs)) {
+		/* Accessing the stack below usp is always a bug.  The
+		   "+ 256" is there due to some instructions doing
+		   pre-decrement on the stack and that doesn't show up
+		   until later.  */
+		if (address + 256 < rdusp())
+			goto map_err;
+	}
+	if (expand_stack(vma, address))
+		goto map_err;
+
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+#ifdef DEBUG
+	printk("do_page_fault: good_area\n");
+#endif
+	write = 0;
+	switch (error_code & 3) {
+		default:	/* 3: write, present */
+			/* fall through */
+		case 2:		/* write, not present */
+			if (!(vma->vm_flags & VM_WRITE))
+				goto acc_err;
+			write++;
+			break;
+		case 1:		/* read, present */
+			goto acc_err;
+		case 0:		/* read, not present */
+			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+				goto acc_err;
+	}
+
+	/*
+	 * If for any reason at all we couldn't handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault.
+	 */
+
+ survive:
+	fault = handle_mm_fault(mm, vma, address, write);
+#ifdef DEBUG
+	printk("handle_mm_fault returns %d\n",fault);
+#endif
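+	/* handle_mm_fault() returns 1 for a minor fault and 2 for a
+	 * major fault; 0 means a bus error, and anything else is
+	 * treated here as running out of memory.
+	 */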
+	switch (fault) {
+	case 1:
+		current->min_flt++;
+		break;
+	case 2:
+		current->maj_flt++;
+		break;
+	case 0:
+		goto bus_err;
+	default:
+		goto out_of_memory;
+	}
+
+	up_read(&mm->mmap_sem);
+	return 0;
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+	up_read(&mm->mmap_sem);
+	if (current->pid == 1) {
+		yield();
+		down_read(&mm->mmap_sem);
+		goto survive;
+	}
+
+	printk("VM: killing process %s\n", current->comm);
+	if (user_mode(regs))
+		do_exit(SIGKILL);
+
+no_context:
+	current->thread.signo = SIGBUS;
+	current->thread.faddr = address;
+	return send_fault_sig(regs);
+
+bus_err:
+	current->thread.signo = SIGBUS;
+	current->thread.code = BUS_ADRERR;
+	current->thread.faddr = address;
+	goto send_sig;
+
+map_err:
+	current->thread.signo = SIGSEGV;
+	current->thread.code = SEGV_MAPERR;
+	current->thread.faddr = address;
+	goto send_sig;
+
+acc_err:
+	current->thread.signo = SIGSEGV;
+	current->thread.code = SEGV_ACCERR;
+	current->thread.faddr = address;
+
+send_sig:
+	up_read(&mm->mmap_sem);
+	return send_fault_sig(regs);
+}
diff --git a/arch/m68k/mm/hwtest.c b/arch/m68k/mm/hwtest.c
new file mode 100644
index 0000000..2c7dde3
--- /dev/null
+++ b/arch/m68k/mm/hwtest.c
@@ -0,0 +1,85 @@
+/* Tests for presence or absence of hardware registers.
+ * This code was originally in atari/config.c, but I noticed
+ * that it was also in drivers/nubus/nubus.c and I wanted to
+ * use it in hp300/config.c, so it seemed sensible to pull it
+ * out into its own file.
+ *
+ * The test is for use when trying to read a hardware register
+ * that isn't present would cause a bus error. We set up a
+ * temporary handler so that this doesn't kill the kernel.
+ *
+ * There is a test-by-reading and a test-by-writing; I present
+ * them here complete with the comments from the original atari
+ * config.c...
+ *                -- PMM <pmaydell@chiark.greenend.org.uk>, 05/1998
+ */
+
+/* This function tests for the presence of an address, especially a
+ * hardware register address. It is called very early in the kernel
+ * initialization process, when the VBR register isn't set up yet. On
+ * an Atari, it still points to address 0, which is unmapped. So a bus
+ * error would cause another bus error while fetching the exception
+ * vector, and the CPU would do nothing at all. So we needed to set up
+ * a temporary VBR and a vector table for the duration of the test.
+ */
+
+#include <linux/module.h>
+
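+/* How the trick works: the bus error vector (vector 2, i.e. offset 8 in
+ * the temporary vector table) is pointed at the label Lberr1.  The
+ * register is then read with a tst.b; if the access faults, the CPU
+ * enters Lberr1 with the result still 0, otherwise the moveq just
+ * before Lberr1 has already set the result to 1.  The saved stack
+ * pointer and VBR are restored in either case.
+ */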
+int hwreg_present( volatile void *regp )
+{
+    int	ret = 0;
+    long	save_sp, save_vbr;
+    long	tmp_vectors[3];
+
+    __asm__ __volatile__
+	(	"movec	%/vbr,%2\n\t"
+		"movel	#Lberr1,%4@(8)\n\t"
+                "movec	%4,%/vbr\n\t"
+		"movel	%/sp,%1\n\t"
+		"moveq	#0,%0\n\t"
+		"tstb	%3@\n\t"
+		"nop\n\t"
+		"moveq	#1,%0\n"
+                "Lberr1:\n\t"
+		"movel	%1,%/sp\n\t"
+		"movec	%2,%/vbr"
+		: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
+		: "a" (regp), "a" (tmp_vectors)
+                );
+
+    return( ret );
+}
+EXPORT_SYMBOL(hwreg_present);
+
+/* Basically the same, but writes a value into a word register, protected
+ * by a bus error handler. Returns 1 if successful, 0 otherwise.
+ */
+
+int hwreg_write( volatile void *regp, unsigned short val )
+{
+	int		ret;
+	long	save_sp, save_vbr;
+	long	tmp_vectors[3];
+
+	__asm__ __volatile__
+	(	"movec	%/vbr,%2\n\t"
+		"movel	#Lberr2,%4@(8)\n\t"
+		"movec	%4,%/vbr\n\t"
+		"movel	%/sp,%1\n\t"
+		"moveq	#0,%0\n\t"
+		"movew	%5,%3@\n\t"
+		"nop	\n\t"	/* If this nop isn't present, 'ret' may already be
+				 * loaded with 1 at the time the bus error
+				 * happens! */
+		"moveq	#1,%0\n"
+	"Lberr2:\n\t"
+		"movel	%1,%/sp\n\t"
+		"movec	%2,%/vbr"
+		: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
+		: "a" (regp), "a" (tmp_vectors), "g" (val)
+	);
+
+	return( ret );
+}
+EXPORT_SYMBOL(hwreg_write);
+
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
new file mode 100644
index 0000000..c45beb9
--- /dev/null
+++ b/arch/m68k/mm/init.c
@@ -0,0 +1,147 @@
+/*
+ *  linux/arch/m68k/mm/init.c
+ *
+ *  Copyright (C) 1995  Hamish Macdonald
+ *
+ *  Contains common initialization routines, specific init code moved
+ *  to motorola.c and sun3mmu.c
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/system.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#ifdef CONFIG_ATARI
+#include <asm/atari_stram.h>
+#endif
+#include <asm/tlb.h>
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+/*
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+
+void *empty_zero_page;
+
+void show_mem(void)
+{
+    unsigned long i;
+    int free = 0, total = 0, reserved = 0, shared = 0;
+    int cached = 0;
+
+    printk("\nMem-info:\n");
+    show_free_areas();
+    printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+    i = max_mapnr;
+    while (i-- > 0) {
+	total++;
+	if (PageReserved(mem_map+i))
+	    reserved++;
+	else if (PageSwapCache(mem_map+i))
+	    cached++;
+	else if (!page_count(mem_map+i))
+	    free++;
+	else
+	    shared += page_count(mem_map+i) - 1;
+    }
+    printk("%d pages of RAM\n",total);
+    printk("%d free pages\n",free);
+    printk("%d reserved pages\n",reserved);
+    printk("%d pages shared\n",shared);
+    printk("%d pages swap cached\n",cached);
+}
+
+extern void init_pointer_table(unsigned long ptable);
+
+/* References to section boundaries */
+
+extern char _text, _etext, _edata, __bss_start, _end;
+extern char __init_begin, __init_end;
+
+extern pmd_t *zero_pgtable;
+
+void __init mem_init(void)
+{
+	int codepages = 0;
+	int datapages = 0;
+	int initpages = 0;
+	unsigned long tmp;
+#ifndef CONFIG_SUN3
+	int i;
+#endif
+
+	max_mapnr = num_physpages = (((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT);
+
+#ifdef CONFIG_ATARI
+	if (MACH_IS_ATARI)
+		atari_stram_mem_init_hook();
+#endif
+
+	/* this will put all memory onto the freelists */
+	totalram_pages = free_all_bootmem();
+
+	for (tmp = PAGE_OFFSET ; tmp < (unsigned long)high_memory; tmp += PAGE_SIZE) {
+		if (PageReserved(virt_to_page(tmp))) {
+			if (tmp >= (unsigned long)&_text
+			    && tmp < (unsigned long)&_etext)
+				codepages++;
+			else if (tmp >= (unsigned long) &__init_begin
+				 && tmp < (unsigned long) &__init_end)
+				initpages++;
+			else
+				datapages++;
+			continue;
+		}
+	}
+
+#ifndef CONFIG_SUN3
+	/* insert pointer tables allocated so far into the tablelist */
+	init_pointer_table((unsigned long)kernel_pg_dir);
+	for (i = 0; i < PTRS_PER_PGD; i++) {
+		if (pgd_present(kernel_pg_dir[i]))
+			init_pointer_table(__pgd_page(kernel_pg_dir[i]));
+	}
+
+	/* insert also pointer table that we used to unmap the zero page */
+	if (zero_pgtable)
+		init_pointer_table((unsigned long)zero_pgtable);
+#endif
+
+	printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
+	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+	       max_mapnr << (PAGE_SHIFT-10),
+	       codepages << (PAGE_SHIFT-10),
+	       datapages << (PAGE_SHIFT-10),
+	       initpages << (PAGE_SHIFT-10));
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	int pages = 0;
+	for (; start < end; start += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(start));
+		set_page_count(virt_to_page(start), 1);
+		free_page(start);
+		totalram_pages++;
+		pages++;
+	}
+	printk ("Freeing initrd memory: %dk freed\n", pages);
+}
+#endif
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
new file mode 100644
index 0000000..5dcb3fa
--- /dev/null
+++ b/arch/m68k/mm/kmap.c
@@ -0,0 +1,361 @@
+/*
+ *  linux/arch/m68k/mm/kmap.c
+ *
+ *  Copyright (C) 1997 Roman Hodek
+ *
+ *  10/01/99 cleaned up the code and changing to the same interface
+ *	     used by other architectures		/Roman Zippel
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#undef DEBUG
+
+#define PTRTREESIZE	(256*1024)
+
+/*
+ * For 040/060 we can use the virtual memory area like other architectures,
+ * but for 020/030 we want to use early termination page descriptors and we
+ * can't mix this with normal page descriptors, so we have to copy that code
+ * (mm/vmalloc.c) and return appropriately aligned addresses.
+ */
+
+#ifdef CPU_M68040_OR_M68060_ONLY
+
+#define IO_SIZE		PAGE_SIZE
+
+static inline struct vm_struct *get_io_area(unsigned long size)
+{
+	return get_vm_area(size, VM_IOREMAP);
+}
+
+
+static inline void free_io_area(void *addr)
+{
+	vfree((void *)(PAGE_MASK & (unsigned long)addr));
+}
+
+#else
+
+#define IO_SIZE		(256*1024)
+
+static struct vm_struct *iolist;
+
+static struct vm_struct *get_io_area(unsigned long size)
+{
+	unsigned long addr;
+	struct vm_struct **p, *tmp, *area;
+
+	area = (struct vm_struct *)kmalloc(sizeof(*area), GFP_KERNEL);
+	if (!area)
+		return NULL;
+	addr = KMAP_START;
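+	/* Walk the address-sorted iolist and take the first hole between
+	 * KMAP_START and KMAP_END that is big enough; the new area is then
+	 * linked in at that position to keep the list sorted. */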
+	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
+		if (size + addr < (unsigned long)tmp->addr)
+			break;
+		if (addr > KMAP_END-size)
+			return NULL;
+		addr = tmp->size + (unsigned long)tmp->addr;
+	}
+	area->addr = (void *)addr;
+	area->size = size + IO_SIZE;
+	area->next = *p;
+	*p = area;
+	return area;
+}
+
+static inline void free_io_area(void *addr)
+{
+	struct vm_struct **p, *tmp;
+
+	if (!addr)
+		return;
+	addr = (void *)((unsigned long)addr & -IO_SIZE);
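+	/* Find the matching area on the iolist, unlink it, tear down its
+	 * page table entries via __iounmap() and free the descriptor. */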
+	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
+		if (tmp->addr == addr) {
+			*p = tmp->next;
+			__iounmap(tmp->addr, tmp->size);
+			kfree(tmp);
+			return;
+		}
+	}
+}
+
+#endif
+
+/*
+ * Map some physical address range into the kernel address space. The
+ * code is copied and adapted from map_chunk().
+ */
+/* Rewritten by Andreas Schwab to remove all races. */
+
+void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
+{
+	struct vm_struct *area;
+	unsigned long virtaddr, retaddr;
+	long offset;
+	pgd_t *pgd_dir;
+	pmd_t *pmd_dir;
+	pte_t *pte_dir;
+
+	/*
+	 * Don't allow mappings that wrap..
+	 */
+	if (!size || size > physaddr + size)
+		return NULL;
+
+#ifdef CONFIG_AMIGA
+	if (MACH_IS_AMIGA) {
+		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
+		    && (cacheflag == IOMAP_NOCACHE_SER))
+			return (void *)physaddr;
+	}
+#endif
+
+#ifdef DEBUG
+	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
+#endif
+	/*
+	 * Mappings have to be aligned
+	 */
+	offset = physaddr & (IO_SIZE - 1);
+	physaddr &= -IO_SIZE;
+	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
+
+	/*
+	 * Ok, go for it..
+	 */
+	area = get_io_area(size);
+	if (!area)
+		return NULL;
+
+	virtaddr = (unsigned long)area->addr;
+	retaddr = virtaddr + offset;
+#ifdef DEBUG
+	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
+#endif
+
+	/*
+	 * add cache and table flags to physical address
+	 */
+	if (CPU_IS_040_OR_060) {
+		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
+			     _PAGE_ACCESSED | _PAGE_DIRTY);
+		switch (cacheflag) {
+		case IOMAP_FULL_CACHING:
+			physaddr |= _PAGE_CACHE040;
+			break;
+		case IOMAP_NOCACHE_SER:
+		default:
+			physaddr |= _PAGE_NOCACHE_S;
+			break;
+		case IOMAP_NOCACHE_NONSER:
+			physaddr |= _PAGE_NOCACHE;
+			break;
+		case IOMAP_WRITETHROUGH:
+			physaddr |= _PAGE_CACHE040W;
+			break;
+		}
+	} else {
+		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+		switch (cacheflag) {
+		case IOMAP_NOCACHE_SER:
+		case IOMAP_NOCACHE_NONSER:
+		default:
+			physaddr |= _PAGE_NOCACHE030;
+			break;
+		case IOMAP_FULL_CACHING:
+		case IOMAP_WRITETHROUGH:
+			break;
+		}
+	}
+
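+	/* Map the range: 020/030 use 256KB early-termination descriptors in
+	 * the pointer tables, 040/060 use normal page-sized PTEs. */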
+	while ((long)size > 0) {
+#ifdef DEBUG
+		if (!(virtaddr & (PTRTREESIZE-1)))
+			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
+#endif
+		pgd_dir = pgd_offset_k(virtaddr);
+		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
+		if (!pmd_dir) {
+			printk("ioremap: no mem for pmd_dir\n");
+			return NULL;
+		}
+
+		if (CPU_IS_020_OR_030) {
+			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
+			physaddr += PTRTREESIZE;
+			virtaddr += PTRTREESIZE;
+			size -= PTRTREESIZE;
+		} else {
+			pte_dir = pte_alloc_kernel(&init_mm, pmd_dir, virtaddr);
+			if (!pte_dir) {
+				printk("ioremap: no mem for pte_dir\n");
+				return NULL;
+			}
+
+			pte_val(*pte_dir) = physaddr;
+			virtaddr += PAGE_SIZE;
+			physaddr += PAGE_SIZE;
+			size -= PAGE_SIZE;
+		}
+	}
+#ifdef DEBUG
+	printk("\n");
+#endif
+	flush_tlb_all();
+
+	return (void *)retaddr;
+}
+
+/*
+ * Unmap a ioremap()ed region again
+ */
+void iounmap(void *addr)
+{
+#ifdef CONFIG_AMIGA
+	if ((!MACH_IS_AMIGA) ||
+	    (((unsigned long)addr < 0x40000000) ||
+	     ((unsigned long)addr > 0x60000000)))
+			free_io_area(addr);
+#else
+	free_io_area(addr);
+#endif
+}
+
+/*
+ * __iounmap unmaps nearly everything, so be careful.
+ * Currently it does not free the pointer/page tables anymore, but that
+ * wasn't used anyway and might be added back later.
+ */
+void __iounmap(void *addr, unsigned long size)
+{
+	unsigned long virtaddr = (unsigned long)addr;
+	pgd_t *pgd_dir;
+	pmd_t *pmd_dir;
+	pte_t *pte_dir;
+
+	while ((long)size > 0) {
+		pgd_dir = pgd_offset_k(virtaddr);
+		if (pgd_bad(*pgd_dir)) {
+			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
+			pgd_clear(pgd_dir);
+			return;
+		}
+		pmd_dir = pmd_offset(pgd_dir, virtaddr);
+
+		if (CPU_IS_020_OR_030) {
+			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+
+			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
+				pmd_dir->pmd[pmd_off] = 0;
+				virtaddr += PTRTREESIZE;
+				size -= PTRTREESIZE;
+				continue;
+			}
+		}
+
+		if (pmd_bad(*pmd_dir)) {
+			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
+			pmd_clear(pmd_dir);
+			return;
+		}
+		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
+
+		pte_val(*pte_dir) = 0;
+		virtaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+
+	flush_tlb_all();
+}
+
+/*
+ * Set new cache mode for some kernel address space.
+ * The caller must push data for that range itself, if such data may already
+ * be in the cache.
+ */
+void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
+{
+	unsigned long virtaddr = (unsigned long)addr;
+	pgd_t *pgd_dir;
+	pmd_t *pmd_dir;
+	pte_t *pte_dir;
+
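+	/* Translate the IOMAP_* mode into the cache bits used in the page
+	 * descriptors of this CPU family, then rewrite the descriptors for
+	 * the whole range below. */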
+	if (CPU_IS_040_OR_060) {
+		switch (cmode) {
+		case IOMAP_FULL_CACHING:
+			cmode = _PAGE_CACHE040;
+			break;
+		case IOMAP_NOCACHE_SER:
+		default:
+			cmode = _PAGE_NOCACHE_S;
+			break;
+		case IOMAP_NOCACHE_NONSER:
+			cmode = _PAGE_NOCACHE;
+			break;
+		case IOMAP_WRITETHROUGH:
+			cmode = _PAGE_CACHE040W;
+			break;
+		}
+	} else {
+		switch (cmode) {
+		case IOMAP_NOCACHE_SER:
+		case IOMAP_NOCACHE_NONSER:
+		default:
+			cmode = _PAGE_NOCACHE030;
+			break;
+		case IOMAP_FULL_CACHING:
+		case IOMAP_WRITETHROUGH:
+			cmode = 0;
+		}
+	}
+
+	while ((long)size > 0) {
+		pgd_dir = pgd_offset_k(virtaddr);
+		if (pgd_bad(*pgd_dir)) {
+			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
+			pgd_clear(pgd_dir);
+			return;
+		}
+		pmd_dir = pmd_offset(pgd_dir, virtaddr);
+
+		if (CPU_IS_020_OR_030) {
+			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+
+			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
+				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
+							 _CACHEMASK040) | cmode;
+				virtaddr += PTRTREESIZE;
+				size -= PTRTREESIZE;
+				continue;
+			}
+		}
+
+		if (pmd_bad(*pmd_dir)) {
+			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
+			pmd_clear(pmd_dir);
+			return;
+		}
+		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
+
+		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
+		virtaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+
+	flush_tlb_all();
+}
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
new file mode 100644
index 0000000..1453a60
--- /dev/null
+++ b/arch/m68k/mm/memory.c
@@ -0,0 +1,471 @@
+/*
+ *  linux/arch/m68k/mm/memory.c
+ *
+ *  Copyright (C) 1995  Hamish Macdonald
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+#include <asm/machdep.h>
+
+
+/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
+   struct page instead of a separately kmalloced struct.  Stolen from
+   arch/sparc/mm/srmmu.c ... */
+
+typedef struct list_head ptable_desc;
+static LIST_HEAD(ptable_list);
+
+#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
+#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
+#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
+
+#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
+
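+/* Each page holds PAGE_SIZE/PTABLE_SIZE pointer tables; PD_MARKBITS() is a
+ * bitmap of the free slots in that page, kept in otherwise unused fields of
+ * the corresponding struct page. */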
+void __init init_pointer_table(unsigned long ptable)
+{
+	ptable_desc *dp;
+	unsigned long page = ptable & PAGE_MASK;
+	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
+
+	dp = PD_PTABLE(page);
+	if (!(PD_MARKBITS(dp) & mask)) {
+		PD_MARKBITS(dp) = 0xff;
+		list_add(dp, &ptable_list);
+	}
+
+	PD_MARKBITS(dp) &= ~mask;
+#ifdef DEBUG
+	printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
+#endif
+
+	/* unreserve the page so it can be freed later */
+	PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
+	set_page_count(PD_PAGE(dp), 1);
+
+	return;
+}
+
+pmd_t *get_pointer_table (void)
+{
+	ptable_desc *dp = ptable_list.next;
+	unsigned char mask = PD_MARKBITS (dp);
+	unsigned char tmp;
+	unsigned int off;
+
+	/*
+	 * For a pointer table for a user process address space, a
+	 * table is taken from a page allocated for the purpose.  Each
+	 * page can hold 8 pointer tables.  The page is remapped in
+	 * virtual address space to be noncacheable.
+	 */
+	if (mask == 0) {
+		void *page;
+		ptable_desc *new;
+
+		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
+			return NULL;
+
+		flush_tlb_kernel_page(page);
+		nocache_page(page);
+
+		new = PD_PTABLE(page);
+		PD_MARKBITS(new) = 0xfe;
+		list_add_tail(new, dp);
+
+		return (pmd_t *)page;
+	}
+
+	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
+		;
+	PD_MARKBITS(dp) = mask & ~tmp;
+	if (!PD_MARKBITS(dp)) {
+		/* move to end of list */
+		list_del(dp);
+		list_add_tail(dp, &ptable_list);
+	}
+	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
+}
+
+int free_pointer_table (pmd_t *ptable)
+{
+	ptable_desc *dp;
+	unsigned long page = (unsigned long)ptable & PAGE_MASK;
+	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);
+
+	dp = PD_PTABLE(page);
+	if (PD_MARKBITS (dp) & mask)
+		panic ("table already free!");
+
+	PD_MARKBITS (dp) |= mask;
+
+	if (PD_MARKBITS(dp) == 0xff) {
+		/* all tables in page are free, free page */
+		list_del(dp);
+		cache_page((void *)page);
+		free_page (page);
+		return 1;
+	} else if (ptable_list.next != dp) {
+		/*
+		 * move this descriptor to the front of the list, since
+		 * it has one or more free tables.
+		 */
+		list_del(dp);
+		list_add(dp, &ptable_list);
+	}
+	return 0;
+}
+
+#ifdef DEBUG_INVALID_PTOV
+int mm_inv_cnt = 5;
+#endif
+
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+/*
+ * The following two routines translate between kernel virtual addresses
+ * and physical addresses.
+ */
+unsigned long mm_vtop(unsigned long vaddr)
+{
+	int i=0;
+	unsigned long voff = (unsigned long)vaddr - PAGE_OFFSET;
+
+	do {
+		if (voff < m68k_memory[i].size) {
+#ifdef DEBUGPV
+			printk ("VTOP(%p)=%lx\n", vaddr,
+				m68k_memory[i].addr + voff);
+#endif
+			return m68k_memory[i].addr + voff;
+		}
+		voff -= m68k_memory[i].size;
+	} while (++i < m68k_num_memory);
+
+	/* As a special case allow `__pa(high_memory)'.  */
+	if (voff == 0)
+		return m68k_memory[i-1].addr + m68k_memory[i-1].size;
+
+	return -1;
+}
+#endif
+
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+unsigned long mm_ptov (unsigned long paddr)
+{
+	int i = 0;
+	unsigned long poff, voff = PAGE_OFFSET;
+
+	do {
+		poff = paddr - m68k_memory[i].addr;
+		if (poff < m68k_memory[i].size) {
+#ifdef DEBUGPV
+			printk ("PTOV(%lx)=%lx\n", paddr, poff + voff);
+#endif
+			return poff + voff;
+		}
+		voff += m68k_memory[i].size;
+	} while (++i < m68k_num_memory);
+
+#ifdef DEBUG_INVALID_PTOV
+	if (mm_inv_cnt > 0) {
+		mm_inv_cnt--;
+		printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
+			paddr, __builtin_return_address(0));
+	}
+#endif
+	return -1;
+}
+#endif
+
+/* invalidate page in both caches */
+static inline void clear040(unsigned long paddr)
+{
+	asm volatile (
+		"nop\n\t"
+		".chip 68040\n\t"
+		"cinvp %%bc,(%0)\n\t"
+		".chip 68k"
+		: : "a" (paddr));
+}
+
+/* invalidate page in i-cache */
+static inline void cleari040(unsigned long paddr)
+{
+	asm volatile (
+		"nop\n\t"
+		".chip 68040\n\t"
+		"cinvp %%ic,(%0)\n\t"
+		".chip 68k"
+		: : "a" (paddr));
+}
+
+/* push page in both caches */
+/* RZ: cpush %bc DOES invalidate %ic, regardless of DPI */
+static inline void push040(unsigned long paddr)
+{
+	asm volatile (
+		"nop\n\t"
+		".chip 68040\n\t"
+		"cpushp %%bc,(%0)\n\t"
+		".chip 68k"
+		: : "a" (paddr));
+}
+
+/* push and invalidate page in both caches, must disable ints
+ * to avoid invalidating valid data */
+static inline void pushcl040(unsigned long paddr)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	push040(paddr);
+	if (CPU_IS_060)
+		clear040(paddr);
+	local_irq_restore(flags);
+}
+
+/*
+ * 040: Hit every page containing an address in the range paddr..paddr+len-1.
+ * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
+ * Hit every page until there is a page or less to go. Hit the next page,
+ * and the one after that if the range hits it.
+ */
+/* ++roman: A little bit more care is required here: The CINVP instruction
+ * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
+ * and the end of the region must be treated differently if they are not
+ * exactly on a page boundary. Otherwise, too much data might be invalidated
+ * and thus lost forever. CPUSHP does what we need:
+ * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
+ * for discovering the problem!)
+ */
+/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
+ * the DPI bit in the CACR; would it cause problems with temporarily changing
+ * this?). So we have to push first and then additionally invalidate.
+ */
+
+
+/*
+ * cache_clear() semantics: Clear any cache entries for the area in question,
+ * without writing back dirty entries first. This is useful if the data will
+ * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
+ * _physical_ address.
+ */
+
+void cache_clear (unsigned long paddr, int len)
+{
+    if (CPU_IS_040_OR_060) {
+	int tmp;
+
+	/*
+	 * We need special treatment for the first page, in case it
+	 * is not page-aligned. Page align the addresses to work
+	 * around bug I17 in the 68060.
+	 */
+	if ((tmp = -paddr & (PAGE_SIZE - 1))) {
+	    pushcl040(paddr & PAGE_MASK);
+	    if ((len -= tmp) <= 0)
+		return;
+	    paddr += tmp;
+	}
+	tmp = PAGE_SIZE;
+	paddr &= PAGE_MASK;
+	while ((len -= tmp) >= 0) {
+	    clear040(paddr);
+	    paddr += tmp;
+	}
+	if ((len += tmp))
+	    /* a page boundary gets crossed at the end */
+	    pushcl040(paddr);
+    }
+    else /* 68030 or 68020 */
+	asm volatile ("movec %/cacr,%/d0\n\t"
+		      "oriw %0,%/d0\n\t"
+		      "movec %/d0,%/cacr"
+		      : : "i" (FLUSH_I_AND_D)
+		      : "d0");
+#ifdef CONFIG_M68K_L2_CACHE
+    if(mach_l2_flush)
+	mach_l2_flush(0);
+#endif
+}
+
+
+/*
+ * cache_push() semantics: Write back any dirty cache data in the given area,
+ * and invalidate the range in the instruction cache. It need not (but may)
+ * also invalidate those entries in the data cache. The range is defined by a
+ * _physical_ address.
+ */
+
+void cache_push (unsigned long paddr, int len)
+{
+    if (CPU_IS_040_OR_060) {
+	int tmp = PAGE_SIZE;
+
+	/*
+         * on 68040 or 68060, push cache lines for pages in the range;
+	 * on the '040 this also invalidates the pushed lines, but not on
+	 * the '060!
+	 */
+	len += paddr & (PAGE_SIZE - 1);
+
+	/*
+	 * Work around bug I17 in the 68060 affecting some instruction
+	 * lines not being invalidated properly.
+	 */
+	paddr &= PAGE_MASK;
+
+	do {
+	    push040(paddr);
+	    paddr += tmp;
+	} while ((len -= tmp) > 0);
+    }
+    /*
+     * 68030/68020 have no writeback cache. On the other hand,
+     * cache_push is actually a superset of cache_clear (the lines
+     * get written back and invalidated), so we should make sure
+     * to perform the corresponding actions. After all, this is getting
+     * called in places where we've just loaded code, or whatever, so
+     * flushing the icache is appropriate; flushing the dcache shouldn't
+     * be required.
+     */
+    else /* 68030 or 68020 */
+	asm volatile ("movec %/cacr,%/d0\n\t"
+		      "oriw %0,%/d0\n\t"
+		      "movec %/d0,%/cacr"
+		      : : "i" (FLUSH_I)
+		      : "d0");
+#ifdef CONFIG_M68K_L2_CACHE
+    if(mach_l2_flush)
+	mach_l2_flush(1);
+#endif
+}
+
+static unsigned long virt_to_phys_slow(unsigned long vaddr)
+{
+	if (CPU_IS_060) {
+		mm_segment_t fs = get_fs();
+		unsigned long paddr;
+
+		set_fs(get_ds());
+
+		/* The PLPAR instruction causes an access error if the translation
+		 * is not possible. To catch this we use the same exception mechanism
+		 * as for user space accesses in <asm/uaccess.h>. */
+		asm volatile (".chip 68060\n"
+			      "1: plpar (%0)\n"
+			      ".chip 68k\n"
+			      "2:\n"
+			      ".section .fixup,\"ax\"\n"
+			      "   .even\n"
+			      "3: sub.l %0,%0\n"
+			      "   jra 2b\n"
+			      ".previous\n"
+			      ".section __ex_table,\"a\"\n"
+			      "   .align 4\n"
+			      "   .long 1b,3b\n"
+			      ".previous"
+			      : "=a" (paddr)
+			      : "0" (vaddr));
+		set_fs(fs);
+		return paddr;
+	} else if (CPU_IS_040) {
+		mm_segment_t fs = get_fs();
+		unsigned long mmusr;
+
+		set_fs(get_ds());
+
+		asm volatile (".chip 68040\n\t"
+			      "ptestr (%1)\n\t"
+			      "movec %%mmusr, %0\n\t"
+			      ".chip 68k"
+			      : "=r" (mmusr)
+			      : "a" (vaddr));
+		set_fs(fs);
+
+		if (mmusr & MMU_R_040)
+			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
+	} else {
+		unsigned short mmusr;
+		unsigned long *descaddr;
+
+		asm volatile ("ptestr #5,%2@,#7,%0\n\t"
+			      "pmove %%psr,%1@"
+			      : "=a&" (descaddr)
+			      : "a" (&mmusr), "a" (vaddr));
+		if (mmusr & (MMU_I|MMU_B|MMU_L))
+			return 0;
+		descaddr = phys_to_virt((unsigned long)descaddr);
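+		/* The number of table levels used tells the descriptor size:
+		 * 1 level covers 32MB, 2 levels 256KB, 3 levels one page. */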
+		switch (mmusr & MMU_NUM) {
+		case 1:
+			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
+		case 2:
+			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
+		case 3:
+			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
+		}
+	}
+	return 0;
+}
+
+/* Push n pages at kernel virtual address and clear the icache */
+/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+void flush_icache_range(unsigned long address, unsigned long endaddr)
+{
+	if (CPU_IS_040_OR_060) {
+		address &= PAGE_MASK;
+
+		if (address >= PAGE_OFFSET && address < (unsigned long)high_memory) {
+			do {
+				asm volatile ("nop\n\t"
+					      ".chip 68040\n\t"
+					      "cpushp %%bc,(%0)\n\t"
+					      ".chip 68k"
+					      : : "a" (virt_to_phys((void *)address)));
+				address += PAGE_SIZE;
+			} while (address < endaddr);
+		} else {
+			do {
+				asm volatile ("nop\n\t"
+					      ".chip 68040\n\t"
+					      "cpushp %%bc,(%0)\n\t"
+					      ".chip 68k"
+					      : : "a" (virt_to_phys_slow(address)));
+				address += PAGE_SIZE;
+			} while (address < endaddr);
+		}
+	} else {
+		unsigned long tmp;
+		asm volatile ("movec %%cacr,%0\n\t"
+			      "orw %1,%0\n\t"
+			      "movec %0,%%cacr"
+			      : "=&d" (tmp)
+			      : "di" (FLUSH_I));
+	}
+}
+
+
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+int mm_end_of_chunk (unsigned long addr, int len)
+{
+	int i;
+
+	for (i = 0; i < m68k_num_memory; i++)
+		if (m68k_memory[i].addr + m68k_memory[i].size == addr + len)
+			return 1;
+	return 0;
+}
+#endif
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
new file mode 100644
index 0000000..d855fec
--- /dev/null
+++ b/arch/m68k/mm/motorola.c
@@ -0,0 +1,285 @@
+/*
+ * linux/arch/m68k/motorola.c
+ *
+ * Routines specific to the Motorola MMU, originally from:
+ * linux/arch/m68k/init.c
+ * which are Copyright (C) 1995 Hamish Macdonald
+ *
+ * Moved 8/20/1999 Sam Creasey
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/system.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#ifdef CONFIG_ATARI
+#include <asm/atari_stram.h>
+#endif
+
+#undef DEBUG
+
+#ifndef mm_cachebits
+/*
+ * Bits to add to page descriptors for "normal" caching mode.
+ * For 68020/030 this is 0.
+ * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
+ */
+unsigned long mm_cachebits;
+EXPORT_SYMBOL(mm_cachebits);
+#endif
+
+static pte_t * __init kernel_page_table(void)
+{
+	pte_t *ptablep;
+
+	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+
+	clear_page(ptablep);
+	__flush_page_to_ram(ptablep);
+	flush_tlb_kernel_page(ptablep);
+	nocache_page(ptablep);
+
+	return ptablep;
+}
+
+static pmd_t *last_pgtable __initdata = NULL;
+pmd_t *zero_pgtable __initdata = NULL;
+
+static pmd_t * __init kernel_ptr_table(void)
+{
+	if (!last_pgtable) {
+		unsigned long pmd, last;
+		int i;
+
+		/* Find the last ptr table that was used in head.S and
+		 * reuse the remaining space in that page for further
+		 * ptr tables.
+		 */
+		last = (unsigned long)kernel_pg_dir;
+		for (i = 0; i < PTRS_PER_PGD; i++) {
+			if (!pgd_present(kernel_pg_dir[i]))
+				continue;
+			pmd = __pgd_page(kernel_pg_dir[i]);
+			if (pmd > last)
+				last = pmd;
+		}
+
+		last_pgtable = (pmd_t *)last;
+#ifdef DEBUG
+		printk("kernel_ptr_init: %p\n", last_pgtable);
+#endif
+	}
+
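+	/* Advance to the next table slot; if that crosses into a new page,
+	 * allocate a fresh bootmem page and make it cache-inhibited. */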
+	last_pgtable += PTRS_PER_PMD;
+	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
+		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+
+		clear_page(last_pgtable);
+		__flush_page_to_ram(last_pgtable);
+		flush_tlb_kernel_page(last_pgtable);
+		nocache_page(last_pgtable);
+	}
+
+	return last_pgtable;
+}
+
+static unsigned long __init
+map_chunk (unsigned long addr, long size)
+{
+#define PTRTREESIZE (256*1024)
+#define ROOTTREESIZE (32*1024*1024)
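+	/* On 020/030 whole 32MB blocks can be mapped with a single root-level
+	 * early termination descriptor and 256KB blocks with a pointer-level
+	 * one; the 040/060 always map at page granularity. */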
+	static unsigned long virtaddr = PAGE_OFFSET;
+	unsigned long physaddr;
+	pgd_t *pgd_dir;
+	pmd_t *pmd_dir;
+	pte_t *pte_dir;
+
+	physaddr = (addr | m68k_supervisor_cachemode |
+		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+	if (CPU_IS_040_OR_060)
+		physaddr |= _PAGE_GLOBAL040;
+
+	while (size > 0) {
+#ifdef DEBUG
+		if (!(virtaddr & (PTRTREESIZE-1)))
+			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
+				virtaddr);
+#endif
+		pgd_dir = pgd_offset_k(virtaddr);
+		if (virtaddr && CPU_IS_020_OR_030) {
+			if (!(virtaddr & (ROOTTREESIZE-1)) &&
+			    size >= ROOTTREESIZE) {
+#ifdef DEBUG
+				printk ("[very early term]");
+#endif
+				pgd_val(*pgd_dir) = physaddr;
+				size -= ROOTTREESIZE;
+				virtaddr += ROOTTREESIZE;
+				physaddr += ROOTTREESIZE;
+				continue;
+			}
+		}
+		if (!pgd_present(*pgd_dir)) {
+			pmd_dir = kernel_ptr_table();
+#ifdef DEBUG
+			printk ("[new pointer %p]", pmd_dir);
+#endif
+			pgd_set(pgd_dir, pmd_dir);
+		} else
+			pmd_dir = pmd_offset(pgd_dir, virtaddr);
+
+		if (CPU_IS_020_OR_030) {
+			if (virtaddr) {
+#ifdef DEBUG
+				printk ("[early term]");
+#endif
+				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
+				physaddr += PTRTREESIZE;
+			} else {
+				int i;
+#ifdef DEBUG
+				printk ("[zero map]");
+#endif
+				zero_pgtable = kernel_ptr_table();
+				pte_dir = (pte_t *)zero_pgtable;
+				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
+					_PAGE_TABLE | _PAGE_ACCESSED;
+				pte_val(*pte_dir++) = 0;
+				physaddr += PAGE_SIZE;
+				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
+					pte_val(*pte_dir++) = physaddr;
+			}
+			size -= PTRTREESIZE;
+			virtaddr += PTRTREESIZE;
+		} else {
+			if (!pmd_present(*pmd_dir)) {
+#ifdef DEBUG
+				printk ("[new table]");
+#endif
+				pte_dir = kernel_page_table();
+				pmd_set(pmd_dir, pte_dir);
+			}
+			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
+
+			if (virtaddr) {
+				if (!pte_present(*pte_dir))
+					pte_val(*pte_dir) = physaddr;
+			} else
+				pte_val(*pte_dir) = 0;
+			size -= PAGE_SIZE;
+			virtaddr += PAGE_SIZE;
+			physaddr += PAGE_SIZE;
+		}
+
+	}
+#ifdef DEBUG
+	printk("\n");
+#endif
+
+	return virtaddr;
+}
+
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S.
+ */
+void __init paging_init(void)
+{
+	int chunk;
+	unsigned long mem_avail = 0;
+	unsigned long zones_size[3] = { 0, };
+
+#ifdef DEBUG
+	{
+		extern unsigned long availmem;
+		printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
+			kernel_pg_dir, availmem, start_mem, end_mem);
+	}
+#endif
+
+	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
+	if (CPU_IS_040_OR_060) {
+		int i;
+#ifndef mm_cachebits
+		mm_cachebits = _PAGE_CACHE040;
+#endif
+		for (i = 0; i < 16; i++)
+			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
+	}
+
+	/*
+	 * Map the physical memory available into the kernel virtual
+	 * address space.  It may allocate some memory for page
+	 * tables and thus modify availmem.
+	 */
+
+	for (chunk = 0; chunk < m68k_num_memory; chunk++) {
+		mem_avail = map_chunk (m68k_memory[chunk].addr,
+				       m68k_memory[chunk].size);
+
+	}
+
+	flush_tlb_all();
+#ifdef DEBUG
+	printk ("memory available is %ldKB\n", mem_avail >> 10);
+	printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
+		start_mem, end_mem);
+#endif
+
+	/*
+	 * initialize the bad page table and bad page to point
+	 * to a couple of allocated pages
+	 */
+	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+	memset(empty_zero_page, 0, PAGE_SIZE);
+
+	/*
+	 * Set up SFC/DFC registers
+	 */
+	set_fs(KERNEL_DS);
+
+#ifdef DEBUG
+	printk ("before free_area_init\n");
+#endif
+	zones_size[0] = (mach_max_dma_address < (unsigned long)high_memory ?
+			 (mach_max_dma_address+1) : (unsigned long)high_memory);
+	zones_size[1] = (unsigned long)high_memory - zones_size[0];
+
+	zones_size[0] = (zones_size[0] - PAGE_OFFSET) >> PAGE_SHIFT;
+	zones_size[1] >>= PAGE_SHIFT;
+
+	free_area_init(zones_size);
+}
+
+extern char __init_begin, __init_end;
+
+void free_initmem(void)
+{
+	unsigned long addr;
+
+	addr = (unsigned long)&__init_begin;
+	for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
+		virt_to_page(addr)->flags &= ~(1 << PG_reserved);
+		set_page_count(virt_to_page(addr), 1);
+		free_page(addr);
+		totalram_pages++;
+	}
+}
+
+
diff --git a/arch/m68k/mm/sun3kmap.c b/arch/m68k/mm/sun3kmap.c
new file mode 100644
index 0000000..7f0d86f
--- /dev/null
+++ b/arch/m68k/mm/sun3kmap.c
@@ -0,0 +1,156 @@
+/*
+ * linux/arch/m68k/mm/sun3kmap.c
+ *
+ * Copyright (C) 2002 Sam Creasey <sammy@sammy.net>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/sun3mmu.h>
+
+#undef SUN3_KMAP_DEBUG
+
+#ifdef SUN3_KMAP_DEBUG
+extern void print_pte_vaddr(unsigned long vaddr);
+#endif
+
+extern void mmu_emu_map_pmeg (int context, int vaddr);
+
+static inline void do_page_mapin(unsigned long phys, unsigned long virt,
+				 unsigned long type)
+{
+	unsigned long pte;
+	pte_t ptep;
+
+	ptep = pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL);
+	pte = pte_val(ptep);
+	pte |= type;
+
+	sun3_put_pte(virt, pte);
+
+#ifdef SUN3_KMAP_DEBUG
+	print_pte_vaddr(virt);
+#endif
+
+}
+
+static inline void do_pmeg_mapin(unsigned long phys, unsigned long virt,
+				 unsigned long type, int pages)
+{
+
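+	/* Make sure the segment (PMEG) covering this range is mapped into the
+	 * current context before the individual PTEs are written. */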
+	if(sun3_get_segmap(virt & ~SUN3_PMEG_MASK) == SUN3_INVALID_PMEG)
+		mmu_emu_map_pmeg(sun3_get_context(), virt);
+
+	while(pages) {
+		do_page_mapin(phys, virt, type);
+		phys += PAGE_SIZE;
+		virt += PAGE_SIZE;
+		pages--;
+	}
+}
+
+void *sun3_ioremap(unsigned long phys, unsigned long size,
+		   unsigned long type)
+{
+	struct vm_struct *area;
+	unsigned long offset, virt, ret;
+	int pages;
+
+	if(!size)
+		return NULL;
+
+	/* page align */
+	offset = phys & (PAGE_SIZE-1);
+	phys &= ~(PAGE_SIZE-1);
+
+	size += offset;
+	size = PAGE_ALIGN(size);
+	if((area = get_vm_area(size, VM_IOREMAP)) == NULL)
+		return NULL;
+
+#ifdef SUN3_KMAP_DEBUG
+	printk("ioremap: got virt %p size %lx(%lx)\n",
+	       area->addr, size, area->size);
+#endif
+
+	pages = size / PAGE_SIZE;
+	virt = (unsigned long)area->addr;
+	ret = virt + offset;
+
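+	/* Map at most up to the next PMEG boundary per iteration, so each
+	 * segment is set up before its pages are filled in. */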
+	while(pages) {
+		int seg_pages;
+
+		seg_pages = (SUN3_PMEG_SIZE - (virt & SUN3_PMEG_MASK)) / PAGE_SIZE;
+		if(seg_pages > pages)
+			seg_pages = pages;
+
+		do_pmeg_mapin(phys, virt, type, seg_pages);
+
+		pages -= seg_pages;
+		phys += seg_pages * PAGE_SIZE;
+		virt += seg_pages * PAGE_SIZE;
+	}
+
+	return (void *)ret;
+
+}
+
+
+void *__ioremap(unsigned long phys, unsigned long size, int cache)
+{
+
+	return sun3_ioremap(phys, size, SUN3_PAGE_TYPE_IO);
+
+}
+
+void iounmap(void *addr)
+{
+	vfree((void *)(PAGE_MASK & (unsigned long)addr));
+}
+
+/* sun3_map_test(addr, val) -- Reads a byte from addr, storing to val,
+ * trapping the potential read fault.  Returns 0 if the access faulted,
+ * 1 on success.
+ *
+ * This function is primarily used to check addresses on the VME bus.
+ *
+ * Mucking with the page fault handler seems a little hackish to me, but
+ * SunOS, NetBSD, and Mach all implemented this check in such a manner,
+ * so I figure we're allowed.
+ */
+int sun3_map_test(unsigned long addr, char *val)
+{
+	int ret = 0;
+
+	__asm__ __volatile__
+		(".globl _sun3_map_test_start\n"
+		 "_sun3_map_test_start:\n"
+		 "1: moveb (%2), (%0)\n"
+		 "   moveq #1, %1\n"
+		 "2:\n"
+		 ".section .fixup,\"ax\"\n"
+		 ".even\n"
+		 "3: moveq #0, %1\n"
+		 "   jmp 2b\n"
+		 ".previous\n"
+		 ".section __ex_table,\"a\"\n"
+		 ".align 4\n"
+		 ".long 1b,3b\n"
+		 ".previous\n"
+		 ".globl _sun3_map_test_end\n"
+		 "_sun3_map_test_end:\n"
+		 : "=a"(val), "=r"(ret)
+		 : "a"(addr));
+
+	return ret;
+}
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
new file mode 100644
index 0000000..a47be19
--- /dev/null
+++ b/arch/m68k/mm/sun3mmu.c
@@ -0,0 +1,102 @@
+/*
+ * linux/arch/m68k/mm/sun3mmu.c
+ *
+ * Implementations of mm routines specific to the sun3 MMU.
+ *
+ * Moved here 8/20/1999 Sam Creasey
+ *
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+
+extern void mmu_emu_init (unsigned long bootmem_end);
+
+const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
+
+extern unsigned long num_pages;
+
+void free_initmem(void)
+{
+}
+
+/* For the sun3 we try to follow the i386 paging_init() more closely */
+/* start_mem and end_mem have PAGE_OFFSET added already */
+/* now sets up tables using sun3 PTEs rather than i386 as before. --m */
+void __init paging_init(void)
+{
+	pgd_t * pg_dir;
+	pte_t * pg_table;
+	int i;
+	unsigned long address;
+	unsigned long next_pgtable;
+	unsigned long bootmem_end;
+	unsigned long zones_size[3] = {0, 0, 0};
+	unsigned long size;
+
+
+#ifdef TEST_VERIFY_AREA
+	wp_works_ok = 0;
+#endif
+	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+	memset(empty_zero_page, 0, PAGE_SIZE);
+
+	address = PAGE_OFFSET;
+	pg_dir = swapper_pg_dir;
+	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
+	memset (kernel_pg_dir,  0, sizeof (kernel_pg_dir));
+
+	size = num_pages * sizeof(pte_t);
+	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
+
+	next_pgtable = (unsigned long)alloc_bootmem_pages(size);
+	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
+
+	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
+	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
+
+	while (address < (unsigned long)high_memory) {
+		pg_table = (pte_t *) __pa (next_pgtable);
+		next_pgtable += PTRS_PER_PTE * sizeof (pte_t);
+		pgd_val(*pg_dir) = (unsigned long) pg_table;
+		pg_dir++;
+
+		/* now change pg_table to kernel virtual addresses */
+		pg_table = (pte_t *) __va ((unsigned long) pg_table);
+		for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
+			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+			if (address >= (unsigned long)high_memory)
+				pte_val (pte) = 0;
+			set_pte (pg_table, pte);
+			address += PAGE_SIZE;
+		}
+	}
+
+	mmu_emu_init(bootmem_end);
+
+	current->mm = NULL;
+
+	/* memory sizing is a hack stolen from motorola.c..  hope it works for us */
+	zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
+	zones_size[1] = 0;
+
+	free_area_init(zones_size);
+
+}
+
+
diff --git a/arch/m68k/mvme147/147ints.c b/arch/m68k/mvme147/147ints.c
new file mode 100644
index 0000000..69a744e
--- /dev/null
+++ b/arch/m68k/mvme147/147ints.c
@@ -0,0 +1,145 @@
+/*
+ * arch/m68k/mvme147/147ints.c
+ *
+ * Copyright (C) 1997 Richard Hirst [richard@sleepie.demon.co.uk]
+ *
+ * based on amiints.c -- Amiga Linux interrupt handling code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file README.legal in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/seq_file.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+
+static irqreturn_t mvme147_defhand (int irq, void *dev_id, struct pt_regs *fp);
+
+/*
+ * This should ideally be 4 elements only, for speed.
+ */
+
+static struct {
+	irqreturn_t	(*handler)(int, void *, struct pt_regs *);
+	unsigned long	flags;
+	void		*dev_id;
+	const char	*devname;
+	unsigned	count;
+} irq_tab[256];
+
+/*
+ * void mvme147_init_IRQ (void)
+ *
+ * Parameters:	None
+ *
+ * Returns:	Nothing
+ *
+ * This function is called during kernel startup to initialize
+ * the mvme147 IRQ handling routines.
+ */
+
+void mvme147_init_IRQ (void)
+{
+	int i;
+
+	for (i = 0; i < 256; i++) {
+		irq_tab[i].handler = mvme147_defhand;
+		irq_tab[i].flags = IRQ_FLG_STD;
+		irq_tab[i].dev_id = NULL;
+		irq_tab[i].devname = NULL;
+		irq_tab[i].count = 0;
+	}
+}
+
+int mvme147_request_irq(unsigned int irq,
+		irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                unsigned long flags, const char *devname, void *dev_id)
+{
+	if (irq > 255) {
+		printk("%s: Incorrect IRQ %d from %s\n", __FUNCTION__, irq, devname);
+		return -ENXIO;
+	}
+	if (!(irq_tab[irq].flags & IRQ_FLG_STD)) {
+		if (irq_tab[irq].flags & IRQ_FLG_LOCK) {
+			printk("%s: IRQ %d from %s is not replaceable\n",
+			       __FUNCTION__, irq, irq_tab[irq].devname);
+			return -EBUSY;
+		}
+		if (flags & IRQ_FLG_REPLACE) {
+			printk("%s: %s can't replace IRQ %d from %s\n",
+			       __FUNCTION__, devname, irq, irq_tab[irq].devname);
+			return -EBUSY;
+		}
+	}
+	irq_tab[irq].handler = handler;
+	irq_tab[irq].flags   = flags;
+	irq_tab[irq].dev_id  = dev_id;
+	irq_tab[irq].devname = devname;
+	return 0;
+}
+
+void mvme147_free_irq(unsigned int irq, void *dev_id)
+{
+	if (irq > 255) {
+		printk("%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
+		return;
+	}
+	if (irq_tab[irq].dev_id != dev_id)
+		printk("%s: Removing probably wrong IRQ %d from %s\n",
+		       __FUNCTION__, irq, irq_tab[irq].devname);
+
+	irq_tab[irq].handler = mvme147_defhand;
+	irq_tab[irq].flags   = IRQ_FLG_STD;
+	irq_tab[irq].dev_id  = NULL;
+	irq_tab[irq].devname = NULL;
+}
+
+irqreturn_t mvme147_process_int (unsigned long vec, struct pt_regs *fp)
+{
+	if (vec > 255) {
+		printk ("mvme147_process_int: Illegal vector %ld\n", vec);
+		return IRQ_NONE;
+	} else {
+		irq_tab[vec].count++;
+		irq_tab[vec].handler(vec, irq_tab[vec].dev_id, fp);
+		return IRQ_HANDLED;
+	}
+}
+
+int show_mvme147_interrupts (struct seq_file *p, void *v)
+{
+	int i;
+
+	for (i = 0; i < 256; i++) {
+		if (irq_tab[i].count)
+			seq_printf(p, "Vec 0x%02x: %8d  %s\n",
+			    i, irq_tab[i].count,
+			    irq_tab[i].devname ? irq_tab[i].devname : "free");
+	}
+	return 0;
+}
+
+
+static irqreturn_t mvme147_defhand (int irq, void *dev_id, struct pt_regs *fp)
+{
+	printk ("Unknown interrupt 0x%02x\n", irq);
+	return IRQ_NONE;
+}
+
+void mvme147_enable_irq (unsigned int irq)
+{
+}
+
+
+void mvme147_disable_irq (unsigned int irq)
+{
+}
+
diff --git a/arch/m68k/mvme147/Makefile b/arch/m68k/mvme147/Makefile
new file mode 100644
index 0000000..f0153ed
--- /dev/null
+++ b/arch/m68k/mvme147/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Linux arch/m68k/mvme147 source directory
+#
+
+obj-y		:= config.o 147ints.o
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
new file mode 100644
index 0000000..0fcf972
--- /dev/null
+++ b/arch/m68k/mvme147/config.c
@@ -0,0 +1,229 @@
+/*
+ *  arch/m68k/mvme147/config.c
+ *
+ *  Copyright (C) 1996 Dave Frascone [chaos@mindspring.com]
+ *  Cloned from        Richard Hirst [richard@sleepie.demon.co.uk]
+ *
+ * Based on:
+ *
+ *  Copyright (C) 1993 Hamish Macdonald
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file README.legal in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/major.h>
+#include <linux/genhd.h>
+#include <linux/rtc.h>
+#include <linux/interrupt.h>
+
+#include <asm/bootinfo.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/rtc.h>
+#include <asm/machdep.h>
+#include <asm/mvme147hw.h>
+
+
+extern irqreturn_t mvme147_process_int (int level, struct pt_regs *regs);
+extern void mvme147_init_IRQ (void);
+extern void mvme147_free_irq (unsigned int, void *);
+extern int  show_mvme147_interrupts (struct seq_file *, void *);
+extern void mvme147_enable_irq (unsigned int);
+extern void mvme147_disable_irq (unsigned int);
+static void mvme147_get_model(char *model);
+static int  mvme147_get_hardware_list(char *buffer);
+extern int mvme147_request_irq (unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long flags, const char *devname, void *dev_id);
+extern void mvme147_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
+extern unsigned long mvme147_gettimeoffset (void);
+extern int mvme147_hwclk (int, struct rtc_time *);
+extern int mvme147_set_clock_mmss (unsigned long);
+extern void mvme147_reset (void);
+extern void mvme147_waitbut(void);
+
+
+static int bcd2int (unsigned char b);
+
+/* Save tick handler routine pointer, will point to do_timer() in
+ * kernel/sched.c, called via mvme147_process_int() */
+
+irqreturn_t (*tick_handler)(int, void *, struct pt_regs *);
+
+
+int mvme147_parse_bootinfo(const struct bi_record *bi)
+{
+	if (bi->tag == BI_VME_TYPE || bi->tag == BI_VME_BRDINFO)
+		return 0;
+	else
+		return 1;
+}
+
+void mvme147_reset(void)
+{
+	printk ("\r\n\nCalled mvme147_reset\r\n");
+	m147_pcc->watchdog = 0x0a;	/* Clear timer */
+	m147_pcc->watchdog = 0xa5;	/* Enable watchdog - 100ms to reset */
+	while (1)
+		;
+}
+
+static void mvme147_get_model(char *model)
+{
+	sprintf(model, "Motorola MVME147");
+}
+
+
+static int mvme147_get_hardware_list(char *buffer)
+{
+	*buffer = '\0';
+
+	return 0;
+}
+
+
+void __init config_mvme147(void)
+{
+	mach_max_dma_address	= 0x01000000;
+	mach_sched_init		= mvme147_sched_init;
+	mach_init_IRQ		= mvme147_init_IRQ;
+	mach_gettimeoffset	= mvme147_gettimeoffset;
+	mach_hwclk		= mvme147_hwclk;
+	mach_set_clock_mmss	= mvme147_set_clock_mmss;
+	mach_reset		= mvme147_reset;
+	mach_free_irq		= mvme147_free_irq;
+	mach_process_int	= mvme147_process_int;
+	mach_get_irq_list	= show_mvme147_interrupts;
+	mach_request_irq	= mvme147_request_irq;
+	enable_irq		= mvme147_enable_irq;
+	disable_irq		= mvme147_disable_irq;
+	mach_get_model		= mvme147_get_model;
+	mach_get_hardware_list	= mvme147_get_hardware_list;
+
+	/* Board type is only set by newer versions of vmelilo/tftplilo */
+	if (!vme_brdtype)
+		vme_brdtype = VME_TYPE_MVME147;
+}
+
+
+/* Using pcc tick timer 1 */
+
+static irqreturn_t mvme147_timer_int (int irq, void *dev_id, struct pt_regs *fp)
+{
+	m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
+	m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
+	return tick_handler(irq, dev_id, fp);
+}
+
+
+void mvme147_sched_init (irqreturn_t (*timer_routine)(int, void *, struct pt_regs *))
+{
+	tick_handler = timer_routine;
+	request_irq (PCC_IRQ_TIMER1, mvme147_timer_int,
+		IRQ_FLG_REPLACE, "timer 1", NULL);
+
+	/* Init the clock with a value */
+	/* our clock goes off every 6.25us */
+	m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
+	m147_pcc->t1_cntrl = 0x0;	/* clear timer */
+	m147_pcc->t1_cntrl = 0x3;	/* start timer */
+	m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;  /* clear pending ints */
+	m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
+}
+
+/* This is always executed with interrupts disabled.  */
+/* XXX There are race hazards in this code XXX */
+unsigned long mvme147_gettimeoffset (void)
+{
+	volatile unsigned short *cp = (volatile unsigned short *)0xfffe1012;
+	unsigned short n;
+
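+	/* Re-read the free-running counter until two reads agree (the 16-bit
+	 * register may change between accesses), subtract the preload value
+	 * and convert the remaining ticks (6.25us each) to microseconds. */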
+	n = *cp;
+	while (n != *cp)
+		n = *cp;
+
+	n -= PCC_TIMER_PRELOAD;
+	return (unsigned long)n * 25 / 4;
+}
+
+static int bcd2int (unsigned char b)
+{
+	return ((b>>4)*10 + (b&15));
+}
+
+int mvme147_hwclk(int op, struct rtc_time *t)
+{
+#warning check me!
+	if (!op) {
+		m147_rtc->ctrl = RTC_READ;
+		t->tm_year = bcd2int (m147_rtc->bcd_year);
+		t->tm_mon  = bcd2int (m147_rtc->bcd_mth);
+		t->tm_mday = bcd2int (m147_rtc->bcd_dom);
+		t->tm_hour = bcd2int (m147_rtc->bcd_hr);
+		t->tm_min  = bcd2int (m147_rtc->bcd_min);
+		t->tm_sec  = bcd2int (m147_rtc->bcd_sec);
+		m147_rtc->ctrl = 0;
+	}
+	return 0;
+}
+
+int mvme147_set_clock_mmss (unsigned long nowtime)
+{
+	return 0;
+}
+
+/*-------------------  Serial console stuff ------------------------*/
+
+static void scc_delay (void)
+{
+	int n;
+	volatile int trash;
+
+	for (n = 0; n < 20; n++)
+		trash = n;
+}
+
+static void scc_write (char ch)
+{
+	volatile char *p = (volatile char *)M147_SCC_A_ADDR;
+
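+	/* Busy-wait until the SCC signals transmit buffer empty, then point
+	 * the chip at its data register and write the character. */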
+	do {
+		scc_delay();
+	}
+	while (!(*p & 4));
+	scc_delay();
+	*p = 8;
+	scc_delay();
+	*p = ch;
+}
+
+
+void m147_scc_write (struct console *co, const char *str, unsigned count)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	while (count--)
+	{
+		if (*str == '\n')
+			scc_write ('\r');
+		scc_write (*str++);
+	}
+	local_irq_restore(flags);
+}
+
+void mvme147_init_console_port (struct console *co, int cflag)
+{
+	co->write    = m147_scc_write;
+}
diff --git a/arch/m68k/mvme16x/16xints.c b/arch/m68k/mvme16x/16xints.c
new file mode 100644
index 0000000..793ef73
--- /dev/null
+++ b/arch/m68k/mvme16x/16xints.c
@@ -0,0 +1,149 @@
+/*
+ * arch/m68k/mvme16x/16xints.c
+ *
+ * Copyright (C) 1995 Richard Hirst [richard@sleepie.demon.co.uk]
+ *
+ * based on amiints.c -- Amiga Linux interrupt handling code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file README.legal in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/seq_file.h>
+
+#include <asm/system.h>
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+
+static irqreturn_t mvme16x_defhand (int irq, void *dev_id, struct pt_regs *fp);
+
+/*
+ * This should ideally be 4 elements only, for speed.
+ */
+
+static struct {
+	irqreturn_t	(*handler)(int, void *, struct pt_regs *);
+	unsigned long	flags;
+	void		*dev_id;
+	const char	*devname;
+	unsigned	count;
+} irq_tab[192];
+
+/*
+ * void mvme16x_init_IRQ (void)
+ *
+ * Parameters:	None
+ *
+ * Returns:	Nothing
+ *
+ * This function is called during kernel startup to initialize
+ * the mvme16x IRQ handling routines.  Should probably ensure
+ * that the base vectors for the VMEChip2 and PCCChip2 are valid.
+ */
+
+void mvme16x_init_IRQ (void)
+{
+	int i;
+
+	for (i = 0; i < 192; i++) {
+		irq_tab[i].handler = mvme16x_defhand;
+		irq_tab[i].flags = IRQ_FLG_STD;
+		irq_tab[i].dev_id = NULL;
+		irq_tab[i].devname = NULL;
+		irq_tab[i].count = 0;
+	}
+}
+
+int mvme16x_request_irq(unsigned int irq,
+		irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                unsigned long flags, const char *devname, void *dev_id)
+{
+	if (irq < 64 || irq > 255) {
+		printk("%s: Incorrect IRQ %d from %s\n", __FUNCTION__, irq, devname);
+		return -ENXIO;
+	}
+
+	if (!(irq_tab[irq-64].flags & IRQ_FLG_STD)) {
+		if (irq_tab[irq-64].flags & IRQ_FLG_LOCK) {
+			printk("%s: IRQ %d from %s is not replaceable\n",
+			       __FUNCTION__, irq, irq_tab[irq-64].devname);
+			return -EBUSY;
+		}
+		if (flags & IRQ_FLG_REPLACE) {
+			printk("%s: %s can't replace IRQ %d from %s\n",
+			       __FUNCTION__, devname, irq, irq_tab[irq-64].devname);
+			return -EBUSY;
+		}
+	}
+	irq_tab[irq-64].handler = handler;
+	irq_tab[irq-64].flags   = flags;
+	irq_tab[irq-64].dev_id  = dev_id;
+	irq_tab[irq-64].devname = devname;
+	return 0;
+}
+
+void mvme16x_free_irq(unsigned int irq, void *dev_id)
+{
+	if (irq < 64 || irq > 255) {
+		printk("%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
+		return;
+	}
+
+	if (irq_tab[irq-64].dev_id != dev_id)
+		printk("%s: Removing probably wrong IRQ %d from %s\n",
+		       __FUNCTION__, irq, irq_tab[irq-64].devname);
+
+	irq_tab[irq-64].handler = mvme16x_defhand;
+	irq_tab[irq-64].flags   = IRQ_FLG_STD;
+	irq_tab[irq-64].dev_id  = NULL;
+	irq_tab[irq-64].devname = NULL;
+}
+
+irqreturn_t mvme16x_process_int (unsigned long vec, struct pt_regs *fp)
+{
+	if (vec < 64 || vec > 255) {
+		printk ("mvme16x_process_int: Illegal vector %ld", vec);
+		return IRQ_NONE;
+	} else {
+		irq_tab[vec-64].count++;
+		irq_tab[vec-64].handler(vec, irq_tab[vec-64].dev_id, fp);
+		return IRQ_HANDLED;
+	}
+}
+
+int show_mvme16x_interrupts (struct seq_file *p, void *v)
+{
+	int i;
+
+	for (i = 0; i < 192; i++) {
+		if (irq_tab[i].count)
+			seq_printf(p, "Vec 0x%02x: %8d  %s\n",
+			    i+64, irq_tab[i].count,
+			    irq_tab[i].devname ? irq_tab[i].devname : "free");
+	}
+	return 0;
+}
+
+
+static irqreturn_t mvme16x_defhand (int irq, void *dev_id, struct pt_regs *fp)
+{
+	printk ("Unknown interrupt 0x%02x\n", irq);
+	return IRQ_NONE;
+}
+
+
+void mvme16x_enable_irq (unsigned int irq)
+{
+}
+
+
+void mvme16x_disable_irq (unsigned int irq)
+{
+}
+
+
diff --git a/arch/m68k/mvme16x/Makefile b/arch/m68k/mvme16x/Makefile
new file mode 100644
index 0000000..5129f56
--- /dev/null
+++ b/arch/m68k/mvme16x/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Linux arch/m68k/mvme16x source directory
+#
+
+obj-y		:= config.o 16xints.o rtc.o mvme16x_ksyms.o
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
new file mode 100644
index 0000000..26ce81c
--- /dev/null
+++ b/arch/m68k/mvme16x/config.c
@@ -0,0 +1,286 @@
+/*
+ *  arch/m68k/mvme16x/config.c
+ *
+ *  Copyright (C) 1995 Richard Hirst [richard@sleepie.demon.co.uk]
+ *
+ * Based on:
+ *
+ *  linux/amiga/config.c
+ *
+ *  Copyright (C) 1993 Hamish Macdonald
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file README.legal in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/major.h>
+#include <linux/genhd.h>
+#include <linux/rtc.h>
+#include <linux/interrupt.h>
+
+#include <asm/bootinfo.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/rtc.h>
+#include <asm/machdep.h>
+#include <asm/mvme16xhw.h>
+
+extern t_bdid mvme_bdid;
+
+static MK48T08ptr_t volatile rtc = (MK48T08ptr_t)MVME_RTC_BASE;
+
+extern irqreturn_t mvme16x_process_int (int level, struct pt_regs *regs);
+extern void mvme16x_init_IRQ (void);
+extern void mvme16x_free_irq (unsigned int, void *);
+extern int show_mvme16x_interrupts (struct seq_file *, void *);
+extern void mvme16x_enable_irq (unsigned int);
+extern void mvme16x_disable_irq (unsigned int);
+static void mvme16x_get_model(char *model);
+static int  mvme16x_get_hardware_list(char *buffer);
+extern int  mvme16x_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long flags, const char *devname, void *dev_id);
+extern void mvme16x_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
+extern unsigned long mvme16x_gettimeoffset (void);
+extern int mvme16x_hwclk (int, struct rtc_time *);
+extern int mvme16x_set_clock_mmss (unsigned long);
+extern void mvme16x_reset (void);
+extern void mvme16x_waitbut(void);
+
+int bcd2int (unsigned char b);
+
+/* Save tick handler routine pointer, will point to do_timer() in
+ * kernel/sched.c, called via mvme16x_process_int() */
+
+static irqreturn_t (*tick_handler)(int, void *, struct pt_regs *);
+
+
+unsigned short mvme16x_config;
+
+
+int mvme16x_parse_bootinfo(const struct bi_record *bi)
+{
+	if (bi->tag == BI_VME_TYPE || bi->tag == BI_VME_BRDINFO)
+		return 0;
+	else
+		return 1;
+}
+
+void mvme16x_reset(void)
+{
+	printk ("\r\n\nCalled mvme16x_reset\r\n"
+			"\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r");
+	/* The string of returns is to delay the reset until the whole
+	 * message is output.  Assert reset bit in GCSR */
+	*(volatile char *)0xfff40107 = 0x80;
+}
+
+static void mvme16x_get_model(char *model)
+{
+    p_bdid p = &mvme_bdid;
+    char suf[4];
+
+    suf[1] = p->brdsuffix[0];
+    suf[2] = p->brdsuffix[1];
+    suf[3] = '\0';
+    suf[0] = suf[1] ? '-' : '\0';
+
+    sprintf(model, "Motorola MVME%x%s", p->brdno, suf);
+}
+
+
+static int mvme16x_get_hardware_list(char *buffer)
+{
+    p_bdid p = &mvme_bdid;
+    int len = 0;
+
+    if (p->brdno == 0x0162 || p->brdno == 0x0172)
+    {
+	unsigned char rev = *(unsigned char *)MVME162_VERSION_REG;
+
+	len += sprintf (buffer+len, "VMEchip2        %spresent\n",
+			rev & MVME16x_CONFIG_NO_VMECHIP2 ? "NOT " : "");
+	len += sprintf (buffer+len, "SCSI interface  %spresent\n",
+			rev & MVME16x_CONFIG_NO_SCSICHIP ? "NOT " : "");
+	len += sprintf (buffer+len, "Ethernet i/f    %spresent\n",
+			rev & MVME16x_CONFIG_NO_ETHERNET ? "NOT " : "");
+    }
+    else
+	*buffer = '\0';
+
+    return (len);
+}
+
+
+#define pcc2chip	((volatile u_char *)0xfff42000)
+#define PccSCCMICR	0x1d
+#define PccSCCTICR	0x1e
+#define PccSCCRICR	0x1f
+
+void __init config_mvme16x(void)
+{
+    p_bdid p = &mvme_bdid;
+    char id[40];
+
+    mach_max_dma_address = 0xffffffff;
+    mach_sched_init      = mvme16x_sched_init;
+    mach_init_IRQ        = mvme16x_init_IRQ;
+    mach_gettimeoffset   = mvme16x_gettimeoffset;
+    mach_hwclk           = mvme16x_hwclk;
+    mach_set_clock_mmss	 = mvme16x_set_clock_mmss;
+    mach_reset		 = mvme16x_reset;
+    mach_free_irq	 = mvme16x_free_irq;
+    mach_process_int	 = mvme16x_process_int;
+    mach_get_irq_list	 = show_mvme16x_interrupts;
+    mach_request_irq	 = mvme16x_request_irq;
+    enable_irq           = mvme16x_enable_irq;
+    disable_irq          = mvme16x_disable_irq;
+    mach_get_model       = mvme16x_get_model;
+    mach_get_hardware_list = mvme16x_get_hardware_list;
+
+    /* Report board revision */
+
+    if (strncmp("BDID", p->bdid, 4))
+    {
+	printk ("\n\nBug call .BRD_ID returned garbage - giving up\n\n");
+	while (1)
+		;
+    }
+    /* Board type is only set by newer versions of vmelilo/tftplilo */
+    if (vme_brdtype == 0)
+	vme_brdtype = p->brdno;
+
+    mvme16x_get_model(id);
+    printk ("\nBRD_ID: %s   BUG %x.%x %02x/%02x/%02x\n", id, p->rev>>4,
+					p->rev&0xf, p->yr, p->mth, p->day);
+    if (p->brdno == 0x0162 || p->brdno == 0x172)
+    {
+	unsigned char rev = *(unsigned char *)MVME162_VERSION_REG;
+
+	mvme16x_config = rev | MVME16x_CONFIG_GOT_SCCA;
+
+	printk ("MVME%x Hardware status:\n", p->brdno);
+	printk ("    CPU Type           68%s040\n",
+			rev & MVME16x_CONFIG_GOT_FPU ? "" : "LC");
+	printk ("    CPU clock          %dMHz\n",
+			rev & MVME16x_CONFIG_SPEED_32 ? 32 : 25);
+	printk ("    VMEchip2           %spresent\n",
+			rev & MVME16x_CONFIG_NO_VMECHIP2 ? "NOT " : "");
+	printk ("    SCSI interface     %spresent\n",
+			rev & MVME16x_CONFIG_NO_SCSICHIP ? "NOT " : "");
+	printk ("    Ethernet interface %spresent\n",
+			rev & MVME16x_CONFIG_NO_ETHERNET ? "NOT " : "");
+    }
+    else
+    {
+	mvme16x_config = MVME16x_CONFIG_GOT_LP | MVME16x_CONFIG_GOT_CD2401;
+
+	/* Don't allow any interrupts from the CD2401 until the interrupt */
+	/* handlers are installed					 */
+
+	pcc2chip[PccSCCMICR] = 0x10;
+	pcc2chip[PccSCCTICR] = 0x10;
+	pcc2chip[PccSCCRICR] = 0x10;
+    }
+}
+
+static irqreturn_t mvme16x_abort_int (int irq, void *dev_id, struct pt_regs *fp)
+{
+	p_bdid p = &mvme_bdid;
+	unsigned long *new = (unsigned long *)vectors;
+	unsigned long *old = (unsigned long *)0xffe00000;
+	volatile unsigned char uc, *ucp;
+
+	if (p->brdno == 0x0162 || p->brdno == 0x172)
+	{
+		ucp = (volatile unsigned char *)0xfff42043;
+		uc = *ucp | 8;
+		*ucp = uc;
+	}
+	else
+	{
+		*(volatile unsigned long *)0xfff40074 = 0x40000000;
+	}
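+	/* Copy the illegal instruction, trace, trap #15 and ABORT switch
+	 * vectors from the original table at 0xffe00000 back into the live
+	 * vector table. */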
+	*(new+4) = *(old+4);		/* Illegal instruction */
+	*(new+9) = *(old+9);		/* Trace */
+	*(new+47) = *(old+47);		/* Trap #15 */
+
+	if (p->brdno == 0x0162 || p->brdno == 0x172)
+		*(new+0x5e) = *(old+0x5e);	/* ABORT switch */
+	else
+		*(new+0x6e) = *(old+0x6e);	/* ABORT switch */
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mvme16x_timer_int (int irq, void *dev_id, struct pt_regs *fp)
+{
+    *(volatile unsigned char *)0xfff4201b |= 8;
+    return tick_handler(irq, dev_id, fp);
+}
+
+void mvme16x_sched_init (irqreturn_t (*timer_routine)(int, void *, struct pt_regs *))
+{
+    p_bdid p = &mvme_bdid;
+    int irq;
+
+    tick_handler = timer_routine;
+    /* Using PCCchip2 or MC2 chip tick timer 1 */
+    *(volatile unsigned long *)0xfff42008 = 0;
+    *(volatile unsigned long *)0xfff42004 = 10000;	/* 10ms */
+    *(volatile unsigned char *)0xfff42017 |= 3;
+    *(volatile unsigned char *)0xfff4201b = 0x16;
+    if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, 0,
+				"timer", mvme16x_timer_int))
+	panic ("Couldn't register timer int");
+
+    if (p->brdno == 0x0162 || p->brdno == 0x172)
+	irq = MVME162_IRQ_ABORT;
+    else
+        irq = MVME167_IRQ_ABORT;
+    if (request_irq(irq, mvme16x_abort_int, 0,
+				"abort", mvme16x_abort_int))
+	panic ("Couldn't register abort int");
+}
+
+
+/* This is always executed with interrupts disabled.  */
+unsigned long mvme16x_gettimeoffset (void)
+{
+    return (*(volatile unsigned long *)0xfff42008);
+}
+
+int bcd2int (unsigned char b)
+{
+	return ((b>>4)*10 + (b&15));
+}
+
+int mvme16x_hwclk(int op, struct rtc_time *t)
+{
+#warning check me!
+	if (!op) {
+		rtc->ctrl = RTC_READ;
+		t->tm_year = bcd2int (rtc->bcd_year);
+		t->tm_mon  = bcd2int (rtc->bcd_mth);
+		t->tm_mday = bcd2int (rtc->bcd_dom);
+		t->tm_hour = bcd2int (rtc->bcd_hr);
+		t->tm_min  = bcd2int (rtc->bcd_min);
+		t->tm_sec  = bcd2int (rtc->bcd_sec);
+		rtc->ctrl = 0;
+	}
+	return 0;
+}
+
+int mvme16x_set_clock_mmss (unsigned long nowtime)
+{
+	return 0;
+}
+
diff --git a/arch/m68k/mvme16x/mvme16x_ksyms.c b/arch/m68k/mvme16x/mvme16x_ksyms.c
new file mode 100644
index 0000000..4a8a363
--- /dev/null
+++ b/arch/m68k/mvme16x/mvme16x_ksyms.c
@@ -0,0 +1,6 @@
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/ptrace.h>
+#include <asm/mvme16xhw.h>
+
+EXPORT_SYMBOL(mvme16x_config);
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c
new file mode 100644
index 0000000..8a24250
--- /dev/null
+++ b/arch/m68k/mvme16x/rtc.c
@@ -0,0 +1,172 @@
+/*
+ *	Real Time Clock interface for Linux on the MVME16x
+ *
+ * Based on the PC driver by Paul Gortmaker.
+ */
+
+#define RTC_VERSION		"1.00"
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/mc146818rtc.h>	/* For struct rtc_time and ioctls, etc */
+#include <linux/smp_lock.h>
+#include <asm/mvme16xhw.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/setup.h>
+
+/*
+ *	We sponge a minor off of the misc major. No need to slurp
+ *	up another valuable major dev number for this. If you add
+ *	an ioctl, make sure you don't conflict with SPARC's RTC
+ *	ioctls.
+ */
+
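+/*
+ *	Illustration only (not part of this driver): userspace reads the
+ *	clock through the misc device with the standard RTC ioctls handled
+ *	below.  The /dev/rtc node name is the usual convention, not something
+ *	this file creates.  A minimal sketch:
+ *
+ *		int fd = open("/dev/rtc", O_RDONLY);
+ *		struct rtc_time tm;
+ *
+ *		if (fd >= 0 && ioctl(fd, RTC_RD_TIME, &tm) == 0)
+ *			printf("%02d:%02d:%02d\n",
+ *			       tm.tm_hour, tm.tm_min, tm.tm_sec);
+ */
+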
+#define BCD2BIN(val) (((val)&15) + ((val)>>4)*10)
+#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10)
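+/* e.g. BCD2BIN(0x59) == 59 and BIN2BCD(59) == 0x59 */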
+
+static const unsigned char days_in_mo[] =
+{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+
+static atomic_t rtc_ready = ATOMIC_INIT(1);
+
+static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+		     unsigned long arg)
+{
+	volatile MK48T08ptr_t rtc = (MK48T08ptr_t)MVME_RTC_BASE;
+	unsigned long flags;
+	struct rtc_time wtime;
+
+	switch (cmd) {
+	case RTC_RD_TIME:	/* Read the time/date from RTC	*/
+	{
+		local_irq_save(flags);
+		/* Ensure clock and real-time-mode-register are accessible */
+		rtc->ctrl = RTC_READ;
+		memset(&wtime, 0, sizeof(struct rtc_time));
+		wtime.tm_sec =  BCD2BIN(rtc->bcd_sec);
+		wtime.tm_min =  BCD2BIN(rtc->bcd_min);
+		wtime.tm_hour = BCD2BIN(rtc->bcd_hr);
+		wtime.tm_mday =  BCD2BIN(rtc->bcd_dom);
+		wtime.tm_mon =  BCD2BIN(rtc->bcd_mth)-1;
+		wtime.tm_year = BCD2BIN(rtc->bcd_year);
+		if (wtime.tm_year < 70)
+			wtime.tm_year += 100;
+		wtime.tm_wday = BCD2BIN(rtc->bcd_dow)-1;
+		rtc->ctrl = 0;
+		local_irq_restore(flags);
+		return copy_to_user((void *)arg, &wtime, sizeof wtime) ?
+								-EFAULT : 0;
+	}
+	case RTC_SET_TIME:	/* Set the RTC */
+	{
+		struct rtc_time rtc_tm;
+		unsigned char mon, day, hrs, min, sec, leap_yr;
+		unsigned int yrs;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
+
+		if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
+				   sizeof(struct rtc_time)))
+			return -EFAULT;
+
+		yrs = rtc_tm.tm_year;
+		if (yrs < 1900)
+			yrs += 1900;
+		mon = rtc_tm.tm_mon + 1;   /* tm_mon starts at zero */
+		day = rtc_tm.tm_mday;
+		hrs = rtc_tm.tm_hour;
+		min = rtc_tm.tm_min;
+		sec = rtc_tm.tm_sec;
+
+		leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
+
+		if ((mon > 12) || (day == 0))
+			return -EINVAL;
+
+		if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
+			return -EINVAL;
+
+		if ((hrs >= 24) || (min >= 60) || (sec >= 60))
+			return -EINVAL;
+
+		if (yrs >= 2070)
+			return -EINVAL;
+
+		local_irq_save(flags);
+		rtc->ctrl     = RTC_WRITE;
+
+		rtc->bcd_sec  = BIN2BCD(sec);
+		rtc->bcd_min  = BIN2BCD(min);
+		rtc->bcd_hr   = BIN2BCD(hrs);
+		rtc->bcd_dom  = BIN2BCD(day);
+		rtc->bcd_mth  = BIN2BCD(mon);
+		rtc->bcd_year = BIN2BCD(yrs%100);
+
+		rtc->ctrl     = 0;
+		local_irq_restore(flags);
+		return 0;
+	}
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ *	We enforce only one user at a time here with the open/close.
+ *	Also clear the previous interrupt data on an open, and clean
+ *	up things on a close.
+ */
+
+static int rtc_open(struct inode *inode, struct file *file)
+{
+	if( !atomic_dec_and_test(&rtc_ready) )
+	{
+		atomic_inc( &rtc_ready );
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int rtc_release(struct inode *inode, struct file *file)
+{
+	atomic_inc( &rtc_ready );
+	return 0;
+}
+
+/*
+ *	The various file operations we support.
+ */
+
+static struct file_operations rtc_fops = {
+	.ioctl =	rtc_ioctl,
+	.open =		rtc_open,
+	.release =	rtc_release,
+};
+
+static struct miscdevice rtc_dev=
+{
+	.minor =	RTC_MINOR,
+	.name =		"rtc",
+	.fops =		&rtc_fops
+};
+
+int __init rtc_MK48T08_init(void)
+{
+	if (!MACH_IS_MVME16x)
+		return -ENODEV;
+
+	printk(KERN_INFO "MK48T08 Real Time Clock Driver v%s\n", RTC_VERSION);
+	return misc_register(&rtc_dev);
+}
+
diff --git a/arch/m68k/q40/Makefile b/arch/m68k/q40/Makefile
new file mode 100644
index 0000000..27eb4279
--- /dev/null
+++ b/arch/m68k/q40/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Linux arch/m68k/q40 source directory
+#
+
+obj-y		:= config.o q40ints.o
diff --git a/arch/m68k/q40/README b/arch/m68k/q40/README
new file mode 100644
index 0000000..6bdbf48
--- /dev/null
+++ b/arch/m68k/q40/README
@@ -0,0 +1,138 @@
+Linux for the Q40
+=================
+
+You may try http://www.geocities.com/SiliconValley/Bay/2602/ for
+some up-to-date information. The booter and other tools will also be
+available from there or from ftp.uni-erlangen.de/linux/680x0/q40/
+and mirrors.
+
+References to documentation usually refer to the Linux source tree in
+/usr/src/linux/Documentation unless a URL is given.
+
+It seems IRQ unmasking can't be safely done on a Q40. IRQ probing
+is not implemented - do not try it! (See below)
+
+For a list of kernel command-line options read the documentation for the
+particular device drivers.
+
+The floppy imposes a very high interrupt load on the CPU, approx 30K/s.
+When something blocks interrupts (HD) it will lose some of them; so far
+this is not known to have caused any data loss. On highly loaded systems
+it can make the floppy very slow or practically stop. Other Q40 OSes
+simply poll the floppy for this reason - something that can't be done in
+Linux. The only possible cure is getting an 82072 controller with a FIFO
+instead of the 8272A.
+
+drivers used by the Q40, apart from the very obvious (console etc.):
+	drivers/char/q40_keyb.c		# use PC keymaps for national keyboards
+		     serial.c		# normal PC driver - any speed
+	             lp.c		# printer driver
+		     genrtc.c		# RTC
+		char/joystick/*		# most of this should work, not
+				        # in default config.in
+	        block/q40ide.c		# startup for ide
+		      ide*		# see Documentation/ide.txt
+		      floppy.c		# normal PC driver, DMA emu in asm/floppy.h
+					# and arch/m68k/kernel/entry.S
+					# see drivers/block/README.fd
+		net/ne.c
+		video/q40fb.c
+		parport/*
+		sound/dmasound_core.c
+		      dmasound_q40.c
+
+Various other PC drivers can be enabled simply by adding them to
+arch/m68k/config.in; 8-bit devices especially should work without any
+problems. Cards using 16-bit io/mem require more care, such as
+checking byte-order issues, hacking memcpy_*_io etc.
+
+
+Debugging
+=========
+
+Upon startup the kernel will usually output "ABCQGHIJ" into the SRAM,
+preceded by the booter signature. This is a trace just in case something
+went wrong during the earliest setup stages of head.S.
+**Changed**: to preserve SRAM contents, this is now only done when
+requested - the SRAM must start with the '%LX$' signature for it to
+happen. The '-d' option to the 'lxx' loader enables this.
+
+SRAM can also be used as an additional console device; use debug=mem.
+This will save kernel startup messages into SRAM, while the screen will
+display only the penguin - and a shell prompt if it gets that far.
+Unfortunately only 2000 bytes are available.
+
+The serial console works and can also be used for debugging, see
+loader_txt.
+
+Most problems seem to be caused by faulty or badly configured I/O cards
+or hard drives anyway.
+Make sure to configure the parallel port as SPP and remove the IRQ/DMA
+jumpers for initial testing. The Q40 does not support DMA and may have
+trouble with the parallel port's interrupts.
+
+
+Q40 Hardware Description
+========================
+
+This is just an overview; see asm-m68k/* for details, and ask if you
+have any questions.
+
+The Q40 consists of a 68040 @ 40 MHz, 1MB video RAM, up to 32MB RAM, an
+AT-style keyboard interface, 1 programmable LED, 2x8bit DACs, up to 1MB
+ROM and 1MB shadow ROM.
+The Q60 has either a 68060 or a 68LC060 and up to 128 MB RAM.
+
+Most interfacing, like floppy, IDE, serial and parallel ports, is done
+via ISA slots. The ISA io and mem ranges are mapped (sparse &
+byteswapped!) into separate regions of the memory.
+The main interrupt register IIRQ_REG indicates whether an IRQ was
+internal or from some ISA device; EIRQ_REG can distinguish up to 8 ISA
+IRQs.
+
+The Q40 custom chip is programmable to provide 2 periodic timers:
+	- 50 or 200 Hz - level 2, !!THIS CAN'T BE DISABLED!!
+	- 10 or 20 kHz - level 4, used for dma-sound
+
+Linux uses the 200 Hz interrupt for timer and beep by default.
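+
+(The kernel's own tick is 100 Hz here; q40ints.c derives it from the
+200 Hz frame interrupt by calling the generic timer routine only on
+every second interrupt - roughly:
+
+	ql_ticks = ql_ticks ? 0 : 1;		/* toggles at 200 Hz */
+	if (!ql_ticks)
+		timer_routine(irq, dev, regs);	/* so this runs at 100 Hz */
+
+see q40_timer_int() in q40ints.c for the real code.)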
+
+
+Interrupts
+==========
+
+The q40 master chip handles only a subset of level-triggered interrupts.
+
+Linux has some requirements wrt the interrupt architecture; to my
+knowledge these are:
+	(a) an interrupt handler must not be reentered, even when sti() is
+	    called from within the handler
+	(b) working enable/disable_irq
+
+Luckily these requirements are only important for drivers shared
+with other architectures - IDE, serial, parallel, ethernet.
+q40ints.c now contains a trivial hack for (a) - see the sketch at the
+end of this section; (b) is more difficult because only IRQs 4-15 can
+be disabled - and only all of them at once. Thus disable_irq() can
+effectively block the machine if the driver goes to sleep.
+One thing to keep in mind when hacking around the interrupt code is
+that there is no way to find out which IRQ caused a request; [EI]IRQ_REG
+only displays the current state of the various IRQ lines.
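+
+A minimal, self-contained sketch of the guard used for (a). The names
+mirror the irq_tab[] bookkeeping in q40ints.c, but this is not the
+literal kernel code:
+
+	#define IRQ_INPROGRESS 1
+
+	struct irq_node {
+		void (*handler)(int irq);
+		unsigned short state;
+	};
+
+	static void dispatch(struct irq_node *node, int irq)
+	{
+		if (node->state & IRQ_INPROGRESS)
+			return;			/* refuse to re-enter */
+		node->state |= IRQ_INPROGRESS;
+		node->handler(irq);
+		node->state &= ~IRQ_INPROGRESS;
+	}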
+
+Keyboard
+========
+
+The q40 receives AT make/break codes from the keyboard; these are translated
+to the PC scancodes x86 Linux uses. So in theory every national keyboard
+should work just by loading the appropriate x86 keytable - see any
+national HOWTO.
+
+Unfortunately the AT->PC translation isn't quite trivial and, even worse, my
+documentation of it is absolutely minimal - thus some exotic keys may not
+behave exactly as expected.
+
+There is still hope that it can be fixed completely, though. If you encounter
+problems, email me, ideally with:
+	- the exact keypress/release sequence
+	- 'showkey -s' run on the q40, non-X session
+	- 'showkey -s' run on a PC, non-X session
+	- the AT codes as displayed by the q40 debugging ROM
+By the way, if the showkey output from the PC and the Q40 doesn't differ,
+then you have a classic configuration problem - don't send me anything in
+that case.
+
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
new file mode 100644
index 0000000..02b626bae
--- /dev/null
+++ b/arch/m68k/q40/config.c
@@ -0,0 +1,365 @@
+/*
+ *  arch/m68k/q40/config.c
+ *
+ *  Copyright (C) 1999 Richard Zidlicky
+ *
+ * originally based on:
+ *
+ *  linux/bvme/config.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file README.legal in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/major.h>
+#include <linux/serial_reg.h>
+#include <linux/rtc.h>
+#include <linux/vt_kern.h>
+
+#include <asm/io.h>
+#include <asm/rtc.h>
+#include <asm/bootinfo.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/machdep.h>
+#include <asm/q40_master.h>
+
+extern void floppy_setup(char *str, int *ints);
+
+extern irqreturn_t q40_process_int (int level, struct pt_regs *regs);
+extern irqreturn_t (*q40_default_handler[]) (int, void *, struct pt_regs *);  /* added just for debugging */
+extern void q40_init_IRQ (void);
+extern void q40_free_irq (unsigned int, void *);
+extern int  show_q40_interrupts (struct seq_file *, void *);
+extern void q40_enable_irq (unsigned int);
+extern void q40_disable_irq (unsigned int);
+static void q40_get_model(char *model);
+static int  q40_get_hardware_list(char *buffer);
+extern int  q40_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long flags, const char *devname, void *dev_id);
+extern void q40_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
+
+extern unsigned long q40_gettimeoffset (void);
+extern int q40_hwclk (int, struct rtc_time *);
+extern unsigned int q40_get_ss (void);
+extern int q40_set_clock_mmss (unsigned long);
+static int q40_get_rtc_pll(struct rtc_pll_info *pll);
+static int q40_set_rtc_pll(struct rtc_pll_info *pll);
+extern void q40_reset (void);
+void q40_halt(void);
+extern void q40_waitbut(void);
+void q40_set_vectors (void);
+
+extern void q40_mksound(unsigned int /*freq*/, unsigned int /*ticks*/ );
+
+extern char m68k_debug_device[];
+static void q40_mem_console_write(struct console *co, const char *b,
+				    unsigned int count);
+
+extern int ql_ticks;
+
+static struct console q40_console_driver = {
+	.name =		"debug",
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+
+/* early debugging function:*/
+extern char *q40_mem_cptr; /*=(char *)0xff020000;*/
+static int _cpleft;
+
+static void q40_mem_console_write(struct console *co, const char *s,
+				  unsigned int count)
+{
+  char *p=(char *)s;
+
+  if (count<_cpleft)
+    while (count-- >0){
+      *q40_mem_cptr=*p++;
+      q40_mem_cptr+=4;
+      _cpleft--;
+    }
+}
+#if 0
+void printq40(char *str)
+{
+  int l=strlen(str);
+  char *p=q40_mem_cptr;
+
+  while (l-- >0 && _cpleft-- >0)
+    {
+      *p=*str++;
+      p+=4;
+    }
+  q40_mem_cptr=p;
+}
+#endif
+
+static int halted=0;
+
+#ifdef CONFIG_HEARTBEAT
+static void q40_heartbeat(int on)
+{
+  if (halted) return;
+
+  if (on)
+    Q40_LED_ON();
+  else
+    Q40_LED_OFF();
+}
+#endif
+
+void q40_reset(void)
+{
+        halted=1;
+        printk ("\n\n*******************************************\n"
+		"Called q40_reset : press the RESET button!! \n"
+		"*******************************************\n");
+	Q40_LED_ON();
+	while(1) ;
+}
+void q40_halt(void)
+{
+        halted=1;
+        printk ("\n\n*******************\n"
+		    "  Called q40_halt\n"
+		    "*******************\n");
+	Q40_LED_ON();
+	while(1) ;
+}
+
+static void q40_get_model(char *model)
+{
+    sprintf(model, "Q40");
+}
+
+/* No hardware options on Q40? */
+
+static int q40_get_hardware_list(char *buffer)
+{
+    *buffer = '\0';
+    return 0;
+}
+
+static unsigned int serports[]={0x3f8,0x2f8,0x3e8,0x2e8,0};
+void q40_disable_irqs(void)
+{
+  unsigned i,j;
+
+  j=0;
+  while((i=serports[j++])) outb(0,i+UART_IER);
+  master_outb(0,EXT_ENABLE_REG);
+  master_outb(0,KEY_IRQ_ENABLE_REG);
+}
+
+void __init config_q40(void)
+{
+    mach_sched_init      = q40_sched_init;
+
+    mach_init_IRQ        = q40_init_IRQ;
+    mach_gettimeoffset   = q40_gettimeoffset;
+    mach_hwclk           = q40_hwclk;
+    mach_get_ss          = q40_get_ss;
+    mach_get_rtc_pll     = q40_get_rtc_pll;
+    mach_set_rtc_pll     = q40_set_rtc_pll;
+    mach_set_clock_mmss	 = q40_set_clock_mmss;
+
+    mach_reset		 = q40_reset;
+    mach_free_irq	 = q40_free_irq;
+    mach_process_int	 = q40_process_int;
+    mach_get_irq_list	 = show_q40_interrupts;
+    mach_request_irq	 = q40_request_irq;
+    enable_irq		 = q40_enable_irq;
+    disable_irq          = q40_disable_irq;
+    mach_default_handler = &q40_default_handler;
+    mach_get_model       = q40_get_model;
+    mach_get_hardware_list = q40_get_hardware_list;
+
+#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
+    mach_beep            = q40_mksound;
+#endif
+#ifdef CONFIG_HEARTBEAT
+    mach_heartbeat = q40_heartbeat;
+#endif
+    mach_halt = q40_halt;
+#ifdef CONFIG_DUMMY_CONSOLE
+    conswitchp = &dummy_con;
+#endif
+
+    /* disable a few things that SMSQ might have left enabled */
+    q40_disable_irqs();
+
+    /* no DMA at all, but ide-scsi requires it... make sure
+     * all physical RAM fits into the boundary - otherwise the
+     * allocator may play costly and useless tricks */
+    mach_max_dma_address = 1024*1024*1024;
+
+    /* useful for early debugging stages - writes kernel messages into SRAM */
+    if (!strncmp( m68k_debug_device,"mem",3 ))
+      {
+	/*printk("using NVRAM debug, q40_mem_cptr=%p\n",q40_mem_cptr);*/
+	_cpleft=2000-((long)q40_mem_cptr-0xff020000)/4;
+	q40_console_driver.write = q40_mem_console_write;
+	register_console(&q40_console_driver);
+      }
+}
+
+
+int q40_parse_bootinfo(const struct bi_record *rec)
+{
+  return 1;
+}
+
+
+static inline unsigned char bcd2bin (unsigned char b)
+{
+	return ((b>>4)*10 + (b&15));
+}
+
+static inline unsigned char bin2bcd (unsigned char b)
+{
+	return (((b/10)*16) + (b%10));
+}
+
+
+unsigned long q40_gettimeoffset (void)
+{
+    return 5000*(ql_ticks!=0);
+}
+
+
+/*
+ * Looks like op is non-zero for setting the clock, and zero for
+ * reading the clock.
+ *
+ *  struct hwclk_time {
+ *         unsigned        sec;       0..59
+ *         unsigned        min;       0..59
+ *         unsigned        hour;      0..23
+ *         unsigned        day;       1..31
+ *         unsigned        mon;       0..11
+ *         unsigned        year;      00...
+ *         int             wday;      0..6, 0 is Sunday, -1 means unknown/don't set
+ * };
+ */
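+
+/* For the struct rtc_time actually passed in below, 23:05:30 on Monday,
+ * 14 Feb 2005 comes through as tm_sec=30, tm_min=5, tm_hour=23,
+ * tm_mday=14, tm_mon=1, tm_year=105, tm_wday=1. */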
+
+int q40_hwclk(int op, struct rtc_time *t)
+{
+        if (op)
+	{	/* Write.... */
+	        Q40_RTC_CTRL |= Q40_RTC_WRITE;
+
+		Q40_RTC_SECS = bin2bcd(t->tm_sec);
+		Q40_RTC_MINS = bin2bcd(t->tm_min);
+		Q40_RTC_HOUR = bin2bcd(t->tm_hour);
+		Q40_RTC_DATE = bin2bcd(t->tm_mday);
+		Q40_RTC_MNTH = bin2bcd(t->tm_mon + 1);
+		Q40_RTC_YEAR = bin2bcd(t->tm_year%100);
+		if (t->tm_wday >= 0)
+			Q40_RTC_DOW = bin2bcd(t->tm_wday+1);
+
+	        Q40_RTC_CTRL &= ~(Q40_RTC_WRITE);
+	}
+	else
+	{	/* Read....  */
+	  Q40_RTC_CTRL |= Q40_RTC_READ;
+
+	  t->tm_year = bcd2bin (Q40_RTC_YEAR);
+	  t->tm_mon  = bcd2bin (Q40_RTC_MNTH)-1;
+	  t->tm_mday = bcd2bin (Q40_RTC_DATE);
+	  t->tm_hour = bcd2bin (Q40_RTC_HOUR);
+	  t->tm_min  = bcd2bin (Q40_RTC_MINS);
+	  t->tm_sec  = bcd2bin (Q40_RTC_SECS);
+
+	  Q40_RTC_CTRL &= ~(Q40_RTC_READ);
+
+	  if (t->tm_year < 70)
+	    t->tm_year += 100;
+	  t->tm_wday = bcd2bin(Q40_RTC_DOW)-1;
+
+	}
+
+	return 0;
+}
+
+unsigned int q40_get_ss(void)
+{
+	return bcd2bin(Q40_RTC_SECS);
+}
+
+/*
+ * Set the minutes and seconds from seconds value 'nowtime'.  Fail if
+ * clock is out by > 30 minutes.  Logic lifted from atari code.
+ */
+
+int q40_set_clock_mmss (unsigned long nowtime)
+{
+	int retval = 0;
+	short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
+
+	int rtc_minutes;
+
+
+	rtc_minutes = bcd2bin (Q40_RTC_MINS);
+
+	if ((rtc_minutes < real_minutes
+		? real_minutes - rtc_minutes
+			: rtc_minutes - real_minutes) < 30)
+	{
+	        Q40_RTC_CTRL |= Q40_RTC_WRITE;
+		Q40_RTC_MINS = bin2bcd(real_minutes);
+		Q40_RTC_SECS = bin2bcd(real_seconds);
+		Q40_RTC_CTRL &= ~(Q40_RTC_WRITE);
+	}
+	else
+		retval = -1;
+
+
+	return retval;
+}
+
+
+/* get and set PLL calibration of RTC clock */
+#define Q40_RTC_PLL_MASK ((1<<5)-1)
+#define Q40_RTC_PLL_SIGN (1<<5)
+
+static int q40_get_rtc_pll(struct rtc_pll_info *pll)
+{
+	int tmp=Q40_RTC_CTRL;
+	pll->pll_value = tmp & Q40_RTC_PLL_MASK;
+	if (tmp & Q40_RTC_PLL_SIGN)
+		pll->pll_value = -pll->pll_value;
+	pll->pll_max=31;
+	pll->pll_min=-31;
+	pll->pll_posmult=512;
+	pll->pll_negmult=256;
+	pll->pll_clock=125829120;
+	return 0;
+}
+
+static int q40_set_rtc_pll(struct rtc_pll_info *pll)
+{
+	if (!pll->pll_ctrl){
+		/* the docs are a bit unclear so I am double-setting */
+		/* RTC_WRITE here ... */
+		int tmp = (pll->pll_value & 31) | (pll->pll_value<0 ? 32 : 0) |
+			  Q40_RTC_WRITE;
+		Q40_RTC_CTRL |= Q40_RTC_WRITE;
+		Q40_RTC_CTRL = tmp;
+		Q40_RTC_CTRL &= ~(Q40_RTC_WRITE);
+		return 0;
+	} else
+		return -EINVAL;
+}
diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
new file mode 100644
index 0000000..f8ecc26
--- /dev/null
+++ b/arch/m68k/q40/q40ints.c
@@ -0,0 +1,476 @@
+/*
+ * arch/m68k/q40/q40ints.c
+ *
+ * Copyright (C) 1999,2001 Richard Zidlicky
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * .. used to be loosely based on bvme6000ints.c
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
+
+#include <asm/rtc.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+
+#include <asm/q40_master.h>
+#include <asm/q40ints.h>
+
+/*
+ * Q40 IRQs are defined as follows:
+ *            3,4,5,6,7,10,11,14,15 : ISA dev IRQs
+ *            16-31: reserved
+ *            32   : keyboard int
+ *            33   : frame int (50/200 Hz periodic timer)
+ *            34   : sample int (10/20 KHz periodic timer)
+ *
+*/
+
+extern int ints_inited;
+
+
+irqreturn_t q40_irq2_handler (int, void *, struct pt_regs *fp);
+
+
+static irqreturn_t q40_defhand (int irq, void *dev_id, struct pt_regs *fp);
+static irqreturn_t default_handler(int lev, void *dev_id, struct pt_regs *regs);
+
+
+#define DEVNAME_SIZE 24
+
+static struct q40_irq_node {
+	irqreturn_t	(*handler)(int, void *, struct pt_regs *);
+	unsigned long	flags;
+	void		*dev_id;
+  /*        struct q40_irq_node *next;*/
+        char	        devname[DEVNAME_SIZE];
+	unsigned	count;
+        unsigned short  state;
+} irq_tab[Q40_IRQ_MAX+1];
+
+short unsigned q40_ablecount[Q40_IRQ_MAX+1];
+
+/*
+ * void q40_init_IRQ (void)
+ *
+ * Parameters:	None
+ *
+ * Returns:	Nothing
+ *
+ * This function is called during kernel startup to initialize
+ * the q40 IRQ handling routines.
+ */
+
+static int disabled=0;
+
+void q40_init_IRQ (void)
+{
+	int i;
+
+	disabled=0;
+	for (i = 0; i <= Q40_IRQ_MAX; i++) {
+		irq_tab[i].handler = q40_defhand;
+		irq_tab[i].flags = 0;
+		irq_tab[i].dev_id = NULL;
+		/*		irq_tab[i].next = NULL;*/
+		irq_tab[i].devname[0] = 0;
+		irq_tab[i].count = 0;
+		irq_tab[i].state =0;
+		q40_ablecount[i]=0;   /* all enabled */
+	}
+
+	/* setup handler for ISA ints */
+	cpu_request_irq(IRQ2, q40_irq2_handler, 0, "q40 ISA and master chip",
+			NULL);
+
+	/* now enable some ints.. */
+	master_outb(1,EXT_ENABLE_REG);  /* ISA IRQ 5-15 */
+
+	/* make sure keyboard IRQ is disabled */
+	master_outb(0,KEY_IRQ_ENABLE_REG);
+}
+
+int q40_request_irq(unsigned int irq,
+		irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                unsigned long flags, const char *devname, void *dev_id)
+{
+  /*printk("q40_request_irq %d, %s\n",irq,devname);*/
+
+	if (irq > Q40_IRQ_MAX || (irq>15 && irq<32)) {
+		printk("%s: Incorrect IRQ %d from %s\n", __FUNCTION__, irq, devname);
+		return -ENXIO;
+	}
+
+	/* test for ISA ints not implemented by HW */
+	switch (irq)
+	  {
+	  case 1: case 2: case 8: case 9:
+	  case 12: case 13:
+	    printk("%s: ISA IRQ %d from %s not implemented by HW\n", __FUNCTION__, irq, devname);
+	    return -ENXIO;
+	  case 11:
+	    printk("warning: IRQ 10 and 11 are not distinguishable\n");
+	    irq=10;
+	  default:
+	    ;
+	  }
+
+	if (irq<Q40_IRQ_SAMPLE)
+	  {
+	    if (irq_tab[irq].dev_id != NULL)
+		  {
+		    printk("%s: IRQ %d from %s is not replaceable\n",
+			   __FUNCTION__, irq, irq_tab[irq].devname);
+		    return -EBUSY;
+		  }
+	    /*printk("IRQ %d set to handler %p\n",irq,handler);*/
+	    if (dev_id==NULL)
+		  {
+		printk("WARNING: dev_id == NULL in request_irq\n");
+		dev_id=(void*)1;
+	      }
+	    irq_tab[irq].handler = handler;
+	    irq_tab[irq].flags   = flags;
+	    irq_tab[irq].dev_id  = dev_id;
+	    strlcpy(irq_tab[irq].devname,devname,sizeof(irq_tab[irq].devname));
+	    irq_tab[irq].state = 0;
+	    return 0;
+	  }
+	else {
+	  /* Q40_IRQ_SAMPLE :somewhat special actions required here ..*/
+	  cpu_request_irq(4, handler, flags, devname, dev_id);
+	  cpu_request_irq(6, handler, flags, devname, dev_id);
+	  return 0;
+	}
+}
+
+void q40_free_irq(unsigned int irq, void *dev_id)
+{
+	if (irq > Q40_IRQ_MAX || (irq>15 && irq<32)) {
+		printk("%s: Incorrect IRQ %d, dev_id %x \n", __FUNCTION__, irq, (unsigned)dev_id);
+		return;
+	}
+
+	/* test for ISA ints not implemented by HW */
+	switch (irq)
+	  {
+	  case 1: case 2: case 8: case 9:
+	  case 12: case 13:
+	    printk("%s: ISA IRQ %d from %x invalid\n", __FUNCTION__, irq, (unsigned)dev_id);
+	    return;
+	  case 11: irq=10;
+	  default:
+	    ;
+	  }
+
+	if (irq<Q40_IRQ_SAMPLE)
+	  {
+	    if (irq_tab[irq].dev_id != dev_id)
+	      printk("%s: Removing probably wrong IRQ %d from %s\n",
+		     __FUNCTION__, irq, irq_tab[irq].devname);
+
+	    irq_tab[irq].handler = q40_defhand;
+	    irq_tab[irq].flags   = 0;
+	    irq_tab[irq].dev_id  = NULL;
+	    /* irq_tab[irq].devname = NULL; */
+	    /* do not reset state !! */
+	  }
+	else
+	  { /* == Q40_IRQ_SAMPLE */
+	    cpu_free_irq(4, dev_id);
+	    cpu_free_irq(6, dev_id);
+	  }
+}
+
+
+irqreturn_t q40_process_int (int level, struct pt_regs *fp)
+{
+  printk("unexpected interrupt vec=%x, pc=%lx, d0=%lx, d0_orig=%lx, d1=%lx, d2=%lx\n",
+          level, fp->pc, fp->d0, fp->orig_d0, fp->d1, fp->d2);
+  printk("\tIIRQ_REG = %x, EIRQ_REG = %x\n",master_inb(IIRQ_REG),master_inb(EIRQ_REG));
+  return IRQ_HANDLED;
+}
+
+/*
+ * this stuff doesn't really belong here..
+*/
+
+int ql_ticks;              /* 200Hz ticks since the last jiffy */
+static int sound_ticks;
+
+#define SVOL 45
+
+void q40_mksound(unsigned int hz, unsigned int ticks)
+{
+  /* for now ignore hz, except that hz==0 switches off sound */
+  /* simply alternate the ampl (128-SVOL)-(128+SVOL)-..-.. at 200Hz */
+  if (hz==0)
+    {
+      if (sound_ticks)
+	sound_ticks=1;
+
+      *DAC_LEFT=128;
+      *DAC_RIGHT=128;
+
+      return;
+    }
+  /* sound itself is done in q40_timer_int */
+  if (sound_ticks == 0) sound_ticks=1000; /* pretty long beep */
+  sound_ticks=ticks<<1;
+}
+
+static irqreturn_t (*q40_timer_routine)(int, void *, struct pt_regs *);
+
+static irqreturn_t q40_timer_int (int irq, void * dev, struct pt_regs * regs)
+{
+    ql_ticks = ql_ticks ? 0 : 1;
+    if (sound_ticks)
+      {
+	unsigned char sval=(sound_ticks & 1) ? 128-SVOL : 128+SVOL;
+	sound_ticks--;
+	*DAC_LEFT=sval;
+	*DAC_RIGHT=sval;
+      }
+
+    if (!ql_ticks)
+	q40_timer_routine(irq, dev, regs);
+    return IRQ_HANDLED;
+}
+
+void q40_sched_init (irqreturn_t (*timer_routine)(int, void *, struct pt_regs *))
+{
+    int timer_irq;
+
+    q40_timer_routine = timer_routine;
+    timer_irq=Q40_IRQ_FRAME;
+
+    if (request_irq(timer_irq, q40_timer_int, 0,
+				"timer", q40_timer_int))
+	panic ("Couldn't register timer int");
+
+    master_outb(-1,FRAME_CLEAR_REG);
+    master_outb( 1,FRAME_RATE_REG);
+}
+
+
+/*
+ * tables to translate bits into IRQ numbers
+ * it is a good idea to order the entries by priority
+ *
+*/
+
+struct IRQ_TABLE{ unsigned mask; int irq ;};
+#if 0
+static struct IRQ_TABLE iirqs[]={
+  {Q40_IRQ_FRAME_MASK,Q40_IRQ_FRAME},
+  {Q40_IRQ_KEYB_MASK,Q40_IRQ_KEYBOARD},
+  {0,0}};
+#endif
+static struct IRQ_TABLE eirqs[] = {
+  { .mask = Q40_IRQ3_MASK,	.irq = 3 },	/* ser 1 */
+  { .mask = Q40_IRQ4_MASK,	.irq = 4 },	/* ser 2 */
+  { .mask = Q40_IRQ14_MASK,	.irq = 14 },	/* IDE 1 */
+  { .mask = Q40_IRQ15_MASK,	.irq = 15 },	/* IDE 2 */
+  { .mask = Q40_IRQ6_MASK,	.irq = 6 },	/* floppy, handled elsewhere */
+  { .mask = Q40_IRQ7_MASK,	.irq = 7 },	/* par */
+  { .mask = Q40_IRQ5_MASK,	.irq = 5 },
+  { .mask = Q40_IRQ10_MASK,	.irq = 10 },
+  {0,0}
+};
+
+/* complain only this many times about spurious ints : */
+static int ccleirq=60;    /* ISA dev IRQ's*/
+/*static int cclirq=60;*/     /* internal */
+
+/* FIXME: add shared ints,mask,unmask,probing.... */
+
+#define IRQ_INPROGRESS 1
+/*static unsigned short saved_mask;*/
+//static int do_tint=0;
+
+#define DEBUG_Q40INT
+/*#define IP_USE_DISABLE *//* would be nice, but crashes ???? */
+
+static int mext_disabled=0;  /* ext irq disabled by master chip? */
+static int aliased_irq=0;  /* how many times inside handler ?*/
+
+
+/* got level 2 interrupt, dispatch to ISA or keyboard/timer IRQs */
+irqreturn_t q40_irq2_handler (int vec, void *devname, struct pt_regs *fp)
+{
+  unsigned mir, mer;
+  int irq,i;
+
+//repeat:
+  mir=master_inb(IIRQ_REG);
+  if (mir&Q40_IRQ_FRAME_MASK) {
+	  irq_tab[Q40_IRQ_FRAME].count++;
+	  irq_tab[Q40_IRQ_FRAME].handler(Q40_IRQ_FRAME,irq_tab[Q40_IRQ_FRAME].dev_id,fp);
+	  master_outb(-1,FRAME_CLEAR_REG);
+  }
+  if ((mir&Q40_IRQ_SER_MASK) || (mir&Q40_IRQ_EXT_MASK)) {
+	  mer=master_inb(EIRQ_REG);
+	  for (i=0; eirqs[i].mask; i++) {
+		  if (mer&(eirqs[i].mask)) {
+			  irq=eirqs[i].irq;
+/*
+ * There is a little mess wrt which IRQ really caused this irq request. The
+ * main problem is that IIRQ_REG and EIRQ_REG reflect the state when they
+ * are read - which is long after the request came in. In theory IRQs should
+ * not just go away but they occasionally do.
+ */
+			  if (irq>4 && irq<=15 && mext_disabled) {
+				  /*aliased_irq++;*/
+				  goto iirq;
+			  }
+			  if (irq_tab[irq].handler == q40_defhand ) {
+				  printk("handler for IRQ %d not defined\n",irq);
+				  continue; /* ignore uninited INTs :-( */
+			  }
+			  if ( irq_tab[irq].state & IRQ_INPROGRESS ) {
+				  /* some handlers do local_irq_enable() for irq latency reasons, */
+				  /* however reentering an active irq handler is not permitted */
+#ifdef IP_USE_DISABLE
+				  /* in theory this is the better way to do it because it still */
+				  /* lets through eg the serial irqs, unfortunately it crashes */
+				  disable_irq(irq);
+				  disabled=1;
+#else
+				  /*printk("IRQ_INPROGRESS detected for irq %d, disabling - %s disabled\n",irq,disabled ? "already" : "not yet"); */
+				  fp->sr = (((fp->sr) & (~0x700))+0x200);
+				  disabled=1;
+#endif
+				  goto iirq;
+			  }
+			  irq_tab[irq].count++;
+			  irq_tab[irq].state |= IRQ_INPROGRESS;
+			  irq_tab[irq].handler(irq,irq_tab[irq].dev_id,fp);
+			  irq_tab[irq].state &= ~IRQ_INPROGRESS;
+
+			  /* naively enable everything, if that fails then    */
+			  /* this function will be reentered immediately thus */
+			  /* getting another chance to disable the IRQ        */
+
+			  if ( disabled ) {
+#ifdef IP_USE_DISABLE
+				  if (irq>4){
+					  disabled=0;
+					  enable_irq(irq);}
+#else
+				  disabled=0;
+				  /*printk("reenabling irq %d\n",irq); */
+#endif
+			  }
+// used to do 'goto repeat;' here, this delayed bh processing too long
+			  return IRQ_HANDLED;
+		  }
+	  }
+	  if (mer && ccleirq>0 && !aliased_irq)
+		  printk("ISA interrupt from unknown source? EIRQ_REG = %x\n",mer),ccleirq--;
+  }
+ iirq:
+  mir=master_inb(IIRQ_REG);
+  /* should test whether keyboard irq is really enabled, doing it in defhand */
+  if (mir&Q40_IRQ_KEYB_MASK) {
+	  irq_tab[Q40_IRQ_KEYBOARD].count++;
+	  irq_tab[Q40_IRQ_KEYBOARD].handler(Q40_IRQ_KEYBOARD,irq_tab[Q40_IRQ_KEYBOARD].dev_id,fp);
+  }
+  return IRQ_HANDLED;
+}
+
+int show_q40_interrupts (struct seq_file *p, void *v)
+{
+	int i;
+
+	for (i = 0; i <= Q40_IRQ_MAX; i++) {
+		if (irq_tab[i].count)
+		      seq_printf(p, "%sIRQ %02d: %8d  %s%s\n",
+			      (i<=15) ? "ISA-" : "    " ,
+			    i, irq_tab[i].count,
+			    irq_tab[i].devname[0] ? irq_tab[i].devname : "?",
+			    irq_tab[i].handler == q40_defhand ?
+					" (now unassigned)" : "");
+	}
+	return 0;
+}
+
+
+static irqreturn_t q40_defhand (int irq, void *dev_id, struct pt_regs *fp)
+{
+        if (irq!=Q40_IRQ_KEYBOARD)
+	     printk ("Unknown q40 interrupt %d\n", irq);
+	else master_outb(-1,KEYBOARD_UNLOCK_REG);
+	return IRQ_NONE;
+}
+static irqreturn_t default_handler(int lev, void *dev_id, struct pt_regs *regs)
+{
+	printk ("Uninitialised interrupt level %d\n", lev);
+	return IRQ_NONE;
+}
+
+irqreturn_t (*q40_default_handler[SYS_IRQS])(int, void *, struct pt_regs *) = {
+	 [0] = default_handler,
+	 [1] = default_handler,
+	 [2] = default_handler,
+	 [3] = default_handler,
+	 [4] = default_handler,
+	 [5] = default_handler,
+	 [6] = default_handler,
+	 [7] = default_handler
+};
+
+
+void q40_enable_irq (unsigned int irq)
+{
+  if ( irq>=5 && irq<=15 )
+  {
+    mext_disabled--;
+    if (mext_disabled>0)
+	  printk("q40_enable_irq : nested disable/enable\n");
+    if (mext_disabled==0)
+    master_outb(1,EXT_ENABLE_REG);
+    }
+}
+
+
+void q40_disable_irq (unsigned int irq)
+{
+  /* disable ISA IRQs: only do something if the driver has been
+   * verified to be Q40 "compatible" - right now IDE, NE2K.
+   * A driver should not attempt to sleep across disable_irq !!
+   */
+
+  if ( irq>=5 && irq<=15 ) {
+    master_outb(0,EXT_ENABLE_REG);
+    mext_disabled++;
+    if (mext_disabled>1) printk("disable_irq nesting count %d\n",mext_disabled);
+  }
+}
+
+unsigned long q40_probe_irq_on (void)
+{
+  printk("irq probing not working - reconfigure the driver to avoid this\n");
+  return -1;
+}
+int q40_probe_irq_off (unsigned long irqs)
+{
+  return -1;
+}
+/*
+ * Local variables:
+ * compile-command: "m68k-linux-gcc -D__KERNEL__ -I/home/rz/lx/linux-2.2.6/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -ffixed-a2 -m68040   -c -o q40ints.o q40ints.c"
+ * End:
+ */
diff --git a/arch/m68k/sun3/Makefile b/arch/m68k/sun3/Makefile
new file mode 100644
index 0000000..4d4f069
--- /dev/null
+++ b/arch/m68k/sun3/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Linux arch/m68k/sun3 source directory
+#
+
+obj-y	:= sun3_ksyms.o sun3ints.o sun3dvma.o sbus.o idprom.o
+
+obj-$(CONFIG_SUN3) += config.o mmu_emu.o leds.o dvma.o intersil.o
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
new file mode 100644
index 0000000..77d05bc
--- /dev/null
+++ b/arch/m68k/sun3/config.c
@@ -0,0 +1,188 @@
+/*
+ *  linux/arch/m68k/sun3/config.c
+ *
+ *  Copyright (C) 1996,1997 Pekka Pietikäinen
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/oplib.h>
+#include <asm/setup.h>
+#include <asm/contregs.h>
+#include <asm/movs.h>
+#include <asm/pgtable.h>
+#include <asm/sun3-head.h>
+#include <asm/sun3mmu.h>
+#include <asm/rtc.h>
+#include <asm/machdep.h>
+#include <asm/intersil.h>
+#include <asm/irq.h>
+#include <asm/segment.h>
+#include <asm/sun3ints.h>
+
+extern char _text, _end;
+
+char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
+
+extern unsigned long sun3_gettimeoffset(void);
+extern int show_sun3_interrupts (struct seq_file *, void *);
+extern void sun3_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
+extern void sun3_get_model (char* model);
+extern void idprom_init (void);
+extern int sun3_hwclk(int set, struct rtc_time *t);
+
+volatile char* clock_va;
+extern volatile unsigned char* sun3_intreg;
+extern unsigned long availmem;
+unsigned long num_pages;
+
+static int sun3_get_hardware_list(char *buffer)
+{
+
+	int len = 0;
+
+	len += sprintf(buffer + len, "PROM Revision:\t%s\n",
+		       romvec->pv_monid);
+
+	return len;
+
+}
+
+void __init sun3_init(void)
+{
+	unsigned char enable_register;
+	int i;
+
+	m68k_machtype= MACH_SUN3;
+	m68k_cputype = CPU_68020;
+	m68k_fputype = FPU_68881; /* mc68881 actually */
+	m68k_mmutype = MMU_SUN3;
+	clock_va    =          (char *) 0xfe06000;	/* dark  */
+	sun3_intreg = (unsigned char *) 0xfe0a000;	/* magic */
+	sun3_disable_interrupts();
+
+	prom_init((void *)LINUX_OPPROM_BEGVM);
+
+	GET_CONTROL_BYTE(AC_SENABLE,enable_register);
+	enable_register |= 0x50; /* Enable FPU */
+	SET_CONTROL_BYTE(AC_SENABLE,enable_register);
+	GET_CONTROL_BYTE(AC_SENABLE,enable_register);
+
+	/* This code looks suspicious, because it doesn't subtract
+           memory belonging to the kernel from the available space */
+
+
+	memset(sun3_reserved_pmeg, 0, sizeof(sun3_reserved_pmeg));
+
+	/* Reserve important PMEGS */
+	/* FIXME: These should be probed instead of hardcoded */
+
+	for (i=0; i<8; i++)		/* Kernel PMEGs */
+		sun3_reserved_pmeg[i] = 1;
+
+	sun3_reserved_pmeg[247] = 1;	/* ROM mapping  */
+	sun3_reserved_pmeg[248] = 1;	/* AMD Ethernet */
+	sun3_reserved_pmeg[251] = 1;	/* VB area      */
+	sun3_reserved_pmeg[254] = 1;	/* main I/O     */
+
+	sun3_reserved_pmeg[249] = 1;
+	sun3_reserved_pmeg[252] = 1;
+	sun3_reserved_pmeg[253] = 1;
+	set_fs(KERNEL_DS);
+}
+
+/* Without this, Bad Things happen when something calls arch_reset. */
+static void sun3_reboot (void)
+{
+	prom_reboot ("vmlinux");
+}
+
+static void sun3_halt (void)
+{
+	prom_halt ();
+}
+
+/* sun3 bootmem allocation */
+
+void __init sun3_bootmem_alloc(unsigned long memory_start, unsigned long memory_end)
+{
+	unsigned long start_page;
+
+	/* align start/end to page boundaries */
+	memory_start = ((memory_start + (PAGE_SIZE-1)) & PAGE_MASK);
+	memory_end = memory_end & PAGE_MASK;
+
+	start_page = __pa(memory_start) >> PAGE_SHIFT;
+	num_pages = __pa(memory_end) >> PAGE_SHIFT;
+
+	high_memory = (void *)memory_end;
+	availmem = memory_start;
+
+	availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages);
+	availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
+
+	free_bootmem(__pa(availmem), memory_end - (availmem));
+}
+
+
+void __init config_sun3(void)
+{
+	unsigned long memory_start, memory_end;
+
+	printk("ARCH: SUN3\n");
+	idprom_init();
+
+	/* Subtract kernel memory from available memory */
+
+        mach_sched_init      =  sun3_sched_init;
+        mach_init_IRQ        =  sun3_init_IRQ;
+        mach_default_handler = &sun3_default_handler;
+        mach_request_irq     =  sun3_request_irq;
+        mach_free_irq        =  sun3_free_irq;
+	enable_irq	     =  sun3_enable_irq;
+        disable_irq	     =  sun3_disable_irq;
+	mach_process_int     =  sun3_process_int;
+        mach_get_irq_list    =  show_sun3_interrupts;
+        mach_reset           =  sun3_reboot;
+	mach_gettimeoffset   =  sun3_gettimeoffset;
+	mach_get_model	     =  sun3_get_model;
+	mach_hwclk           =  sun3_hwclk;
+	mach_halt	     =  sun3_halt;
+	mach_get_hardware_list = sun3_get_hardware_list;
+#if defined(CONFIG_DUMMY_CONSOLE)
+	conswitchp	     = &dummy_con;
+#endif
+
+	memory_start = ((((int)&_end) + 0x2000) & ~0x1fff);
+// PROM seems to want the last couple of physical pages. --m
+	memory_end   = *(romvec->pv_sun3mem) + PAGE_OFFSET - 2*PAGE_SIZE;
+
+	m68k_num_memory=1;
+        m68k_memory[0].size=*(romvec->pv_sun3mem);
+
+	sun3_bootmem_alloc(memory_start, memory_end);
+}
+
+void __init sun3_sched_init(irqreturn_t (*timer_routine)(int, void *, struct pt_regs *))
+{
+	sun3_disable_interrupts();
+        intersil_clock->cmd_reg=(INTERSIL_RUN|INTERSIL_INT_DISABLE|INTERSIL_24H_MODE);
+        intersil_clock->int_reg=INTERSIL_HZ_100_MASK;
+	intersil_clear();
+        sun3_enable_irq(5);
+        intersil_clock->cmd_reg=(INTERSIL_RUN|INTERSIL_INT_ENABLE|INTERSIL_24H_MODE);
+        sun3_enable_interrupts();
+        intersil_clear();
+}
+
diff --git a/arch/m68k/sun3/dvma.c b/arch/m68k/sun3/dvma.c
new file mode 100644
index 0000000..d2b3093
--- /dev/null
+++ b/arch/m68k/sun3/dvma.c
@@ -0,0 +1,71 @@
+/*
+ * linux/arch/m68k/sun3/dvma.c
+ *
+ * Written by Sam Creasey
+ *
+ * Sun3 IOMMU routines used for dvma accesses.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/list.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/sun3mmu.h>
+#include <asm/dvma.h>
+
+
+static unsigned long ptelist[120];
+
+inline unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
+{
+	unsigned long pte;
+	unsigned long j;
+	pte_t ptep;
+
+	j = *(volatile unsigned long *)kaddr;
+	*(volatile unsigned long *)kaddr = j;
+
+	ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL);
+	pte = pte_val(ptep);
+//		printk("dvma_remap: addr %lx -> %lx pte %08lx len %x\n",
+//		       kaddr, vaddr, pte, len);
+	if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
+		sun3_put_pte(vaddr, pte);
+		ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte;
+	}
+
+	return (vaddr + (kaddr & ~PAGE_MASK));
+
+}
+
+int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
+			      int len)
+{
+
+	unsigned long end;
+	unsigned long vaddr;
+
+	vaddr = dvma_btov(baddr);
+
+	end = vaddr + len;
+
+	while(vaddr < end) {
+		dvma_page(kaddr, vaddr);
+		kaddr += PAGE_SIZE;
+		vaddr += PAGE_SIZE;
+	}
+
+	return 0;
+
+}
+
+void sun3_dvma_init(void)
+{
+
+	memset(ptelist, 0, sizeof(ptelist));
+
+
+}
diff --git a/arch/m68k/sun3/idprom.c b/arch/m68k/sun3/idprom.c
new file mode 100644
index 0000000..02c1fee6
--- /dev/null
+++ b/arch/m68k/sun3/idprom.c
@@ -0,0 +1,129 @@
+/* $Id: idprom.c,v 1.22 1996/11/13 05:09:25 davem Exp $
+ * idprom.c: Routines to load the idprom into kernel addresses and
+ *           interpret the data contained within.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Sun3/3x models added by David Monro (davidm@psrg.cs.usyd.edu.au)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <asm/oplib.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>  /* Fun with Sun released architectures. */
+
+struct idprom *idprom;
+static struct idprom idprom_buffer;
+
+/* Here is the master table of Sun machines which have a meaningful
+ * IDPROM machtype value that we know about.  See asm-m68k/machines.h
+ * for empirical constants.
+ */
+struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
+/* First, Sun3's */
+    { .name = "Sun 3/160 Series",	.id_machtype = (SM_SUN3 | SM_3_160) },
+    { .name = "Sun 3/50",		.id_machtype = (SM_SUN3 | SM_3_50) },
+    { .name = "Sun 3/260 Series",	.id_machtype = (SM_SUN3 | SM_3_260) },
+    { .name = "Sun 3/110 Series",	.id_machtype = (SM_SUN3 | SM_3_110) },
+    { .name = "Sun 3/60",		.id_machtype = (SM_SUN3 | SM_3_60) },
+    { .name = "Sun 3/E",		.id_machtype = (SM_SUN3 | SM_3_E) },
+/* Now, Sun3x's */
+    { .name = "Sun 3/460 Series",	.id_machtype = (SM_SUN3X | SM_3_460) },
+    { .name = "Sun 3/80",		.id_machtype = (SM_SUN3X | SM_3_80) },
+/* Then, Sun4's */
+// { .name = "Sun 4/100 Series",	.id_machtype = (SM_SUN4 | SM_4_110) },
+// { .name = "Sun 4/200 Series",	.id_machtype = (SM_SUN4 | SM_4_260) },
+// { .name = "Sun 4/300 Series",	.id_machtype = (SM_SUN4 | SM_4_330) },
+// { .name = "Sun 4/400 Series",	.id_machtype = (SM_SUN4 | SM_4_470) },
+/* And now, Sun4c's */
+// { .name = "Sun4c SparcStation 1",	.id_machtype = (SM_SUN4C | SM_4C_SS1) },
+// { .name = "Sun4c SparcStation IPC",	.id_machtype = (SM_SUN4C | SM_4C_IPC) },
+// { .name = "Sun4c SparcStation 1+",	.id_machtype = (SM_SUN4C | SM_4C_SS1PLUS) },
+// { .name = "Sun4c SparcStation SLC",	.id_machtype = (SM_SUN4C | SM_4C_SLC) },
+// { .name = "Sun4c SparcStation 2",	.id_machtype = (SM_SUN4C | SM_4C_SS2) },
+// { .name = "Sun4c SparcStation ELC",	.id_machtype = (SM_SUN4C | SM_4C_ELC) },
+// { .name = "Sun4c SparcStation IPX",	.id_machtype = (SM_SUN4C | SM_4C_IPX) },
+/* Finally, early Sun4m's */
+// { .name = "Sun4m SparcSystem600",	.id_machtype = (SM_SUN4M | SM_4M_SS60) },
+// { .name = "Sun4m SparcStation10/20",	.id_machtype = (SM_SUN4M | SM_4M_SS50) },
+// { .name = "Sun4m SparcStation5",	.id_machtype = (SM_SUN4M | SM_4M_SS40) },
+/* One entry for the OBP arch's which are sun4d, sun4e, and newer sun4m's */
+// { .name = "Sun4M OBP based system",	.id_machtype = (SM_SUN4M_OBP | 0x0) }
+};
+
+static void __init display_system_type(unsigned char machtype)
+{
+	register int i;
+
+	for (i = 0; i < NUM_SUN_MACHINES; i++) {
+		if(Sun_Machines[i].id_machtype == machtype) {
+			if (machtype != (SM_SUN4M_OBP | 0x00))
+				printk("TYPE: %s\n", Sun_Machines[i].name);
+			else {
+#if 0
+				prom_getproperty(prom_root_node, "banner-name",
+						 sysname, sizeof(sysname));
+				printk("TYPE: %s\n", sysname);
+#endif
+			}
+			return;
+		}
+	}
+
+	prom_printf("IDPROM: Bogus id_machtype value, 0x%x\n", machtype);
+	prom_halt();
+}
+
+void sun3_get_model(unsigned char* model)
+{
+	register int i;
+
+	for (i = 0; i < NUM_SUN_MACHINES; i++) {
+		if(Sun_Machines[i].id_machtype == idprom->id_machtype) {
+		        strcpy(model, Sun_Machines[i].name);
+			return;
+		}
+	}
+}
+
+
+
+/* Calculate the IDPROM checksum (xor of the data bytes). */
+static unsigned char __init calc_idprom_cksum(struct idprom *idprom)
+{
+	unsigned char cksum, i, *ptr = (unsigned char *)idprom;
+
+	for (i = cksum = 0; i <= 0x0E; i++)
+		cksum ^= *ptr++;
+
+	return cksum;
+}
+
+/* Create a local IDPROM copy, verify integrity, and display information. */
+void __init idprom_init(void)
+{
+	prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));
+
+	idprom = &idprom_buffer;
+
+	if (idprom->id_format != 0x01)  {
+		prom_printf("IDPROM: Unknown format type!\n");
+		prom_halt();
+	}
+
+	if (idprom->id_cksum != calc_idprom_cksum(idprom)) {
+		prom_printf("IDPROM: Checksum failure (nvram=%x, calc=%x)!\n",
+			    idprom->id_cksum, calc_idprom_cksum(idprom));
+		prom_halt();
+	}
+
+	display_system_type(idprom->id_machtype);
+
+	printk("Ethernet address: %x:%x:%x:%x:%x:%x\n",
+		    idprom->id_ethaddr[0], idprom->id_ethaddr[1],
+		    idprom->id_ethaddr[2], idprom->id_ethaddr[3],
+		    idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
+}
diff --git a/arch/m68k/sun3/intersil.c b/arch/m68k/sun3/intersil.c
new file mode 100644
index 0000000..db359d7
--- /dev/null
+++ b/arch/m68k/sun3/intersil.c
@@ -0,0 +1,76 @@
+/*
+ * arch/m68k/sun3/intersil.c
+ *
+ * basic routines for accessing the intersil clock within the sun3 machines
+ *
+ * started 11/12/1999 Sam Creasey
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/rtc.h>
+
+#include <asm/errno.h>
+#include <asm/system.h>
+#include <asm/semaphore.h>
+#include <asm/rtc.h>
+#include <asm/intersil.h>
+
+
+/* bits to set for start/run of the intersil */
+#define STOP_VAL (INTERSIL_STOP | INTERSIL_INT_ENABLE | INTERSIL_24H_MODE)
+#define START_VAL (INTERSIL_RUN | INTERSIL_INT_ENABLE | INTERSIL_24H_MODE)
+
+/* does this need to be implemented? */
+unsigned long sun3_gettimeoffset(void)
+{
+  return 1;
+}
+
+
+/* get/set hwclock */
+
+int sun3_hwclk(int set, struct rtc_time *t)
+{
+	volatile struct intersil_dt *todintersil;
+	unsigned long flags;
+
+        todintersil = (struct intersil_dt *) &intersil_clock->counter;
+
+	local_irq_save(flags);
+
+	intersil_clock->cmd_reg = STOP_VAL;
+
+	/* set or read the clock */
+	if(set) {
+		todintersil->csec = 0;
+		todintersil->hour = t->tm_hour;
+		todintersil->minute = t->tm_min;
+		todintersil->second = t->tm_sec;
+		todintersil->month = t->tm_mon;
+		todintersil->day = t->tm_mday;
+		todintersil->year = t->tm_year - 68;
+		todintersil->weekday = t->tm_wday;
+	} else {
+		/* read clock */
+		/* csec has no corresponding field in struct rtc_time */
+		t->tm_hour = todintersil->hour;
+		t->tm_min = todintersil->minute;
+		t->tm_sec = todintersil->second;
+		t->tm_mon = todintersil->month;
+		t->tm_mday = todintersil->day;
+		t->tm_year = todintersil->year + 68;
+		t->tm_wday = todintersil->weekday;
+	}
+
+	intersil_clock->cmd_reg = START_VAL;
+
+	local_irq_restore(flags);
+
+	return 0;
+
+}
+
diff --git a/arch/m68k/sun3/leds.c b/arch/m68k/sun3/leds.c
new file mode 100644
index 0000000..a3e9484
--- /dev/null
+++ b/arch/m68k/sun3/leds.c
@@ -0,0 +1,13 @@
+#include <asm/contregs.h>
+#include <asm/sun3mmu.h>
+#include <asm/io.h>
+
+void sun3_leds(unsigned char byte)
+{
+	unsigned char dfc;
+
+	GET_DFC(dfc);
+        SET_DFC(FC_CONTROL);
+	SET_CONTROL_BYTE(AC_LEDS,byte);
+	SET_DFC(dfc);
+}
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
new file mode 100644
index 0000000..7a0e3a2
--- /dev/null
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -0,0 +1,427 @@
+/*
+** Tablewalk MMU emulator
+**
+** by Toshiyasu Morita
+**
+** Started 1/16/98 @ 2:22 am
+*/
+
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+
+#include <asm/setup.h>
+#include <asm/traps.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/sun3mmu.h>
+#include <asm/segment.h>
+#include <asm/oplib.h>
+#include <asm/mmu_context.h>
+#include <asm/dvma.h>
+
+extern void prom_reboot (char *) __attribute__ ((__noreturn__));
+
+#undef DEBUG_MMU_EMU
+#define DEBUG_PROM_MAPS
+
+/*
+** Defines
+*/
+
+#define CONTEXTS_NUM		8
+#define SEGMAPS_PER_CONTEXT_NUM 2048
+#define PAGES_PER_SEGMENT	16
+#define PMEGS_NUM		256
+#define PMEG_MASK		0xFF
+
+/*
+** Globals
+*/
+
+unsigned long vmalloc_end;
+EXPORT_SYMBOL(vmalloc_end);
+
+unsigned long pmeg_vaddr[PMEGS_NUM];
+unsigned char pmeg_alloc[PMEGS_NUM];
+unsigned char pmeg_ctx[PMEGS_NUM];
+
+/* pointers to the mm structs for each task in each
+   context. 0xffffffff is a marker for kernel context */
+struct mm_struct *ctx_alloc[CONTEXTS_NUM] = {
+    [0] = (struct mm_struct *)0xffffffff
+};
+
+/* has this context been mmdrop'd? */
+static unsigned char ctx_avail = CONTEXTS_NUM-1;
+
+/* array of pages to be marked off for the rom when we do mem_init later */
+/* 256 pages lets the rom take up to 2mb of physical ram..  I really
+   hope it never wants more than that. */
+unsigned long rom_pages[256];
+
+/* Print a PTE value in symbolic form. For debugging. */
+void print_pte (pte_t pte)
+{
+#if 0
+	/* Verbose version. */
+	unsigned long val = pte_val (pte);
+	printk (" pte=%lx [addr=%lx",
+		val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT);
+	if (val & SUN3_PAGE_VALID)	printk (" valid");
+	if (val & SUN3_PAGE_WRITEABLE)	printk (" write");
+	if (val & SUN3_PAGE_SYSTEM)	printk (" sys");
+	if (val & SUN3_PAGE_NOCACHE)	printk (" nocache");
+	if (val & SUN3_PAGE_ACCESSED)	printk (" accessed");
+	if (val & SUN3_PAGE_MODIFIED)	printk (" modified");
+	switch (val & SUN3_PAGE_TYPE_MASK) {
+		case SUN3_PAGE_TYPE_MEMORY: printk (" memory"); break;
+		case SUN3_PAGE_TYPE_IO:     printk (" io");     break;
+		case SUN3_PAGE_TYPE_VME16:  printk (" vme16");  break;
+		case SUN3_PAGE_TYPE_VME32:  printk (" vme32");  break;
+	}
+	printk ("]\n");
+#else
+	/* Terse version. More likely to fit on a line. */
+	unsigned long val = pte_val (pte);
+	char flags[7], *type;
+
+	flags[0] = (val & SUN3_PAGE_VALID)     ? 'v' : '-';
+	flags[1] = (val & SUN3_PAGE_WRITEABLE) ? 'w' : '-';
+	flags[2] = (val & SUN3_PAGE_SYSTEM)    ? 's' : '-';
+	flags[3] = (val & SUN3_PAGE_NOCACHE)   ? 'x' : '-';
+	flags[4] = (val & SUN3_PAGE_ACCESSED)  ? 'a' : '-';
+	flags[5] = (val & SUN3_PAGE_MODIFIED)  ? 'm' : '-';
+	flags[6] = '\0';
+
+	switch (val & SUN3_PAGE_TYPE_MASK) {
+		case SUN3_PAGE_TYPE_MEMORY: type = "memory"; break;
+		case SUN3_PAGE_TYPE_IO:     type = "io"    ; break;
+		case SUN3_PAGE_TYPE_VME16:  type = "vme16" ; break;
+		case SUN3_PAGE_TYPE_VME32:  type = "vme32" ; break;
+		default: type = "unknown?"; break;
+	}
+
+	printk (" pte=%08lx [%07lx %s %s]\n",
+		val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT, flags, type);
+#endif
+}
+
+/* Print the PTE value for a given virtual address. For debugging. */
+void print_pte_vaddr (unsigned long vaddr)
+{
+	printk (" vaddr=%lx [%02lx]", vaddr, sun3_get_segmap (vaddr));
+	print_pte (__pte (sun3_get_pte (vaddr)));
+}
+
+/*
+ * Initialise the MMU emulator.
+ */
+void mmu_emu_init(unsigned long bootmem_end)
+{
+	unsigned long seg, num;
+	int i,j;
+
+	memset(rom_pages, 0, sizeof(rom_pages));
+	memset(pmeg_vaddr, 0, sizeof(pmeg_vaddr));
+	memset(pmeg_alloc, 0, sizeof(pmeg_alloc));
+	memset(pmeg_ctx, 0, sizeof(pmeg_ctx));
+
+	/* pmeg align the end of bootmem, adding another pmeg,
+	 * later bootmem allocations will likely need it */
+	bootmem_end = (bootmem_end + (2 * SUN3_PMEG_SIZE)) & ~SUN3_PMEG_MASK;
+
+	/* mark all of the pmegs used thus far as reserved */
+	for (i=0; i < __pa(bootmem_end) / SUN3_PMEG_SIZE ; ++i)
+		pmeg_alloc[i] = 2;
+
+
+	/* I'm thinking that most of the top pmegs are going to be
+	   used for something, and we probably shouldn't risk it */
+	for(num = 0xf0; num <= 0xff; num++)
+		pmeg_alloc[num] = 2;
+
+	/* liberate all existing mappings in the rest of kernel space */
+	for(seg = bootmem_end; seg < 0x0f800000; seg += SUN3_PMEG_SIZE) {
+		i = sun3_get_segmap(seg);
+
+		if(!pmeg_alloc[i]) {
+#ifdef DEBUG_MMU_EMU
+			printk("freed: ");
+			print_pte_vaddr (seg);
+#endif
+			sun3_put_segmap(seg, SUN3_INVALID_PMEG);
+		}
+	}
+
+	j = 0;
+	for (num=0, seg=0x0F800000; seg<0x10000000; seg+=16*PAGE_SIZE) {
+		if (sun3_get_segmap (seg) != SUN3_INVALID_PMEG) {
+#ifdef DEBUG_PROM_MAPS
+			for(i = 0; i < 16; i++) {
+				printk ("mapped:");
+				print_pte_vaddr (seg + (i*PAGE_SIZE));
+				break;
+			}
+#endif
+			// the lowest mapping here is the end of our
+			// vmalloc region
+			if(!vmalloc_end)
+				vmalloc_end = seg;
+
+			// mark the segmap alloc'd, and reserve any
+			// of the first 0xbff pages the hardware is
+			// already using...  does any sun3 support > 24mb?
+			pmeg_alloc[sun3_get_segmap(seg)] = 2;
+		}
+	}
+
+	dvma_init();
+
+
+	/* blank everything below the kernel, and we've got the base
+	   mapping to start all the contexts off with... */
+	for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
+		sun3_put_segmap(seg, SUN3_INVALID_PMEG);
+
+	set_fs(MAKE_MM_SEG(3));
+	for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
+		i = sun3_get_segmap(seg);
+		for(j = 1; j < CONTEXTS_NUM; j++)
+			(*(romvec->pv_setctxt))(j, (void *)seg, i);
+	}
+	set_fs(KERNEL_DS);
+
+}
+
+/* erase the mappings for a dead context.  Uses the pg_dir for hints
+   as the pmeg tables proved somewhat unreliable, and unmapping all of
+   TASK_SIZE was much slower and no more stable. */
+/* todo: find a better way to keep track of the pmegs used by a
+   context for when they're cleared */
+void clear_context(unsigned long context)
+{
+     unsigned char oldctx;
+     unsigned long i;
+
+     if(context) {
+	     if(!ctx_alloc[context])
+		     panic("clear_context: context not allocated\n");
+
+	     ctx_alloc[context]->context = SUN3_INVALID_CONTEXT;
+	     ctx_alloc[context] = (struct mm_struct *)0;
+	     ctx_avail++;
+     }
+
+     oldctx = sun3_get_context();
+
+     sun3_put_context(context);
+
+     for(i = 0; i < SUN3_INVALID_PMEG; i++) {
+	     if((pmeg_ctx[i] == context) && (pmeg_alloc[i] == 1)) {
+		     sun3_put_segmap(pmeg_vaddr[i], SUN3_INVALID_PMEG);
+		     pmeg_ctx[i] = 0;
+		     pmeg_alloc[i] = 0;
+		     pmeg_vaddr[i] = 0;
+	     }
+     }
+
+     sun3_put_context(oldctx);
+}
+
+/* gets an empty context.  if full, kills the next context listed to
+   die first */
+/* This context invalidation scheme is, well, totally arbitrary, I'm
+   sure it could be much more intelligent...  but it gets the job done
+   for now without much overhead in making its decision. */
+/* todo: come up with optimized scheme for flushing contexts */
+unsigned long get_free_context(struct mm_struct *mm)
+{
+	unsigned long new = 1;
+	static unsigned char next_to_die = 1;
+
+	if(!ctx_avail) {
+		/* kill someone to get our context */
+		new = next_to_die;
+		clear_context(new);
+		next_to_die = (next_to_die + 1) & 0x7;
+		if(!next_to_die)
+			next_to_die++;
+	} else {
+		while(new < CONTEXTS_NUM) {
+			if(ctx_alloc[new])
+				new++;
+			else
+				break;
+		}
+		// check to make sure one was really free...
+		if(new == CONTEXTS_NUM)
+			panic("get_free_context: failed to find free context");
+	}
+
+	ctx_alloc[new] = mm;
+	ctx_avail--;
+
+	return new;
+}
+
+/*
+ * Dynamically select a `spare' PMEG and use it to map virtual `vaddr' in
+ * `context'. Maintain internal PMEG management structures. This doesn't
+ * actually map the physical address, but does clear the old mappings.
+ */
+//todo: better allocation scheme? but is extra complexity worthwhile?
+//todo: only clear old entries if necessary? how to tell?
+
+inline void mmu_emu_map_pmeg (int context, int vaddr)
+{
+	static unsigned char curr_pmeg = 128;
+	int i;
+
+	/* Round address to PMEG boundary. */
+	vaddr &= ~SUN3_PMEG_MASK;
+
+	/* Find a spare one. */
+	while (pmeg_alloc[curr_pmeg] == 2)
+		++curr_pmeg;
+
+
+#ifdef DEBUG_MMU_EMU
+printk("mmu_emu_map_pmeg: pmeg %x to context %d vaddr %x\n",
+       curr_pmeg, context, vaddr);
+#endif
+
+	/* Invalidate old mapping for the pmeg, if any */
+	if (pmeg_alloc[curr_pmeg] == 1) {
+		sun3_put_context(pmeg_ctx[curr_pmeg]);
+		sun3_put_segmap (pmeg_vaddr[curr_pmeg], SUN3_INVALID_PMEG);
+		sun3_put_context(context);
+	}
+
+	/* Update PMEG management structures. */
+	// don't take pmegs away from the kernel...
+	if(vaddr >= PAGE_OFFSET) {
+		/* map kernel pmegs into all contexts */
+		unsigned char i;
+
+		for(i = 0; i < CONTEXTS_NUM; i++) {
+			sun3_put_context(i);
+			sun3_put_segmap (vaddr, curr_pmeg);
+		}
+		sun3_put_context(context);
+		pmeg_alloc[curr_pmeg] = 2;
+		pmeg_ctx[curr_pmeg] = 0;
+
+	}
+	else {
+		pmeg_alloc[curr_pmeg] = 1;
+		pmeg_ctx[curr_pmeg] = context;
+		sun3_put_segmap (vaddr, curr_pmeg);
+
+	}
+	pmeg_vaddr[curr_pmeg] = vaddr;
+
+	/* Set hardware mapping and clear the old PTE entries. */
+	for (i=0; i<SUN3_PMEG_SIZE; i+=SUN3_PTE_SIZE)
+		sun3_put_pte (vaddr + i, SUN3_PAGE_SYSTEM);
+
+	/* Consider a different one next time. */
+	++curr_pmeg;
+}
+
+/*
+ * Handle a pagefault at virtual address `vaddr'; check if there should be a
+ * page there (specifically, whether the software pagetables indicate that
+ * there is). This is necessary due to the limited size of the second-level
+ * Sun3 hardware pagetables (256 groups of 16 pages). If there should be a
+ * mapping present, we select a `spare' PMEG and use it to create a mapping.
+ * `read_flag' is nonzero for a read fault; zero for a write. Returns nonzero
+ * if we successfully handled the fault.
+ */
+//todo: should we bump minor pagefault counter? if so, here or in caller?
+//todo: possibly inline this into bus_error030 in <asm/buserror.h> ?
+
+// kernel_fault is set when a kernel page couldn't be demand-mapped,
+// and forces another try using the kernel page table.  Basically a
+// hack so that vmalloc() would work correctly.
+
+int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
+{
+	unsigned long segment, offset;
+	unsigned char context;
+	pte_t *pte;
+	pgd_t * crp;
+
+	if(current->mm == NULL) {
+		crp = swapper_pg_dir;
+		context = 0;
+	} else {
+		context = current->mm->context;
+		if(kernel_fault)
+			crp = swapper_pg_dir;
+		else
+			crp = current->mm->pgd;
+	}
+
+#ifdef DEBUG_MMU_EMU
+	printk ("mmu_emu_handle_fault: vaddr=%lx type=%s crp=%p\n",
+		vaddr, read_flag ? "read" : "write", crp);
+#endif
+
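+	/* `segment' indexes the software page table (crp), one entry per
+	   PMEG-sized region; `offset' picks one of the 16 pages within it. */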
+	segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF;
+	offset  = (vaddr >> SUN3_PTE_SIZE_BITS) & 0xF;
+
+#ifdef DEBUG_MMU_EMU
+	printk ("mmu_emu_handle_fault: segment=%lx offset=%lx\n", segment, offset);
+#endif
+
+	pte = (pte_t *) pgd_val (*(crp + segment));
+
+//todo: next line should check for valid pmd properly.
+	if (!pte) {
+//                printk ("mmu_emu_handle_fault: invalid pmd\n");
+                return 0;
+        }
+
+	pte = (pte_t *) __va ((unsigned long)(pte + offset));
+
+	/* Make sure this is a valid page */
+	if (!(pte_val (*pte) & SUN3_PAGE_VALID))
+		return 0;
+
+	/* Make sure there's a pmeg allocated for the page */
+	if (sun3_get_segmap (vaddr&~SUN3_PMEG_MASK) == SUN3_INVALID_PMEG)
+		mmu_emu_map_pmeg (context, vaddr);
+
+	/* Write the pte value to hardware MMU */
+	sun3_put_pte (vaddr&PAGE_MASK, pte_val (*pte));
+
+	/* Update software copy of the pte value */
+// I'm not sure this is necessary. If this is required, we ought to simply
+// copy this out when we reuse the PMEG or at some other convenient time.
+// Doing it here is fairly meaningless, anyway, as we only know about the
+// first access to a given page. --m
+	if (!read_flag) {
+		if (pte_val (*pte) & SUN3_PAGE_WRITEABLE)
+			pte_val (*pte) |= (SUN3_PAGE_ACCESSED
+					   | SUN3_PAGE_MODIFIED);
+		else
+			return 0;	/* Write-protect error. */
+	} else
+		pte_val (*pte) |= SUN3_PAGE_ACCESSED;
+
+#ifdef DEBUG_MMU_EMU
+	printk ("seg:%d crp:%p ->", get_fs().seg, crp);
+	print_pte_vaddr (vaddr);
+	printk ("\n");
+#endif
+
+	return 1;
+}
diff --git a/arch/m68k/sun3/prom/Makefile b/arch/m68k/sun3/prom/Makefile
new file mode 100644
index 0000000..6e48ae2
--- /dev/null
+++ b/arch/m68k/sun3/prom/Makefile
@@ -0,0 +1,7 @@
+# $Id: Makefile,v 1.5 1995/11/25 00:59:48 davem Exp $
+# Makefile for the Sun Boot PROM interface library under
+# Linux.
+#
+
+obj-y := init.o console.o printf.o  misc.o
+#bootstr.o init.o misc.o segment.o console.o printf.o
diff --git a/arch/m68k/sun3/prom/console.c b/arch/m68k/sun3/prom/console.c
new file mode 100644
index 0000000..52c1427
--- /dev/null
+++ b/arch/m68k/sun3/prom/console.c
@@ -0,0 +1,174 @@
+/* $Id: console.c,v 1.10 1996/12/18 06:46:54 tridge Exp $
+ * console.c: Routines that deal with sending and receiving IO
+ *            to/from the current console device using the PROM.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/system.h>
+#include <linux/string.h>
+
+/* Non blocking get character from console input device, returns -1
+ * if no input was taken.  This can be used for polling.
+ */
+int
+prom_nbgetchar(void)
+{
+	int i = -1;
+	unsigned long flags;
+
+	local_irq_save(flags);
+		i = (*(romvec->pv_nbgetchar))();
+	local_irq_restore(flags);
+	return i; /* Ugh, we could spin forever on unsupported proms ;( */
+}
+
+/* Non blocking put character to console device, returns -1 if
+ * unsuccessful.
+ */
+int
+prom_nbputchar(char c)
+{
+	unsigned long flags;
+	int i = -1;
+
+	local_irq_save(flags);
+		i = (*(romvec->pv_nbputchar))(c);
+	local_irq_restore(flags);
+	return i; /* Ugh, we could spin forever on unsupported proms ;( */
+}
+
+/* Blocking version of get character routine above. */
+char
+prom_getchar(void)
+{
+	int character;
+	while((character = prom_nbgetchar()) == -1) ;
+	return (char) character;
+}
+
+/* Blocking version of put character routine above. */
+void
+prom_putchar(char c)
+{
+	while(prom_nbputchar(c) == -1) ;
+	return;
+}
+
+/* Query for input device type */
+#if 0
+enum prom_input_device
+prom_query_input_device()
+{
+	unsigned long flags;
+	int st_p;
+	char propb[64];
+	char *p;
+
+	switch(prom_vers) {
+	case PROM_V0:
+	case PROM_V2:
+	default:
+		switch(*romvec->pv_stdin) {
+		case PROMDEV_KBD:	return PROMDEV_IKBD;
+		case PROMDEV_TTYA:	return PROMDEV_ITTYA;
+		case PROMDEV_TTYB:	return PROMDEV_ITTYB;
+		default:
+			return PROMDEV_I_UNK;
+		};
+	case PROM_V3:
+	case PROM_P1275:
+		local_irq_save(flags);
+		st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdin);
+		__asm__ __volatile__("ld [%0], %%g6\n\t" : :
+				     "r" (&current_set[smp_processor_id()]) :
+				     "memory");
+		local_irq_restore(flags);
+		if(prom_node_has_property(st_p, "keyboard"))
+			return PROMDEV_IKBD;
+		prom_getproperty(st_p, "device_type", propb, sizeof(propb));
+		if(strncmp(propb, "serial", sizeof("serial")))
+			return PROMDEV_I_UNK;
+		prom_getproperty(prom_root_node, "stdin-path", propb, sizeof(propb));
+		p = propb;
+		while(*p) p++; p -= 2;
+		if(p[0] == ':') {
+			if(p[1] == 'a')
+				return PROMDEV_ITTYA;
+			else if(p[1] == 'b')
+				return PROMDEV_ITTYB;
+		}
+		return PROMDEV_I_UNK;
+	case PROM_AP1000:
+		return PROMDEV_I_UNK;
+	};
+}
+#endif
+
+/* Query for output device type */
+
+#if 0
+enum prom_output_device
+prom_query_output_device()
+{
+	unsigned long flags;
+	int st_p;
+	char propb[64];
+	char *p;
+	int propl;
+
+	switch(prom_vers) {
+	case PROM_V0:
+		switch(*romvec->pv_stdin) {
+		case PROMDEV_SCREEN:	return PROMDEV_OSCREEN;
+		case PROMDEV_TTYA:	return PROMDEV_OTTYA;
+		case PROMDEV_TTYB:	return PROMDEV_OTTYB;
+		};
+		break;
+	case PROM_V2:
+	case PROM_V3:
+	case PROM_P1275:
+		local_irq_save(flags);
+		st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdout);
+		__asm__ __volatile__("ld [%0], %%g6\n\t" : :
+				     "r" (&current_set[smp_processor_id()]) :
+				     "memory");
+		local_irq_restore(flags);
+		propl = prom_getproperty(st_p, "device_type", propb, sizeof(propb));
+		if (propl >= 0 && propl == sizeof("display") &&
+			strncmp("display", propb, sizeof("display")) == 0)
+		{
+			return PROMDEV_OSCREEN;
+		}
+		if(prom_vers == PROM_V3) {
+			if(strncmp("serial", propb, sizeof("serial")))
+				return PROMDEV_O_UNK;
+			prom_getproperty(prom_root_node, "stdout-path", propb, sizeof(propb));
+			p = propb;
+			while(*p) p++; p -= 2;
+			if(p[0]==':') {
+				if(p[1] == 'a')
+					return PROMDEV_OTTYA;
+				else if(p[1] == 'b')
+					return PROMDEV_OTTYB;
+			}
+			return PROMDEV_O_UNK;
+		} else {
+			/* This works on SS-2 (an early OpenFirmware) still. */
+			switch(*romvec->pv_stdin) {
+			case PROMDEV_TTYA:	return PROMDEV_OTTYA;
+			case PROMDEV_TTYB:	return PROMDEV_OTTYB;
+			};
+		}
+		break;
+	case PROM_AP1000:
+		return PROMDEV_I_UNK;
+	};
+	return PROMDEV_O_UNK;
+}
+#endif
diff --git a/arch/m68k/sun3/prom/init.c b/arch/m68k/sun3/prom/init.c
new file mode 100644
index 0000000..2e6ae56
--- /dev/null
+++ b/arch/m68k/sun3/prom/init.c
@@ -0,0 +1,89 @@
+/* $Id: init.c,v 1.9 1996/12/18 06:46:55 tridge Exp $
+ * init.c:  Initialize internal variables used by the PROM
+ *          library functions.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+struct linux_romvec *romvec;
+enum prom_major_version prom_vers;
+unsigned int prom_rev, prom_prev;
+
+/* The root node of the prom device tree. */
+int prom_root_node;
+
+/* Pointer to the device tree operations structure. */
+struct linux_nodeops *prom_nodeops;
+
+/* You must call prom_init() before you attempt to use any of the
+ * routines in the prom library.  It returns 0 on success, 1 on
+ * failure.  It gets passed the pointer to the PROM vector.
+ */
+
+extern void prom_meminit(void);
+extern void prom_ranges_init(void);
+
+void __init prom_init(struct linux_romvec *rp)
+{
+#ifdef CONFIG_AP1000
+	extern struct linux_romvec *ap_prom_init(void);
+	rp = ap_prom_init();
+#endif
+
+	romvec = rp;
+#ifndef CONFIG_SUN3
+	switch(romvec->pv_romvers) {
+	case 0:
+		prom_vers = PROM_V0;
+		break;
+	case 2:
+		prom_vers = PROM_V2;
+		break;
+	case 3:
+		prom_vers = PROM_V3;
+		break;
+	case 4:
+		prom_vers = PROM_P1275;
+		prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n");
+		prom_halt();
+		break;
+	case 42: /* why not :-) */
+		prom_vers = PROM_AP1000;
+		break;
+
+	default:
+		prom_printf("PROMLIB: Bad PROM version %d\n",
+			    romvec->pv_romvers);
+		prom_halt();
+		break;
+	};
+
+	prom_rev = romvec->pv_plugin_revision;
+	prom_prev = romvec->pv_printrev;
+	prom_nodeops = romvec->pv_nodeops;
+
+	prom_root_node = prom_getsibling(0);
+	if((prom_root_node == 0) || (prom_root_node == -1))
+		prom_halt();
+
+	if((((unsigned long) prom_nodeops) == 0) ||
+	   (((unsigned long) prom_nodeops) == -1))
+		prom_halt();
+
+	prom_meminit();
+
+	prom_ranges_init();
+#endif
+//	printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n",
+//	       romvec->pv_romvers, prom_rev);
+
+	/* Initialization successful. */
+	return;
+}
diff --git a/arch/m68k/sun3/prom/misc.c b/arch/m68k/sun3/prom/misc.c
new file mode 100644
index 0000000..b88716f
--- /dev/null
+++ b/arch/m68k/sun3/prom/misc.c
@@ -0,0 +1,94 @@
+/* $Id: misc.c,v 1.15 1997/05/14 20:45:00 davem Exp $
+ * misc.c:  Miscellaneous prom functions that don't belong
+ *          anywhere else.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/sun3-head.h>
+#include <asm/idprom.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/movs.h>
+
+/* Reset and reboot the machine with the command 'bcommand'. */
+void
+prom_reboot(char *bcommand)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	(*(romvec->pv_reboot))(bcommand);
+	local_irq_restore(flags);
+}
+
+/* Drop into the prom, with the chance to continue with the 'go'
+ * prom command.
+ */
+void
+prom_cmdline(void)
+{
+}
+
+/* Drop into the prom, but completely terminate the program.
+ * No chance of continuing.
+ */
+void
+prom_halt(void)
+{
+	unsigned long flags;
+again:
+	local_irq_save(flags);
+	(*(romvec->pv_halt))();
+	local_irq_restore(flags);
+	goto again; /* PROM is out to get me -DaveM */
+}
+
+typedef void (*sfunc_t)(void);
+
+/* Get the idprom and stuff it into buffer 'idbuf'.  Returns the
+ * format type.  'num_bytes' is the number of bytes that your idbuf
+ * has space for.  Returns 0xff on error.
+ */
+unsigned char
+prom_get_idprom(char *idbuf, int num_bytes)
+{
+	int i, oldsfc;
+	GET_SFC(oldsfc);
+	SET_SFC(FC_CONTROL);
+	for(i=0;i<num_bytes; i++)
+	{
+		/* There is a problem with the GET_CONTROL_BYTE
+		macro; defining the extra variable
+		gets around it.
+		*/
+		int c;
+		GET_CONTROL_BYTE(SUN3_IDPROM_BASE + i, c);
+		idbuf[i] = c;
+	}
+	SET_SFC(oldsfc);
+	return idbuf[0];
+}
+
+/* Get the major prom version number. */
+int
+prom_version(void)
+{
+	return romvec->pv_romvers;
+}
+
+/* Get the prom plugin-revision. */
+int
+prom_getrev(void)
+{
+	return prom_rev;
+}
+
+/* Get the prom firmware print revision. */
+int
+prom_getprev(void)
+{
+	return prom_prev;
+}
diff --git a/arch/m68k/sun3/prom/printf.c b/arch/m68k/sun3/prom/printf.c
new file mode 100644
index 0000000..e6ee100
--- /dev/null
+++ b/arch/m68k/sun3/prom/printf.c
@@ -0,0 +1,61 @@
+/* $Id: printf.c,v 1.5 1996/04/04 16:31:07 tridge Exp $
+ * printf.c:  Internal prom library printf facility.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+/* This routine is internal to the prom library, no one else should know
+ * about or use it!  It's simple and smelly anyway....
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+#ifdef CONFIG_KGDB
+extern int kgdb_initialized;
+#endif
+
+static char ppbuf[1024];
+
+void
+prom_printf(char *fmt, ...)
+{
+	va_list args;
+	char ch, *bptr;
+	int i;
+
+	va_start(args, fmt);
+
+#ifdef CONFIG_KGDB
+	ppbuf[0] = 'O';
+	i = vsprintf(ppbuf + 1, fmt, args) + 1;
+#else
+	i = vsprintf(ppbuf, fmt, args);
+#endif
+
+	bptr = ppbuf;
+
+#ifdef CONFIG_AP1000
+        ap_write(1,bptr,strlen(bptr));
+#else
+
+#ifdef CONFIG_KGDB
+	if (kgdb_initialized) {
+		printk("kgdb_initialized = %d\n", kgdb_initialized);
+		putpacket(bptr, 1);
+	} else
+#else
+	while((ch = *(bptr++)) != 0) {
+		if(ch == '\n')
+			prom_putchar('\r');
+
+		prom_putchar(ch);
+	}
+#endif
+#endif
+	va_end(args);
+	return;
+}
diff --git a/arch/m68k/sun3/sbus.c b/arch/m68k/sun3/sbus.c
new file mode 100644
index 0000000..babdbfa
--- /dev/null
+++ b/arch/m68k/sun3/sbus.c
@@ -0,0 +1,27 @@
+/*
+ * SBus helper functions
+ *
+ * Sun3 machines don't have an SBus, but many of the devices used here
+ * are also found on Sparc machines with SBus. To avoid a lot of
+ * duplicate code, we provide the necessary glue to make use of the
+ * SBus driver code possible.
+ *
+ * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ */
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/init.h>
+
+int __init sbus_init(void)
+{
+	return 0;
+}
+
+void *sparc_alloc_io (u32 address, void *virtual, int len, char *name,
+                      u32 bus_type, int rdonly)
+{
+	return (void *)address;
+}
+
+subsys_initcall(sbus_init);
diff --git a/arch/m68k/sun3/sun3_ksyms.c b/arch/m68k/sun3/sun3_ksyms.c
new file mode 100644
index 0000000..43e5a9a
--- /dev/null
+++ b/arch/m68k/sun3/sun3_ksyms.c
@@ -0,0 +1,13 @@
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/dvma.h>
+#include <asm/idprom.h>
+
+/*
+ * Add things here when you find the need for it.
+ */
+EXPORT_SYMBOL(dvma_map_align);
+EXPORT_SYMBOL(dvma_unmap);
+EXPORT_SYMBOL(dvma_malloc_align);
+EXPORT_SYMBOL(dvma_free);
+EXPORT_SYMBOL(idprom);
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
new file mode 100644
index 0000000..f04a1d2
--- /dev/null
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -0,0 +1,379 @@
+/*
+ * linux/arch/m68k/mm/sun3dvma.c
+ *
+ * Copyright (C) 2000 Sam Creasey
+ *
+ * Contains common routines for sun3/sun3x DVMA management.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/dvma.h>
+
+#undef DVMA_DEBUG
+
+#ifdef CONFIG_SUN3X
+extern void dvma_unmap_iommu(unsigned long baddr, int len);
+#else
+static inline void dvma_unmap_iommu(unsigned long a, int b)
+{
+}
+#endif
+
+#ifdef CONFIG_SUN3
+extern void sun3_dvma_init(void);
+#endif
+
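+/* One slot per DVMA page; the slot at an allocation's base address records
+   the length handed out, which free_baddr() reads back when unmapping. */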
+unsigned long iommu_use[IOMMU_TOTAL_ENTRIES];
+
+#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)
+
+#define dvma_entry_use(baddr)		(iommu_use[dvma_index(baddr)])
+
+struct hole {
+	unsigned long start;
+	unsigned long end;
+	unsigned long size;
+	struct list_head list;
+};
+
+static struct list_head hole_list;
+static struct list_head hole_cache;
+static struct hole initholes[64];
+
+#ifdef DVMA_DEBUG
+
+static unsigned long dvma_allocs;
+static unsigned long dvma_frees;
+static unsigned long long dvma_alloc_bytes;
+static unsigned long long dvma_free_bytes;
+
+static void print_use(void)
+{
+
+	int i;
+	int j = 0;
+
+	printk("dvma entry usage:\n");
+
+	for(i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
+		if(!iommu_use[i])
+			continue;
+
+		j++;
+
+		printk("dvma entry: %08lx len %08lx\n",
+		       ( i << DVMA_PAGE_SHIFT) + DVMA_START,
+		       iommu_use[i]);
+	}
+
+	printk("%d entries in use total\n", j);
+
+	printk("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
+	printk("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
+	       dvma_free_bytes);
+}
+
+static void print_holes(struct list_head *holes)
+{
+
+	struct list_head *cur;
+	struct hole *hole;
+
+	printk("listing dvma holes\n");
+	list_for_each(cur, holes) {
+		hole = list_entry(cur, struct hole, list);
+
+		if((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
+			continue;
+
+		printk("hole: start %08lx end %08lx size %08lx\n", hole->start, hole->end, hole->size);
+	}
+
+	printk("end of hole listing...\n");
+
+}
+#endif /* DVMA_DEBUG */
+
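+/* Merge holes that have become adjacent (a hole whose end meets the previous
+   entry's start) and return the spare descriptors to hole_cache.  Returns
+   the number of descriptors recycled. */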
+static inline int refill(void)
+{
+
+	struct hole *hole;
+	struct hole *prev = NULL;
+	struct list_head *cur;
+	int ret = 0;
+
+	list_for_each(cur, &hole_list) {
+		hole = list_entry(cur, struct hole, list);
+
+		if(!prev) {
+			prev = hole;
+			continue;
+		}
+
+		if(hole->end == prev->start) {
+			hole->size += prev->size;
+			hole->end = prev->end;
+			list_del(&(prev->list));
+			list_add(&(prev->list), &hole_cache);
+			ret++;
+		}
+
+	}
+
+	return ret;
+}
+
+static inline struct hole *rmcache(void)
+{
+	struct hole *ret;
+
+	if(list_empty(&hole_cache)) {
+		if(!refill()) {
+			printk("out of dvma hole cache!\n");
+			BUG();
+		}
+	}
+
+	ret = list_entry(hole_cache.next, struct hole, list);
+	list_del(&(ret->list));
+
+	return ret;
+
+}
+
+static inline unsigned long get_baddr(int len, unsigned long align)
+{
+
+	struct list_head *cur;
+	struct hole *hole;
+
+	if(list_empty(&hole_list)) {
+#ifdef DVMA_DEBUG
+		printk("out of dvma holes! (printing hole cache)\n");
+		print_holes(&hole_cache);
+		print_use();
+#endif
+		BUG();
+	}
+
+	list_for_each(cur, &hole_list) {
+		unsigned long newlen;
+
+		hole = list_entry(cur, struct hole, list);
+
+		if(align > DVMA_PAGE_SIZE)
+			newlen = len + ((hole->end - len) & (align-1));
+		else
+			newlen = len;
+
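+		/* allocations are carved from the top of the hole; the padding
+		   added to newlen above makes the returned base
+		   (hole->end - newlen) land on the requested alignment */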
+		if(hole->size > newlen) {
+			hole->end -= newlen;
+			hole->size -= newlen;
+			dvma_entry_use(hole->end) = newlen;
+#ifdef DVMA_DEBUG
+			dvma_allocs++;
+			dvma_alloc_bytes += newlen;
+#endif
+			return hole->end;
+		} else if(hole->size == newlen) {
+			list_del(&(hole->list));
+			list_add(&(hole->list), &hole_cache);
+			dvma_entry_use(hole->start) = newlen;
+#ifdef DVMA_DEBUG
+			dvma_allocs++;
+			dvma_alloc_bytes += newlen;
+#endif
+			return hole->start;
+		}
+
+	}
+
+	printk("unable to find dvma hole!\n");
+	BUG();
+	return 0;
+}
+
+static inline int free_baddr(unsigned long baddr)
+{
+
+	unsigned long len;
+	struct hole *hole;
+	struct list_head *cur;
+	unsigned long orig_baddr;
+
+	orig_baddr = baddr;
+	len = dvma_entry_use(baddr);
+	dvma_entry_use(baddr) = 0;
+	baddr &= DVMA_PAGE_MASK;
+	dvma_unmap_iommu(baddr, len);
+
+#ifdef DVMA_DEBUG
+	dvma_frees++;
+	dvma_free_bytes += len;
+#endif
+
+	list_for_each(cur, &hole_list) {
+		hole = list_entry(cur, struct hole, list);
+
+		if(hole->end == baddr) {
+			hole->end += len;
+			hole->size += len;
+			return 0;
+		} else if(hole->start == (baddr + len)) {
+			hole->start = baddr;
+			hole->size += len;
+			return 0;
+		}
+
+	}
+
+	hole = rmcache();
+
+	hole->start = baddr;
+	hole->end = baddr + len;
+	hole->size = len;
+
+//	list_add_tail(&(hole->list), cur);
+	list_add(&(hole->list), cur);
+
+	return 0;
+
+}
+
+void dvma_init(void)
+{
+
+	struct hole *hole;
+	int i;
+
+	INIT_LIST_HEAD(&hole_list);
+	INIT_LIST_HEAD(&hole_cache);
+
+	/* prepare the hole cache */
+	for(i = 0; i < 64; i++)
+		list_add(&(initholes[i].list), &hole_cache);
+
+	hole = rmcache();
+	hole->start = DVMA_START;
+	hole->end = DVMA_END;
+	hole->size = DVMA_SIZE;
+
+	list_add(&(hole->list), &hole_list);
+
+	memset(iommu_use, 0, sizeof(iommu_use));
+
+	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);
+
+#ifdef CONFIG_SUN3
+	sun3_dvma_init();
+#endif
+
+}
+
+inline unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
+{
+
+	unsigned long baddr;
+	unsigned long off;
+
+	if(!len)
+		len = 0x800;
+
+	if(!kaddr || !len) {
+//		printk("error: kaddr %lx len %x\n", kaddr, len);
+//		*(int *)4 = 0;
+		return 0;
+	}
+
+#ifdef DEBUG
+	printk("dvma_map request %08lx bytes from %08lx\n",
+	       len, kaddr);
+#endif
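+	/* keep the sub-page offset, round the region out to whole DVMA pages,
+	   and hand back baddr + off so byte offsets are preserved */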
+	off = kaddr & ~DVMA_PAGE_MASK;
+	kaddr &= PAGE_MASK;
+	len += off;
+	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);
+
+	if(align == 0)
+		align = DVMA_PAGE_SIZE;
+	else
+		align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);
+
+	baddr = get_baddr(len, align);
+//	printk("using baddr %lx\n", baddr);
+
+	if(!dvma_map_iommu(kaddr, baddr, len))
+		return (baddr + off);
+
+	printk("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr, len);
+	BUG();
+	return 0;
+}
+
+void dvma_unmap(void *baddr)
+{
+	unsigned long addr;
+
+	addr = (unsigned long)baddr;
+	/* check if this is a vme mapping */
+	if(!(addr & 0x00f00000))
+		addr |= 0xf00000;
+
+	free_baddr(addr);
+
+	return;
+
+}
+
+
+void *dvma_malloc_align(unsigned long len, unsigned long align)
+{
+	unsigned long kaddr;
+	unsigned long baddr;
+	unsigned long vaddr;
+
+	if(!len)
+		return NULL;
+
+#ifdef DEBUG
+	printk("dvma_malloc request %lx bytes\n", len);
+#endif
+	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);
+
+        if((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
+		return NULL;
+
+	if((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) {
+		free_pages(kaddr, get_order(len));
+		return NULL;
+	}
+
+	vaddr = dvma_btov(baddr);
+
+	if(dvma_map_cpu(kaddr, vaddr, len) < 0) {
+		dvma_unmap((void *)baddr);
+		free_pages(kaddr, get_order(len));
+		return NULL;
+	}
+
+#ifdef DEBUG
+	printk("mapped %08lx bytes %08lx kern -> %08lx bus\n",
+	       len, kaddr, baddr);
+#endif
+
+	return (void *)vaddr;
+
+}
+
+void dvma_free(void *vaddr)
+{
+
+	return;
+
+}
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
new file mode 100644
index 0000000..e62a033
--- /dev/null
+++ b/arch/m68k/sun3/sun3ints.c
@@ -0,0 +1,265 @@
+/*
+ * linux/arch/m68k/sun3/sun3ints.c -- Sun-3(x) Linux interrupt handling code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <asm/segment.h>
+#include <asm/intersil.h>
+#include <asm/oplib.h>
+#include <asm/sun3ints.h>
+#include <linux/seq_file.h>
+
+extern void sun3_leds (unsigned char);
+static irqreturn_t sun3_inthandle(int irq, void *dev_id, struct pt_regs *fp);
+
+void sun3_disable_interrupts(void)
+{
+	sun3_disable_irq(0);
+}
+
+void sun3_enable_interrupts(void)
+{
+	sun3_enable_irq(0);
+}
+
+int led_pattern[8] = {
+       ~(0x80), ~(0x01),
+       ~(0x40), ~(0x02),
+       ~(0x20), ~(0x04),
+       ~(0x10), ~(0x08)
+};
+
+volatile unsigned char* sun3_intreg;
+
+void sun3_insert_irq(irq_node_t **list, irq_node_t *node)
+{
+}
+
+void sun3_delete_irq(irq_node_t **list, void *dev_id)
+{
+}
+
+void sun3_enable_irq(unsigned int irq)
+{
+	*sun3_intreg |=  (1<<irq);
+}
+
+void sun3_disable_irq(unsigned int irq)
+{
+	*sun3_intreg &= ~(1<<irq);
+}
+
+inline void sun3_do_irq(int irq, struct pt_regs *fp)
+{
+	kstat_cpu(0).irqs[SYS_IRQS + irq]++;
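+	/* pulse the enable bit low and back high to clear the pending
+	   interrupt and re-arm it */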
+	*sun3_intreg &= ~(1<<irq);
+	*sun3_intreg |=  (1<<irq);
+}
+
+static irqreturn_t sun3_int7(int irq, void *dev_id, struct pt_regs *fp)
+{
+	sun3_do_irq(irq,fp);
+	if(!(kstat_cpu(0).irqs[SYS_IRQS + irq] % 2000))
+		sun3_leds(led_pattern[(kstat_cpu(0).irqs[SYS_IRQS+irq]%16000)
+			  /2000]);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t sun3_int5(int irq, void *dev_id, struct pt_regs *fp)
+{
+        kstat_cpu(0).irqs[SYS_IRQS + irq]++;
+#ifdef CONFIG_SUN3
+	intersil_clear();
+#endif
+        *sun3_intreg &= ~(1<<irq);
+        *sun3_intreg |=  (1<<irq);
+#ifdef CONFIG_SUN3
+	intersil_clear();
+#endif
+        do_timer(fp);
+#ifndef CONFIG_SMP
+	update_process_times(user_mode(fp));
+#endif
+        if(!(kstat_cpu(0).irqs[SYS_IRQS + irq] % 20))
+                sun3_leds(led_pattern[(kstat_cpu(0).irqs[SYS_IRQS+irq]%160)
+                /20]);
+	return IRQ_HANDLED;
+}
+
+/* handle requested ints, excepting 5 and 7, which always do the same
+   thing */
+irqreturn_t (*sun3_default_handler[SYS_IRQS])(int, void *, struct pt_regs *) = {
+	[0] = sun3_inthandle,
+	[1] = sun3_inthandle,
+	[2] = sun3_inthandle,
+	[3] = sun3_inthandle,
+	[4] = sun3_inthandle,
+	[5] = sun3_int5,
+	[6] = sun3_inthandle,
+	[7] = sun3_int7
+};
+
+static const char *dev_names[SYS_IRQS] = {
+	[5] = "timer",
+	[7] = "int7 handler"
+};
+static void *dev_ids[SYS_IRQS];
+static irqreturn_t (*sun3_inthandler[SYS_IRQS])(int, void *, struct pt_regs *) = {
+	[5] = sun3_int5,
+	[7] = sun3_int7
+};
+static irqreturn_t (*sun3_vechandler[SUN3_INT_VECS])(int, void *, struct pt_regs *);
+static void *vec_ids[SUN3_INT_VECS];
+static const char *vec_names[SUN3_INT_VECS];
+static int vec_ints[SUN3_INT_VECS];
+
+
+int show_sun3_interrupts(struct seq_file *p, void *v)
+{
+	int i;
+
+	for(i = 0; i < (SUN3_INT_VECS-1); i++) {
+		if(sun3_vechandler[i] != NULL) {
+			seq_printf(p, "vec %3d: %10u %s\n", i+64,
+				   vec_ints[i],
+				   (vec_names[i]) ? vec_names[i] :
+				   "sun3_vechandler");
+		}
+	}
+
+	return 0;
+}
+
+static irqreturn_t sun3_inthandle(int irq, void *dev_id, struct pt_regs *fp)
+{
+	if(sun3_inthandler[irq] == NULL)
+		panic ("bad interrupt %d received (id %p)\n",irq, dev_id);
+
+        kstat_cpu(0).irqs[SYS_IRQS + irq]++;
+        *sun3_intreg &= ~(1<<irq);
+
+	sun3_inthandler[irq](irq, dev_ids[irq], fp);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t sun3_vec255(int irq, void *dev_id, struct pt_regs *fp)
+{
+//	intersil_clear();
+	return IRQ_HANDLED;
+}
+
+void sun3_init_IRQ(void)
+{
+	int i;
+
+	*sun3_intreg = 1;
+
+	for(i = 0; i < SYS_IRQS; i++)
+	{
+		if(dev_names[i])
+			cpu_request_irq(i, sun3_default_handler[i], 0,
+					dev_names[i], NULL);
+	}
+
+	for(i = 0; i < 192; i++)
+		sun3_vechandler[i] = NULL;
+
+	sun3_vechandler[191] = sun3_vec255;
+}
+
+int sun3_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
+                      unsigned long flags, const char *devname, void *dev_id)
+{
+
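+	/* levels below SYS_IRQS are the onboard interrupts, dispatched via
+	   sun3_inthandler[]; 64-255 are vectored interrupts stored in
+	   sun3_vechandler[] at vec = irq - 64 */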
+	if(irq < SYS_IRQS) {
+		if(sun3_inthandler[irq] != NULL) {
+			printk("sun3_request_irq: request for irq %d -- already taken!\n", irq);
+			return 1;
+		}
+
+		sun3_inthandler[irq] = handler;
+		dev_ids[irq] = dev_id;
+		dev_names[irq] = devname;
+
+		/* setting devname would be nice */
+		cpu_request_irq(irq, sun3_default_handler[irq], 0, devname,
+				NULL);
+
+		return 0;
+	} else {
+		if((irq >= 64) && (irq <= 255)) {
+		        int vec;
+
+			vec = irq - 64;
+			if(sun3_vechandler[vec] != NULL) {
+				printk("sun3_request_irq: request for vec %d -- already taken!\n", irq);
+				return 1;
+			}
+
+			sun3_vechandler[vec] = handler;
+			vec_ids[vec] = dev_id;
+			vec_names[vec] = devname;
+			vec_ints[vec] = 0;
+
+			return 0;
+		}
+	}
+
+	printk("sun3_request_irq: invalid irq %d\n", irq);
+	return 1;
+
+}
+
+void sun3_free_irq(unsigned int irq, void *dev_id)
+{
+
+	if(irq < SYS_IRQS) {
+		if(sun3_inthandler[irq] == NULL)
+			panic("sun3_free_int: attempt to free unused irq %d\n", irq);
+		if(dev_ids[irq] != dev_id)
+			panic("sun3_free_int: incorrect dev_id for irq %d\n", irq);
+
+		sun3_inthandler[irq] = NULL;
+		return;
+	} else if((irq >= 64) && (irq <= 255)) {
+		int vec;
+
+		vec = irq - 64;
+		if(sun3_vechandler[vec] == NULL)
+			panic("sun3_free_int: attempt to free unused vector %d\n", irq);
+		if(vec_ids[vec] != dev_id)
+			panic("sun3_free_int: incorrect dev_id for vec %d\n", irq);
+
+		sun3_vechandler[vec] = NULL;
+		return;
+	} else {
+		panic("sun3_free_irq: invalid irq %d\n", irq);
+	}
+}
+
+irqreturn_t sun3_process_int(int irq, struct pt_regs *regs)
+{
+
+	if((irq >= 64) && (irq <= 255)) {
+		int vec;
+
+		vec = irq - 64;
+		if(sun3_vechandler[vec] == NULL)
+			panic ("bad interrupt vector %d received\n",irq);
+
+		vec_ints[vec]++;
+		return sun3_vechandler[vec](irq, vec_ids[vec], regs);
+	} else {
+		panic("sun3_process_int: unable to handle interrupt vector %d\n",
+		      irq);
+	}
+}
diff --git a/arch/m68k/sun3x/Makefile b/arch/m68k/sun3x/Makefile
new file mode 100644
index 0000000..be5776d
--- /dev/null
+++ b/arch/m68k/sun3x/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Linux arch/m68k/sun3x source directory
+#
+
+obj-y		:= config.o time.o dvma.o prom.o
diff --git a/arch/m68k/sun3x/config.c b/arch/m68k/sun3x/config.c
new file mode 100644
index 0000000..0ef547f
--- /dev/null
+++ b/arch/m68k/sun3x/config.c
@@ -0,0 +1,99 @@
+/*
+ * Setup kernel for a Sun3x machine
+ *
+ * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ *
+ * based on code from Oliver Jowett <oliver@jowett.manawatu.gen.nz>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/console.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/sun3xprom.h>
+#include <asm/sun3ints.h>
+#include <asm/setup.h>
+#include <asm/oplib.h>
+
+#include "time.h"
+
+volatile char *clock_va;
+extern volatile unsigned char *sun3_intreg;
+
+extern void sun3_get_model(char *model);
+
+void sun3_leds(unsigned int i)
+{
+
+}
+
+static int sun3x_get_hardware_list(char *buffer)
+{
+
+	int len = 0;
+
+	len += sprintf(buffer + len, "PROM Revision:\t%s\n",
+		       romvec->pv_monid);
+
+	return len;
+
+}
+
+/*
+ *  Setup the sun3x configuration info
+ */
+void __init config_sun3x(void)
+{
+
+	sun3x_prom_init();
+
+	mach_get_irq_list	 = show_sun3_interrupts;
+	mach_max_dma_address = 0xffffffff; /* we can DMA anywhere, whee */
+
+	mach_default_handler = &sun3_default_handler;
+	mach_sched_init      = sun3x_sched_init;
+	mach_init_IRQ        = sun3_init_IRQ;
+	enable_irq           = sun3_enable_irq;
+	disable_irq          = sun3_disable_irq;
+	mach_request_irq     = sun3_request_irq;
+	mach_free_irq        = sun3_free_irq;
+	mach_process_int     = sun3_process_int;
+
+	mach_gettimeoffset   = sun3x_gettimeoffset;
+	mach_reset           = sun3x_reboot;
+
+	mach_hwclk           = sun3x_hwclk;
+	mach_get_model       = sun3_get_model;
+	mach_get_hardware_list = sun3x_get_hardware_list;
+
+#ifdef CONFIG_DUMMY_CONSOLE
+	conswitchp	     = &dummy_con;
+#endif
+
+	sun3_intreg = (unsigned char *)SUN3X_INTREG;
+
+	/* only the serial console is known to work anyway... */
+#if 0
+	switch (*(unsigned char *)SUN3X_EEPROM_CONS) {
+	case 0x10:
+		serial_console = 1;
+		conswitchp = NULL;
+		break;
+	case 0x11:
+		serial_console = 2;
+		conswitchp = NULL;
+		break;
+	default:
+		serial_console = 0;
+		conswitchp = &dummy_con;
+		break;
+	}
+#endif
+
+}
+
diff --git a/arch/m68k/sun3x/dvma.c b/arch/m68k/sun3x/dvma.c
new file mode 100644
index 0000000..32e55ad
--- /dev/null
+++ b/arch/m68k/sun3x/dvma.c
@@ -0,0 +1,208 @@
+/*
+ * Virtual DMA allocation
+ *
+ * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ *
+ * 11/26/2000 -- disabled the existing code because it didn't work for
+ * me in 2.4.  Replaced with a significantly more primitive version
+ * similar to the sun3 code.  The old functionality was probably more
+ * desirable, but....   -- Sam Creasey (sammy@oh.verio.com)
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <asm/sun3x.h>
+#include <asm/dvma.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+
+/* IOMMU support */
+
+#define IOMMU_ADDR_MASK            0x03ffe000
+#define IOMMU_CACHE_INHIBIT        0x00000040
+#define IOMMU_FULL_BLOCK           0x00000020
+#define IOMMU_MODIFIED             0x00000010
+#define IOMMU_USED                 0x00000008
+#define IOMMU_WRITE_PROTECT        0x00000004
+#define IOMMU_DT_MASK              0x00000003
+#define IOMMU_DT_INVALID           0x00000000
+#define IOMMU_DT_VALID             0x00000001
+#define IOMMU_DT_BAD               0x00000002
+
+
+static volatile unsigned long *iommu_pte = (unsigned long *)SUN3X_IOMMU;
+
+
+#define dvma_entry_paddr(index)		(iommu_pte[index] & IOMMU_ADDR_MASK)
+#define dvma_entry_vaddr(index,paddr)	((index << DVMA_PAGE_SHIFT) |  \
+					 (paddr & (DVMA_PAGE_SIZE-1)))
+#if 0
+#define dvma_entry_set(index,addr)	(iommu_pte[index] =            \
+					    (addr & IOMMU_ADDR_MASK) | \
+				             IOMMU_DT_VALID | IOMMU_CACHE_INHIBIT)
+#else
+#define dvma_entry_set(index,addr)	(iommu_pte[index] =            \
+					    (addr & IOMMU_ADDR_MASK) | \
+				             IOMMU_DT_VALID)
+#endif
+#define dvma_entry_clr(index)		(iommu_pte[index] = IOMMU_DT_INVALID)
+#define dvma_entry_hash(addr)		((addr >> DVMA_PAGE_SHIFT) ^ \
+					 ((addr & 0x03c00000) >>     \
+						(DVMA_PAGE_SHIFT+4)))
+
+#undef DEBUG
+
+#ifdef DEBUG
+/* code to print out a dvma mapping for debugging purposes */
+void dvma_print (unsigned long dvma_addr)
+{
+
+        unsigned long index;
+
+        index = dvma_addr >> DVMA_PAGE_SHIFT;
+
+        printk("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
+               dvma_entry_paddr(index));
+
+
+}
+#endif
+
+
+/* create a virtual mapping for a page assigned within the IOMMU
+   so that the cpu can reach it easily */
+inline int dvma_map_cpu(unsigned long kaddr,
+			       unsigned long vaddr, int len)
+{
+	pgd_t *pgd;
+	unsigned long end;
+	int ret = 0;
+
+	kaddr &= PAGE_MASK;
+	vaddr &= PAGE_MASK;
+
+	end = PAGE_ALIGN(vaddr + len);
+
+#ifdef DEBUG
+	printk("dvma: mapping kern %08lx to virt %08lx\n",
+	       kaddr, vaddr);
+#endif
+	pgd = pgd_offset_k(vaddr);
+
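+	/* walk the kernel page tables for the DVMA virtual range, allocating
+	   pmd/pte levels as needed, and point every page at the physical
+	   memory behind kaddr */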
+	do {
+		pmd_t *pmd;
+		unsigned long end2;
+
+		if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
+			end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
+		else
+			end2 = end;
+
+		do {
+			pte_t *pte;
+			unsigned long end3;
+
+			if((pte = pte_alloc_kernel(&init_mm, pmd, vaddr)) == NULL) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
+				end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
+			else
+				end3 = end2;
+
+			do {
+#ifdef DEBUG
+				printk("mapping %08lx phys to %08lx\n",
+				       __pa(kaddr), vaddr);
+#endif
+				set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
+						     PAGE_KERNEL));
+				pte++;
+				kaddr += PAGE_SIZE;
+				vaddr += PAGE_SIZE;
+			} while(vaddr < end3);
+
+		} while(vaddr < end2);
+
+	} while(vaddr < end);
+
+	flush_tlb_all();
+
+ out:
+	return ret;
+}
+
+
+inline int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
+				 int len)
+{
+	unsigned long end, index;
+
+	index = baddr >> DVMA_PAGE_SHIFT;
+	end = ((baddr+len) >> DVMA_PAGE_SHIFT);
+
+	if(len & ~DVMA_PAGE_MASK)
+		end++;
+
+	for(; index < end ; index++) {
+//		if(dvma_entry_use(index))
+//			BUG();
+//		printk("mapping pa %lx to ba %lx\n", __pa(kaddr), index << DVMA_PAGE_SHIFT);
+
+		dvma_entry_set(index, __pa(kaddr));
+
+		iommu_pte[index] |= IOMMU_FULL_BLOCK;
+//		dvma_entry_inc(index);
+
+		kaddr += DVMA_PAGE_SIZE;
+	}
+
+#ifdef DEBUG
+	for(index = (baddr >> DVMA_PAGE_SHIFT); index < end; index++)
+		dvma_print(index << DVMA_PAGE_SHIFT);
+#endif
+	return 0;
+
+}
+
+void dvma_unmap_iommu(unsigned long baddr, int len)
+{
+
+	int index, end;
+
+
+	index = baddr >> DVMA_PAGE_SHIFT;
+	end = (DVMA_PAGE_ALIGN(baddr+len) >> DVMA_PAGE_SHIFT);
+
+	for(; index < end ; index++) {
+#ifdef DEBUG
+		printk("freeing bus mapping %08x\n", index << DVMA_PAGE_SHIFT);
+#endif
+#if 0
+		if(!dvma_entry_use(index))
+			printk("dvma_unmap freeing unused entry %04x\n",
+			       index);
+		else
+			dvma_entry_dec(index);
+#endif
+		dvma_entry_clr(index);
+	}
+
+}
+
diff --git a/arch/m68k/sun3x/prom.c b/arch/m68k/sun3x/prom.c
new file mode 100644
index 0000000..574cf06
--- /dev/null
+++ b/arch/m68k/sun3x/prom.c
@@ -0,0 +1,166 @@
+/* Prom access routines for the sun3x */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/bootinfo.h>
+#include <asm/setup.h>
+#include <asm/traps.h>
+#include <asm/sun3xprom.h>
+#include <asm/idprom.h>
+#include <asm/segment.h>
+#include <asm/sun3ints.h>
+#include <asm/openprom.h>
+#include <asm/machines.h>
+
+void (*sun3x_putchar)(int);
+int (*sun3x_getchar)(void);
+int (*sun3x_mayget)(void);
+int (*sun3x_mayput)(int);
+void (*sun3x_prom_reboot)(void);
+e_vector sun3x_prom_abort;
+struct linux_romvec *romvec;
+
+/* prom vector table */
+e_vector *sun3x_prom_vbr;
+
+/* Handle returning to the prom */
+void sun3x_halt(void)
+{
+    unsigned long flags;
+
+    /* Disable interrupts while we mess with things */
+    local_irq_save(flags);
+
+    /* Restore prom vbr */
+    __asm__ volatile ("movec %0,%%vbr" : : "r" ((void*)sun3x_prom_vbr));
+
+    /* Restore prom NMI clock */
+//    sun3x_disable_intreg(5);
+    sun3_enable_irq(7);
+
+    /* Let 'er rip */
+    __asm__ volatile ("trap #14" : : );
+
+    /* Restore everything */
+    sun3_disable_irq(7);
+    sun3_enable_irq(5);
+
+    __asm__ volatile ("movec %0,%%vbr" : : "r" ((void*)vectors));
+    local_irq_restore(flags);
+}
+
+void sun3x_reboot(void)
+{
+    /* This never returns, don't bother saving things */
+    local_irq_disable();
+
+    /* Restore prom vbr */
+    __asm__ volatile ("movec %0,%%vbr" : : "r" ((void*)sun3x_prom_vbr));
+
+    /* Restore prom NMI clock */
+    sun3_disable_irq(5);
+    sun3_enable_irq(7);
+
+    /* Let 'er rip */
+    (*romvec->pv_reboot)("vmlinux");
+}
+
+extern char m68k_debug_device[];
+
+static void sun3x_prom_write(struct console *co, const char *s,
+                             unsigned int count)
+{
+    while (count--) {
+        if (*s == '\n')
+            sun3x_putchar('\r');
+        sun3x_putchar(*s++);
+    }
+}
+
+/* debug console - write-only */
+
+static struct console sun3x_debug = {
+	.name  =	"debug",
+	.write =	sun3x_prom_write,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+void sun3x_prom_init(void)
+{
+    /* Read the vector table */
+
+    sun3x_putchar = *(void (**)(int)) (SUN3X_P_PUTCHAR);
+    sun3x_getchar = *(int (**)(void)) (SUN3X_P_GETCHAR);
+    sun3x_mayget = *(int (**)(void))  (SUN3X_P_MAYGET);
+    sun3x_mayput = *(int (**)(int))   (SUN3X_P_MAYPUT);
+    sun3x_prom_reboot = *(void (**)(void)) (SUN3X_P_REBOOT);
+    sun3x_prom_abort = *(e_vector *)  (SUN3X_P_ABORT);
+    romvec = (struct linux_romvec *)SUN3X_PROM_BASE;
+
+    idprom_init();
+
+    if((idprom->id_machtype & SM_ARCH_MASK) != SM_SUN3X) {
+	    printk("Warning: machine reports strange type %02x\n",
+		   idprom->id_machtype);
+	    printk("Pretending it's a 3/80, but very afraid...\n");
+	    idprom->id_machtype = SM_SUN3X | SM_3_80;
+    }
+
+    /* point trap #14 at abort.
+     * XXX this is futile since we restore the vbr first - oops
+     */
+    vectors[VEC_TRAP14] = sun3x_prom_abort;
+
+    /* If debug=prom was specified, start the debug console */
+
+    if (!strcmp(m68k_debug_device, "prom"))
+        register_console(&sun3x_debug);
+
+
+}
+
+/* some prom functions to export */
+int prom_getintdefault(int node, char *property, int deflt)
+{
+	return deflt;
+}
+
+int prom_getbool (int node, char *prop)
+{
+	return 1;
+}
+
+void prom_printf(char *fmt, ...)
+{
+
+}
+
+void prom_halt (void)
+{
+	sun3x_halt();
+}
+
+/* Get the idprom and stuff it into buffer 'idbuf'.  Returns the
+ * format type.  'num_bytes' is the number of bytes that your idbuf
+ * has space for.  Returns 0xff on error.
+ */
+unsigned char
+prom_get_idprom(char *idbuf, int num_bytes)
+{
+        int i;
+
+	/* make a copy of the idprom structure */
+	for(i = 0; i < num_bytes; i++)
+		idbuf[i] = ((char *)SUN3X_IDPROM)[i];
+
+        return idbuf[0];
+}
diff --git a/arch/m68k/sun3x/time.c b/arch/m68k/sun3x/time.c
new file mode 100644
index 0000000..6f4204f
--- /dev/null
+++ b/arch/m68k/sun3x/time.c
@@ -0,0 +1,103 @@
+/*
+ *  linux/arch/m68k/sun3x/time.c
+ *
+ *  Sun3x-specific time handling
+ */
+
+#include <linux/types.h>
+#include <linux/kd.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+#include <asm/sun3x.h>
+#include <asm/sun3ints.h>
+#include <asm/rtc.h>
+
+#include "time.h"
+
+#define M_CONTROL 0xf8
+#define M_SEC     0xf9
+#define M_MIN     0xfa
+#define M_HOUR    0xfb
+#define M_DAY     0xfc
+#define M_DATE    0xfd
+#define M_MONTH   0xfe
+#define M_YEAR    0xff
+
+#define C_WRITE   0x80
+#define C_READ    0x40
+#define C_SIGN    0x20
+#define C_CALIB   0x1f
+
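+/* The TOD clock is a Mostek part mapped at SUN3X_EEPROM+M_CONTROL; setting
+ * C_WRITE or C_READ in the control register is assumed to hold the time
+ * registers steady for the duration of the access, as on other Mostek chips.
+ */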
+int sun3x_hwclk(int set, struct rtc_time *t)
+{
+	volatile struct mostek_dt *h =
+		(struct mostek_dt *)(SUN3X_EEPROM+M_CONTROL);
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	if(set) {
+		h->csr |= C_WRITE;
+		h->sec = BIN2BCD(t->tm_sec);
+		h->min = BIN2BCD(t->tm_min);
+		h->hour = BIN2BCD(t->tm_hour);
+		h->wday = BIN2BCD(t->tm_wday);
+		h->mday = BIN2BCD(t->tm_mday);
+		h->month = BIN2BCD(t->tm_mon);
+		h->year = BIN2BCD(t->tm_year);
+		h->csr &= ~C_WRITE;
+	} else {
+		h->csr |= C_READ;
+		t->tm_sec = BCD2BIN(h->sec);
+		t->tm_min = BCD2BIN(h->min);
+		t->tm_hour = BCD2BIN(h->hour);
+		t->tm_wday = BCD2BIN(h->wday);
+		t->tm_mday = BCD2BIN(h->mday);
+		t->tm_mon = BCD2BIN(h->month);
+		t->tm_year = BCD2BIN(h->year);
+		h->csr &= ~C_READ;
+	}
+
+	local_irq_restore(flags);
+
+	return 0;
+}
+/* Not much we can do here */
+unsigned long sun3x_gettimeoffset (void)
+{
+    return 0L;
+}
+
+#if 0
+static void sun3x_timer_tick(int irq, void *dev_id, struct pt_regs *regs)
+{
+    void (*vector)(int, void *, struct pt_regs *) = dev_id;
+
+    /* Clear the pending interrupt - pulse the enable line low */
+    disable_irq(5);
+    enable_irq(5);
+
+    vector(irq, NULL, regs);
+}
+#endif
+
+void __init sun3x_sched_init(irqreturn_t (*vector)(int, void *, struct pt_regs *))
+{
+
+	sun3_disable_interrupts();
+
+
+    /* Pulse enable low to get the clock started */
+	sun3_disable_irq(5);
+	sun3_enable_irq(5);
+	sun3_enable_interrupts();
+}
diff --git a/arch/m68k/sun3x/time.h b/arch/m68k/sun3x/time.h
new file mode 100644
index 0000000..e7e43b4
--- /dev/null
+++ b/arch/m68k/sun3x/time.h
@@ -0,0 +1,19 @@
+#ifndef SUN3X_TIME_H
+#define SUN3X_TIME_H
+
+extern int sun3x_hwclk(int set, struct rtc_time *t);
+unsigned long sun3x_gettimeoffset (void);
+void sun3x_sched_init(irqreturn_t (*vector)(int, void *, struct pt_regs *));
+
+struct mostek_dt {
+	volatile unsigned char csr;
+	volatile unsigned char sec;
+	volatile unsigned char min;
+	volatile unsigned char hour;
+	volatile unsigned char wday;
+	volatile unsigned char mday;
+	volatile unsigned char month;
+	volatile unsigned char year;
+};
+
+#endif
diff --git a/arch/m68k/tools/amiga/Makefile b/arch/m68k/tools/amiga/Makefile
new file mode 100644
index 0000000..1134361
--- /dev/null
+++ b/arch/m68k/tools/amiga/Makefile
@@ -0,0 +1,11 @@
+
+CC =		m68k-cbm-amigados-gcc
+CFLAGS =	-Wall -O2
+
+
+All:		dmesg
+
+
+dmesg:		dmesg.c
+		$(CC) $(CFLAGS) -o dmesg dmesg.c -noixemul
+
diff --git a/arch/m68k/tools/amiga/dmesg.c b/arch/m68k/tools/amiga/dmesg.c
new file mode 100644
index 0000000..e892748
--- /dev/null
+++ b/arch/m68k/tools/amiga/dmesg.c
@@ -0,0 +1,69 @@
+/*
+ *  linux/arch/m68k/tools/amiga/dmesg.c -- Retrieve the kernel messages stored
+ *					   in Chip RAM with the kernel command
+ *					   line option `debug=mem'.
+ *
+ *  © Copyright 1996 by Geert Uytterhoeven <geert@linux-m68k.org>
+ *
+ *
+ *  Usage:
+ *
+ *	dmesg
+ *	dmesg <CHIPMEM_END>
+ *
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License.  See the file COPYING in the main directory of the Linux
+ *  distribution for more details.
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+
+#define CHIPMEM_START	0x00000000
+#define CHIPMEM_END	0x00200000	/* overridden by argv[1] */
+
+#define SAVEKMSG_MAGIC1	0x53415645	/* 'SAVE' */
+#define SAVEKMSG_MAGIC2	0x4B4D5347	/* 'KMSG' */
+
+struct savekmsg {
+    u_long magic1;	/* SAVEKMSG_MAGIC1 */
+    u_long magic2;	/* SAVEKMSG_MAGIC2 */
+    u_long magicptr;	/* address of magic1 */
+    u_long size;
+    char data[0];
+};
+
+
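+/*
+ *  Scan Chip RAM on longword boundaries for the savekmsg header left behind
+ *  by `debug=mem' (two magic words plus a self-pointer), then dump the saved
+ *  messages between marker lines.
+ */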
+int main(int argc, char *argv[])
+{
+    u_long start = CHIPMEM_START, end = CHIPMEM_END, p;
+    int found = 0;
+    struct savekmsg *m = NULL;
+
+    if (argc >= 2)
+	end = strtoul(argv[1], NULL, 0);
+    printf("Searching for SAVEKMSG magic...\n");
+    for (p = start; p <= end-sizeof(struct savekmsg); p += 4) {
+	m = (struct savekmsg *)p;
+	if ((m->magic1 == SAVEKMSG_MAGIC1) && (m->magic2 == SAVEKMSG_MAGIC2) &&
+	    (m->magicptr == p)) {
+	    found = 1;
+	    break;
+	}
+    }
+    if (!found)
+	printf("Not found\n");
+    else {
+	printf("Found %ld bytes at 0x%08lx\n", m->size, (u_long)&m->data);
+	puts(">>>>>>>>>>>>>>>>>>>>");
+	fflush(stdout);
+	write(1, &m->data, m->size);
+	fflush(stdout);
+	puts("<<<<<<<<<<<<<<<<<<<<");
+    }
+    return(0);
+}