Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx
new file mode 100644
index 0000000..c2523a3
--- /dev/null
+++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx
@@ -0,0 +1,97 @@
+#
+# AIC79XX 2.5.X Kernel configuration File.
+# $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic79xx#4 $
+#
+config SCSI_AIC79XX
+	tristate "Adaptec AIC79xx U320 support"
+	depends on PCI && SCSI
+	help
+	This driver supports all of Adaptec's Ultra 320 PCI-X
+	based SCSI controllers.
+
+config AIC79XX_CMDS_PER_DEVICE
+	int "Maximum number of TCQ commands per device"
+	depends on SCSI_AIC79XX
+	default "32"
+	---help---
+	Specify the number of commands you would like to allocate per SCSI
+	device when Tagged Command Queueing (TCQ) is enabled on that device.
+
+	This is an upper bound value for the number of tagged transactions
+	to be used for any device.  The aic7xxx driver will automatically
+	vary this number based on device behavior.  For devices with a
+	fixed maximum, the driver will eventually lock to this maximum
+	and display a console message indicating this value.
+
+	Due to resource allocation issues in the Linux SCSI mid-layer, using
+	a high number of commands per device may result in memory allocation
+	failures when many devices are attached to the system.  For this reason,
+	the default is set to 32.  Higher values may result in higher performance
+	on some devices.  The upper bound is 253.  0 disables tagged queueing.
+
+	Per device tag depth can be controlled via the kernel command line
+	"tag_info" option.  See drivers/scsi/aic7xxx/README.aic79xx
+	for details.
+
+config AIC79XX_RESET_DELAY_MS
+	int "Initial bus reset delay in milli-seconds"
+	depends on SCSI_AIC79XX
+	default "15000"
+	---help---
+	The number of milliseconds to delay after an initial bus reset.
+	The bus settle delay following all error recovery actions is
+	dictated by the SCSI layer and is not affected by this value.
+
+	Default: 15000 (15 seconds)
+
+config AIC79XX_BUILD_FIRMWARE
+	bool "Build Adapter Firmware with Kernel Build"
+	depends on SCSI_AIC79XX && !PREVENT_FIRMWARE_BUILD
+	help
+	This option should only be enabled if you are modifying the firmware
+	source to the aic79xx driver and wish to have the generated firmware
+	include files updated during a normal kernel build.  The assembler
+	for the firmware requires lex and yacc or their equivalents, as well
+	as the db v1 library.  You may have to install additional packages
+	or modify the assembler Makefile or the files it includes if your
+	build environment is different than that of the author.
+
+config AIC79XX_ENABLE_RD_STRM
+	bool "Enable Read Streaming for All Targets"
+	depends on SCSI_AIC79XX
+	default n
+	help
+	Read Streaming is a U320 protocol option that should enhance
+	performance.  Early U320 drive firmware actually performs slower
+	with read streaming enabled so it is disabled by default.  Read
+	Streaming can be configured in much the same way as tagged queueing
+	using the "rd_strm" command line option.  See
+	drivers/scsi/aic7xxx/README.aic79xx for details.
+
+config AIC79XX_DEBUG_ENABLE
+	bool "Compile in Debugging Code"
+	depends on SCSI_AIC79XX
+	default y
+	help
+	Compile in aic79xx debugging code that can be useful in diagnosing
+	driver errors.
+
+config AIC79XX_DEBUG_MASK
+	int "Debug code enable mask (16383 for all debugging)"
+	depends on SCSI_AIC79XX
+	default "0"
+	help
+	Bit mask of debug options that is only valid if the
+	CONFIG_AIC79XX_DEBUG_ENABLE option is enabled.  The bits in this mask
+	are defined in the drivers/scsi/aic7xxx/aic79xx.h - search for the
+	variable ahd_debug in that file to find them.
+
+config AIC79XX_REG_PRETTY_PRINT
+	bool "Decode registers during diagnostics"
+	depends on SCSI_AIC79XX
+	default y
+	help
+	Compile in register value tables for the output of expanded register
+	contents in diagnostics.  This makes it much easier to understand debug
+	output without having to refer to a data book and/or the aic79xx.reg
+	file.
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
new file mode 100644
index 0000000..8398e0d
--- /dev/null
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -0,0 +1,100 @@
+#
+# AIC7XXX and AIC79XX 2.5.X Kernel configuration File.
+# $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic7xxx#7 $
+#
+config SCSI_AIC7XXX
+	tristate "Adaptec AIC7xxx Fast -> U160 support (New Driver)"
+	depends on (PCI || EISA) && SCSI
+	---help---
+	This driver supports all of Adaptec's Fast through Ultra 160 PCI
+	based SCSI controllers as well as the aic7770 based EISA and VLB
+	SCSI controllers (the 274x and 284x series).  For AAA and ARO based
+	configurations, only SCSI functionality is provided.
+
+	To compile this driver as a module, choose M here: the
+	module will be called aic7xxx.
+
+config AIC7XXX_CMDS_PER_DEVICE
+	int "Maximum number of TCQ commands per device"
+	depends on SCSI_AIC7XXX
+	default "32"
+	---help---
+	Specify the number of commands you would like to allocate per SCSI
+	device when Tagged Command Queueing (TCQ) is enabled on that device.
+
+	This is an upper bound value for the number of tagged transactions
+	to be used for any device.  The aic7xxx driver will automatically
+	vary this number based on device behavior.  For devices with a
+	fixed maximum, the driver will eventually lock to this maximum
+	and display a console message indicating this value.
+
+	Due to resource allocation issues in the Linux SCSI mid-layer, using
+	a high number of commands per device may result in memory allocation
+	failures when many devices are attached to the system.  For this reason,
+	the default is set to 32.  Higher values may result in higher performance
+	on some devices.  The upper bound is 253.  0 disables tagged queueing.
+
+	Per device tag depth can be controlled via the kernel command line
+	"tag_info" option.  See drivers/scsi/aic7xxx/README.aic7xxx
+	for details.
+
+config AIC7XXX_RESET_DELAY_MS
+	int "Initial bus reset delay in milli-seconds"
+	depends on SCSI_AIC7XXX
+	default "15000"
+	---help---
+	The number of milliseconds to delay after an initial bus reset.
+	The bus settle delay following all error recovery actions is
+	dictated by the SCSI layer and is not affected by this value.
+
+	Default: 15000 (15 seconds)
+
+config AIC7XXX_PROBE_EISA_VL
+	bool "Probe for EISA and VL AIC7XXX Adapters"
+	depends on SCSI_AIC7XXX && EISA
+	help
+	Probe for EISA and VLB Aic7xxx controllers.  In many newer systems,
+	the invasive probes necessary to detect these controllers can cause
+	other devices to fail.  For this reason, the non-PCI probe code is
+	disabled by default.  The current value of this option can be "toggled"
+	via the no_probe kernel command line option.
+
+config AIC7XXX_BUILD_FIRMWARE
+	bool "Build Adapter Firmware with Kernel Build"
+	depends on SCSI_AIC7XXX && !PREVENT_FIRMWARE_BUILD
+	help
+	This option should only be enabled if you are modifying the firmware
+	source to the aic7xxx driver and wish to have the generated firmware
+	include files updated during a normal kernel build.  The assembler
+	for the firmware requires lex and yacc or their equivalents, as well
+	as the db v1 library.  You may have to install additional packages
+	or modify the assembler Makefile or the files it includes if your
+	build environment is different than that of the author.
+
+config AIC7XXX_DEBUG_ENABLE
+	bool "Compile in Debugging Code"
+	depends on SCSI_AIC7XXX
+	default y
+	help
+	Compile in aic7xxx debugging code that can be useful in diagnosing
+	driver errors.
+
+config AIC7XXX_DEBUG_MASK
+	int "Debug code enable mask (2047 for all debugging)"
+	depends on SCSI_AIC7XXX
+	default "0"
+	help
+	Bit mask of debug options that is only valid if the
+	CONFIG_AIC7XXX_DEBUG_ENABLE option is enabled.  The bits in this mask
+	are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the
+	variable ahc_debug in that file to find them.
+
+config AIC7XXX_REG_PRETTY_PRINT
+	bool "Decode registers during diagnostics"
+	depends on SCSI_AIC7XXX
+	default y
+	help
+	Compile in register value tables for the output of expanded register
+	contents in diagnostics.  This makes it much easier to understand debug
+	output without having to refer to a data book and/or the aic7xxx.reg
+	file.
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile
new file mode 100644
index 0000000..9a6ce19
--- /dev/null
+++ b/drivers/scsi/aic7xxx/Makefile
@@ -0,0 +1,99 @@
+#
+# Makefile for the Linux aic7xxx SCSI driver.
+#
+# $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Makefile#8 $
+#
+
+# Let kbuild descend into aicasm when cleaning
+subdir-				+= aicasm
+
+obj-$(CONFIG_SCSI_AIC7XXX)	+= aic7xxx.o
+obj-$(CONFIG_SCSI_AIC79XX)	+= aic79xx.o
+
+# Core Fast -> U160 files
+aic7xxx-y					+= aic7xxx_core.o	\
+						   aic7xxx_93cx6.o
+aic7xxx-$(CONFIG_EISA)				+= aic7770.o
+aic7xxx-$(CONFIG_PCI)				+= aic7xxx_pci.o
+aic7xxx-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT)	+= aic7xxx_reg_print.o
+
+# Platform Specific Fast -> U160 Files
+aic7xxx-y					+= aic7xxx_osm.o	\
+						   aic7xxx_proc.o
+aic7xxx-$(CONFIG_EISA)				+= aic7770_osm.o
+aic7xxx-$(CONFIG_PCI)				+= aic7xxx_osm_pci.o
+
+# Core U320 files
+aic79xx-y					+= aic79xx_core.o	\
+						   aic79xx_pci.o
+aic79xx-$(CONFIG_AIC79XX_REG_PRETTY_PRINT)	+= aic79xx_reg_print.o
+
+# Platform Specific U320 Files
+aic79xx-y					+= aic79xx_osm.o	\
+						   aic79xx_proc.o	\
+						   aic79xx_osm_pci.o
+
+EXTRA_CFLAGS += -Idrivers/scsi
+ifdef WARNINGS_BECOME_ERRORS
+EXTRA_CFLAGS += -Werror
+endif
+#EXTRA_CFLAGS += -g
+
+# Files generated that shall be removed upon make clean
+clean-files := aic7xxx_seq.h aic7xxx_reg.h aic7xxx_reg_print.c
+clean-files += aic79xx_seq.h aic79xx_reg.h aic79xx_reg_print.c
+
+# Dependencies for generated files need to be listed explicitly
+
+$(obj)/aic7xxx_core.o: $(obj)/aic7xxx_seq.h
+$(obj)/aic79xx_core.o: $(obj)/aic79xx_seq.h
+$(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped
+$(obj)/aic7xxx_reg_print.c: $(src)/aic7xxx_reg_print.c_shipped
+
+$(addprefix $(obj)/,$(aic7xxx-y)): $(obj)/aic7xxx_reg.h
+$(addprefix $(obj)/,$(aic79xx-y)): $(obj)/aic79xx_reg.h
+
+aic7xxx-gen-$(CONFIG_AIC7XXX_BUILD_FIRMWARE)	:= $(obj)/aic7xxx_seq.h \
+						   $(obj)/aic7xxx_reg.h
+aic7xxx-gen-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT)	+= $(obj)/aic7xxx_reg_print.c
+
+aicasm-7xxx-opts-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) := \
+	-p $(obj)/aic7xxx_reg_print.c -i aic7xxx_osm.h
+
+ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y)
+# Create a dependency chain in generated files
+# to avoid concurrent invocations of the single
+# rule that builds them all.
+aic7xxx_seq.h: aic7xxx_reg.h
+ifeq ($(CONFIG_AIC7XXX_REG_PRETTY_PRINT),y)
+aic7xxx_reg.h: aic7xxx_reg_print.c
+endif
+$(aic7xxx-gen-y): $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm
+	$(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic7xxx_reg.h \
+			      $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \
+			      $(src)/aic7xxx.seq
+endif
+
+aic79xx-gen-$(CONFIG_AIC79XX_BUILD_FIRMWARE)	:= $(obj)/aic79xx_seq.h \
+						   $(obj)/aic79xx_reg.h
+aic79xx-gen-$(CONFIG_AIC79XX_REG_PRETTY_PRINT)	+= $(obj)/aic79xx_reg_print.c
+
+aicasm-79xx-opts-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) := \
+	-p $(obj)/aic79xx_reg_print.c -i aic79xx_osm.h
+
+ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y)
+# Create a dependency chain in generated files
+# to avoid concurrent invocations of the single
+# rule that builds them all.
+aic79xx_seq.h: aic79xx_reg.h
+ifeq ($(CONFIG_AIC79XX_REG_PRETTY_PRINT),y)
+aic79xx_reg.h: aic79xx_reg_print.c
+endif
+$(aic79xx-gen-y): $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm
+	$(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic79xx_reg.h \
+			      $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \
+			      $(src)/aic79xx.seq
+endif
+
+$(obj)/aicasm/aicasm: $(src)/aicasm/*.[chyl]
+	$(MAKE) -C $(src)/aicasm
diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c
new file mode 100644
index 0000000..92703bb
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7770.c
@@ -0,0 +1,415 @@
+/*
+ * Product specific probe and attach routines for:
+ * 	274X/284X and aic7770 motherboard SCSI controllers
+ *
+ * Copyright (c) 1994-1998, 2000, 2001 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7770.c#32 $
+ *
+ * $FreeBSD$
+ */
+
+#ifdef __linux__
+#include "aic7xxx_osm.h"
+#include "aic7xxx_inline.h"
+#include "aic7xxx_93cx6.h"
+#else
+#include <dev/aic7xxx/aic7xxx_osm.h>
+#include <dev/aic7xxx/aic7xxx_inline.h>
+#include <dev/aic7xxx/aic7xxx_93cx6.h>
+#endif
+
+#define ID_AIC7770	0x04907770
+#define ID_AHA_274x	0x04907771
+#define ID_AHA_284xB	0x04907756 /* BIOS enabled */
+#define ID_AHA_284x	0x04907757 /* BIOS disabled */
+#define	ID_OLV_274x	0x04907782 /* Olivetti OEM */
+#define	ID_OLV_274xD	0x04907783 /* Olivetti OEM (Differential) */
+
+static int aic7770_chip_init(struct ahc_softc *ahc);
+static int aic7770_suspend(struct ahc_softc *ahc);
+static int aic7770_resume(struct ahc_softc *ahc);
+static int aha2840_load_seeprom(struct ahc_softc *ahc);
+static ahc_device_setup_t ahc_aic7770_VL_setup;
+static ahc_device_setup_t ahc_aic7770_EISA_setup;
+static ahc_device_setup_t ahc_aic7770_setup;
+
+struct aic7770_identity aic7770_ident_table[] =
+{
+	{
+		ID_AHA_274x,
+		0xFFFFFFFF,
+		"Adaptec 274X SCSI adapter",
+		ahc_aic7770_EISA_setup
+	},
+	{
+		ID_AHA_284xB,
+		0xFFFFFFFE,
+		"Adaptec 284X SCSI adapter",
+		ahc_aic7770_VL_setup
+	},
+	{
+		ID_AHA_284x,
+		0xFFFFFFFE,
+		"Adaptec 284X SCSI adapter (BIOS Disabled)",
+		ahc_aic7770_VL_setup
+	},
+	{
+		ID_OLV_274x,
+		0xFFFFFFFF,
+		"Adaptec (Olivetti OEM) 274X SCSI adapter",
+		ahc_aic7770_EISA_setup
+	},
+	{
+		ID_OLV_274xD,
+		0xFFFFFFFF,
+		"Adaptec (Olivetti OEM) 274X Differential SCSI adapter",
+		ahc_aic7770_EISA_setup
+	},
+	/* Generic chip probes for devices we don't know 'exactly' */
+	{
+		ID_AIC7770,
+		0xFFFFFFFF,
+		"Adaptec aic7770 SCSI adapter",
+		ahc_aic7770_EISA_setup
+	}
+};
+const int ahc_num_aic7770_devs = NUM_ELEMENTS(aic7770_ident_table);
+
+struct aic7770_identity *
+aic7770_find_device(uint32_t id)
+{
+	struct	aic7770_identity *entry;
+	int	i;
+
+	for (i = 0; i < ahc_num_aic7770_devs; i++) {
+		entry = &aic7770_ident_table[i];
+		if (entry->full_id == (id & entry->id_mask))
+			return (entry);
+	}
+	return (NULL);
+}
+
+int
+aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io)
+{
+	u_long	l;
+	int	error;
+	int	have_seeprom;
+	u_int	hostconf;
+	u_int   irq;
+	u_int	intdef;
+
+	error = entry->setup(ahc);
+	have_seeprom = 0;
+	if (error != 0)
+		return (error);
+
+	error = aic7770_map_registers(ahc, io);
+	if (error != 0)
+		return (error);
+
+	/*
+	 * Before we continue probing the card, ensure that
+	 * its interrupts are *disabled*.  We don't want
+	 * a misstep to hang the machine in an interrupt
+	 * storm.
+	 */
+	ahc_intr_enable(ahc, FALSE);
+
+	ahc->description = entry->name;
+	error = ahc_softc_init(ahc);
+	if (error != 0)
+		return (error);
+
+	ahc->bus_chip_init = aic7770_chip_init;
+	ahc->bus_suspend = aic7770_suspend;
+	ahc->bus_resume = aic7770_resume;
+
+	error = ahc_reset(ahc, /*reinit*/FALSE);
+	if (error != 0)
+		return (error);
+
+	/* Make sure we have a valid interrupt vector */
+	intdef = ahc_inb(ahc, INTDEF);
+	irq = intdef & VECTOR;
+	switch (irq) {
+	case 9:
+	case 10:
+	case 11:
+	case 12:
+	case 14:
+	case 15:
+		break;
+	default:
+		printf("aic7770_config: invalid irq setting %d\n", intdef);
+		return (ENXIO);
+	}
+
+	if ((intdef & EDGE_TRIG) != 0)
+		ahc->flags |= AHC_EDGE_INTERRUPT;
+
+	switch (ahc->chip & (AHC_EISA|AHC_VL)) {
+	case AHC_EISA:
+	{
+		u_int biosctrl;
+		u_int scsiconf;
+		u_int scsiconf1;
+
+		biosctrl = ahc_inb(ahc, HA_274_BIOSCTRL);
+		scsiconf = ahc_inb(ahc, SCSICONF);
+		scsiconf1 = ahc_inb(ahc, SCSICONF + 1);
+
+		/* Get the primary channel information */
+		if ((biosctrl & CHANNEL_B_PRIMARY) != 0)
+			ahc->flags |= 1;
+
+		if ((biosctrl & BIOSMODE) == BIOSDISABLED) {
+			ahc->flags |= AHC_USEDEFAULTS;
+		} else {
+			if ((ahc->features & AHC_WIDE) != 0) {
+				ahc->our_id = scsiconf1 & HWSCSIID;
+				if (scsiconf & TERM_ENB)
+					ahc->flags |= AHC_TERM_ENB_A;
+			} else {
+				ahc->our_id = scsiconf & HSCSIID;
+				ahc->our_id_b = scsiconf1 & HSCSIID;
+				if (scsiconf & TERM_ENB)
+					ahc->flags |= AHC_TERM_ENB_A;
+				if (scsiconf1 & TERM_ENB)
+					ahc->flags |= AHC_TERM_ENB_B;
+			}
+		}
+		if ((ahc_inb(ahc, HA_274_BIOSGLOBAL) & HA_274_EXTENDED_TRANS))
+			ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B;
+		break;
+	}
+	case AHC_VL:
+	{
+		have_seeprom = aha2840_load_seeprom(ahc);
+		break;
+	}
+	default:
+		break;
+	}
+	if (have_seeprom == 0) {
+		free(ahc->seep_config, M_DEVBUF);
+		ahc->seep_config = NULL;
+	}
+
+	/*
+	 * Ensure autoflush is enabled
+	 */
+	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~AUTOFLUSHDIS);
+
+	/* Setup the FIFO threshold and the bus off time */
+	hostconf = ahc_inb(ahc, HOSTCONF);
+	ahc_outb(ahc, BUSSPD, hostconf & DFTHRSH);
+	ahc_outb(ahc, BUSTIME, (hostconf << 2) & BOFF);
+
+	ahc->bus_softc.aic7770_softc.busspd = hostconf & DFTHRSH;
+	ahc->bus_softc.aic7770_softc.bustime = (hostconf << 2) & BOFF;
+
+	/*
+	 * Generic aic7xxx initialization.
+	 */
+	error = ahc_init(ahc);
+	if (error != 0)
+		return (error);
+
+	error = aic7770_map_int(ahc, irq);
+	if (error != 0)
+		return (error);
+
+	ahc_list_lock(&l);
+	/*
+	 * Link this softc in with all other ahc instances.
+	 */
+	ahc_softc_insert(ahc);
+
+	/*
+	 * Enable the board's BUS drivers
+	 */
+	ahc_outb(ahc, BCTL, ENABLE);
+
+	ahc_list_unlock(&l);
+
+	return (0);
+}
+
+static int
+aic7770_chip_init(struct ahc_softc *ahc)
+{
+	ahc_outb(ahc, BUSSPD, ahc->bus_softc.aic7770_softc.busspd);
+	ahc_outb(ahc, BUSTIME, ahc->bus_softc.aic7770_softc.bustime);
+	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~AUTOFLUSHDIS);
+	ahc_outb(ahc, BCTL, ENABLE);
+	return (ahc_chip_init(ahc));
+}
+
+static int
+aic7770_suspend(struct ahc_softc *ahc)
+{
+	return (ahc_suspend(ahc));
+}
+
+static int
+aic7770_resume(struct ahc_softc *ahc)
+{
+	return (ahc_resume(ahc));
+}
+
+/*
+ * Read the 284x SEEPROM.
+ */
+static int
+aha2840_load_seeprom(struct ahc_softc *ahc)
+{
+	struct	seeprom_descriptor sd;
+	struct	seeprom_config *sc;
+	int	have_seeprom;
+	uint8_t scsi_conf;
+
+	sd.sd_ahc = ahc;
+	sd.sd_control_offset = SEECTL_2840;
+	sd.sd_status_offset = STATUS_2840;
+	sd.sd_dataout_offset = STATUS_2840;		
+	sd.sd_chip = C46;
+	sd.sd_MS = 0;
+	sd.sd_RDY = EEPROM_TF;
+	sd.sd_CS = CS_2840;
+	sd.sd_CK = CK_2840;
+	sd.sd_DO = DO_2840;
+	sd.sd_DI = DI_2840;
+	sc = ahc->seep_config;
+
+	if (bootverbose)
+		printf("%s: Reading SEEPROM...", ahc_name(ahc));
+	have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc,
+					/*start_addr*/0, sizeof(*sc)/2);
+
+	if (have_seeprom) {
+
+		if (ahc_verify_cksum(sc) == 0) {
+			if (bootverbose)
+				printf("checksum error\n");
+			have_seeprom = 0;
+		} else if (bootverbose) {
+			printf("done.\n");
+		}
+	}
+
+	if (!have_seeprom) {
+		if (bootverbose)
+			printf("%s: No SEEPROM available\n", ahc_name(ahc));
+		ahc->flags |= AHC_USEDEFAULTS;
+	} else {
+		/*
+		 * Put the data we've collected down into SRAM
+		 * where ahc_init will find it.
+		 */
+		int	 i;
+		int	 max_targ;
+		uint16_t discenable;
+
+		max_targ = (ahc->features & AHC_WIDE) != 0 ? 16 : 8;
+		discenable = 0;
+		for (i = 0; i < max_targ; i++){
+			uint8_t target_settings;
+
+			target_settings = (sc->device_flags[i] & CFXFER) << 4;
+			if (sc->device_flags[i] & CFSYNCH)
+				target_settings |= SOFS;
+			if (sc->device_flags[i] & CFWIDEB)
+				target_settings |= WIDEXFER;
+			if (sc->device_flags[i] & CFDISC)
+				discenable |= (0x01 << i);
+			ahc_outb(ahc, TARG_SCSIRATE + i, target_settings);
+		}
+		ahc_outb(ahc, DISC_DSB, ~(discenable & 0xff));
+		ahc_outb(ahc, DISC_DSB + 1, ~((discenable >> 8) & 0xff));
+
+		ahc->our_id = sc->brtime_id & CFSCSIID;
+
+		scsi_conf = (ahc->our_id & 0x7);
+		if (sc->adapter_control & CFSPARITY)
+			scsi_conf |= ENSPCHK;
+		if (sc->adapter_control & CFRESETB)
+			scsi_conf |= RESET_SCSI;
+
+		if (sc->bios_control & CF284XEXTEND)		
+			ahc->flags |= AHC_EXTENDED_TRANS_A;
+		/* Set SCSICONF info */
+		ahc_outb(ahc, SCSICONF, scsi_conf);
+
+		if (sc->adapter_control & CF284XSTERM)
+			ahc->flags |= AHC_TERM_ENB_A;
+	}
+	return (have_seeprom);
+}
+
+static int
+ahc_aic7770_VL_setup(struct ahc_softc *ahc)
+{
+	int error;
+
+	error = ahc_aic7770_setup(ahc);
+	ahc->chip |= AHC_VL;
+	return (error);
+}
+
+static int
+ahc_aic7770_EISA_setup(struct ahc_softc *ahc)
+{
+	int error;
+
+	error = ahc_aic7770_setup(ahc);
+	ahc->chip |= AHC_EISA;
+	return (error);
+}
+
+static int
+ahc_aic7770_setup(struct ahc_softc *ahc)
+{
+	ahc->channel = 'A';
+	ahc->channel_b = 'B';
+	ahc->chip = AHC_AIC7770;
+	ahc->features = AHC_AIC7770_FE;
+	ahc->bugs |= AHC_TMODE_WIDEODD_BUG;
+	ahc->flags |= AHC_PAGESCBS;
+	ahc->instruction_ram_size = 448;
+	return (0);
+}
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
new file mode 100644
index 0000000..c2b47f2
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -0,0 +1,264 @@
+/*
+ * Linux driver attachment glue for aic7770 based controllers.
+ *
+ * Copyright (c) 2000-2003 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7770_osm.c#14 $
+ */
+
+#include "aic7xxx_osm.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+#include <linux/device.h>
+#include <linux/eisa.h>
+
+#define EISA_MFCTR_CHAR0(ID) (char)(((ID>>26) & 0x1F) | '@')  /* Bits 26-30 */
+#define EISA_MFCTR_CHAR1(ID) (char)(((ID>>21) & 0x1F) | '@')  /* Bits 21-25 */
+#define EISA_MFCTR_CHAR2(ID) (char)(((ID>>16) & 0x1F) | '@')  /* Bits 16-20 */
+#define EISA_PRODUCT_ID(ID)  (short)((ID>>4)  & 0xFFF)        /* Bits  4-15 */
+#define EISA_REVISION_ID(ID) (uint8_t)(ID & 0x0F)             /* Bits  0-3  */
+
+static int aic7770_eisa_dev_probe(struct device *dev);
+static int aic7770_eisa_dev_remove(struct device *dev);
+static struct eisa_driver aic7770_driver = {
+	.driver = {
+		.name   = "aic7xxx",
+		.probe  = aic7770_eisa_dev_probe,
+		.remove = aic7770_eisa_dev_remove,
+	}
+};
+
+typedef  struct device *aic7770_dev_t;
+#else
+#define MINSLOT			1
+#define NUMSLOTS		16
+#define IDOFFSET		0x80
+
+typedef void *aic7770_dev_t;
+#endif
+
+static int aic7770_linux_config(struct aic7770_identity *entry,
+				aic7770_dev_t dev, u_int eisaBase);
+
+int
+ahc_linux_eisa_init(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	struct eisa_device_id *eid;
+	struct aic7770_identity *id;
+	int i;
+
+	if (aic7xxx_probe_eisa_vl == 0)
+		return -ENODEV;
+
+	/*
+	 * Linux requires the EISA IDs to be specified in
+	 * the EISA ID string format.  Perform the conversion
+	 * and setup a table with a NUL terminal entry.
+	 * and set up a table with a NUL terminal entry.
+	aic7770_driver.id_table = malloc(sizeof(struct eisa_device_id) *
+					 (ahc_num_aic7770_devs + 1),
+					 M_DEVBUF, M_NOWAIT);
+	if (aic7770_driver.id_table == NULL)
+		return -ENOMEM;
+
+	for (eid = (struct eisa_device_id *)aic7770_driver.id_table,
+	     id = aic7770_ident_table, i = 0;
+	     i < ahc_num_aic7770_devs; eid++, id++, i++) {
+
+		sprintf(eid->sig, "%c%c%c%03X%01X",
+			EISA_MFCTR_CHAR0(id->full_id),
+			EISA_MFCTR_CHAR1(id->full_id),
+			EISA_MFCTR_CHAR2(id->full_id),
+			EISA_PRODUCT_ID(id->full_id),
+			EISA_REVISION_ID(id->full_id));
+		eid->driver_data = i;
+	}
+	eid->sig[0] = 0;
+
+	return eisa_driver_register(&aic7770_driver);
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) */
+	struct aic7770_identity *entry;
+	u_int  slot;
+	u_int  eisaBase;
+	u_int  i;
+	int ret = -ENODEV;
+
+	if (aic7xxx_probe_eisa_vl == 0)
+		return ret;
+
+	eisaBase = 0x1000 + AHC_EISA_SLOT_OFFSET;
+	for (slot = 1; slot < NUMSLOTS; eisaBase+=0x1000, slot++) {
+		uint32_t eisa_id;
+		size_t	 id_size;
+
+		if (request_region(eisaBase, AHC_EISA_IOSIZE, "aic7xxx") == 0)
+			continue;
+
+		eisa_id = 0;
+		id_size = sizeof(eisa_id);
+		for (i = 0; i < 4; i++) {
+			/* VL cards require priming */
+			outb(0x80 + i, eisaBase + IDOFFSET);
+			eisa_id |= inb(eisaBase + IDOFFSET + i)
+				   << ((id_size-i-1) * 8);
+		}
+		release_region(eisaBase, AHC_EISA_IOSIZE);
+		if (eisa_id & 0x80000000)
+			continue;  /* no EISA card in slot */
+
+		entry = aic7770_find_device(eisa_id);
+		if (entry != NULL) {
+			aic7770_linux_config(entry, NULL, eisaBase);
+			ret = 0;
+		}
+	}
+	return ret;
+#endif
+}
+
+void
+ahc_linux_eisa_exit(void)
+{
+	if (aic7xxx_probe_eisa_vl != 0 && aic7770_driver.id_table != NULL) {
+		eisa_driver_unregister(&aic7770_driver);
+		free(aic7770_driver.id_table, M_DEVBUF);
+	}
+}
+
+static int
+aic7770_linux_config(struct aic7770_identity *entry, aic7770_dev_t dev,
+		     u_int eisaBase)
+{
+	struct	ahc_softc *ahc;
+	char	buf[80];
+	char   *name;
+	int	error;
+
+	/*
+	 * Allocate a softc for this card and
+	 * set it up for attachment by our
+	 * common detect routine.
+	 */
+	sprintf(buf, "ahc_eisa:%d", eisaBase >> 12);
+	name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
+	if (name == NULL)
+		return (ENOMEM);
+	strcpy(name, buf);
+	ahc = ahc_alloc(&aic7xxx_driver_template, name);
+	if (ahc == NULL)
+		return (ENOMEM);
+	error = aic7770_config(ahc, entry, eisaBase);
+	if (error != 0) {
+		ahc->bsh.ioport = 0;
+		ahc_free(ahc);
+		return (error);
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	dev->driver_data = (void *)ahc;
+	if (aic7xxx_detect_complete)
+		error = ahc_linux_register_host(ahc, &aic7xxx_driver_template);
+#endif
+	return (error);
+}
+
+int
+aic7770_map_registers(struct ahc_softc *ahc, u_int port)
+{
+	/*
+	 * Lock out other contenders for our i/o space.
+	 */
+	if (request_region(port, AHC_EISA_IOSIZE, "aic7xxx") == 0)
+		return (ENOMEM);
+	ahc->tag = BUS_SPACE_PIO;
+	ahc->bsh.ioport = port;
+	return (0);
+}
+
+int
+aic7770_map_int(struct ahc_softc *ahc, u_int irq)
+{
+	int error;
+	int shared;
+
+	shared = 0;
+	if ((ahc->flags & AHC_EDGE_INTERRUPT) == 0)
+		shared = SA_SHIRQ;
+
+	error = request_irq(irq, ahc_linux_isr, shared, "aic7xxx", ahc);
+	if (error == 0)
+		ahc->platform_data->irq = irq;
+	
+	return (-error);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+static int
+aic7770_eisa_dev_probe(struct device *dev)
+{
+	struct eisa_device *edev;
+
+	edev = to_eisa_device(dev);
+	return (aic7770_linux_config(aic7770_ident_table + edev->id.driver_data,
+				    dev, edev->base_addr+AHC_EISA_SLOT_OFFSET));
+}
+
+static int
+aic7770_eisa_dev_remove(struct device *dev)
+{
+	struct ahc_softc *ahc;
+	u_long l;
+
+	/*
+	 * We should be able to just perform
+	 * the free directly, but check our
+	 * list for extra sanity.
+	 */
+	ahc_list_lock(&l);
+	ahc = ahc_find_softc((struct ahc_softc *)dev->driver_data);
+	if (ahc != NULL) {
+		u_long s;
+
+		ahc_lock(ahc, &s);
+		ahc_intr_enable(ahc, FALSE);
+		ahc_unlock(ahc, &s);
+		ahc_free(ahc);
+	}
+	ahc_list_unlock(&l);
+
+	return (0);
+}
+#endif
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
new file mode 100644
index 0000000..fd4b2f3
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -0,0 +1,1537 @@
+/*
+ * Core definitions and data structures shareable across OS platforms.
+ *
+ * Copyright (c) 1994-2002 Justin T. Gibbs.
+ * Copyright (c) 2000-2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.h#95 $
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _AIC79XX_H_
+#define _AIC79XX_H_
+
+/* Register Definitions */
+#include "aic79xx_reg.h"
+
+/************************* Forward Declarations *******************************/
+struct ahd_platform_data;
+struct scb_platform_data;
+
+/****************************** Useful Macros *********************************/
+#ifndef MAX
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array))
+
+#define ALL_CHANNELS '\0'
+#define ALL_TARGETS_MASK 0xFFFF
+#define INITIATOR_WILDCARD	(~0)
+#define	SCB_LIST_NULL		0xFF00
+#define	SCB_LIST_NULL_LE	(ahd_htole16(SCB_LIST_NULL))
+#define QOUTFIFO_ENTRY_VALID 0x8000
+#define QOUTFIFO_ENTRY_VALID_LE (ahd_htole16(0x8000))
+#define SCBID_IS_NULL(scbid) (((scbid) & 0xFF00 ) == SCB_LIST_NULL)
+
+#define SCSIID_TARGET(ahd, scsiid)	\
+	(((scsiid) & TID) >> TID_SHIFT)
+#define SCSIID_OUR_ID(scsiid)		\
+	((scsiid) & OID)
+#define SCSIID_CHANNEL(ahd, scsiid) ('A')
+#define	SCB_IS_SCSIBUS_B(ahd, scb) (0)
+#define	SCB_GET_OUR_ID(scb) \
+	SCSIID_OUR_ID((scb)->hscb->scsiid)
+#define	SCB_GET_TARGET(ahd, scb) \
+	SCSIID_TARGET((ahd), (scb)->hscb->scsiid)
+#define	SCB_GET_CHANNEL(ahd, scb) \
+	SCSIID_CHANNEL(ahd, (scb)->hscb->scsiid)
+#define	SCB_GET_LUN(scb) \
+	((scb)->hscb->lun)
+#define SCB_GET_TARGET_OFFSET(ahd, scb)	\
+	SCB_GET_TARGET(ahd, scb)
+#define SCB_GET_TARGET_MASK(ahd, scb) \
+	(0x01 << (SCB_GET_TARGET_OFFSET(ahd, scb)))
+#ifdef AHD_DEBUG
+#define SCB_IS_SILENT(scb)					\
+	((ahd_debug & AHD_SHOW_MASKED_ERRORS) == 0		\
+      && (((scb)->flags & SCB_SILENT) != 0))
+#else
+#define SCB_IS_SILENT(scb)					\
+	(((scb)->flags & SCB_SILENT) != 0)
+#endif
+/*
+ * TCLs have the following format: TTTTLLLLLLLL
+ */
+#define TCL_TARGET_OFFSET(tcl) \
+	((((tcl) >> 4) & TID) >> 4)
+#define TCL_LUN(tcl) \
+	(tcl & (AHD_NUM_LUNS - 1))
+#define BUILD_TCL(scsiid, lun) \
+	((lun) | (((scsiid) & TID) << 4))
+#define BUILD_TCL_RAW(target, channel, lun) \
+	((lun) | ((target) << 8))
+
+#define SCB_GET_TAG(scb) \
+	ahd_le16toh(scb->hscb->tag)
+
+#ifndef	AHD_TARGET_MODE
+#undef	AHD_TMODE_ENABLE
+#define	AHD_TMODE_ENABLE 0
+#endif
+
+#define AHD_BUILD_COL_IDX(target, lun)				\
+	(((lun) << 4) | target)
+
+#define AHD_GET_SCB_COL_IDX(ahd, scb)				\
+	((SCB_GET_LUN(scb) << 4) | SCB_GET_TARGET(ahd, scb))
+
+#define AHD_SET_SCB_COL_IDX(scb, col_idx)				\
+do {									\
+	(scb)->hscb->scsiid = ((col_idx) << TID_SHIFT) & TID;		\
+	(scb)->hscb->lun = ((col_idx) >> 4) & (AHD_NUM_LUNS_NONPKT-1);	\
+} while (0)
+
+#define AHD_COPY_SCB_COL_IDX(dst, src)				\
+do {								\
+	dst->hscb->scsiid = src->hscb->scsiid;			\
+	dst->hscb->lun = src->hscb->lun;			\
+} while (0)
+
+#define	AHD_NEVER_COL_IDX 0xFFFF
+
+/**************************** Driver Constants ********************************/
+/*
+ * The maximum number of supported targets.
+ */
+#define AHD_NUM_TARGETS 16
+
+/*
+ * The maximum number of supported luns.
+ * The identify message only supports 64 luns in non-packetized transfers.
+ * You can have 2^64 luns when information unit transfers are enabled,
+ * but until we see a need to support that many, we support 256.
+ */
+#define AHD_NUM_LUNS_NONPKT 64
+#define AHD_NUM_LUNS 256
+
+/*
+ * The maximum transfer per S/G segment.
+ */
+#define AHD_MAXTRANSFER_SIZE	 0x00ffffff	/* limited by 24bit counter */
+
+/*
+ * The maximum amount of SCB storage in hardware on a controller.
+ * This value represents an upper bound.  Due to software design,
+ * we may not be able to use this number.
+ */
+#define AHD_SCB_MAX	512
+
+/*
+ * The maximum number of concurrent transactions supported per driver instance.
+ * Sequencer Control Blocks (SCBs) store per-transaction information.
+ */
+#define AHD_MAX_QUEUE	AHD_SCB_MAX
+
+/*
+ * Define the size of our QIN and QOUT FIFOs.  They must be a power of 2
+ * in size and accommodate as many transactions as can be queued concurrently.
+ */
+#define	AHD_QIN_SIZE	AHD_MAX_QUEUE
+#define	AHD_QOUT_SIZE	AHD_MAX_QUEUE
+
+#define AHD_QIN_WRAP(x) ((x) & (AHD_QIN_SIZE-1))
+/*
+ * The maximum amount of SCB storage we allocate in host memory.
+ */
+#define AHD_SCB_MAX_ALLOC AHD_MAX_QUEUE
+
+/*
+ * Ring Buffer of incoming target commands.
+ * We allocate 256 to simplify the logic in the sequencer
+ * by using the natural wrap point of an 8bit counter.
+ */
+#define AHD_TMODE_CMDS	256
+
+/* Reset line assertion time in us */
+#define AHD_BUSRESET_DELAY	25
+
+/******************* Chip Characteristics/Operating Settings  *****************/
+/*
+ * Chip Type
+ * The chip order is from least sophisticated to most sophisticated.
+ */
+typedef enum {
+	AHD_NONE	= 0x0000,
+	AHD_CHIPID_MASK	= 0x00FF,
+	AHD_AIC7901	= 0x0001,
+	AHD_AIC7902	= 0x0002,
+	AHD_AIC7901A	= 0x0003,
+	AHD_PCI		= 0x0100,	/* Bus type PCI */
+	AHD_PCIX	= 0x0200,	/* Bus type PCIX */
+	AHD_BUS_MASK	= 0x0F00
+} ahd_chip;
+
+/*
+ * Features available in each chip type.
+ */
+typedef enum {
+	AHD_FENONE		= 0x00000,
+	AHD_WIDE  		= 0x00001,/* Wide Channel */
+	AHD_MULTI_FUNC		= 0x00100,/* Multi-Function/Channel Device */
+	AHD_TARGETMODE		= 0x01000,/* Has tested target mode support */
+	AHD_MULTIROLE		= 0x02000,/* Space for two roles at a time */
+	AHD_RTI			= 0x04000,/* Retained Training Support */
+	AHD_NEW_IOCELL_OPTS	= 0x08000,/* More Signal knobs in the IOCELL */
+	AHD_NEW_DFCNTRL_OPTS	= 0x10000,/* SCSIENWRDIS bit */
+	AHD_FAST_CDB_DELIVERY	= 0x20000,/* CDB acks released to Output Sync */
+	AHD_REMOVABLE		= 0x00000,/* Hot-Swap supported - None so far*/
+	AHD_AIC7901_FE		= AHD_FENONE,
+	AHD_AIC7901A_FE		= AHD_FENONE,
+	AHD_AIC7902_FE		= AHD_MULTI_FUNC
+} ahd_feature;
+
+/*
+ * Bugs in the silicon that we work around in software.
+ */
+typedef enum {
+	AHD_BUGNONE		= 0x0000,
+	/*
+	 * Rev A hardware fails to update LAST/CURR/NEXTSCB
+	 * correctly in certain packetized selection cases.
+	 */
+	AHD_SENT_SCB_UPDATE_BUG	= 0x0001,
+	/* The wrong SCB is accessed to check the abort pending bit. */
+	AHD_ABORT_LQI_BUG	= 0x0002,
+	/* Packetized bitbucket crosses packet boundaries. */
+	AHD_PKT_BITBUCKET_BUG	= 0x0004,
+	/* The selection timer runs twice as long as its setting. */
+	AHD_LONG_SETIMO_BUG	= 0x0008,
+	/* The Non-LQ CRC error status is delayed until phase change. */
+	AHD_NLQICRC_DELAYED_BUG	= 0x0010,
+	/* The chip must be reset for all outgoing bus resets.  */
+	AHD_SCSIRST_BUG		= 0x0020,
+	/* Some PCIX fields must be saved and restored across chip reset. */
+	AHD_PCIX_CHIPRST_BUG	= 0x0040,
+	/* MMAPIO is not functional in PCI-X mode.  */
+	AHD_PCIX_MMAPIO_BUG	= 0x0080,
+	/* Reads to SCBRAM fail to reset the discard timer. */
+	AHD_PCIX_SCBRAM_RD_BUG  = 0x0100,
+	/* Bug workarounds that can be disabled on non-PCIX busses. */
+	AHD_PCIX_BUG_MASK	= AHD_PCIX_CHIPRST_BUG
+				| AHD_PCIX_MMAPIO_BUG
+				| AHD_PCIX_SCBRAM_RD_BUG,
+	/*
+	 * LQOSTOP0 status set even for forced selections with ATN
+	 * to perform non-packetized message delivery.
+	 */
+	AHD_LQO_ATNO_BUG	= 0x0200,
+	/* FIFO auto-flush does not always trigger.  */
+	AHD_AUTOFLUSH_BUG	= 0x0400,
+	/* The CLRLQO registers are not self-clearing. */
+	AHD_CLRLQO_AUTOCLR_BUG	= 0x0800,
+	/* The PACKETIZED status bit refers to the previous connection. */
+	AHD_PKTIZED_STATUS_BUG  = 0x1000,
+	/* "Short Luns" are not placed into outgoing LQ packets correctly. */
+	AHD_PKT_LUN_BUG		= 0x2000,
+	/*
+	 * Only the FIFO allocated to the non-packetized connection may
+	 * be in use during a non-packetized connection.
+	 */
+	AHD_NONPACKFIFO_BUG	= 0x4000,
+	/*
+	 * Writing to a DFF SCBPTR register may fail if concurrent with
+	 * a hardware write to the other DFF SCBPTR register.  This is
+	 * not currently a concern in our sequencer since all chips with
+	 * this bug have the AHD_NONPACKFIFO_BUG and all writes of concern
+	 * occur in non-packetized connections.
+	 */
+	AHD_MDFF_WSCBPTR_BUG	= 0x8000,
+	/* SGHADDR updates are slow. */
+	AHD_REG_SLOW_SETTLE_BUG	= 0x10000,
+	/*
+	 * Changing the MODE_PTR coincident with an interrupt that
+	 * switches to a different mode will cause the interrupt to
+	 * be in the mode written outside of interrupt context.
+	 */
+	AHD_SET_MODE_BUG	= 0x20000,
+	/* Non-packetized busfree revision does not work. */
+	AHD_BUSFREEREV_BUG	= 0x40000,
+	/*
+	 * Paced transfers are indicated with a non-standard PPR
+	 * option bit in the neg table, 160MHz is indicated by
+	 * sync factor 0x7, and the offset is off by a factor of 2.
+	 */
+	AHD_PACED_NEGTABLE_BUG	= 0x80000,
+	/* LQOOVERRUN false positives. */
+	AHD_LQOOVERRUN_BUG	= 0x100000,
+	/*
+	 * Controller write to INTSTAT will lose to a host
+	 * write to CLRINT.
+	 */
+	AHD_INTCOLLISION_BUG	= 0x200000,
+	/*
+	 * The GEM318 violates the SCSI spec by not waiting
+	 * the mandated bus settle delay between phase changes
+	 * in some situations.  Some aic79xx chip revs. are more
+	 * strict in this regard and will treat REQ assertions
+	 * that fall within the bus settle delay window as
+	 * glitches.  This flag tells the firmware to tolerate
+	 * early REQ assertions.
+	 */
+	AHD_EARLY_REQ_BUG	= 0x400000,
+	/*
+	 * The LED does not stay on long enough in packetized modes.
+	 */
+	AHD_FAINT_LED_BUG	= 0x800000
+} ahd_bug;
+
+/*
+ * Configuration specific settings.
+ * The driver determines these settings by probing the
+ * chip/controller's configuration.
+ */
+typedef enum {
+	AHD_FNONE	      = 0x00000,
+	AHD_BOOT_CHANNEL      = 0x00001,/* We were set as the boot channel. */
+	AHD_USEDEFAULTS	      = 0x00004,/*
+					 * For cards without a seeprom
+					 * or a BIOS to initialize the chip's
+					 * SRAM, we use the default target
+					 * settings.
+					 */
+	AHD_SEQUENCER_DEBUG   = 0x00008,
+	AHD_RESET_BUS_A	      = 0x00010,
+	AHD_EXTENDED_TRANS_A  = 0x00020,
+	AHD_TERM_ENB_A	      = 0x00040,
+	AHD_SPCHK_ENB_A	      = 0x00080,
+	AHD_STPWLEVEL_A	      = 0x00100,
+	AHD_INITIATORROLE     = 0x00200,/*
+					 * Allow initiator operations on
+					 * this controller.
+					 */
+	AHD_TARGETROLE	      = 0x00400,/*
+					 * Allow target operations on this
+					 * controller.
+					 */
+	AHD_RESOURCE_SHORTAGE = 0x00800,
+	AHD_TQINFIFO_BLOCKED  = 0x01000,/* Blocked waiting for ATIOs */
+	AHD_INT50_SPEEDFLEX   = 0x02000,/*
+					 * Internal 50pin connector
+					 * sits behind an aic3860
+					 */
+	AHD_BIOS_ENABLED      = 0x04000,
+	AHD_ALL_INTERRUPTS    = 0x08000,
+	AHD_39BIT_ADDRESSING  = 0x10000,/* Use 39 bit addressing scheme. */
+	AHD_64BIT_ADDRESSING  = 0x20000,/* Use 64 bit addressing scheme. */
+	AHD_CURRENT_SENSING   = 0x40000,
+	AHD_SCB_CONFIG_USED   = 0x80000,/* No SEEPROM but SCB had info. */
+	AHD_HP_BOARD	      = 0x100000,
+	AHD_RESET_POLL_ACTIVE = 0x200000,
+	AHD_UPDATE_PEND_CMDS  = 0x400000,
+	AHD_RUNNING_QOUTFIFO  = 0x800000,
+	AHD_HAD_FIRST_SEL     = 0x1000000
+} ahd_flag;
+
+/************************* Hardware  SCB Definition ***************************/
+
+/*
+ * The driver keeps up to MAX_SCB scb structures per card in memory.  The SCB
+ * consists of a "hardware SCB" mirroring the fields available on the card
+ * and additional information the kernel stores for each transaction.
+ *
+ * To minimize space utilization, a portion of the hardware scb stores
+ * different data during different portions of a SCSI transaction.
+ * As initialized by the host driver for the initiator role, this area
+ * contains the SCSI cdb (or a pointer to the  cdb) to be executed.  After
+ * the cdb has been presented to the target, this area serves to store
+ * residual transfer information and the SCSI status byte.
+ * For the target role, the contents of this area do not change, but
+ * still serve a different purpose than for the initiator role.  See
+ * struct target_data for details.
+ */
+
+/*
+ * Status information embedded in the shared portion of
+ * an SCB after passing the cdb to the target.  The kernel
+ * driver will only read this data for transactions that
+ * complete abnormally.
+ */
+struct initiator_status {
+	uint32_t residual_datacnt;	/* Residual in the current S/G seg */
+	uint32_t residual_sgptr;	/* The next S/G for this transfer */
+	uint8_t	 scsi_status;		/* Standard SCSI status byte */
+};
+
+struct target_status {
+	uint32_t residual_datacnt;	/* Residual in the current S/G seg */
+	uint32_t residual_sgptr;	/* The next S/G for this transfer */
+	uint8_t  scsi_status;		/* SCSI status to give to initiator */
+	uint8_t  target_phases;		/* Bitmap of phases to execute */
+	uint8_t  data_phase;		/* Data-In or Data-Out */
+	uint8_t  initiator_tag;		/* Initiator's transaction tag */
+};
+
+/*
+ * Initiator mode SCB shared data area.
+ * If the embedded CDB is 12 bytes or less, we embed
+ * the sense buffer address in the SCB.  This allows
+ * us to retrieve sense information without interrupting
+ * the host in packetized mode.
+ */
+typedef uint32_t sense_addr_t;
+#define MAX_CDB_LEN 16
+#define MAX_CDB_LEN_WITH_SENSE_ADDR (MAX_CDB_LEN - sizeof(sense_addr_t))
+union initiator_data {
+	struct {
+		uint64_t cdbptr;
+		uint8_t  cdblen;
+	} cdb_from_host;
+	uint8_t	 cdb[MAX_CDB_LEN];
+	struct {
+		uint8_t	 cdb[MAX_CDB_LEN_WITH_SENSE_ADDR];
+		sense_addr_t sense_addr;
+	} cdb_plus_saddr;
+};
+
+/*
+ * Target mode version of the shared data SCB segment.
+ */
+struct target_data {
+	uint32_t spare[2];	
+	uint8_t  scsi_status;		/* SCSI status to give to initiator */
+	uint8_t  target_phases;		/* Bitmap of phases to execute */
+	uint8_t  data_phase;		/* Data-In or Data-Out */
+	uint8_t  initiator_tag;		/* Initiator's transaction tag */
+};
+
+struct hardware_scb {
+/*0*/	union {
+		union	initiator_data idata;
+		struct	target_data tdata;
+		struct	initiator_status istatus;
+		struct	target_status tstatus;
+	} shared_data;
+/*
+ * A word about residuals.
+ * The scb is presented to the sequencer with the dataptr and datacnt
+ * fields initialized to the contents of the first S/G element to
+ * transfer.  The sgptr field is initialized to the bus address for
+ * the S/G element that follows the first in the in core S/G array
+ * or'ed with the SG_FULL_RESID flag.  Sgptr may point to an invalid
+ * S/G entry for this transfer (single S/G element transfer with the
+ * first elements address and length preloaded in the dataptr/datacnt
+ * fields).  If no transfer is to occur, sgptr is set to SG_LIST_NULL.
+ * The SG_FULL_RESID flag ensures that the residual will be correctly
+ * noted even if no data transfers occur.  Once the data phase is entered,
+ * the residual sgptr and datacnt are loaded from the sgptr and the
+ * datacnt fields.  After each S/G element's dataptr and length are
+ * loaded into the hardware, the residual sgptr is advanced.  After
+ * each S/G element is expired, its datacnt field is checked to see
+ * if the LAST_SEG flag is set.  If so, SG_LIST_NULL is set in the
+ * residual sg ptr and the transfer is considered complete.  If the
+ * sequencer determines that there is a residual in the transfer, or
+ * there is non-zero status, it will set the SG_STATUS_VALID flag in
+ * sgptr and dma the scb back into host memory.  To summarize:
+ *
+ * Sequencer:
+ *	o A residual has occurred if SG_FULL_RESID is set in sgptr,
+ *	  or residual_sgptr does not have SG_LIST_NULL set.
+ *
+ *	o We are transferring the last segment if residual_datacnt has
+ *	  the SG_LAST_SEG flag set.
+ *
+ * Host:
+ *	o A residual can only have occurred if a completed scb has the
+ *	  SG_STATUS_VALID flag set.  Inspection of the SCSI status field,
+ *	  the residual_datacnt, and the residual_sgptr field will tell
+ *	  for sure.
+ *
+ *	o residual_sgptr and sgptr refer to the "next" sg entry
+ *	  and so may point beyond the last valid sg entry for the
+ *	  transfer.
+ */ 
+#define SG_PTR_MASK	0xFFFFFFF8
+/*16*/	uint16_t tag;		/* Reused by Sequencer. */
+/*18*/	uint8_t  control;	/* See SCB_CONTROL in aic79xx.reg for details */
+/*19*/	uint8_t	 scsiid;	/*
+				 * Selection out Id
+				 * Our Id (bits 0-3) Their ID (bits 4-7)
+				 */
+/*20*/	uint8_t  lun;
+/*21*/	uint8_t  task_attribute;
+/*22*/	uint8_t  cdb_len;
+/*23*/	uint8_t  task_management;
+/*24*/	uint64_t dataptr;
+/*32*/	uint32_t datacnt;	/* Byte 3 is spare. */
+/*36*/	uint32_t sgptr;
+/*40*/	uint32_t hscb_busaddr;
+/*44*/	uint32_t next_hscb_busaddr;
+/********** Long lun field only downloaded for full 8 byte lun support ********/
+/*48*/  uint8_t	 pkt_long_lun[8];
+/******* Fields below are not Downloaded (Sequencer may use for scratch) ******/
+/*56*/  uint8_t	 spare[8];
+};
+
+/************************ Kernel SCB Definitions ******************************/
+/*
+ * Some fields of the SCB are OS dependent.  Here we collect the
+ * definitions for elements that all OS platforms need to include
+ * in there SCB definition.
+ */
+
+/*
+ * Definition of a scatter/gather element as transferred to the controller.
+ * The aic7xxx chips only support a 24bit length.  We use the top byte of
+ * the length to store additional address bits and a flag to indicate
+ * that a given segment terminates the transfer.  This gives us an
+ * addressable range of 512GB on machines with 64bit PCI or with chips
+ * that can support dual address cycles on 32bit PCI busses.
+ */
+struct ahd_dma_seg {
+	uint32_t	addr;
+	uint32_t	len;
+#define	AHD_DMA_LAST_SEG	0x80000000
+#define	AHD_SG_HIGH_ADDR_MASK	0x7F000000
+#define	AHD_SG_LEN_MASK		0x00FFFFFF
+};
+
+struct ahd_dma64_seg {
+	uint64_t	addr;
+	uint32_t	len;
+	uint32_t	pad;
+};
+
+struct map_node {
+	bus_dmamap_t		 dmamap;
+	dma_addr_t		 physaddr;
+	uint8_t			*vaddr;
+	SLIST_ENTRY(map_node)	 links;
+};
+
+/*
+ * The current state of this SCB.
+ */
+typedef enum {
+	SCB_FLAG_NONE		= 0x00000,
+	SCB_TRANSMISSION_ERROR	= 0x00001,/*
+					   * We detected a parity or CRC
+					   * error that has affected the
+					   * payload of the command.  This
+					   * flag is checked when normal
+					   * status is returned to catch
+					   * the case of a target not
+					   * responding to our attempt
+					   * to report the error.
+					   */
+	SCB_OTHERTCL_TIMEOUT	= 0x00002,/*
+					   * Another device was active
+					   * during the first timeout for
+					   * this SCB so we gave ourselves
+					   * an additional timeout period
+					   * in case it was hogging the
+					   * bus.
+				           */
+	SCB_DEVICE_RESET	= 0x00004,
+	SCB_SENSE		= 0x00008,
+	SCB_CDB32_PTR		= 0x00010,
+	SCB_RECOVERY_SCB	= 0x00020,
+	SCB_AUTO_NEGOTIATE	= 0x00040,/* Negotiate to achieve goal. */
+	SCB_NEGOTIATE		= 0x00080,/* Negotiation forced for command. */
+	SCB_ABORT		= 0x00100,
+	SCB_ACTIVE		= 0x00200,
+	SCB_TARGET_IMMEDIATE	= 0x00400,
+	SCB_PACKETIZED		= 0x00800,
+	SCB_EXPECT_PPR_BUSFREE	= 0x01000,
+	SCB_PKT_SENSE		= 0x02000,
+	SCB_CMDPHASE_ABORT	= 0x04000,
+	SCB_ON_COL_LIST		= 0x08000,
+	SCB_SILENT		= 0x10000 /*
+					   * Be quiet about transmission type
+					   * errors.  They are expected and we
+					   * don't want to upset the user.  This
+					   * flag is typically used during DV.
+					   */
+} scb_flag;
+
+struct scb {
+	struct	hardware_scb	 *hscb;
+	union {
+		SLIST_ENTRY(scb)  sle;
+		LIST_ENTRY(scb)	  le;
+		TAILQ_ENTRY(scb)  tqe;
+	} links;
+	union {
+		SLIST_ENTRY(scb)  sle;
+		LIST_ENTRY(scb)	  le;
+		TAILQ_ENTRY(scb)  tqe;
+	} links2;
+#define pending_links links2.le
+#define collision_links links2.le
+	struct scb		 *col_scb;
+	ahd_io_ctx_t		  io_ctx;
+	struct ahd_softc	 *ahd_softc;
+	scb_flag		  flags;
+#ifndef __linux__
+	bus_dmamap_t		  dmamap;
+#endif
+	struct scb_platform_data *platform_data;
+	struct map_node	 	 *hscb_map;
+	struct map_node	 	 *sg_map;
+	struct map_node	 	 *sense_map;
+	void			 *sg_list;
+	uint8_t			 *sense_data;
+	dma_addr_t		  sg_list_busaddr;
+	dma_addr_t		  sense_busaddr;
+	u_int			  sg_count;/* How full ahd_dma_seg is */
+#define	AHD_MAX_LQ_CRC_ERRORS 5
+	u_int			  crc_retry_count;
+};
+
+TAILQ_HEAD(scb_tailq, scb);
+LIST_HEAD(scb_list, scb);
+
+struct scb_data {
+	/*
+	 * TAILQ of lists of free SCBs grouped by device
+	 * collision domains.
+	 */
+	struct scb_tailq free_scbs;
+
+	/*
+	 * Per-device lists of SCBs whose tag ID would collide
+	 * with an already active tag on the device.
+	 */
+	struct scb_list free_scb_lists[AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT];
+
+	/*
+	 * SCBs that will not collide with any active device.
+	 */
+	struct scb_list any_dev_free_scb_list;
+
+	/*
+	 * Mapping from tag to SCB.
+	 */
+	struct	scb *scbindex[AHD_SCB_MAX];
+
+	/*
+	 * "Bus" addresses of our data structures.
+	 */
+	bus_dma_tag_t	 hscb_dmat;	/* dmat for our hardware SCB array */
+	bus_dma_tag_t	 sg_dmat;	/* dmat for our sg segments */
+	bus_dma_tag_t	 sense_dmat;	/* dmat for our sense buffers */
+	SLIST_HEAD(, map_node) hscb_maps;
+	SLIST_HEAD(, map_node) sg_maps;
+	SLIST_HEAD(, map_node) sense_maps;
+	int		 scbs_left;	/* unallocated scbs in head map_node */
+	int		 sgs_left;	/* unallocated sgs in head map_node */
+	int		 sense_left;	/* unallocated sense in head map_node */
+	uint16_t	 numscbs;
+	uint16_t	 maxhscbs;	/* Number of SCBs on the card */
+	uint8_t		 init_level;	/*
+					 * How far we've initialized
+					 * this structure.
+					 */
+};
+
+/************************ Target Mode Definitions *****************************/
+
+/*
+ * Connection descriptor for select-in requests in target mode.
+ */
+struct target_cmd {
+	uint8_t scsiid;		/* Our ID and the initiator's ID */
+	uint8_t identify;	/* Identify message */
+	uint8_t bytes[22];	/* 
+				 * Bytes contains any additional message
+				 * bytes terminated by 0xFF.  The remainder
+				 * is the cdb to execute.
+				 */
+	uint8_t cmd_valid;	/*
+				 * When a command is complete, the firmware
+				 * will set cmd_valid to all bits set.
+				 * After the host has seen the command,
+				 * the bits are cleared.  This allows us
+				 * to just peek at host memory to determine
+				 * if more work is complete. cmd_valid is on
+				 * an 8 byte boundary to simplify setting
+				 * it on aic7880 hardware which only has
+				 * limited direct access to the DMA FIFO.
+				 */
+	uint8_t pad[7];
+};
+
+/*
+ * Number of events we can buffer up if we run out
+ * of immediate notify ccbs.
+ */
+#define AHD_TMODE_EVENT_BUFFER_SIZE 8
+struct ahd_tmode_event {
+	uint8_t initiator_id;
+	uint8_t event_type;	/* MSG type or EVENT_TYPE_BUS_RESET */
+#define	EVENT_TYPE_BUS_RESET 0xFF
+	uint8_t event_arg;
+};
+
+/*
+ * Per enabled lun target mode state.
+ * As this state is directly influenced by the host OS'es target mode
+ * environment, we let the OS module define it.  Forward declare the
+ * structure here so we can store arrays of them, etc. in OS neutral
+ * data structures.
+ */
+#ifdef AHD_TARGET_MODE 
+struct ahd_tmode_lstate {
+	struct cam_path *path;
+	struct ccb_hdr_slist accept_tios;
+	struct ccb_hdr_slist immed_notifies;
+	struct ahd_tmode_event event_buffer[AHD_TMODE_EVENT_BUFFER_SIZE];
+	uint8_t event_r_idx;
+	uint8_t event_w_idx;
+};
+#else
+struct ahd_tmode_lstate;
+#endif
+
+/******************** Transfer Negotiation Datastructures *********************/
+#define AHD_TRANS_CUR		0x01	/* Modify current negotiation status */
+#define AHD_TRANS_ACTIVE	0x03	/* Assume this target is on the bus */
+#define AHD_TRANS_GOAL		0x04	/* Modify negotiation goal */
+#define AHD_TRANS_USER		0x08	/* Modify user negotiation settings */
+#define AHD_PERIOD_10MHz	0x19
+
+#define AHD_WIDTH_UNKNOWN	0xFF
+#define AHD_PERIOD_UNKNOWN	0xFF
+#define AHD_OFFSET_UNKNOWN	0xFF
+#define AHD_PPR_OPTS_UNKNOWN	0xFF
+
+/*
+ * Transfer Negotiation Information.
+ */
+struct ahd_transinfo {
+	uint8_t protocol_version;	/* SCSI Revision level */
+	uint8_t transport_version;	/* SPI Revision level */
+	uint8_t width;			/* Bus width */
+	uint8_t period;			/* Sync rate factor */
+	uint8_t offset;			/* Sync offset */
+	uint8_t ppr_options;		/* Parallel Protocol Request options */
+};
+
+/*
+ * Per-initiator current, goal and user transfer negotiation information.
+ */
+struct ahd_initiator_tinfo {
+	struct ahd_transinfo curr;
+	struct ahd_transinfo goal;
+	struct ahd_transinfo user;
+};
+
+/*
+ * Per enabled target ID state.
+ * Pointers to lun target state as well as sync/wide negotiation information
+ * for each initiator<->target mapping.  For the initiator role we pretend
+ * that we are the target and the targets are the initiators since the
+ * negotiation is the same regardless of role.
+ */
+struct ahd_tmode_tstate {
+	struct ahd_tmode_lstate*	enabled_luns[AHD_NUM_LUNS];
+	struct ahd_initiator_tinfo	transinfo[AHD_NUM_TARGETS];
+
+	/*
+	 * Per initiator state bitmasks.
+	 */
+	uint16_t	 auto_negotiate;/* Auto Negotiation Required */
+	uint16_t	 discenable;	/* Disconnection allowed  */
+	uint16_t	 tagenable;	/* Tagged Queuing allowed */
+};
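+
+/*
+ * Example (illustrative): each bit in the masks above corresponds to one
+ * SCSI ID, so a test such as (tstate->discenable & (1 << scsi_id)) tells
+ * whether disconnection is currently permitted for that device.
+ */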
+
+/*
+ * Points of interest along the negotiated transfer scale.
+ */
+#define AHD_SYNCRATE_160	0x8
+#define AHD_SYNCRATE_PACED	0x8
+#define AHD_SYNCRATE_DT		0x9
+#define AHD_SYNCRATE_ULTRA2	0xa
+#define AHD_SYNCRATE_ULTRA	0xc
+#define AHD_SYNCRATE_FAST	0x19
+#define AHD_SYNCRATE_MIN_DT	AHD_SYNCRATE_FAST
+#define AHD_SYNCRATE_SYNC	0x32
+#define AHD_SYNCRATE_MIN	0x60
+#define	AHD_SYNCRATE_ASYNC	0xFF
+#define AHD_SYNCRATE_MAX	AHD_SYNCRATE_160
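+
+/*
+ * Note (assumption for illustration): for factors of AHD_SYNCRATE_FAST
+ * (0x19) and slower, the sync period is the factor times 4ns, so
+ * 0x19 -> 100ns -> 10MHz (matching AHD_PERIOD_10MHz) and
+ * 0x32 -> 200ns -> 5MHz.  Faster factors (DT, Ultra2, paced) are special
+ * encodings defined by the SPI specifications rather than a linear scale.
+ */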
+
+/* Safe and valid period for async negotiations. */
+#define	AHD_ASYNC_XFER_PERIOD	0x44
+
+/*
+ * In RevA, the synctable uses a 120MHz rate for the period
+ * factor 8 and 160MHz for the period factor 7.  The 120MHz
+ * rate never made it into the official SCSI spec, so we must
+ * compensate when setting the negotiation table for Rev A
+ * parts.
+ */
+#define AHD_SYNCRATE_REVA_120	0x8
+#define AHD_SYNCRATE_REVA_160	0x7
+
+/***************************** Lookup Tables **********************************/
+/*
+ * Phase -> name and message out response
+ * to parity errors in each phase table. 
+ */
+struct ahd_phase_table_entry {
+        uint8_t phase;
+        uint8_t mesg_out; /* Message response to parity errors */
+	char *phasemsg;
+};
+
+/************************** Serial EEPROM Format ******************************/
+
+struct seeprom_config {
+/*
+ * Per SCSI ID Configuration Flags
+ */
+	uint16_t device_flags[16];	/* words 0-15 */
+#define		CFXFER		0x003F	/* synchronous transfer rate */
+#define			CFXFER_ASYNC	0x3F
+#define		CFQAS		0x0040	/* Negotiate QAS */
+#define		CFPACKETIZED	0x0080	/* Negotiate Packetized Transfers */
+#define		CFSTART		0x0100	/* send start unit SCSI command */
+#define		CFINCBIOS	0x0200	/* include in BIOS scan */
+#define		CFDISC		0x0400	/* enable disconnection */
+#define		CFMULTILUNDEV	0x0800	/* Probe multiple luns in BIOS scan */
+#define		CFWIDEB		0x1000	/* wide bus device */
+#define		CFHOSTMANAGED	0x8000	/* Managed by a RAID controller */
+
+/*
+ * BIOS Control Bits
+ */
+	uint16_t bios_control;		/* word 16 */
+#define		CFSUPREM	0x0001	/* support all removable drives */
+#define		CFSUPREMB	0x0002	/* support removable boot drives */
+#define		CFBIOSSTATE	0x000C	/* BIOS Action State */
+#define		    CFBS_DISABLED	0x00
+#define		    CFBS_ENABLED	0x04
+#define		    CFBS_DISABLED_SCAN	0x08
+#define		CFENABLEDV	0x0010	/* Perform Domain Validation */
+#define		CFCTRL_A	0x0020	/* BIOS displays Ctrl-A message */	
+#define		CFSPARITY	0x0040	/* SCSI parity */
+#define		CFEXTEND	0x0080	/* extended translation enabled */
+#define		CFBOOTCD	0x0100  /* Support Bootable CD-ROM */
+#define		CFMSG_LEVEL	0x0600	/* BIOS Message Level */
+#define			CFMSG_VERBOSE	0x0000
+#define			CFMSG_SILENT	0x0200
+#define			CFMSG_DIAG	0x0400
+#define		CFRESETB	0x0800	/* reset SCSI bus at boot */
+/*		UNUSED		0xf000	*/
+
+/*
+ * Host Adapter Control Bits
+ */
+	uint16_t adapter_control;	/* word 17 */	
+#define		CFAUTOTERM	0x0001	/* Perform Auto termination */
+#define		CFSTERM		0x0002	/* SCSI low byte termination */
+#define		CFWSTERM	0x0004	/* SCSI high byte termination */
+#define		CFSEAUTOTERM	0x0008	/* Ultra2 Perform secondary Auto Term*/
+#define		CFSELOWTERM	0x0010	/* Ultra2 secondary low term */
+#define		CFSEHIGHTERM	0x0020	/* Ultra2 secondary high term */
+#define		CFSTPWLEVEL	0x0040	/* Termination level control */
+#define		CFBIOSAUTOTERM	0x0080	/* Perform Auto termination */
+#define		CFTERM_MENU	0x0100	/* BIOS displays termination menu */	
+#define		CFCLUSTERENB	0x8000	/* Cluster Enable */
+
+/*
+ * Bus Release Time, Host Adapter ID
+ */
+	uint16_t brtime_id;		/* word 18 */
+#define		CFSCSIID	0x000f	/* host adapter SCSI ID */
+/*		UNUSED		0x00f0	*/
+#define		CFBRTIME	0xff00	/* bus release time/PCI Latency Time */
+
+/*
+ * Maximum targets
+ */
+	uint16_t max_targets;		/* word 19 */	
+#define		CFMAXTARG	0x00ff	/* maximum targets */
+#define		CFBOOTLUN	0x0f00	/* Lun to boot from */
+#define		CFBOOTID	0xf000	/* Target to boot from */
+	uint16_t res_1[10];		/* words 20-29 */
+	uint16_t signature;		/* BIOS Signature */
+#define		CFSIGNATURE	0x400
+	uint16_t checksum;		/* word 31 */
+};
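+
+/*
+ * Illustrative sketch (not part of the driver): decoding one per-target
+ * word from device_flags[] with the masks above.  "targ" is an example
+ * index only.
+ *
+ *	uint16_t flags = sc->device_flags[targ];
+ *	int	 wide  = (flags & CFWIDEB) != 0;
+ *	int	 disc  = (flags & CFDISC) != 0;
+ *	u_int	 rate  = flags & CFXFER;	(CFXFER_ASYNC means async only)
+ */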
+
+/*
+ * Vital Product Data used during POST and by the BIOS.
+ */
+struct vpd_config {
+	uint8_t  bios_flags;
+#define		VPDMASTERBIOS	0x0001
+#define		VPDBOOTHOST	0x0002
+	uint8_t  reserved_1[21];
+	uint8_t  resource_type;
+	uint8_t  resource_len[2];
+	uint8_t  resource_data[8];
+	uint8_t  vpd_tag;
+	uint16_t vpd_len;
+	uint8_t  vpd_keyword[2];
+	uint8_t  length;
+	uint8_t  revision;
+	uint8_t  device_flags;
+	uint8_t  termnation_menus[2];
+	uint8_t  fifo_threshold;
+	uint8_t  end_tag;
+	uint8_t  vpd_checksum;
+	uint16_t default_target_flags;
+	uint16_t default_bios_flags;
+	uint16_t default_ctrl_flags;
+	uint8_t  default_irq;
+	uint8_t  pci_lattime;
+	uint8_t  max_target;
+	uint8_t  boot_lun;
+	uint16_t signature;
+	uint8_t  reserved_2;
+	uint8_t  checksum;
+	uint8_t	 reserved_3[4];
+};
+
+/****************************** Flexport Logic ********************************/
+#define FLXADDR_TERMCTL			0x0
+#define		FLX_TERMCTL_ENSECHIGH	0x8
+#define		FLX_TERMCTL_ENSECLOW	0x4
+#define		FLX_TERMCTL_ENPRIHIGH	0x2
+#define		FLX_TERMCTL_ENPRILOW	0x1
+#define FLXADDR_ROMSTAT_CURSENSECTL	0x1
+#define		FLX_ROMSTAT_SEECFG	0xF0
+#define		FLX_ROMSTAT_EECFG	0x0F
+#define		FLX_ROMSTAT_SEE_93C66	0x00
+#define		FLX_ROMSTAT_SEE_NONE	0xF0
+#define		FLX_ROMSTAT_EE_512x8	0x0
+#define		FLX_ROMSTAT_EE_1MBx8	0x1
+#define		FLX_ROMSTAT_EE_2MBx8	0x2
+#define		FLX_ROMSTAT_EE_4MBx8	0x3
+#define		FLX_ROMSTAT_EE_16MBx8	0x4
+#define		CURSENSE_ENB	0x1
+#define	FLXADDR_FLEXSTAT		0x2
+#define		FLX_FSTAT_BUSY		0x1
+#define FLXADDR_CURRENT_STAT		0x4
+#define		FLX_CSTAT_SEC_HIGH	0xC0
+#define		FLX_CSTAT_SEC_LOW	0x30
+#define		FLX_CSTAT_PRI_HIGH	0x0C
+#define		FLX_CSTAT_PRI_LOW	0x03
+#define		FLX_CSTAT_MASK		0x03
+#define		FLX_CSTAT_SHIFT		2
+#define		FLX_CSTAT_OKAY		0x0
+#define		FLX_CSTAT_OVER		0x1
+#define		FLX_CSTAT_UNDER		0x2
+#define		FLX_CSTAT_INVALID	0x3
+
+int		ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf,
+				 u_int start_addr, u_int count, int bstream);
+
+int		ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf,
+				  u_int start_addr, u_int count);
+int		ahd_wait_seeprom(struct ahd_softc *ahd);
+int		ahd_verify_vpd_cksum(struct vpd_config *vpd);
+int		ahd_verify_cksum(struct seeprom_config *sc);
+int		ahd_acquire_seeprom(struct ahd_softc *ahd);
+void		ahd_release_seeprom(struct ahd_softc *ahd);
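+
+/*
+ * Illustrative sketch (not part of the driver): reading the config words
+ * with the helpers above.  The start address, byte-stream flag and the
+ * return-value conventions shown are assumptions for the example only.
+ *
+ *	struct seeprom_config sc;
+ *
+ *	if (ahd_acquire_seeprom(ahd)) {
+ *		ahd_read_seeprom(ahd, (uint16_t *)&sc, 0, sizeof(sc)/2, 0);
+ *		ahd_release_seeprom(ahd);
+ *		if (!ahd_verify_cksum(&sc))
+ *			... treat the contents as invalid ...
+ *	}
+ */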
+
+/****************************  Message Buffer *********************************/
+typedef enum {
+	MSG_FLAG_NONE			= 0x00,
+	MSG_FLAG_EXPECT_PPR_BUSFREE	= 0x01,
+	MSG_FLAG_IU_REQ_CHANGED		= 0x02,
+	MSG_FLAG_EXPECT_IDE_BUSFREE	= 0x04,
+	MSG_FLAG_EXPECT_QASREJ_BUSFREE	= 0x08,
+	MSG_FLAG_PACKETIZED		= 0x10
+} ahd_msg_flags;
+
+typedef enum {
+	MSG_TYPE_NONE			= 0x00,
+	MSG_TYPE_INITIATOR_MSGOUT	= 0x01,
+	MSG_TYPE_INITIATOR_MSGIN	= 0x02,
+	MSG_TYPE_TARGET_MSGOUT		= 0x03,
+	MSG_TYPE_TARGET_MSGIN		= 0x04
+} ahd_msg_type;
+
+typedef enum {
+	MSGLOOP_IN_PROG,
+	MSGLOOP_MSGCOMPLETE,
+	MSGLOOP_TERMINATED
+} msg_loop_stat;
+
+/*********************** Software Configuration Structure *********************/
+struct ahd_suspend_channel_state {
+	uint8_t	scsiseq;
+	uint8_t	sxfrctl0;
+	uint8_t	sxfrctl1;
+	uint8_t	simode0;
+	uint8_t	simode1;
+	uint8_t	seltimer;
+	uint8_t	seqctl;
+};
+
+struct ahd_suspend_state {
+	struct	ahd_suspend_channel_state channel[2];
+	uint8_t	optionmode;
+	uint8_t	dscommand0;
+	uint8_t	dspcistatus;
+	/* hsmailbox */
+	uint8_t	crccontrol1;
+	uint8_t	scbbaddr;
+	/* Host and sequencer SCB counts */
+	uint8_t	dff_thrsh;
+	uint8_t	*scratch_ram;
+	uint8_t	*btt;
+};
+
+typedef void (*ahd_bus_intr_t)(struct ahd_softc *);
+
+typedef enum {
+	AHD_MODE_DFF0,
+	AHD_MODE_DFF1,
+	AHD_MODE_CCHAN,
+	AHD_MODE_SCSI,
+	AHD_MODE_CFG,
+	AHD_MODE_UNKNOWN
+} ahd_mode;
+
+#define AHD_MK_MSK(x) (0x01 << (x))
+#define AHD_MODE_DFF0_MSK	AHD_MK_MSK(AHD_MODE_DFF0)
+#define AHD_MODE_DFF1_MSK	AHD_MK_MSK(AHD_MODE_DFF1)
+#define AHD_MODE_CCHAN_MSK	AHD_MK_MSK(AHD_MODE_CCHAN)
+#define AHD_MODE_SCSI_MSK	AHD_MK_MSK(AHD_MODE_SCSI)
+#define AHD_MODE_CFG_MSK	AHD_MK_MSK(AHD_MODE_CFG)
+#define AHD_MODE_UNKNOWN_MSK	AHD_MK_MSK(AHD_MODE_UNKNOWN)
+#define AHD_MODE_ANY_MSK (~0)
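+
+/*
+ * Example (illustrative): AHD_MK_MSK() turns a mode into a single bit, so
+ * AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK evaluates to 0x03 (either data FIFO
+ * acceptable), while AHD_MODE_ANY_MSK accepts every mode.
+ */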
+
+typedef uint8_t ahd_mode_state;
+
+typedef void ahd_callback_t (void *);
+
+struct ahd_softc {
+	bus_space_tag_t           tags[2];
+	bus_space_handle_t        bshs[2];
+#ifndef __linux__
+	bus_dma_tag_t		  buffer_dmat;   /* dmat for buffer I/O */
+#endif
+	struct scb_data		  scb_data;
+
+	struct hardware_scb	 *next_queued_hscb;
+
+	/*
+	 * SCBs that have been sent to the controller
+	 */
+	LIST_HEAD(, scb)	  pending_scbs;
+
+	/*
+	 * Current register window mode information.
+	 */
+	ahd_mode		  dst_mode;
+	ahd_mode		  src_mode;
+
+	/*
+	 * Saved register window mode information
+	 * used for restore on next unpause.
+	 */
+	ahd_mode		  saved_dst_mode;
+	ahd_mode		  saved_src_mode;
+
+	/*
+	 * Platform specific data.
+	 */
+	struct ahd_platform_data *platform_data;
+
+	/*
+	 * Platform specific device information.
+	 */
+	ahd_dev_softc_t		  dev_softc;
+
+	/*
+	 * Bus specific device information.
+	 */
+	ahd_bus_intr_t		  bus_intr;
+
+	/*
+	 * Target mode related state kept on a per enabled lun basis.
+	 * Targets that are not enabled will have null entries.
+	 * As an initiator, we keep one target entry for our initiator
+	 * ID to store our sync/wide transfer settings.
+	 */
+	struct ahd_tmode_tstate  *enabled_targets[AHD_NUM_TARGETS];
+
+	/*
+	 * The black hole device responsible for handling requests for
+	 * disabled luns on enabled targets.
+	 */
+	struct ahd_tmode_lstate  *black_hole;
+
+	/*
+	 * Device instance currently on the bus awaiting a continue TIO
+	 * for a command that was not given the disconnect privilege.
+	 */
+	struct ahd_tmode_lstate  *pending_device;
+
+	/*
+	 * Timer handles for timer driven callbacks.
+	 */
+	ahd_timer_t		  reset_timer;
+	ahd_timer_t		  stat_timer;
+
+	/*
+	 * Statistics.
+	 */
+#define	AHD_STAT_UPDATE_US	250000 /* 250ms */
+#define	AHD_STAT_BUCKETS	4
+	u_int			  cmdcmplt_bucket;
+	uint32_t		  cmdcmplt_counts[AHD_STAT_BUCKETS];
+	uint32_t		  cmdcmplt_total;
+
+	/*
+	 * Card characteristics
+	 */
+	ahd_chip		  chip;
+	ahd_feature		  features;
+	ahd_bug			  bugs;
+	ahd_flag		  flags;
+	struct seeprom_config	 *seep_config;
+
+	/* Values to store in the SEQCTL register for pause and unpause */
+	uint8_t			  unpause;
+	uint8_t			  pause;
+
+	/* Command Queues */
+	uint16_t		  qoutfifonext;
+	uint16_t		  qoutfifonext_valid_tag;
+	uint16_t		  qinfifonext;
+	uint16_t		  qinfifo[AHD_SCB_MAX];
+	uint16_t		 *qoutfifo;
+
+	/* Critical Section Data */
+	struct cs		 *critical_sections;
+	u_int			  num_critical_sections;
+
+	/* Buffer for handling packetized bitbucket. */
+	uint8_t			 *overrun_buf;
+
+	/* Links for chaining softcs */
+	TAILQ_ENTRY(ahd_softc)	  links;
+
+	/* Channel Names ('A', 'B', etc.) */
+	char			  channel;
+
+	/* Initiator Bus ID */
+	uint8_t			  our_id;
+
+	/*
+	 * Target incoming command FIFO.
+	 */
+	struct target_cmd	 *targetcmds;
+	uint8_t			  tqinfifonext;
+
+	/*
+	 * Cached version of the hs_mailbox so we can avoid
+	 * pausing the sequencer during mailbox updates.
+	 */
+	uint8_t			  hs_mailbox;
+
+	/*
+	 * Incoming and outgoing message handling.
+	 */
+	uint8_t			  send_msg_perror;
+	ahd_msg_flags		  msg_flags;
+	ahd_msg_type		  msg_type;
+	uint8_t			  msgout_buf[12];/* Message we are sending */
+	uint8_t			  msgin_buf[12];/* Message we are receiving */
+	u_int			  msgout_len;	/* Length of message to send */
+	u_int			  msgout_index;	/* Current index in msgout */
+	u_int			  msgin_index;	/* Current index in msgin */
+
+	/*
+	 * Mapping information for data structures shared
+	 * between the sequencer and kernel.
+	 */
+	bus_dma_tag_t		  parent_dmat;
+	bus_dma_tag_t		  shared_data_dmat;
+	bus_dmamap_t		  shared_data_dmamap;
+	dma_addr_t		  shared_data_busaddr;
+
+	/* Information saved through suspend/resume cycles */
+	struct ahd_suspend_state  suspend_state;
+
+	/* Number of enabled target mode devices on this card */
+	u_int			  enabled_luns;
+
+	/* Initialization level of this data structure */
+	u_int			  init_level;
+
+	/* PCI cacheline size. */
+	u_int			  pci_cachesize;
+
+	/* IO Cell Parameters */
+	uint8_t			  iocell_opts[AHD_NUM_PER_DEV_ANNEXCOLS];
+
+	u_int			  stack_size;
+	uint16_t		 *saved_stack;
+
+	/* Per-Unit descriptive information */
+	const char		 *description;
+	const char		 *bus_description;
+	char			 *name;
+	int			  unit;
+
+	/* Selection Timer settings */
+	int			  seltime;
+
+	/*
+	 * Interrupt coalescing settings.
+	 */
+#define	AHD_INT_COALESCING_TIMER_DEFAULT		250 /*us*/
+#define	AHD_INT_COALESCING_MAXCMDS_DEFAULT		10
+#define	AHD_INT_COALESCING_MAXCMDS_MAX			127
+#define	AHD_INT_COALESCING_MINCMDS_DEFAULT		5
+#define	AHD_INT_COALESCING_MINCMDS_MAX			127
+#define	AHD_INT_COALESCING_THRESHOLD_DEFAULT		2000
+#define	AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT	1000
+	u_int			  int_coalescing_timer;
+	u_int			  int_coalescing_maxcmds;
+	u_int			  int_coalescing_mincmds;
+	u_int			  int_coalescing_threshold;
+	u_int			  int_coalescing_stop_threshold;
+
+	uint16_t	 	  user_discenable;/* Disconnection allowed  */
+	uint16_t		  user_tagenable;/* Tagged Queuing allowed */
+};
+
+TAILQ_HEAD(ahd_softc_tailq, ahd_softc);
+extern struct ahd_softc_tailq ahd_tailq;
+
+/*************************** IO Cell Configuration ****************************/
+#define	AHD_PRECOMP_SLEW_INDEX						\
+    (AHD_ANNEXCOL_PRECOMP_SLEW - AHD_ANNEXCOL_PER_DEV0)
+
+#define	AHD_AMPLITUDE_INDEX						\
+    (AHD_ANNEXCOL_AMPLITUDE - AHD_ANNEXCOL_PER_DEV0)
+
+#define AHD_SET_SLEWRATE(ahd, new_slew)					\
+do {									\
+    (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_SLEWRATE_MASK;	\
+    (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] |=			\
+	(((new_slew) << AHD_SLEWRATE_SHIFT) & AHD_SLEWRATE_MASK);	\
+} while (0)
+
+#define AHD_SET_PRECOMP(ahd, new_pcomp)					\
+do {									\
+    (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK;	\
+    (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] |=			\
+	(((new_pcomp) << AHD_PRECOMP_SHIFT) & AHD_PRECOMP_MASK);	\
+} while (0)
+
+#define AHD_SET_AMPLITUDE(ahd, new_amp)					\
+do {									\
+    (ahd)->iocell_opts[AHD_AMPLITUDE_INDEX] &= ~AHD_AMPLITUDE_MASK;	\
+    (ahd)->iocell_opts[AHD_AMPLITUDE_INDEX] |=				\
+	(((new_amp) << AHD_AMPLITUDE_SHIFT) & AHD_AMPLITUDE_MASK);	\
+} while (0)
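+
+/*
+ * Illustrative sketch (not part of the driver): the macros above only
+ * modify the cached iocell_opts[] bytes; the argument values below are
+ * made-up examples.
+ *
+ *	AHD_SET_SLEWRATE(ahd, 2);
+ *	AHD_SET_PRECOMP(ahd, 0);
+ *	AHD_SET_AMPLITUDE(ahd, 7);
+ */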
+
+/************************ Active Device Information ***************************/
+typedef enum {
+	ROLE_UNKNOWN,
+	ROLE_INITIATOR,
+	ROLE_TARGET
+} role_t;
+
+struct ahd_devinfo {
+	int	 our_scsiid;
+	int	 target_offset;
+	uint16_t target_mask;
+	u_int	 target;
+	u_int	 lun;
+	char	 channel;
+	role_t	 role;		/*
+				 * Only guaranteed to be correct if not
+				 * in the busfree state.
+				 */
+};
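+
+/*
+ * Illustrative sketch (not part of the driver): filling a stack devinfo
+ * with ahd_compile_devinfo() (declared below).  "targ" and "lun" are
+ * example values only.
+ *
+ *	struct ahd_devinfo devinfo;
+ *
+ *	ahd_compile_devinfo(&devinfo, ahd->our_id, targ, lun,
+ *			    'A', ROLE_INITIATOR);
+ */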
+
+/****************************** PCI Structures ********************************/
+#define AHD_PCI_IOADDR0	PCIR_MAPS	/* I/O BAR*/
+#define AHD_PCI_MEMADDR	(PCIR_MAPS + 4)	/* Memory BAR */
+#define AHD_PCI_IOADDR1	(PCIR_MAPS + 12)/* Second I/O BAR */
+
+typedef int (ahd_device_setup_t)(struct ahd_softc *);
+
+struct ahd_pci_identity {
+	uint64_t		 full_id;
+	uint64_t		 id_mask;
+	char			*name;
+	ahd_device_setup_t	*setup;
+};
+extern struct ahd_pci_identity ahd_pci_ident_table [];
+extern const u_int ahd_num_pci_devs;
+
+/***************************** VL/EISA Declarations ***************************/
+struct aic7770_identity {
+	uint32_t		 full_id;
+	uint32_t		 id_mask;
+	char			*name;
+	ahd_device_setup_t	*setup;
+};
+extern struct aic7770_identity aic7770_ident_table [];
+extern const int ahd_num_aic7770_devs;
+
+#define AHD_EISA_SLOT_OFFSET	0xc00
+#define AHD_EISA_IOSIZE		0x100
+
+/*************************** Function Declarations ****************************/
+/******************************************************************************/
+void			ahd_reset_cmds_pending(struct ahd_softc *ahd);
+u_int			ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl);
+void			ahd_busy_tcl(struct ahd_softc *ahd,
+				     u_int tcl, u_int busyid);
+static __inline void	ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl);
+static __inline void
+ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl)
+{
+	ahd_busy_tcl(ahd, tcl, SCB_LIST_NULL);
+}
+
+/***************************** PCI Front End *********************************/
+struct	ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
+int			  ahd_pci_config(struct ahd_softc *,
+					 struct ahd_pci_identity *);
+int	ahd_pci_test_register_access(struct ahd_softc *);
+
+/************************** SCB and SCB queue management **********************/
+int		ahd_probe_scbs(struct ahd_softc *);
+void		ahd_qinfifo_requeue_tail(struct ahd_softc *ahd,
+					 struct scb *scb);
+int		ahd_match_scb(struct ahd_softc *ahd, struct scb *scb,
+			      int target, char channel, int lun,
+			      u_int tag, role_t role);
+
+/****************************** Initialization ********************************/
+struct ahd_softc	*ahd_alloc(void *platform_arg, char *name);
+int			 ahd_softc_init(struct ahd_softc *);
+void			 ahd_controller_info(struct ahd_softc *ahd, char *buf);
+int			 ahd_init(struct ahd_softc *ahd);
+int			 ahd_default_config(struct ahd_softc *ahd);
+int			 ahd_parse_vpddata(struct ahd_softc *ahd,
+					   struct vpd_config *vpd);
+int			 ahd_parse_cfgdata(struct ahd_softc *ahd,
+					   struct seeprom_config *sc);
+void			 ahd_intr_enable(struct ahd_softc *ahd, int enable);
+void			 ahd_update_coalescing_values(struct ahd_softc *ahd,
+						      u_int timer,
+						      u_int maxcmds,
+						      u_int mincmds);
+void			 ahd_enable_coalescing(struct ahd_softc *ahd,
+					       int enable);
+void			 ahd_pause_and_flushwork(struct ahd_softc *ahd);
+int			 ahd_suspend(struct ahd_softc *ahd); 
+int			 ahd_resume(struct ahd_softc *ahd);
+void			 ahd_softc_insert(struct ahd_softc *);
+struct ahd_softc	*ahd_find_softc(struct ahd_softc *ahd);
+void			 ahd_set_unit(struct ahd_softc *, int);
+void			 ahd_set_name(struct ahd_softc *, char *);
+struct scb		*ahd_get_scb(struct ahd_softc *ahd, u_int col_idx);
+void			 ahd_free_scb(struct ahd_softc *ahd, struct scb *scb);
+void			 ahd_alloc_scbs(struct ahd_softc *ahd);
+void			 ahd_free(struct ahd_softc *ahd);
+int			 ahd_reset(struct ahd_softc *ahd, int reinit);
+void			 ahd_shutdown(void *arg);
+int			 ahd_write_flexport(struct ahd_softc *ahd,
+					    u_int addr, u_int value);
+int			 ahd_read_flexport(struct ahd_softc *ahd, u_int addr,
+					   uint8_t *value);
+int			 ahd_wait_flexport(struct ahd_softc *ahd);
+
+/*************************** Interrupt Services *******************************/
+void			ahd_pci_intr(struct ahd_softc *ahd);
+void			ahd_clear_intstat(struct ahd_softc *ahd);
+void			ahd_flush_qoutfifo(struct ahd_softc *ahd);
+void			ahd_run_qoutfifo(struct ahd_softc *ahd);
+#ifdef AHD_TARGET_MODE
+void			ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
+#endif
+void			ahd_handle_hwerrint(struct ahd_softc *ahd);
+void			ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
+void			ahd_handle_scsiint(struct ahd_softc *ahd,
+					   u_int intstat);
+void			ahd_clear_critical_section(struct ahd_softc *ahd);
+
+/***************************** Error Recovery *********************************/
+typedef enum {
+	SEARCH_COMPLETE,
+	SEARCH_COUNT,
+	SEARCH_REMOVE,
+	SEARCH_PRINT
+} ahd_search_action;
+int			ahd_search_qinfifo(struct ahd_softc *ahd, int target,
+					   char channel, int lun, u_int tag,
+					   role_t role, uint32_t status,
+					   ahd_search_action action);
+int			ahd_search_disc_list(struct ahd_softc *ahd, int target,
+					     char channel, int lun, u_int tag,
+					     int stop_on_first, int remove,
+					     int save_state);
+void			ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb);
+int			ahd_reset_channel(struct ahd_softc *ahd, char channel,
+					  int initiate_reset);
+int			ahd_abort_scbs(struct ahd_softc *ahd, int target,
+				       char channel, int lun, u_int tag,
+				       role_t role, uint32_t status);
+void			ahd_restart(struct ahd_softc *ahd);
+void			ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo);
+void			ahd_handle_scb_status(struct ahd_softc *ahd,
+					      struct scb *scb);
+void			ahd_handle_scsi_status(struct ahd_softc *ahd,
+					       struct scb *scb);
+void			ahd_calc_residual(struct ahd_softc *ahd,
+					  struct scb *scb);
+/*************************** Utility Functions ********************************/
+struct ahd_phase_table_entry*
+			ahd_lookup_phase_entry(int phase);
+void			ahd_compile_devinfo(struct ahd_devinfo *devinfo,
+					    u_int our_id, u_int target,
+					    u_int lun, char channel,
+					    role_t role);
+/************************** Transfer Negotiation ******************************/
+void			ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
+					  u_int *ppr_options, u_int maxsync);
+void			ahd_validate_offset(struct ahd_softc *ahd,
+					    struct ahd_initiator_tinfo *tinfo,
+					    u_int period, u_int *offset,
+					    int wide, role_t role);
+void			ahd_validate_width(struct ahd_softc *ahd,
+					   struct ahd_initiator_tinfo *tinfo,
+					   u_int *bus_width,
+					   role_t role);
+/*
+ * Negotiation types.  These are used to qualify if we should renegotiate
+ * even if our goal and current transport parameters are identical.
+ */
+typedef enum {
+	AHD_NEG_TO_GOAL,	/* Renegotiate only if goal and curr differ. */
+	AHD_NEG_IF_NON_ASYNC,	/* Renegotiate so long as goal is non-async. */
+	AHD_NEG_ALWAYS		/* Renegotiate even if goal is async. */
+} ahd_neg_type;
+int			ahd_update_neg_request(struct ahd_softc*,
+					       struct ahd_devinfo*,
+					       struct ahd_tmode_tstate*,
+					       struct ahd_initiator_tinfo*,
+					       ahd_neg_type);
+void			ahd_set_width(struct ahd_softc *ahd,
+				      struct ahd_devinfo *devinfo,
+				      u_int width, u_int type, int paused);
+void			ahd_set_syncrate(struct ahd_softc *ahd,
+					 struct ahd_devinfo *devinfo,
+					 u_int period, u_int offset,
+					 u_int ppr_options,
+					 u_int type, int paused);
+typedef enum {
+	AHD_QUEUE_NONE,
+	AHD_QUEUE_BASIC,
+	AHD_QUEUE_TAGGED
+} ahd_queue_alg;
+
+void			ahd_set_tags(struct ahd_softc *ahd,
+				     struct ahd_devinfo *devinfo,
+				     ahd_queue_alg alg);
+
+/**************************** Target Mode *************************************/
+#ifdef AHD_TARGET_MODE
+void		ahd_send_lstate_events(struct ahd_softc *,
+				       struct ahd_tmode_lstate *);
+void		ahd_handle_en_lun(struct ahd_softc *ahd,
+				  struct cam_sim *sim, union ccb *ccb);
+cam_status	ahd_find_tmode_devs(struct ahd_softc *ahd,
+				    struct cam_sim *sim, union ccb *ccb,
+				    struct ahd_tmode_tstate **tstate,
+				    struct ahd_tmode_lstate **lstate,
+				    int notfound_failure);
+#ifndef AHD_TMODE_ENABLE
+#define AHD_TMODE_ENABLE 0
+#endif
+#endif
+/******************************* Debug ***************************************/
+#ifdef AHD_DEBUG
+extern uint32_t ahd_debug;
+#define AHD_SHOW_MISC		0x00001
+#define AHD_SHOW_SENSE		0x00002
+#define AHD_SHOW_RECOVERY	0x00004
+#define AHD_DUMP_SEEPROM	0x00008
+#define AHD_SHOW_TERMCTL	0x00010
+#define AHD_SHOW_MEMORY		0x00020
+#define AHD_SHOW_MESSAGES	0x00040
+#define AHD_SHOW_MODEPTR	0x00080
+#define AHD_SHOW_SELTO		0x00100
+#define AHD_SHOW_FIFOS		0x00200
+#define AHD_SHOW_QFULL		0x00400
+#define	AHD_SHOW_DV		0x00800
+#define AHD_SHOW_MASKED_ERRORS	0x01000
+#define AHD_SHOW_QUEUE		0x02000
+#define AHD_SHOW_TQIN		0x04000
+#define AHD_SHOW_SG		0x08000
+#define AHD_SHOW_INT_COALESCING	0x10000
+#define AHD_DEBUG_SEQUENCER	0x20000
+#endif
+void			ahd_print_scb(struct scb *scb);
+void			ahd_print_devinfo(struct ahd_softc *ahd,
+					  struct ahd_devinfo *devinfo);
+void			ahd_dump_sglist(struct scb *scb);
+void			ahd_dump_all_cards_state(void);
+void			ahd_dump_card_state(struct ahd_softc *ahd);
+int			ahd_print_register(ahd_reg_parse_entry_t *table,
+					   u_int num_entries,
+					   const char *name,
+					   u_int address,
+					   u_int value,
+					   u_int *cur_column,
+					   u_int wrap_point);
+void			ahd_dump_scbs(struct ahd_softc *ahd);
+#endif /* _AIC79XX_H_ */
diff --git a/drivers/scsi/aic7xxx/aic79xx.reg b/drivers/scsi/aic7xxx/aic79xx.reg
new file mode 100644
index 0000000..cca58ed
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx.reg
@@ -0,0 +1,3958 @@
+/*
+ * Aic79xx register and scratch ram definitions.
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#70 $"
+
+/*
+ * This file is processed by the aic7xxx_asm utility for use in assembling
+ * firmware for the aic79xx family of SCSI host adapters as well as to generate
+ * a C header file for use in the kernel portion of the Aic79xx driver.
+ */
+
+/* Register window Modes */
+#define M_DFF0		0
+#define M_DFF1		1
+#define M_CCHAN		2
+#define M_SCSI		3
+#define M_CFG		4
+#define M_DST_SHIFT	4
+
+#define MK_MODE(src, dst) ((src) | ((dst) << M_DST_SHIFT))
+#define SET_MODE(src, dst)						\
+	SET_SRC_MODE	src;						\
+	SET_DST_MODE	dst;						\
+	if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) {			\
+		mvi	MK_MODE(src, dst) call set_mode_work_around;	\
+	} else {							\
+		mvi	MODE_PTR, MK_MODE(src, dst);			\
+	}
+
+#define TOGGLE_DFF_MODE							\
+	if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) {			\
+		call	toggle_dff_mode_work_around;			\
+	} else {							\
+		xor	MODE_PTR, MK_MODE(M_DFF1, M_DFF1);		\
+	}
+	
+#define RESTORE_MODE(mode)						\
+	if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) {			\
+		mov	mode call set_mode_work_around;			\
+	} else {							\
+		mov	MODE_PTR, mode;					\
+	}
+
+#define SET_SEQINTCODE(code)						\
+	if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {			\
+		mvi	code call set_seqint_work_around;		\
+	} else {							\
+		mvi	SEQINTCODE, code;				\
+	}
+
+/*
+ * Mode Pointer
+ * Controls which of the five 512-byte address spaces should be used
+ * as the source and destination of any register accesses in our
+ * register window.
+ */
+register MODE_PTR {
+	address			0x000
+	access_mode	RW
+	field	DST_MODE	0x70
+	field	SRC_MODE	0x07
+	mode_pointer
+}
+
+const SRC_MODE_SHIFT	0
+const DST_MODE_SHIFT	4
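+
+/*
+ * Example (illustrative): MK_MODE() above packs the source mode into the
+ * low nibble and the destination mode into the high nibble, mirroring the
+ * SRC_MODE/DST_MODE fields of MODE_PTR.  MK_MODE(M_SCSI, M_SCSI) is
+ * therefore 0x33 and MK_MODE(M_DFF0, M_CFG) is 0x40.
+ */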
+
+/*
+ * Host Interrupt Status
+ */
+register INTSTAT {
+	address			0x001
+	access_mode	RW
+	field	HWERRINT	0x80
+	field	BRKADRINT	0x40
+	field	SWTMINT		0x20
+	field	PCIINT		0x10
+	field	SCSIINT		0x08
+	field	SEQINT		0x04
+	field	CMDCMPLT	0x02
+	field	SPLTINT		0x01
+	mask	INT_PEND 0xFF
+}
+
+/*
+ * Sequencer Interrupt Code
+ */
+register SEQINTCODE {
+	address			0x002
+	access_mode	RW
+	field {
+		NO_SEQINT,			/* No seqint pending. */
+		BAD_PHASE,			/* unknown scsi bus phase */
+		SEND_REJECT,			/* sending a message reject */
+		PROTO_VIOLATION, 		/* Protocol Violation */
+		NO_MATCH,			/* no cmd match for reconnect */
+		IGN_WIDE_RES,			/* Complex IGN Wide Res Msg */
+		PDATA_REINIT,			/*
+						 * Returned to data phase
+						 * that requires data
+						 * transfer pointers to be
+						 * recalculated from the
+						 * transfer residual.
+						 */
+		HOST_MSG_LOOP,			/*
+						 * The bus is ready for the
+						 * host to perform another
+						 * message transaction.  This
+						 * mechanism is used for things
+						 * like sync/wide negotiation
+						 * that require a kernel based
+						 * message state engine.
+						 */
+		BAD_STATUS,			/* Bad status from target */
+		DATA_OVERRUN,			/*
+						 * Target attempted to write
+						 * beyond the bounds of its
+						 * command.
+						 */
+		MKMSG_FAILED,			/*
+						 * Target completed command
+						 * without honoring our ATN
+						 * request to issue a message. 
+						 */
+		MISSED_BUSFREE,			/*
+						 * The sequencer never saw
+						 * the bus go free after
+						 * either a command complete
+						 * or disconnect message.
+						 */
+		DUMP_CARD_STATE,
+		ILLEGAL_PHASE,
+		INVALID_SEQINT,
+		CFG4ISTAT_INTR,
+		STATUS_OVERRUN,
+		CFG4OVERRUN,
+		ENTERING_NONPACK,
+		TASKMGMT_FUNC_COMPLETE,		/*
+						 * Task management function
+						 * request completed with
+						 * an expected busfree.
+						 */
+		TASKMGMT_CMD_CMPLT_OKAY,	/*
+						 * A command with a non-zero
+						 * task management function
+						 * has completed via the normal
+						 * command completion method
+						 * for commands with a zero
+						 * task management function.
+						 * This happens when an attempt
+						 * to abort a command loses
+						 * the race for the command to
+						 * complete normally.
+						 */
+		TRACEPOINT0,
+		TRACEPOINT1,
+		TRACEPOINT2,
+		TRACEPOINT3,
+		SAW_HWERR,
+		BAD_SCB_STATUS
+	}
+}
+
+/*
+ * Clear Host Interrupt
+ */
+register CLRINT {
+	address			0x003
+	access_mode	WO
+	field	CLRHWERRINT	0x80 /* Rev B or greater */
+	field	CLRBRKADRINT	0x40
+	field	CLRSWTMINT	0x20
+	field	CLRPCIINT	0x10
+	field	CLRSCSIINT	0x08
+	field	CLRSEQINT	0x04
+	field	CLRCMDINT	0x02
+	field	CLRSPLTINT	0x01
+}
+
+/*
+ * Error Register
+ */
+register ERROR {
+	address			0x004
+	access_mode	RO
+	field	CIOPARERR	0x80
+	field	CIOACCESFAIL	0x40 /* Rev B or greater */
+	field	MPARERR		0x20
+	field	DPARERR		0x10
+	field	SQPARERR	0x08
+	field	ILLOPCODE	0x04
+	field	DSCTMOUT	0x02
+}
+
+/*
+ * Clear Error
+ */
+register CLRERR {
+	address			0x004
+	access_mode 	WO
+	field	CLRCIOPARERR	0x80
+	field	CLRCIOACCESFAIL	0x40 /* Rev B or greater */
+	field	CLRMPARERR	0x20
+	field	CLRDPARERR	0x10
+	field	CLRSQPARERR	0x08
+	field	CLRILLOPCODE	0x04
+	field	CLRDSCTMOUT	0x02
+}
+
+/*
+ * Host Control Register
+ * Overall host control of the device.
+ */
+register HCNTRL {
+	address			0x005
+	access_mode	RW
+	field	SEQ_RESET	0x80 /* Rev B or greater */
+	field	POWRDN		0x40
+	field	SWINT		0x10
+	field	SWTIMER_START_B	0x08 /* Rev B or greater */
+	field	PAUSE		0x04
+	field	INTEN		0x02
+	field	CHIPRST		0x01
+	field	CHIPRSTACK	0x01
+}
+
+/*
+ * Host New SCB Queue Offset
+ */
+register HNSCB_QOFF {
+	address			0x006
+	access_mode	RW
+	size		2
+}
+
+/*
+ * Host Empty SCB Queue Offset
+ */
+register HESCB_QOFF {
+	address			0x008
+	access_mode	RW
+}
+
+/*
+ * Host Mailbox
+ */
+register HS_MAILBOX {
+	address			0x00B
+	access_mode	RW
+	mask	HOST_TQINPOS	0x80	/* Boundary at either 0 or 128 */
+	mask	ENINT_COALESCE	0x40	/* Perform interrupt coalescing */
+}
+
+/*
+ * Sequencer Interrupt Status
+ */
+register SEQINTSTAT {
+	address			0x00C
+	access_mode	RO
+	field	SEQ_SWTMRTO	0x10
+	field	SEQ_SEQINT	0x08
+	field	SEQ_SCSIINT	0x04
+	field	SEQ_PCIINT	0x02
+	field	SEQ_SPLTINT	0x01
+}
+
+/*
+ * Clear SEQ Interrupt
+ */
+register CLRSEQINTSTAT {
+	address			0x00C
+	access_mode	WO
+	field	CLRSEQ_SWTMRTO	0x10
+	field	CLRSEQ_SEQINT	0x08
+	field	CLRSEQ_SCSIINT	0x04
+	field	CLRSEQ_PCIINT	0x02
+	field	CLRSEQ_SPLTINT	0x01
+}
+
+/*
+ * Software Timer
+ */
+register SWTIMER {
+	address			0x00E
+	access_mode	RW
+	size		2
+}
+
+/*
+ * SEQ New SCB Queue Offset
+ */
+register SNSCB_QOFF {
+	address			0x010
+	access_mode	RW
+	size		2
+	modes		M_CCHAN
+}
+
+/*
+ * SEQ Empty SCB Queue Offset
+ */
+register SESCB_QOFF {
+	address			0x012
+	access_mode	RW
+	modes		M_CCHAN
+}
+
+/*
+ * SEQ Done SCB Queue Offset
+ */
+register SDSCB_QOFF {
+	address			0x014
+	access_mode	RW
+	modes		M_CCHAN
+	size		2
+}
+
+/*
+ * Queue Offset Control & Status
+ */
+register QOFF_CTLSTA {
+	address			0x016
+	access_mode	RW
+	modes		M_CCHAN
+	field	EMPTY_SCB_AVAIL	0x80
+	field	NEW_SCB_AVAIL	0x40
+	field	SDSCB_ROLLOVR	0x20
+	field	HS_MAILBOX_ACT	0x10
+	field	SCB_QSIZE	0x0F {
+		SCB_QSIZE_4,
+		SCB_QSIZE_8,
+		SCB_QSIZE_16,
+		SCB_QSIZE_32,
+		SCB_QSIZE_64,
+		SCB_QSIZE_128,
+		SCB_QSIZE_256,
+		SCB_QSIZE_512,
+		SCB_QSIZE_1024,
+		SCB_QSIZE_2048,
+		SCB_QSIZE_4096,
+		SCB_QSIZE_8192,
+		SCB_QSIZE_16384
+	}
+}
+
+/*
+ * Interrupt Control
+ */
+register INTCTL {
+	address			0x018
+	access_mode	RW
+	field	SWTMINTMASK	0x80
+	field	SWTMINTEN	0x40
+	field	SWTIMER_START	0x20
+	field	AUTOCLRCMDINT	0x10
+	field	PCIINTEN	0x08
+	field	SCSIINTEN	0x04
+	field	SEQINTEN	0x02
+	field	SPLTINTEN	0x01
+}
+
+/*
+ * Data FIFO Control
+ */
+register DFCNTRL {
+	address			0x019
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+	field	PRELOADEN	0x80
+	field	SCSIENWRDIS	0x40	/* Rev B only. */
+	field	SCSIEN		0x20
+	field	SCSIENACK	0x20
+	field	HDMAEN		0x08
+	field	HDMAENACK	0x08
+	field	DIRECTION	0x04
+	field	DIRECTIONACK	0x04
+	field	FIFOFLUSH	0x02
+	field	FIFOFLUSHACK	0x02
+	field	DIRECTIONEN	0x01
+}
+
+/*
+ * Device Space Command 0
+ */
+register DSCOMMAND0 {
+	address			0x019
+	access_mode	RW
+	modes		M_CFG
+	field	CACHETHEN	0x80	/* Cache Threshold enable */
+	field	DPARCKEN	0x40	/* Data Parity Check Enable */
+	field	MPARCKEN	0x20	/* Memory Parity Check Enable */
+	field	EXTREQLCK	0x10	/* External Request Lock */
+	field	DISABLE_TWATE	0x02	/* Rev B or greater */
+	field	CIOPARCKEN	0x01	/* Internal bus parity error enable */
+}
+
+/*
+ * Data FIFO Status
+ */
+register DFSTATUS {
+	address			0x01A
+	access_mode	RO
+	modes		M_DFF0, M_DFF1
+	field	PRELOAD_AVAIL		0x80
+	field	PKT_PRELOAD_AVAIL	0x40
+	field	MREQPEND		0x10
+	field	HDONE			0x08
+	field	DFTHRESH		0x04
+	field	FIFOFULL		0x02
+	field	FIFOEMP			0x01
+}
+
+/*
+ * S/G Cache Pointer
+ */
+register SG_CACHE_PRE {
+	address			0x01B
+	access_mode	WO
+	modes		M_DFF0, M_DFF1
+	field	SG_ADDR_MASK	0xf8
+	field	ODD_SEG		0x04
+	field	LAST_SEG	0x02
+}
+
+register SG_CACHE_SHADOW {
+	address			0x01B
+	access_mode	RO
+	modes		M_DFF0, M_DFF1
+	field	SG_ADDR_MASK	0xf8
+	field	ODD_SEG		0x04
+	field	LAST_SEG	0x02
+	field	LAST_SEG_DONE	0x01
+}
+
+/*
+ * Arbiter Control
+ */
+register ARBCTL {
+	address			0x01B
+	access_mode	RW
+	modes		M_CFG
+	field	RESET_HARB	0x80
+	field	RETRY_SWEN	0x08
+	field	USE_TIME	0x07
+}
+
+/*
+ * Data Channel Host Address
+ */
+register HADDR {
+	address			0x070
+	access_mode	RW
+	size		8
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * Host Overlay DMA Address
+ */
+register HODMAADR {
+	address			0x070
+	access_mode	RW
+	size		8
+	modes		M_SCSI
+}
+
+/*
+ * PCI PLL Delay.
+ */
+register PLLDELAY {
+	address			0x070
+	access_mode	RW
+	size		1
+	modes		M_CFG
+	field	SPLIT_DROP_REQ	0x80
+}
+
+/*
+ * Data Channel Host Count
+ */
+register HCNT {
+	address			0x078
+	access_mode	RW
+	size		3
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * Host Overlay DMA Count
+ */
+register HODMACNT {
+	address			0x078
+	access_mode	RW
+	size		2
+	modes		M_SCSI
+}
+
+/*
+ * Host Overlay DMA Enable
+ */
+register HODMAEN {
+	address			0x07A
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * Scatter/Gather Host Address
+ */
+register SGHADDR {
+	address			0x07C
+	access_mode	RW
+	size		8
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * SCB Host Address
+ */
+register SCBHADDR {
+	address			0x07C
+	access_mode	RW
+	size		8
+	modes		M_CCHAN
+}
+
+/*
+ * Scatter/Gather Host Count
+ */
+register SGHCNT {
+	address			0x084
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * SCB Host Count
+ */
+register SCBHCNT {
+	address			0x084
+	access_mode	RW
+	modes		M_CCHAN
+}
+
+/*
+ * Data FIFO Threshold
+ */
+register DFF_THRSH {
+	address			0x088
+	access_mode	RW
+	modes		M_CFG
+	field	WR_DFTHRSH	0x70 {
+		WR_DFTHRSH_MIN,
+		WR_DFTHRSH_25,
+		WR_DFTHRSH_50,
+		WR_DFTHRSH_63,
+		WR_DFTHRSH_75,
+		WR_DFTHRSH_85,
+		WR_DFTHRSH_90,
+		WR_DFTHRSH_MAX
+	}
+	field	RD_DFTHRSH	0x07 {
+		RD_DFTHRSH_MIN,
+		RD_DFTHRSH_25,
+		RD_DFTHRSH_50,
+		RD_DFTHRSH_63,
+		RD_DFTHRSH_75,
+		RD_DFTHRSH_85,
+		RD_DFTHRSH_90,
+		RD_DFTHRSH_MAX
+	}
+}
+
+/*
+ * ROM Address
+ */
+register ROMADDR {
+	address			0x08A
+	access_mode	RW
+	size		3
+}
+
+/*
+ * ROM Control
+ */
+register ROMCNTRL {
+	address			0x08D
+	access_mode	RW
+	field	ROMOP		0xE0
+	field	ROMSPD		0x18
+	field	REPEAT		0x02
+	field	RDY		0x01
+}
+
+/*
+ * ROM Data
+ */
+register ROMDATA {
+	address			0x08E
+	access_mode	RW
+}
+
+/*
+ * Data Channel Receive Message 0
+ */
+register DCHRXMSG0 {
+	address			0x090
+	access_mode	RO
+	modes		M_DFF0, M_DFF1
+	field		CDNUM	0xF8
+	field		CFNUM	0x07
+}
+
+/*
+ * CMC Receive Message 0
+ */
+register CMCRXMSG0 {
+	address			0x090
+	access_mode	RO
+	modes		M_CCHAN
+	field		CDNUM	0xF8
+	field		CFNUM	0x07
+}
+
+/*
+ * Overlay Receive Message 0
+ */
+register OVLYRXMSG0 {
+	address			0x090
+	access_mode	RO
+	modes		M_SCSI
+	field		CDNUM	0xF8
+	field		CFNUM	0x07
+}
+
+/*
+ * Relaxed Order Enable
+ */
+register ROENABLE {
+	address			0x090
+	access_mode	RW
+	modes		M_CFG
+	field	MSIROEN		0x20
+	field	OVLYROEN	0x10
+	field	CMCROEN		0x08
+	field	SGROEN		0x04
+	field	DCH1ROEN	0x02
+	field	DCH0ROEN	0x01
+}
+
+/*
+ * Data Channel Receive Message 1
+ */
+register DCHRXMSG1 {
+	address			0x091
+	access_mode	RO
+	modes		M_DFF0, M_DFF1
+	field	CBNUM		0xFF
+}
+
+/*
+ * CMC Receive Message 1
+ */
+register CMCRXMSG1 {
+	address			0x091
+	access_mode	RO
+	modes		M_CCHAN
+	field	CBNUM		0xFF
+}
+
+/*
+ * Overlay Receive Message 1
+ */
+register OVLYRXMSG1 {
+	address			0x091
+	access_mode	RO
+	modes		M_SCSI
+	field	CBNUM		0xFF
+}
+
+/*
+ * No Snoop Enable
+ */
+register NSENABLE {
+	address			0x091
+	access_mode	RW
+	modes		M_CFG
+	field	MSINSEN		0x20
+	field	OVLYNSEN	0x10
+	field	CMCNSEN		0x08
+	field	SGNSEN		0x04
+	field	DCH1NSEN	0x02
+	field	DCH0NSEN	0x01
+}
+
+/*
+ * Data Channel Receive Message 2
+ */
+register DCHRXMSG2 {
+	address			0x092
+	access_mode	RO
+	modes		M_DFF0, M_DFF1
+	field	MINDEX		0xFF
+}
+
+/*
+ * CMC Receive Message 2
+ */
+register CMCRXMSG2 {
+	address			0x092
+	access_mode	RO
+	modes		M_CCHAN
+	field	MINDEX		0xFF
+}
+
+/*
+ * Overlay Receive Message 2
+ */
+register OVLYRXMSG2 {
+	address			0x092
+	access_mode	RO
+	modes		M_SCSI
+	field	MINDEX		0xFF
+}
+
+/*
+ * Outstanding Split Transactions
+ */
+register OST {
+	address			0x092
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Data Channel Receive Message 3
+ */
+register DCHRXMSG3 {
+	address			0x093
+	access_mode	RO
+	modes		M_DFF0, M_DFF1
+	field	MCLASS		0x0F
+}
+
+/*
+ * CMC Receive Message 3
+ */
+register CMCRXMSG3 {
+	address			0x093
+	access_mode	RO
+	modes		M_CCHAN
+	field	MCLASS		0x0F
+}
+
+/*
+ * Overlay Receive Message 3
+ */
+register OVLYRXMSG3 {
+	address			0x093
+	access_mode	RO
+	modes		M_SCSI
+	field	MCLASS		0x0F
+}
+
+/*
+ * PCI-X Control
+ */
+register PCIXCTL {
+	address			0x093
+	access_mode	RW
+	modes		M_CFG
+	field	SERRPULSE	0x80
+	field	UNEXPSCIEN	0x20
+	field	SPLTSMADIS	0x10
+	field	SPLTSTADIS	0x08
+	field	SRSPDPEEN	0x04
+	field	TSCSERREN	0x02
+	field	CMPABCDIS	0x01
+}
+
+/*
+ * CMC Sequencer Byte Count
+ */
+register CMCSEQBCNT {
+	address			0x094
+	access_mode	RO
+	modes		M_CCHAN
+}
+
+/*
+ * Overlay Sequencer Byte Count
+ */
+register OVLYSEQBCNT {
+	address			0x094
+	access_mode	RO
+	modes		M_SCSI
+}
+
+/*
+ * Data Channel Sequencer Byte Count
+ */
+register DCHSEQBCNT {
+	address			0x094
+	access_mode	RO
+	size		2
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * Data Channel Split Status 0
+ */
+register DCHSPLTSTAT0 {
+	address			0x096
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+	field	STAETERM	0x80
+	field	SCBCERR		0x40
+	field	SCADERR		0x20
+	field	SCDATBUCKET	0x10
+	field	CNTNOTCMPLT	0x08
+	field	RXOVRUN		0x04
+	field	RXSCEMSG	0x02
+	field	RXSPLTRSP	0x01
+}
+
+/*
+ * CMC Split Status 0
+ */
+register CMCSPLTSTAT0 {
+	address			0x096
+	access_mode	RW
+	modes		M_CCHAN
+	field	STAETERM	0x80
+	field	SCBCERR		0x40
+	field	SCADERR		0x20
+	field	SCDATBUCKET	0x10
+	field	CNTNOTCMPLT	0x08
+	field	RXOVRUN		0x04
+	field	RXSCEMSG	0x02
+	field	RXSPLTRSP	0x01
+}
+
+/*
+ * Overlay Split Status 0
+ */
+register OVLYSPLTSTAT0 {
+	address			0x096
+	access_mode	RW
+	modes		M_SCSI
+	field	STAETERM	0x80
+	field	SCBCERR		0x40
+	field	SCADERR		0x20
+	field	SCDATBUCKET	0x10
+	field	CNTNOTCMPLT	0x08
+	field	RXOVRUN		0x04
+	field	RXSCEMSG	0x02
+	field	RXSPLTRSP	0x01
+}
+
+/*
+ * Data Channel Split Status 1
+ */
+register DCHSPLTSTAT1 {
+	address			0x097
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+	field	RXDATABUCKET	0x01
+}
+
+/*
+ * CMC Split Status 1
+ */
+register CMCSPLTSTAT1 {
+	address			0x097
+	access_mode	RW
+	modes		M_CCHAN
+	field	RXDATABUCKET	0x01
+}
+
+/*
+ * Overlay Split Status 1
+ */
+register OVLYSPLTSTAT1 {
+	address			0x097
+	access_mode	RW
+	modes		M_SCSI
+	field	RXDATABUCKET	0x01
+}
+
+/*
+ * S/G Receive Message 0
+ */
+register SGRXMSG0 {
+	address			0x098
+	access_mode	RO
+	modes		M_DFF0, M_DFF1
+	field		CDNUM	0xF8
+	field		CFNUM	0x07
+}
+
+/*
+ * S/G Receive Message 1
+ */
+register SGRXMSG1 {
+	address			0x099
+	access_mode	RO
+	modes		M_DFF0, M_DFF1
+	field	CBNUM		0xFF
+}
+
+/*
+ * S/G Receive Message 2
+ */
+register SGRXMSG2 {
+	address			0x09A
+	access_mode	RO
+	modes		M_DFF0, M_DFF1
+	field	MINDEX		0xFF
+}
+
+/*
+ * S/G Receive Message 3
+ */
+register SGRXMSG3 {
+	address			0x09B
+	access_mode	RO
+	modes		M_DFF0, M_DFF1
+	field	MCLASS		0x0F
+}
+
+/*
+ * Slave Split Out Address 0
+ */
+register SLVSPLTOUTADR0 {
+	address			0x098
+	access_mode	RO
+	modes		M_SCSI
+	field	LOWER_ADDR	0x7F
+}
+
+/*
+ * Slave Split Out Address 1
+ */
+register SLVSPLTOUTADR1 {
+	address			0x099
+	access_mode	RO
+	modes		M_SCSI
+	field	REQ_DNUM	0xF8
+	field	REQ_FNUM	0x07
+}
+
+/*
+ * Slave Split Out Address 2
+ */
+register SLVSPLTOUTADR2 {
+	address			0x09A
+	access_mode	RO
+	modes		M_SCSI
+	field	REQ_BNUM	0xFF
+}
+
+/*
+ * Slave Split Out Address 3
+ */
+register SLVSPLTOUTADR3 {
+	address			0x09B
+	access_mode	RO
+	modes		M_SCSI
+	field	RLXORD		020
+	field	TAG_NUM		0x1F
+}
+
+/*
+ * SG Sequencer Byte Count
+ */
+register SGSEQBCNT {
+	address			0x09C
+	access_mode	RO
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * Slave Split Out Attribute 0
+ */
+register SLVSPLTOUTATTR0 {
+	address			0x09C
+	access_mode	RO
+	modes		M_SCSI
+	field	LOWER_BCNT	0xFF
+}
+
+/*
+ * Slave Split Out Attribute 1
+ */
+register SLVSPLTOUTATTR1 {
+	address			0x09D
+	access_mode	RO
+	modes		M_SCSI
+	field	CMPLT_DNUM	0xF8
+	field	CMPLT_FNUM	0x07
+}
+
+/*
+ * Slave Split Out Attribute 2
+ */
+register SLVSPLTOUTATTR2 {
+	address			0x09E
+	access_mode	RO
+	size		2
+	modes		M_SCSI
+	field	CMPLT_BNUM	0xFF
+}
+/*
+ * S/G Split Status 0
+ */
+register SGSPLTSTAT0 {
+	address			0x09E
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+	field	STAETERM	0x80
+	field	SCBCERR		0x40
+	field	SCADERR		0x20
+	field	SCDATBUCKET	0x10
+	field	CNTNOTCMPLT	0x08
+	field	RXOVRUN		0x04
+	field	RXSCEMSG	0x02
+	field	RXSPLTRSP	0x01
+}
+
+/*
+ * S/G Split Status 1
+ */
+register SGSPLTSTAT1 {
+	address			0x09F
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+	field	RXDATABUCKET	0x01
+}
+
+/*
+ * Special Function
+ */
+register SFUNCT {
+	address			0x09f
+	access_mode	RW
+	modes		M_CFG
+	field	TEST_GROUP	0xF0
+	field	TEST_NUM	0x0F
+}
+
+/*
+ * Data FIFO 0 PCI Status 
+ */
+register DF0PCISTAT {
+	address			0x0A0
+	access_mode	RW
+	modes		M_CFG
+	field	DPE		0x80
+	field	SSE		0x40
+	field	RMA		0x20
+	field	RTA		0x10
+	field	SCAAPERR	0x08
+	field	RDPERR		0x04
+	field	TWATERR		0x02
+	field	DPR		0x01
+}
+
+/*
+ * Data FIFO 1 PCI Status 
+ */
+register DF1PCISTAT {
+	address			0x0A1
+	access_mode	RW
+	modes		M_CFG
+	field	DPE		0x80
+	field	SSE		0x40
+	field	RMA		0x20
+	field	RTA		0x10
+	field	SCAAPERR	0x08
+	field	RDPERR		0x04
+	field	TWATERR		0x02
+	field	DPR		0x01
+}
+
+/*
+ * S/G PCI Status 
+ */
+register SGPCISTAT {
+	address			0x0A2
+	access_mode	RW
+	modes		M_CFG
+	field	DPE		0x80
+	field	SSE		0x40
+	field	RMA		0x20
+	field	RTA		0x10
+	field	SCAAPERR	0x08
+	field	RDPERR		0x04
+	field	DPR		0x01
+}
+
+/*
+ * CMC PCI Status 
+ */
+register CMCPCISTAT {
+	address			0x0A3
+	access_mode	RW
+	modes		M_CFG
+	field	DPE		0x80
+	field	SSE		0x40
+	field	RMA		0x20
+	field	RTA		0x10
+	field	SCAAPERR	0x08
+	field	RDPERR		0x04
+	field	TWATERR		0x02
+	field	DPR		0x01
+}
+
+/*
+ * Overlay PCI Status 
+ */
+register OVLYPCISTAT {
+	address			0x0A4
+	access_mode	RW
+	modes		M_CFG
+	field	DPE		0x80
+	field	SSE		0x40
+	field	RMA		0x20
+	field	RTA		0x10
+	field	SCAAPERR	0x08
+	field	RDPERR		0x04
+	field	DPR		0x01
+}
+
+/*
+ * PCI Status for MSI Master DMA Transfer
+ */
+register MSIPCISTAT {
+	address			0x0A6
+	access_mode	RW
+	modes		M_CFG
+	field	SSE		0x40
+	field	RMA		0x20
+	field	RTA		0x10
+	field	CLRPENDMSI	0x08
+	field	TWATERR		0x02
+	field	DPR		0x01
+}
+
+/*
+ * PCI Status for Target
+ */
+register TARGPCISTAT {
+	address			0x0A7
+	access_mode	RW
+	modes		M_CFG
+	field	DPE		0x80
+	field	SSE		0x40
+	field	STA		0x08
+	field	TWATERR		0x02
+}
+
+/*
+ * LQ Packet In
+ * The last LQ Packet received
+ */
+register LQIN {
+	address			0x020
+	access_mode	RW
+	size		20
+	modes		M_DFF0, M_DFF1, M_SCSI
+}
+
+/*
+ * SCB Type Pointer
+ * SCB offset for Target Mode SCB type information
+ */
+register TYPEPTR {
+	address			0x020
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Queue Tag Pointer
+ * SCB offset to the two-byte tag identifier used for target mode.
+ */
+register TAGPTR {
+	address			0x021
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Logical Unit Number Pointer
+ * SCB offset to the LSB (little endian) of the lun field.
+ */
+register LUNPTR {
+	address			0x022
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Data Length Pointer
+ * SCB offset for the 4 byte data length field in target mode.
+ */
+register DATALENPTR {
+	address			0x023
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Status Length Pointer
+ * SCB offset to the two byte status field in target SCBs.
+ */
+register STATLENPTR {
+	address			0x024
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Command Length Pointer
+ * Scb offset for the CDB length field in initiator SCBs.
+ */
+register CMDLENPTR {
+	address			0x025
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Task Attribute Pointer
+ * Scb offset for the byte field specifying the attribute byte
+ * to be used in command packets.
+ */ 
+register ATTRPTR {
+	address			0x026
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Task Management Flags Pointer
+ * Scb offset for the byte field specifying the attribute flags
+ * byte to be used in command packets.
+ */ 
+register FLAGPTR {
+	address			0x027
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Command Pointer
+ * Scb offset for the first byte in the CDB for initiator SCBs.
+ */
+register CMDPTR {
+	address			0x028
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Queue Next Pointer
+ * Scb offset for the 2 byte "next scb link".
+ */
+register QNEXTPTR {
+	address			0x029
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * SCSI ID Pointer
+ * Scb offset to the value to place in the SCSIID register
+ * during target mode connections.
+ */
+register IDPTR {
+	address			0x02A
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Command Aborted Byte Pointer
+ * Offset to the SCB flags field that includes the
+ * "SCB aborted" status bit.
+ */
+register ABRTBYTEPTR {
+	address			0x02B
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Command Aborted Bit Pointer
+ * Bit offset in the SCB flags field for "SCB aborted" status.
+ */
+register ABRTBITPTR {
+	address			0x02C
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Rev B or greater.
+ */
+register MAXCMDBYTES {
+	address			0x02D
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Rev B or greater.
+ */
+register MAXCMD2RCV {
+	address			0x02E
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Rev B or greater.
+ */
+register SHORTTHRESH {
+	address			0x02F
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Logical Unit Number Length
+ * The length, in bytes, of the SCB lun field.
+ */
+register LUNLEN {
+	address			0x030
+	access_mode	RW
+	modes		M_CFG
+	mask		ILUNLEN	0x0F
+	mask		TLUNLEN	0xF0
+}
+const LUNLEN_SINGLE_LEVEL_LUN 0xF
+
+/*
+ * CDB Limit
+ * The size, in bytes, of the embedded CDB field in initiator SCBs.
+ */
+register CDBLIMIT {
+	address			0x031
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Maximum Commands
+ * The maximum number of commands to issue during a
+ * single packetized connection.
+ */
+register MAXCMD {
+	address			0x032
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * Maximum Command Counter
+ * The number of commands already sent during this connection
+ */
+register MAXCMDCNT {
+	address			0x033
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * LQ Packet Reserved Bytes
+ * The bytes to be sent in the currently reserved fields
+ * of all LQ packets.
+ */
+register LQRSVD01 {
+	address			0x034
+	access_mode	RW
+	modes		M_SCSI
+}
+register LQRSVD16 {
+	address			0x035
+	access_mode	RW
+	modes		M_SCSI
+}
+register LQRSVD17 {
+	address			0x036
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * Command Reserved 0
+ * The byte to be sent for the reserved byte 0 of
+ * outgoing command packets.
+ */
+register CMDRSVD0 {
+	address			0x037
+	access_mode	RW
+	modes		M_CFG
+}
+
+/*
+ * LQ Manager Control 0
+ */
+register LQCTL0 {
+	address			0x038
+	access_mode	RW
+	modes		M_CFG
+	field	LQITARGCLT	0xC0
+	field	LQIINITGCLT	0x30
+	field	LQ0TARGCLT	0x0C
+	field	LQ0INITGCLT	0x03
+}
+
+/*
+ * LQ Manager Control 1
+ */
+register LQCTL1 {
+	address			0x038
+	access_mode	RW
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	PCI2PCI		0x04
+	field	SINGLECMD	0x02
+	field	ABORTPENDING	0x01
+}
+
+/*
+ * LQ Manager Control 2
+ */
+register LQCTL2 {
+	address			0x039
+	access_mode	RW
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	LQIRETRY	0x80
+	field	LQICONTINUE	0x40
+	field	LQITOIDLE	0x20
+	field	LQIPAUSE	0x10
+	field	LQORETRY	0x08
+	field	LQOCONTINUE	0x04
+	field	LQOTOIDLE	0x02
+	field	LQOPAUSE	0x01
+}
+
+/*
+ * SCSI RAM BIST0
+ */
+register SCSBIST0 {
+	address			0x039
+	access_mode	RW
+	modes		M_CFG
+	field	GSBISTERR	0x40
+	field	GSBISTDONE	0x20
+	field	GSBISTRUN	0x10
+	field	OSBISTERR	0x04
+	field	OSBISTDONE	0x02
+	field	OSBISTRUN	0x01
+}
+
+/*
+ * SCSI Sequence Control0
+ */
+register SCSISEQ0 {
+	address			0x03A
+	access_mode	RW
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	TEMODEO		0x80
+	field	ENSELO		0x40
+	field	ENARBO		0x20
+	field	FORCEBUSFREE	0x10
+	field	SCSIRSTO	0x01
+}
+
+/*
+ * SCSI RAM BIST 1
+ */
+register SCSBIST1 {
+	address			0x03A
+	access_mode	RW
+	modes		M_CFG
+	field	NTBISTERR	0x04
+	field	NTBISTDONE	0x02
+	field	NTBISTRUN	0x01
+}
+
+/*
+ * SCSI Sequence Control 1
+ */
+register SCSISEQ1 {
+	address			0x03B
+	access_mode	RW
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	MANUALCTL	0x40
+	field	ENSELI		0x20
+	field	ENRSELI		0x10
+	field	MANUALP		0x0C
+	field	ENAUTOATNP	0x02
+	field	ALTSTIM		0x01
+}
+
+/*
+ * SCSI Transfer Control 0
+ */
+register SXFRCTL0 {
+	address			0x03C
+	access_mode	RW
+	modes		M_SCSI
+	field	DFON		0x80
+	field	DFPEXP		0x40
+	field	BIOSCANCELEN	0x10
+	field	SPIOEN		0x08
+}
+
+/*
+ * SCSI Transfer Control 1
+ */
+register SXFRCTL1 {
+	address			0x03D
+	access_mode	RW
+	modes		M_SCSI
+	field	BITBUCKET	0x80
+	field	ENSACHK		0x40
+	field	ENSPCHK		0x20
+	field	STIMESEL	0x18
+	field	ENSTIMER	0x04
+	field	ACTNEGEN	0x02
+	field	STPWEN		0x01
+}
+
+/*
+ * SCSI Transfer Control 2
+ */
+register SXFRCTL2 {
+	address			0x03E
+	access_mode	RW
+	modes		M_SCSI
+	field	AUTORSTDIS	0x10
+	field	CMDDMAEN	0x08
+	field	ASU		0x07
+}
+
+/*
+ * SCSI Bus Initiator IDs
+ * Bitmask of observed initiators on the bus.
+ */
+register BUSINITID {
+	address			0x03C
+	access_mode	RW
+	modes		M_CFG
+	size		2
+}
+
+/*
+ * Data Length Counters
+ * Packet byte counter.
+ */
+register DLCOUNT {
+	address			0x03C
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+	size		3
+}
+
+/*
+ * Data FIFO Status
+ */
+register DFFSTAT {
+	address			0x03F
+	access_mode	RW
+	modes		M_SCSI
+	field	FIFO1FREE	0x20
+	field	FIFO0FREE	0x10
+	/*
+	 * On the B, this enum only works
+	 * in the read direction.  For writes,
+	 * you must use the B version of the
+	 * CURRFIFO_0 definition which is defined
+	 * as a constant outside of this register
+	 * definition to avoid confusing the
+	 * register pretty printing code.
+	 */
+	enum	CURRFIFO	0x03 {
+		CURRFIFO_0,
+		CURRFIFO_1,
+		CURRFIFO_NONE	0x3
+	}
+}
+
+const B_CURRFIFO_0 0x2
+
+/*
+ * SCSI Bus Target IDs
+ * Bitmask of observed targets on the bus.
+ */
+register BUSTARGID {
+	address			0x03E
+	access_mode	RW
+	modes		M_CFG
+	size		2
+}
+
+/*
+ * SCSI Control Signal Out
+ */
+register SCSISIGO {
+	address			0x040
+	access_mode	RW
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	CDO		0x80
+	field	IOO		0x40
+	field	MSGO		0x20
+	field	ATNO		0x10
+	field	SELO		0x08
+	field	BSYO		0x04
+	field	REQO		0x02
+	field	ACKO		0x01
+/*
+ * Possible phases to write into SCSISIG0
+ */
+	enum	PHASE_MASK  CDO|IOO|MSGO {
+		P_DATAOUT	0x0,
+		P_DATAIN	IOO,
+		P_DATAOUT_DT	P_DATAOUT|MSGO,
+		P_DATAIN_DT	P_DATAIN|MSGO,
+		P_COMMAND	CDO,
+		P_MESGOUT	CDO|MSGO,
+		P_STATUS	CDO|IOO,
+		P_MESGIN	CDO|IOO|MSGO
+	}
+}
+
+register SCSISIGI {
+	address			0x041
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	CDI		0x80
+	field	IOI		0x40
+	field	MSGI		0x20
+	field	ATNI		0x10
+	field	SELI		0x08
+	field	BSYI		0x04
+	field	REQI		0x02
+	field	ACKI		0x01
+/*
+ * Possible phases in SCSISIGI
+ */
+	enum	PHASE_MASK  CDO|IOO|MSGO {
+		P_DATAOUT	0x0,
+		P_DATAIN	IOO,
+		P_DATAOUT_DT	P_DATAOUT|MSGO,
+		P_DATAIN_DT	P_DATAIN|MSGO,
+		P_COMMAND	CDO,
+		P_MESGOUT	CDO|MSGO,
+		P_STATUS	CDO|IOO,
+		P_MESGIN	CDO|IOO|MSGO
+	}
+}
+
+/*
+ * Multiple Target IDs
+ * Bitmask of ids to respond as a target.
+ */
+register MULTARGID {
+	address			0x040
+	access_mode	RW
+	modes		M_CFG
+	size		2
+}
+
+/*
+ * SCSI Phase
+ */
+register SCSIPHASE {
+	address			0x042
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	STATUS_PHASE	0x20
+	field	COMMAND_PHASE	0x10
+	field	MSG_IN_PHASE	0x08
+	field	MSG_OUT_PHASE	0x04
+	field	DATA_PHASE_MASK	0x03 {
+		DATA_OUT_PHASE	0x01,
+		DATA_IN_PHASE	0x02
+	}
+}
+
+/*
+ * SCSI Data 0 Image
+ */
+register SCSIDAT0_IMG {
+	address			0x043
+	access_mode	RW
+	modes		M_DFF0, M_DFF1, M_SCSI
+}
+
+/*
+ * SCSI Latched Data
+ */
+register SCSIDAT {
+	address			0x044
+	access_mode	RW
+	modes		M_DFF0, M_DFF1, M_SCSI
+	size		2
+}
+
+/*
+ * SCSI Data Bus
+ */
+register SCSIBUS {
+	address			0x046
+	access_mode	RW
+	modes		M_DFF0, M_DFF1, M_SCSI
+	size		2
+}
+
+/*
+ * Target ID In
+ */
+register TARGIDIN {
+	address			0x048
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	CLKOUT		0x80
+	field	TARGID		0x0F
+}
+
+/*
+ * Selection/Reselection ID
+ * Upper four bits are the device id.  The ONEBIT is set when the re/selecting
+ * device did not set its own ID.
+ */
+register SELID {
+	address			0x049
+	access_mode	RW
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	SELID_MASK	0xf0
+	field	ONEBIT		0x08
+}
+
+/*
+ * SCSI Block Control
+ * Controls Bus type and channel selection.  SELWIDE allows for the
+ * coexistence of 8bit and 16bit devices on a wide bus.
+ */
+register SBLKCTL {
+	address			0x04A
+	access_mode	RW
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	DIAGLEDEN	0x80
+	field	DIAGLEDON	0x40
+	field	ENAB40		0x08	/* LVD transceiver active */
+	field	ENAB20		0x04	/* SE/HVD transceiver active */
+	field	SELWIDE		0x02
+}
+
+/*
+ * Option Mode
+ */
+register OPTIONMODE {
+	address			0x04A
+	access_mode	RW
+	modes		M_CFG
+	field	BIOSCANCTL		0x80
+	field	AUTOACKEN		0x40
+	field	BIASCANCTL		0x20
+	field	BUSFREEREV		0x10
+	field	ENDGFORMCHK		0x04
+	field	AUTO_MSGOUT_DE		0x02
+	mask	OPTIONMODE_DEFAULTS	AUTO_MSGOUT_DE
+}
+
+/*
+ * SCSI Status 0
+ */
+register SSTAT0	{
+	address			0x04B
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	TARGET		0x80	/* Board acting as target */
+	field	SELDO		0x40	/* Selection Done */
+	field	SELDI		0x20	/* Board has been selected */
+	field	SELINGO		0x10	/* Selection In Progress */
+	field	IOERR		0x08	/* LVD Transceiver mode changed */
+	field	OVERRUN		0x04	/* SCSI Offset overrun detected */
+	field	SPIORDY		0x02	/* SCSI PIO Ready */
+	field	ARBDO		0x01	/* Arbitration Done Out */
+}
+
+/*
+ * Clear SCSI Interrupt 0
+ * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0.
+ */
+register CLRSINT0 {
+	address			0x04B
+	access_mode	WO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	CLRSELDO	0x40
+	field	CLRSELDI	0x20
+	field	CLRSELINGO	0x10
+	field	CLRIOERR	0x08
+	field	CLROVERRUN	0x04
+	field	CLRSPIORDY	0x02
+	field	CLRARBDO	0x01
+}
+
+/*
+ * SCSI Interrupt Mode 0
+ * Setting any bit will enable the corresponding function
+ * in SIMODE0 to interrupt via the IRQ pin.
+ */
+register SIMODE0 {
+	address			0x04B
+	access_mode	RW
+	modes		M_CFG
+	field	ENSELDO		0x40
+	field	ENSELDI		0x20
+	field	ENSELINGO	0x10
+	field	ENIOERR		0x08
+	field	ENOVERRUN	0x04
+	field	ENSPIORDY	0x02
+	field	ENARBDO		0x01
+}
+
+/*
+ * SCSI Status 1
+ */
+register SSTAT1 {
+	address			0x04C
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	SELTO		0x80
+	field	ATNTARG 	0x40
+	field	SCSIRSTI	0x20
+	field	PHASEMIS	0x10
+	field	BUSFREE		0x08
+	field	SCSIPERR	0x04
+	field	STRB2FAST	0x02
+	field	REQINIT		0x01
+}
+
+/*
+ * Clear SCSI Interrupt 1
+ * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1.
+ */
+register CLRSINT1 {
+	address			0x04C
+	access_mode	WO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	CLRSELTIMEO	0x80
+	field	CLRATNO		0x40
+	field	CLRSCSIRSTI	0x20
+	field	CLRBUSFREE	0x08
+	field	CLRSCSIPERR	0x04
+	field	CLRSTRB2FAST	0x02
+	field	CLRREQINIT	0x01
+}
+
+/*
+ * SCSI Status 2
+ */
+register SSTAT2 {
+	address			0x04d
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	BUSFREETIME	0xc0 {
+		BUSFREE_LQO	0x40,
+		BUSFREE_DFF0	0x80,
+		BUSFREE_DFF1	0xC0
+	}
+	field	NONPACKREQ	0x20
+	field	EXP_ACTIVE	0x10	/* SCSI Expander Active */
+	field	BSYX		0x08	/* Busy Expander */
+	field	WIDE_RES	0x04	/* Modes 0 and 1 only */
+	field	SDONE		0x02	/* Modes 0 and 1 only */
+	field	DMADONE		0x01	/* Modes 0 and 1 only */
+}
+
+/*
+ * Clear SCSI Interrupt 2
+ */
+register CLRSINT2 {
+	address			0x04D
+	access_mode	WO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	CLRNONPACKREQ	0x20
+	field	CLRWIDE_RES	0x04	/* Modes 0 and 1 only */
+	field	CLRSDONE	0x02	/* Modes 0 and 1 only */
+	field	CLRDMADONE	0x01	/* Modes 0 and 1 only */
+}
+
+/*
+ * SCSI Interrupt Mode 2
+ */
+register SIMODE2 {
+	address			0x04D
+	access_mode	RW
+	modes		M_CFG
+	field	ENWIDE_RES	0x04
+	field	ENSDONE		0x02
+	field	ENDMADONE	0x01
+}
+
+/*
+ * Physical Error Diagnosis
+ */
+register PERRDIAG {
+	address			0x04E
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	HIZERO		0x80
+	field	HIPERR		0x40
+	field	PREVPHASE	0x20
+	field	PARITYERR	0x10
+	field	AIPERR		0x08
+	field	CRCERR		0x04
+	field	DGFORMERR	0x02
+	field	DTERR		0x01
+}
+
+/*
+ * LQI Manager Current State
+ */
+register LQISTATE {
+	address			0x04E
+	access_mode	RO
+	modes		M_CFG
+}
+
+/*
+ * SCSI Offset Count
+ */
+register SOFFCNT {
+	address			0x04F
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+}
+
+/*
+ * LQO Manager Current State
+ */
+register LQOSTATE {
+	address			0x04F
+	access_mode	RO
+	modes		M_CFG
+}
+
+/*
+ * LQI Manager Status
+ */
+register LQISTAT0 {
+	address			0x050
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	LQIATNQAS	0x20
+	field	LQICRCT1	0x10
+	field	LQICRCT2	0x08
+	field	LQIBADLQT	0x04
+	field	LQIATNLQ	0x02
+	field	LQIATNCMD	0x01
+}
+
+/*
+ * Clear LQI Interrupts 0
+ */
+register CLRLQIINT0 {
+	address			0x050
+	access_mode	WO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	CLRLQIATNQAS	0x20
+	field	CLRLQICRCT1	0x10
+	field	CLRLQICRCT2	0x08
+	field	CLRLQIBADLQT	0x04
+	field	CLRLQIATNLQ	0x02
+	field	CLRLQIATNCMD	0x01
+}
+
+/*
+ * LQI Manager Interrupt Mode 0
+ */
+register LQIMODE0 {
+	address			0x050
+	access_mode	RW
+	modes		M_CFG
+	field	ENLQIATNQASK	0x20
+	field	ENLQICRCT1	0x10
+	field	ENLQICRCT2	0x08
+	field	ENLQIBADLQT	0x04
+	field	ENLQIATNLQ	0x02
+	field	ENLQIATNCMD	0x01
+}
+
+/*
+ * LQI Manager Status 1
+ */
+register LQISTAT1 {
+	address			0x051
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	LQIPHASE_LQ	0x80
+	field	LQIPHASE_NLQ	0x40
+	field	LQIABORT	0x20
+	field	LQICRCI_LQ	0x10
+	field	LQICRCI_NLQ	0x08
+	field	LQIBADLQI	0x04
+	field	LQIOVERI_LQ	0x02
+	field	LQIOVERI_NLQ	0x01
+}
+
+/*
+ * Clear LQI Manager Interrupts1
+ */
+register CLRLQIINT1 {
+	address			0x051
+	access_mode	WO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	CLRLQIPHASE_LQ	0x80
+	field	CLRLQIPHASE_NLQ	0x40
+	field	CLRLIQABORT	0x20
+	field	CLRLQICRCI_LQ	0x10
+	field	CLRLQICRCI_NLQ	0x08
+	field	CLRLQIBADLQI	0x04
+	field	CLRLQIOVERI_LQ	0x02
+	field	CLRLQIOVERI_NLQ	0x01
+}
+
+/*
+ * LQI Manager Interrupt Mode 1
+ */
+register LQIMODE1 {
+	address			0x051
+	access_mode	RW
+	modes		M_CFG
+	field	ENLQIPHASE_LQ	0x80	/* LQIPHASE1 */
+	field	ENLQIPHASE_NLQ	0x40	/* LQIPHASE2 */
+	field	ENLIQABORT	0x20
+	field	ENLQICRCI_LQ	0x10	/* LQICRCI1 */
+	field	ENLQICRCI_NLQ	0x08	/* LQICRCI2 */
+	field	ENLQIBADLQI	0x04
+	field	ENLQIOVERI_LQ	0x02	/* LQIOVERI1 */
+	field	ENLQIOVERI_NLQ	0x01	/* LQIOVERI2 */
+}
+
+/*
+ * LQI Manager Status 2
+ */
+register LQISTAT2 {
+	address			0x052
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	PACKETIZED	0x80
+	field	LQIPHASE_OUTPKT	0x40
+	field	LQIWORKONLQ	0x20
+	field	LQIWAITFIFO	0x10
+	field	LQISTOPPKT	0x08
+	field	LQISTOPLQ	0x04
+	field	LQISTOPCMD	0x02
+	field	LQIGSAVAIL	0x01
+}
+
+/*
+ * SCSI Status 3
+ */
+register SSTAT3 {
+	address			0x053
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	NTRAMPERR	0x02
+	field	OSRAMPERR	0x01
+}
+
+/*
+ * Clear SCSI Status 3
+ */
+register CLRSINT3 {
+	address			0x053
+	access_mode	WO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	CLRNTRAMPERR	0x02
+	field	CLROSRAMPERR	0x01
+}
+
+/*
+ * SCSI Interrupt Mode 3
+ */
+register SIMODE3 {
+	address			0x053
+	access_mode	RW
+	modes		M_CFG
+	field	ENNTRAMPERR	0x02
+	field	ENOSRAMPERR	0x01
+}
+
+/*
+ * LQO Manager Status 0
+ */
+register LQOSTAT0 {
+	address			0x054
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	LQOTARGSCBPERR	0x10
+	field	LQOSTOPT2	0x08
+	field	LQOATNLQ	0x04
+	field	LQOATNPKT	0x02
+	field	LQOTCRC		0x01
+}
+
+/*
+ * Clear LQO Manager interrupt 0
+ */
+register CLRLQOINT0 {
+	address			0x054
+	access_mode	WO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	CLRLQOTARGSCBPERR	0x10
+	field	CLRLQOSTOPT2		0x08
+	field	CLRLQOATNLQ		0x04
+	field	CLRLQOATNPKT		0x02
+	field	CLRLQOTCRC		0x01
+}
+
+/*
+ * LQO Manager Interrupt Mode 0
+ */
+register LQOMODE0 {
+	address			0x054
+	access_mode	RW
+	modes		M_CFG
+	field	ENLQOTARGSCBPERR	0x10
+	field	ENLQOSTOPT2		0x08
+	field	ENLQOATNLQ		0x04
+	field	ENLQOATNPKT		0x02
+	field	ENLQOTCRC		0x01
+}
+
+/*
+ * LQO Manager Status 1
+ */
+register LQOSTAT1 {
+	address			0x055
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	LQOINITSCBPERR	0x10
+	field	LQOSTOPI2	0x08
+	field	LQOBADQAS	0x04
+	field	LQOBUSFREE	0x02
+	field	LQOPHACHGINPKT	0x01
+}
+
+/*
+ * Clear LOQ Interrupt 1
+ */
+register CLRLQOINT1 {
+	address			0x055
+	access_mode	WO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	CLRLQOINITSCBPERR	0x10
+	field	CLRLQOSTOPI2		0x08
+	field	CLRLQOBADQAS		0x04
+	field	CLRLQOBUSFREE		0x02
+	field	CLRLQOPHACHGINPKT	0x01
+}
+
+/*
+ * LQO Manager Interrupt Mode 1
+ */
+register LQOMODE1 {
+	address			0x055
+	access_mode	RW
+	modes		M_CFG
+	field	ENLQOINITSCBPERR	0x10
+	field	ENLQOSTOPI2		0x08
+	field	ENLQOBADQAS		0x04
+	field	ENLQOBUSFREE		0x02
+	field	ENLQOPHACHGINPKT	0x01
+}
+
+/*
+ * LQO Manager Status 2
+ */
+register LQOSTAT2 {
+	address			0x056
+	access_mode	RO
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	LQOPKT		0xE0
+	field	LQOWAITFIFO	0x10
+	field	LQOPHACHGOUTPKT	0x02	/* outside of packet boundaries. */
+	field	LQOSTOP0	0x01	/* Stopped after sending all packets */
+}
+
+/*
+ * Output Synchronizer Space Count
+ */
+register OS_SPACE_CNT {
+	address			0x056
+	access_mode	RO
+	modes		M_CFG
+}
+
+/*
+ * SCSI Interrupt Mode 1
+ * Setting any bit will enable the corresponding function
+ * in SIMODE1 to interrupt via the IRQ pin.
+ */
+register SIMODE1 {
+	address			0x057
+	access_mode	RW
+	modes		M_DFF0, M_DFF1, M_SCSI
+	field	ENSELTIMO	0x80
+	field	ENATNTARG	0x40
+	field	ENSCSIRST	0x20
+	field	ENPHASEMIS	0x10
+	field	ENBUSFREE	0x08
+	field	ENSCSIPERR	0x04
+	field	ENSTRB2FAST	0x02
+	field	ENREQINIT	0x01
+}
+
+/*
+ * Good Status FIFO
+ */
+register GSFIFO {
+	address			0x058
+	access_mode	RO
+	size		2
+	modes		M_DFF0, M_DFF1, M_SCSI
+}
+
+/*
+ * Data FIFO SCSI Transfer Control
+ */
+register DFFSXFRCTL {
+	address			0x05A
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+	field	DFFBITBUCKET	0x08
+	field	CLRSHCNT	0x04
+	field	CLRCHN		0x02
+	field	RSTCHN		0x01
+}
+
+/*
+ * Next SCSI Control Block
+ */
+register NEXTSCB {
+	address			0x05A
+	access_mode	RW
+	size		2
+	modes		M_SCSI
+}
+
+/* Rev B only. */
+register LQOSCSCTL {
+	address			0x05A
+	access_mode	RW
+	size		1
+	modes		M_CFG
+	field		LQOH2A_VERSION	0x80
+	field		LQONOCHKOVER	0x01
+}
+
+/*
+ * SEQ Interrupts
+ */
+register SEQINTSRC {
+	address			0x05B
+	access_mode	RO
+	modes		M_DFF0, M_DFF1
+	field	CTXTDONE	0x40
+	field	SAVEPTRS	0x20
+	field	CFG4DATA	0x10
+	field	CFG4ISTAT	0x08
+	field	CFG4TSTAT	0x04
+	field	CFG4ICMD	0x02
+	field	CFG4TCMD	0x01
+}
+
+/*
+ * Clear Arp Interrupts
+ */
+register CLRSEQINTSRC {
+	address			0x05B
+	access_mode	WO
+	modes		M_DFF0, M_DFF1
+	field	CLRCTXTDONE	0x40
+	field	CLRSAVEPTRS	0x20
+	field	CLRCFG4DATA	0x10
+	field	CLRCFG4ISTAT	0x08
+	field	CLRCFG4TSTAT	0x04
+	field	CLRCFG4ICMD	0x02
+	field	CLRCFG4TCMD	0x01
+}
+
+/*
+ * SEQ Interrupt Enabled (Shared)
+ */
+register SEQIMODE {
+	address			0x05C
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+	field	ENCTXTDONE	0x40
+	field	ENSAVEPTRS	0x20
+	field	ENCFG4DATA	0x10
+	field	ENCFG4ISTAT	0x08
+	field	ENCFG4TSTAT	0x04
+	field	ENCFG4ICMD	0x02
+	field	ENCFG4TCMD	0x01
+}
+
+/*
+ * Current SCSI Control Block
+ */
+register CURRSCB {
+	address			0x05C
+	access_mode	RW
+	size		2
+	modes		M_SCSI
+}
+
+/*
+ * Data FIFO Status
+ */
+register MDFFSTAT {
+	address			0x05D
+	access_mode	RO
+	modes		M_DFF0, M_DFF1
+	field	SHCNTNEGATIVE	0x40 /* Rev B or higher */
+	field	SHCNTMINUS1	0x20 /* Rev B or higher */
+	field	LASTSDONE	0x10
+	field	SHVALID		0x08
+	field	DLZERO		0x04 /* FIFO data ends on packet boundary. */
+	field	DATAINFIFO	0x02
+	field	FIFOFREE	0x01
+}
+
+/*
+ * CRC Control
+ */
+register CRCCONTROL {
+	address			0x05d
+	access_mode	RW
+	modes		M_CFG
+	field	CRCVALCHKEN		0x40
+}
+
+/*
+ * SCSI Test Control
+ */
+register SCSITEST {
+	address			0x05E
+	access_mode	RW
+	modes		M_CFG
+	field	CNTRTEST	0x08
+	field	SEL_TXPLL_DEBUG	0x04
+}
+
+/*
+ * Data FIFO Queue Tag
+ */
+register DFFTAG {
+	address			0x05E
+	access_mode	RW
+	size		2
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * Last SCSI Control Block
+ */
+register LASTSCB {
+	address			0x05E
+	access_mode	RW
+	size		2
+	modes		M_SCSI
+}
+
+/*
+ * SCSI I/O Cell Power-down Control
+ */
+register IOPDNCTL {
+	address			0x05F
+	access_mode	RW
+	modes		M_CFG
+	field	DISABLE_OE	0x80
+	field	PDN_IDIST	0x04
+	field	PDN_DIFFSENSE	0x01
+}
+
+/*
+ * Shadow Host Address.
+ */
+register SHADDR {
+	address			0x060
+	access_mode	RO
+	size		8
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * Data Group CRC Interval.
+ */
+register DGRPCRCI {
+	address			0x060
+	access_mode	RW
+	size		2
+	modes		M_CFG
+}
+
+/*
+ * Data Transfer Negotiation Address
+ */
+register NEGOADDR {
+	address			0x060
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * Data Transfer Negotiation Data - Period Byte
+ */
+register NEGPERIOD {
+	address			0x061
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * Packetized CRC Interval
+ */
+register PACKCRCI {
+	address			0x062
+	access_mode	RW
+	size		2
+	modes		M_CFG
+}
+
+/*
+ * Data Transfer Negotiation Data - Offset Byte
+ */
+register NEGOFFSET {
+	address			0x062
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * Data Transfer Negotiation Data - PPR Options
+ */
+register NEGPPROPTS {
+	address			0x063
+	access_mode	RW
+	modes		M_SCSI
+	field	PPROPT_PACE	0x08
+	field	PPROPT_QAS	0x04
+	field	PPROPT_DT	0x02
+	field	PPROPT_IUT	0x01
+}
+
+/*
+ * Data Transfer Negotiation Data -  Connection Options
+ */
+register NEGCONOPTS {
+	address			0x064
+	access_mode	RW
+	modes		M_SCSI
+	field	ENSNAPSHOT	0x40
+	field	RTI_WRTDIS	0x20
+	field	RTI_OVRDTRN	0x10
+	field	ENSLOWCRC	0x08
+	field	ENAUTOATNI	0x04
+	field	ENAUTOATNO	0x02
+	field	WIDEXFER	0x01
+}
+
+/*
+ * Negotiation Table Annex Column Index.
+ */
+register ANNEXCOL {
+	address			0x065
+	access_mode	RW
+	modes		M_SCSI
+}
+
+register SCSCHKN {
+	address			0x066
+	access_mode	RW
+	modes		M_CFG
+	field	STSELSKIDDIS	0x40
+	field	CURRFIFODEF	0x20
+	field	WIDERESEN	0x10
+	field	SDONEMSKDIS	0x08
+	field	DFFACTCLR	0x04
+	field	SHVALIDSTDIS	0x02
+	field	LSTSGCLRDIS	0x01
+}
+
+const AHD_ANNEXCOL_PER_DEV0	4
+const AHD_NUM_PER_DEV_ANNEXCOLS	4
+const AHD_ANNEXCOL_PRECOMP_SLEW	4
+const	AHD_PRECOMP_MASK	0x07
+const	AHD_PRECOMP_SHIFT	0
+const	AHD_PRECOMP_CUTBACK_17	0x04
+const	AHD_PRECOMP_CUTBACK_29	0x06
+const	AHD_PRECOMP_CUTBACK_37	0x07
+const	AHD_SLEWRATE_MASK	0x78
+const	AHD_SLEWRATE_SHIFT	3
+/*
+ * Rev A has only a single bit (high bit of field) of slew adjustment.
+ * Rev B has 4 bits.  The current default happens to be the same for both.
+ */
+const	AHD_SLEWRATE_DEF_REVA	0x08
+const	AHD_SLEWRATE_DEF_REVB	0x08
+
+/* Rev A does not have any amplitude setting. */
+const AHD_ANNEXCOL_AMPLITUDE	6
+const	AHD_AMPLITUDE_MASK	0x7
+const	AHD_AMPLITUDE_SHIFT	0
+const	AHD_AMPLITUDE_DEF	0x7
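+
+/*
+ * Worked example using the values above: an annex entry combining the
+ * Rev B default slew rate with the AHD_PRECOMP_CUTBACK_29 precomp
+ * setting would be 0x08 | 0x06 = 0x0E, since the slew field occupies
+ * bits 6:3 and the precomp field bits 2:0.
+ */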
+
+/*
+ * Negotiation Table Annex Data Port.
+ */
+register ANNEXDAT {
+	address			0x066
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * Initiator's Own Id.
+ * The SCSI ID to use for Selection Out and seen during a reselection.
+ */
+register IOWNID {
+	address			0x067
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * 960MHz Phase-Locked Loop Control 0
+ */
+register PLL960CTL0 {
+	address			0x068
+	access_mode	RW
+	modes		M_CFG
+	field	PLL_VCOSEL	0x80
+	field	PLL_PWDN	0x40
+	field	PLL_NS		0x30
+	field	PLL_ENLUD	0x08
+	field	PLL_ENLPF	0x04
+	field	PLL_DLPF	0x02
+	field	PLL_ENFBM	0x01
+}
+
+/*
+ * Target Own Id
+ */
+register TOWNID {
+	address			0x069
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * 960MHz Phase-Locked Loop Control 1
+ */
+register PLL960CTL1 {
+	address			0x069
+	access_mode	RW
+	modes		M_CFG
+	field	PLL_CNTEN	0x80
+	field	PLL_CNTCLR	0x40
+	field	PLL_RST		0x01
+}
+
+/*
+ * Expander Signature
+ */
+register XSIG {
+	address			0x06A
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * Shadow Byte Count
+ */
+register SHCNT {
+	address			0x068
+	access_mode	RW
+	size		3
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * Selection Out ID
+ */
+register SELOID {
+	address			0x06B
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * 960-MHz Phase-Locked Loop Test Count
+ */
+register PLL960CNT0 {
+	address			0x06A
+	access_mode	RO
+	size		2
+	modes		M_CFG
+}
+
+/*
+ * 400-MHz Phase-Locked Loop Control 0
+ */
+register PLL400CTL0 {
+	address			0x06C
+	access_mode	RW
+	modes		M_CFG
+	field	PLL_VCOSEL	0x80
+	field	PLL_PWDN	0x40
+	field	PLL_NS		0x30
+	field	PLL_ENLUD	0x08
+	field	PLL_ENLPF	0x04
+	field	PLL_DLPF	0x02
+	field	PLL_ENFBM	0x01
+}
+
+/*
+ * Arbitration Fairness
+ */
+register FAIRNESS {
+	address			0x06C
+	access_mode	RW
+	size		2
+	modes		M_SCSI
+}
+
+/*
+ * 400-MHz Phase-Locked Loop Control 1
+ */
+register PLL400CTL1 {
+	address			0x06D
+	access_mode	RW
+	modes		M_CFG
+	field	PLL_CNTEN	0x80
+	field	PLL_CNTCLR	0x40
+	field	PLL_RST		0x01
+}
+
+/*
+ * Arbitration Unfairness
+ */
+register UNFAIRNESS {
+	address			0x06E
+	access_mode	RW
+	size		2
+	modes		M_SCSI
+}
+
+/*
+ * 400-MHz Phase-Locked Loop Test Count
+ */
+register PLL400CNT0 {
+	address			0x06E
+	access_mode	RO
+	size		2
+	modes		M_CFG
+}
+
+/*
+ * SCB Page Pointer
+ */
+register SCBPTR {
+	address			0x0A8
+	access_mode	RW
+	size		2
+	modes		M_DFF0, M_DFF1, M_CCHAN, M_SCSI
+}
+
+/*
+ * CMC SCB Array Count
+ * Number of bytes to transfer between CMC SCB memory and SCBRAM.
+ * Transfers must be 8-byte aligned and sized.
+ */
+register CCSCBACNT {
+	address			0x0AB
+	access_mode	RW
+	modes		M_CCHAN
+}
+
+/*
+ * SCB Autopointer
+ * SCB-Next Address Snooping logic.  When an SCB is transferred to
+ * the card, the next SCB address to be used by the CMC array can
+ * be autoloaded from that transfer.
+ */
+register SCBAUTOPTR {
+	address			0x0AB
+	access_mode	RW
+	modes		M_CFG
+	field	AUSCBPTR_EN	0x80
+	field	SCBPTR_ADDR	0x38
+	field	SCBPTR_OFF	0x07
+}
+
+/*
+ * CMC SG Ram Address Pointer
+ */
+register CCSGADDR {
+	address			0x0AC
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * CMC SCB RAM Address Pointer
+ */
+register CCSCBADDR {
+	address			0x0AC
+	access_mode	RW
+	modes		M_CCHAN
+}
+
+/*
+ * CMC SCB Ram Back-up Address Pointer
+ * Indicates the true stop location of transfers halted prior
+ * to SCBHCNT going to 0.
+ */
+register CCSCBADR_BK {
+	address			0x0AC
+	access_mode	RO
+	modes		M_CFG
+}
+
+/*
+ * CMC SG Control
+ */
+register CCSGCTL {
+	address			0x0AD
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+	field	CCSGDONE	0x80
+	field	SG_CACHE_AVAIL	0x10
+	field	CCSGENACK	0x08
+	mask	CCSGEN		0x0C
+	field	SG_FETCH_REQ	0x02
+	field	CCSGRESET	0x01
+}
+
+/*
+ * CMD SCB Control
+ */
+register CCSCBCTL {
+	address			0x0AD
+	access_mode	RW
+	modes		M_CCHAN
+	field	CCSCBDONE	0x80
+	field	ARRDONE		0x40
+	field	CCARREN		0x10
+	field	CCSCBEN		0x08
+	field	CCSCBDIR	0x04
+	field	CCSCBRESET	0x01
+}
+
+/*
+ * CMC Ram BIST
+ */
+register CMC_RAMBIST {
+	address			0x0AD
+	access_mode	RW
+	modes		M_CFG
+	field	SG_ELEMENT_SIZE		0x80
+	field	SCBRAMBIST_FAIL		0x40
+	field	SG_BIST_FAIL		0x20
+	field	SG_BIST_EN		0x10
+	field	CMC_BUFFER_BIST_FAIL	0x02
+	field	CMC_BUFFER_BIST_EN	0x01
+}
+
+/*
+ * CMC SG RAM Data Port
+ */
+register CCSGRAM {
+	address			0x0B0
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * CMC SCB RAM Data Port
+ */
+register CCSCBRAM {
+	address			0x0B0
+	access_mode	RW
+	modes		M_CCHAN
+}
+
+/*
+ * Flex DMA Address.
+ */
+register FLEXADR {
+	address			0x0B0
+	access_mode	RW
+	size		3
+	modes		M_SCSI
+}
+
+/*
+ * Flex DMA Byte Count
+ */
+register FLEXCNT {
+	address			0x0B3
+	access_mode	RW
+	size		2
+	modes		M_SCSI
+}
+
+/*
+ * Flex DMA Status
+ */
+register FLEXDMASTAT {
+	address			0x0B5
+	access_mode	RW
+	modes		M_SCSI
+	field	FLEXDMAERR	0x02
+	field	FLEXDMADONE	0x01
+}
+
+/*
+ * Flex DMA Data Port
+ */
+register FLEXDATA {
+	address			0x0B6
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * Board Data
+ */
+register BRDDAT {
+	address			0x0B8
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * Board Control
+ */
+register BRDCTL {
+	address			0x0B9
+	access_mode	RW
+	modes		M_SCSI
+	field	FLXARBACK	0x80
+	field	FLXARBREQ	0x40
+	field	BRDADDR		0x38
+	field	BRDEN		0x04
+	field	BRDRW		0x02
+	field	BRDSTB		0x01
+}
+
+/*
+ * Serial EEPROM Address
+ */
+register SEEADR {
+	address			0x0BA
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * Serial EEPROM Data
+ */
+register SEEDAT {
+	address			0x0BC
+	access_mode	RW
+	size		2
+	modes		M_SCSI
+}
+
+/*
+ * Serial EEPROM Status
+ */
+register SEESTAT {
+	address			0x0BE
+	access_mode	RO
+	modes		M_SCSI
+	field	INIT_DONE	0x80
+	field	SEEOPCODE	0x70
+	field	LDALTID_L	0x08
+	field	SEEARBACK	0x04
+	field	SEEBUSY		0x02
+	field	SEESTART	0x01
+}
+
+/*
+ * Serial EEPROM Control
+ */
+register SEECTL {
+	address			0x0BE
+	access_mode	RW
+	modes		M_SCSI
+	field	SEEOPCODE	0x70 {
+		SEEOP_ERASE	0x70,
+		SEEOP_READ	0x60,
+		SEEOP_WRITE	0x50,
+	/*
+	 * The following four commands use special
+	 * addresses for differentiation.
+	 */
+		SEEOP_ERAL	0x40
+	}
+	mask	SEEOP_EWEN	0x40
+	mask	SEEOP_WALL	0x40
+	mask	SEEOP_EWDS	0x40
+	field	SEERST		0x02
+	field	SEESTART	0x01
+}
+
+const SEEOP_ERAL_ADDR	0x80
+const SEEOP_EWEN_ADDR	0xC0
+const SEEOP_WRAL_ADDR	0x40
+const SEEOP_EWDS_ADDR	0x00
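+
+/*
+ * These four operations share the 0x40 opcode; the intended operation
+ * is presumably selected by loading the matching special address above
+ * into SEEADR (e.g. SEEOP_EWEN with SEEADR = SEEOP_EWEN_ADDR) before
+ * starting the serial EEPROM cycle.
+ */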
+
+/*
+ * SCB Counter
+ */
+register SCBCNT {
+	address			0x0BF
+	access_mode	RW
+	modes		M_SCSI
+}
+
+/*
+ * Data FIFO Write Address
+ * Pointer to the next QWD location to be written to the data FIFO.
+ */
+register DFWADDR {
+	address			0x0C0
+	access_mode	RW
+	size		2
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * DSP Filter Control
+ */
+register DSPFLTRCTL {
+	address			0x0C0
+	access_mode	RW
+	modes		M_CFG
+	field	FLTRDISABLE	0x20
+	field	EDGESENSE	0x10
+	field	DSPFCNTSEL	0x0F
+}
+
+/*
+ * DSP Data Channel Control
+ */
+register DSPDATACTL {
+	address			0x0C1
+	access_mode	RW
+	modes		M_CFG
+	field	BYPASSENAB	0x80
+	field	DESQDIS		0x10
+	field	RCVROFFSTDIS	0x04
+	field	XMITOFFSTDIS	0x02
+}
+
+/*
+ * Data FIFO Read Address
+ * Pointer to the next QWD location to be read from the data FIFO.
+ */
+register DFRADDR {
+	address			0x0C2
+	access_mode	RW
+	size		2
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * DSP REQ Control
+ */
+register DSPREQCTL {
+	address			0x0C2
+	access_mode	RW
+	modes		M_CFG
+	field	MANREQCTL	0xC0
+	field	MANREQDLY	0x3F
+}
+
+/*
+ * DSP ACK Control
+ */
+register DSPACKCTL {
+	address			0x0C3
+	access_mode	RW
+	modes		M_CFG
+	field	MANACKCTL	0xC0
+	field	MANACKDLY	0x3F
+}
+
+/*
+ * Data FIFO Data
+ * Read/Write byte port into the data FIFO.  The read and write
+ * FIFO pointers increment with each read and write respectively
+ * to this port.
+ */
+register DFDAT {
+	address			0x0C4
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * DSP Channel Select
+ */
+register DSPSELECT {
+	address			0x0C4
+	access_mode	RW
+	modes		M_CFG
+	field	AUTOINCEN	0x80
+	field	DSPSEL		0x1F
+}
+
+const NUMDSPS 0x14
+
+/*
+ * Write Bias Control
+ */
+register WRTBIASCTL {
+	address			0x0C5
+	access_mode	WO
+	modes		M_CFG
+	field	AUTOXBCDIS	0x80
+	field	XMITMANVAL	0x3F
+}
+
+/*
+ * Currently the WRTBIASCTL is the same as the default.
+ */
+const WRTBIASCTL_HP_DEFAULT 0x0
+
+/*
+ * Receiver Bias Control
+ */
+register RCVRBIOSCTL {
+	address			0x0C6
+	access_mode	WO
+	modes		M_CFG
+	field	AUTORBCDIS	0x80
+	field	RCVRMANVAL	0x3F
+}
+
+/*
+ * Write Bias Calculator
+ */
+register WRTBIASCALC {
+	address			0x0C7
+	access_mode	RO
+	modes		M_CFG
+}
+
+/*
+ * Data FIFO Pointers
+ * Contains the byte offset from DFWADDR and DFRADDR to the current
+ * FIFO write/read locations.
+ */
+register DFPTRS {
+	address			0x0C8
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * Receiver Bias Calculator
+ */
+register RCVRBIASCALC {
+	address			0x0C8
+	access_mode	RO
+	modes		M_CFG
+}
+
+/*
+ * Data FIFO Backup Read Pointer
+ * Contains the data FIFO address to be restored if the last
+ * data accessed from the data FIFO was not transferred successfully.
+ */
+register DFBKPTR {
+	address			0x0C9
+	access_mode	RW
+	size		2
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * Skew Calculator
+ */
+register SKEWCALC {
+	address			0x0C9
+	access_mode	RO
+	modes		M_CFG
+}
+
+/*
+ * Data FIFO Debug Control
+ */
+register DFDBCTL {
+	address				0x0CB
+	access_mode	RW
+	modes		M_DFF0, M_DFF1
+	field	DFF_CIO_WR_RDY		0x20
+	field	DFF_CIO_RD_RDY		0x10
+	field	DFF_DIR_ERR		0x08
+	field	DFF_RAMBIST_FAIL	0x04
+	field	DFF_RAMBIST_DONE	0x02
+	field	DFF_RAMBIST_EN		0x01
+}
+
+/*
+ * Data FIFO Space Count
+ * Number of FIFO locations that are free.
+ */
+register DFSCNT {
+	address			0x0CC
+	access_mode	RO
+	size		2
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * Data FIFO Byte Count
+ * Number of filled FIFO locations.
+ */
+register DFBCNT {
+	address			0x0CE
+	access_mode	RO
+	size		2
+	modes		M_DFF0, M_DFF1
+}
+
+/*
+ * Sequencer Program Overlay Address.
+ * Low address must be written prior to high address.
+ */
+register OVLYADDR {
+	address			0x0D4
+	modes		M_SCSI
+	size		2
+	access_mode	RW
+}
+
+/*
+ * Sequencer Control 0
+ * Error detection mode, speed configuration,
+ * single step, breakpoints and program load.
+ */
+register SEQCTL0 {
+	address			0x0D6
+	access_mode RW
+	field	PERRORDIS	0x80
+	field	PAUSEDIS	0x40
+	field	FAILDIS		0x20
+	field	FASTMODE	0x10
+	field	BRKADRINTEN	0x08
+	field	STEP		0x04
+	field	SEQRESET	0x02
+	field	LOADRAM		0x01
+}
+
+/*
+ * Sequencer Control 1
+ * Instruction RAM Diagnostics
+ */
+register SEQCTL1 {
+	address			0x0D7
+	access_mode RW
+	field	OVRLAY_DATA_CHK	0x08
+	field	RAMBIST_DONE	0x04
+	field	RAMBIST_FAIL	0x02
+	field	RAMBIST_EN	0x01
+}
+
+/*
+ * Sequencer Flags
+ * Zero and Carry state of the ALU.
+ */
+register FLAGS {
+	address			0x0D8
+	access_mode RO
+	field	ZERO		0x02
+	field	CARRY		0x01
+}
+
+/*
+ * Sequencer Interrupt Control
+ */ 
+register SEQINTCTL {
+	address			0x0D9
+	access_mode RW
+	field	INTVEC1DSL	0x80
+	field	INT1_CONTEXT	0x20
+	field	SCS_SEQ_INT1M1	0x10
+	field	SCS_SEQ_INT1M0	0x08
+	field	INTMASK2	0x04
+	field	INTMASK1	0x02
+	field	IRET		0x01
+}
+
+/*
+ * Sequencer RAM Data Port
+ * Single byte window into the Sequencer Instruction Ram area starting
+ * at the address specified by OVLYADDR.  To write a full instruction word,
+ * simply write four bytes in succession.  OVLYADDR will increment after the
+ * most significant instruction byte (the byte with the parity bit) is written.
+ */
+register SEQRAM {
+	address			0x0DA
+	access_mode RW
+}
+
+/*
+ * Sequencer Program Counter
+ * Low byte must be written prior to high byte.
+ */
+register PRGMCNT {
+	address			0x0DE
+	access_mode	RW
+	size		2
+}
+
+/*
+ * Accumulator
+ */
+register ACCUM {
+	address			0x0E0
+	access_mode RW
+	accumulator
+}
+
+/*
+ * Source Index Register
+ * Incrementing index for reads of SINDIR and the destination (low byte only)
+ * for any immediate operands passed in jmp, jc, jnc, call instructions.
+ * Example:
+ *		mvi	0xFF	call some_routine;
+ *
+ *  Will set SINDEX[0] to 0xFF and call the routine "some_routine".
+ */
+register SINDEX	{
+	address			0x0E2
+	access_mode	RW
+	size		2
+	sindex
+}
+
+/*
+ * Destination Index Register
+ * Incrementing index for writes to DINDIR.  Can be used as a scratch register.
+ */
+register DINDEX {
+	address			0x0E4
+	access_mode	RW
+	size		2
+}
+
+/*
+ * Break Address
+ * Sequencer instruction breakpoint address.
+ */
+register BRKADDR0 {
+	address			0x0E6
+	access_mode	RW
+}
+
+register BRKADDR1 {
+	address			0x0E6
+	access_mode	RW
+	field	BRKDIS		0x80	/* Disable Breakpoint */
+}
+
+/*
+ * All Ones
+ * All reads to this register return the value 0xFF.
+ */
+register ALLONES {
+	address			0x0E8
+	access_mode RO
+	allones
+}
+
+/*
+ * All Zeros
+ * All reads to this register return the value 0.
+ */
+register ALLZEROS {
+	address			0x0EA
+	access_mode RO
+	allzeros
+}
+
+/*
+ * No Destination
+ * Writes to this register have no effect.
+ */
+register NONE {
+	address			0x0EA
+	access_mode WO
+	none
+}
+
+/*
+ * Source Index Indirect
+ * Reading this register is equivalent to reading (register_base + SINDEX) and
+ * incrementing SINDEX by 1.
+ */
+register SINDIR	{
+	address			0x0EC
+	access_mode RO
+}
+
+/*
+ * Destination Index Indirect
+ * Writing this register is equivalent to writing to (register_base + DINDEX)
+ * and incrementing DINDEX by 1.
+ */
+register DINDIR	 {
+	address			0x0ED
+	access_mode WO
+}
+
+/*
+ * Function One
+ * 2's complement to bit value conversion.  Write the 2's complement value
+ * (0-7 only) to the top nibble and retrieve the bit indexed by that value
+ * on the next read of this register.
+ * Example:
+ *	Write	0x60	(top nibble of 6 selects bit 6)
+ *	Read	0x40	(only bit 6 is set)
+ */
+register FUNCTION1 {
+	address			0x0F0
+	access_mode RW
+}
+
+/*
+ * Stack
+ * Window into the stack.  Each stack location is 10 bits wide, reported
+ * low byte followed by high byte.  There are 8 stack locations.
+ */
+register STACK {
+	address			0x0F2
+	access_mode RW
+}
+
+/*
+ * Interrupt Vector 1 Address
+ * Interrupt branch address for SCS SEQ_INT1 mode 0 and 1 interrupts.
+ */
+register INTVEC1_ADDR {
+	address			0x0F4
+	access_mode	RW
+	size		2
+	modes		M_CFG
+}
+
+/*
+ * Current Address
+ * Address of the currently executing SEQRAM instruction.
+ */
+register CURADDR {
+	address			0x0F4
+	access_mode	RW
+	size		2
+	modes		M_SCSI
+}
+
+/*
+ * Interrupt Vector 2 Address
+ * Interrupt branch address for HST_SEQ_INT2 interrupts.
+ */
+register INTVEC2_ADDR {
+	address			0x0F6
+	access_mode	RW
+	size		2
+	modes		M_CFG
+}
+
+/*
+ * Last Address
+ * Address of the SEQRAM instruction executed prior to the current instruction.
+ */
+register LASTADDR {
+	address			0x0F6
+	access_mode	RW
+	size		2
+	modes		M_SCSI
+}
+
+register AHD_PCI_CONFIG_BASE {
+	address			0x100
+	access_mode	RW
+	size		256
+	modes		M_CFG
+}
+
+/* ---------------------- Scratch RAM Offsets ------------------------- */
+scratch_ram {
+	/* Mode Specific */
+	address			0x0A0
+	size	8
+	modes	0, 1, 2, 3
+	REG0 {
+		size		2
+	}
+	REG1 {
+		size		2
+	}
+	REG_ISR {
+		size		2
+	}
+	SG_STATE {
+		size		1
+		field	SEGS_AVAIL	0x01
+		field	LOADING_NEEDED	0x02
+		field	FETCH_INPROG	0x04
+	}
+	/*
+	 * Track whether the transfer byte count for
+	 * the current data phase is odd.
+	 */
+	DATA_COUNT_ODD {
+		size		1
+	}
+}
+
+scratch_ram {
+	/* Mode Specific */
+	address			0x0F8
+	size	8
+	modes	0, 1, 2, 3
+	LONGJMP_ADDR {
+		size		2
+	}
+	ACCUM_SAVE {
+		size		1
+	}
+}
+
+
+scratch_ram {
+	address			0x100
+	size	128
+	modes	0, 1, 2, 3
+	/*
+	 * Per "other-id" execution queues.  We use an array of
+	 * tail pointers into lists of SCBs sorted by "other-id".
+	 * The execution head pointer threads the head SCBs for
+	 * each list.
+	 */
+	WAITING_SCB_TAILS {
+		size		32
+	}
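+	/*
+	 * Each tail is a two byte SCBID, so the tail for "other-id" N
+	 * lives at WAITING_SCB_TAILS + (N * 2).  The sequencer computes
+	 * this index from SCB_SCSIID by shifting right by three and
+	 * clearing the low bit (see fetch_new_scb_done in aic79xx.seq).
+	 */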
+	WAITING_TID_HEAD {
+		size		2
+	}
+	WAITING_TID_TAIL {
+		size		2
+	}
+	/*
+	 * SCBID of the next SCB in the new SCB queue.
+	 */
+	NEXT_QUEUED_SCB_ADDR {
+		size		4
+	}
+	/*
+	 * head of list of SCBs that have
+	 * completed but have not been
+	 * put into the qoutfifo.
+	 */
+	COMPLETE_SCB_HEAD {
+		size		2
+	}
+	/*
+	 * The list of completed SCBs in
+	 * the active DMA.
+	 */
+	COMPLETE_SCB_DMAINPROG_HEAD {
+		size		2
+	}
+	/*
+	 * head of list of SCBs that have
+	 * completed but need to be uploaded
+	 * to the host prior to being completed.
+	 */
+	COMPLETE_DMA_SCB_HEAD {
+		size		2
+	}
+	/* Counting semaphore to prevent new select-outs */
+	QFREEZE_COUNT {
+		size		2
+	}
+	/*
+	 * Mode to restore on legacy idle loop exit.
+	 */
+	SAVED_MODE {
+		size		1
+	}
+	/*
+	 * Single byte buffer used to designate the type of message
+	 * to send to a target.
+	 */
+	MSG_OUT {
+		size		1
+	}
+	/* Parameters for DMA Logic */
+	DMAPARAMS {
+		size		1
+		field	PRELOADEN	0x80
+		field	WIDEODD		0x40
+		field	SCSIEN		0x20
+		field	SDMAEN		0x10
+		field	SDMAENACK	0x10
+		field	HDMAEN		0x08
+		field	HDMAENACK	0x08
+		field	DIRECTION	0x04	/* Set indicates PCI->SCSI */
+		field	FIFOFLUSH	0x02
+		field	FIFORESET	0x01
+	}
+	SEQ_FLAGS {
+		size		1
+		field	NOT_IDENTIFIED		0x80
+		field	NO_CDB_SENT		0x40
+		field	TARGET_CMD_IS_TAGGED	0x40
+		field	DPHASE			0x20
+		/* Target flags */
+		field	TARG_CMD_PENDING	0x10
+		field	CMDPHASE_PENDING	0x08
+		field	DPHASE_PENDING		0x04
+		field	SPHASE_PENDING		0x02
+		field	NO_DISCONNECT		0x01
+	}
+	/*
+	 * Temporary storage for the
+	 * target/channel/lun of a
+	 * reconnecting target
+	 */
+	SAVED_SCSIID {
+		size		1
+	}
+	SAVED_LUN {
+		size		1
+	}
+	/*
+	 * The last bus phase as seen by the sequencer. 
+	 */
+	LASTPHASE {
+		size		1
+		field	CDI		0x80
+		field	IOI		0x40
+		field	MSGI		0x20
+		field	P_BUSFREE	0x01
+		enum	PHASE_MASK  CDO|IOO|MSGO {
+			P_DATAOUT	0x0,
+			P_DATAIN	IOO,
+			P_DATAOUT_DT	P_DATAOUT|MSGO,
+			P_DATAIN_DT	P_DATAIN|MSGO,
+			P_COMMAND	CDO,
+			P_MESGOUT	CDO|MSGO,
+			P_STATUS	CDO|IOO,
+			P_MESGIN	CDO|IOO|MSGO
+		}
+	}
+	/*
+	 * Value to "or" into the SCBPTR[1] value to
+	 * indicate that an entry in the QOUTFIFO is valid.
+	 */
+	QOUTFIFO_ENTRY_VALID_TAG {
+		size		1
+	}
+	/*
+	 * Base address of our shared data with the kernel driver in host
+	 * memory.  This includes the qoutfifo and target mode
+	 * incoming command queue.
+	 */
+	SHARED_DATA_ADDR {
+		size		4
+	}
+	/*
+	 * Pointer to location in host memory for next
+	 * position in the qoutfifo.
+	 */
+	QOUTFIFO_NEXT_ADDR {
+		size		4
+	}
+	/*
+	 * Kernel and sequencer offsets into the queue of
+	 * incoming target mode command descriptors.  The
+	 * queue is full when the KERNEL_TQINPOS == TQINPOS.
+	 */
+	KERNEL_TQINPOS {
+		size		1
+	}
+	TQINPOS {                
+		size		1
+	}
+	ARG_1 {
+		size		1
+		mask	SEND_MSG		0x80
+		mask	SEND_SENSE		0x40
+		mask	SEND_REJ		0x20
+		mask	MSGOUT_PHASEMIS		0x10
+		mask	EXIT_MSG_LOOP		0x08
+		mask	CONT_MSG_LOOP_WRITE	0x04
+		mask	CONT_MSG_LOOP_READ	0x03
+		mask	CONT_MSG_LOOP_TARG	0x02
+		alias	RETURN_1
+	}
+	ARG_2 {
+		size		1
+		alias	RETURN_2
+	}
+
+	/*
+	 * Snapshot of MSG_OUT taken after each message is sent.
+	 */
+	LAST_MSG {
+		size		1
+	}
+
+	/*
+	 * Sequences the kernel driver has okayed for us.  This allows
+	 * the driver to do things like prevent initiator or target
+	 * operations.
+	 */
+	SCSISEQ_TEMPLATE {
+		size		1
+		field	MANUALCTL	0x40
+		field	ENSELI		0x20
+		field	ENRSELI		0x10
+		field	MANUALP		0x0C
+		field	ENAUTOATNP	0x02
+		field	ALTSTIM		0x01
+	}
+
+	/*
+	 * The initiator specified tag for this target mode transaction.
+	 */
+	INITIATOR_TAG {
+		size		1
+	}
+
+	SEQ_FLAGS2 {
+		size		1
+		field	TARGET_MSG_PENDING	  0x02
+		field	SELECTOUT_QFROZEN	  0x04
+	}
+
+	ALLOCFIFO_SCBPTR {
+		size		2
+	}
+
+	/*
+	 * The maximum amount of time to wait, when interrupt coalescing
+	 * is enabled, before issuing a CMDCMPLT interrupt for a completed
+	 * command.
+	 */
+	INT_COALESCING_TIMER {
+		size		2
+	}
+
+	/*
+	 * The maximum number of commands to coalesce into a single interrupt.
+	 * Actually the 2's complement of that value to simplify sequencer
+	 * code.
+	 */
+	INT_COALESCING_MAXCMDS {
+		size		1
+	}
+
+	/*
+	 * The minimum number of commands still outstanding required
+	 * to continue coalescing (2's complement of value).
+	 */
+	INT_COALESCING_MINCMDS {
+		size		1
+	}
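+
+	/*
+	 * Example of the 2's complement convention used by both limits
+	 * above: to coalesce at most 8 commands the host would store -8
+	 * (0xF8) in INT_COALESCING_MAXCMDS; the add in coalesce_by_count
+	 * (aic79xx.seq) then carries, and a CMDCMPLT interrupt is issued,
+	 * once 8 completions have been counted.
+	 */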
+
+	/*
+	 * Number of commands "in-flight".
+	 */
+	CMDS_PENDING {
+		size		2
+	}
+
+	/*
+	 * The count of commands that have been coalesced.
+	 */
+	INT_COALESCING_CMDCOUNT {
+		size		1
+	}
+
+	/*
+	 * Since the HS_MAILBOX is self-clearing, copy its contents to
+	 * this position in scratch ram every time it changes.
+	 */
+	LOCAL_HS_MAILBOX {
+		size		1
+	}
+	/*
+	 * Target-mode CDB type to CDB length table used
+	 * in non-packetized operation.
+	 */
+	CMDSIZE_TABLE {
+		size		8
+	}
+}
+
+/************************* Hardware SCB Definition ****************************/
+scb {
+	address			0x180
+	size	64
+	modes	0, 1, 2, 3
+	SCB_RESIDUAL_DATACNT {
+		size	4
+		alias	SCB_CDB_STORE
+		alias	SCB_HOST_CDB_PTR
+	}
+	SCB_RESIDUAL_SGPTR {
+		size	4
+		field	SG_ADDR_MASK		0xf8	/* In the last byte */
+		field	SG_OVERRUN_RESID	0x02	/* In the first byte */
+		field	SG_LIST_NULL		0x01	/* In the first byte */
+	}
+	SCB_SCSI_STATUS {
+		size	1
+		alias	SCB_HOST_CDB_LEN
+	}
+	SCB_TARGET_PHASES {
+		size	1
+	}
+	SCB_TARGET_DATA_DIR {
+		size	1
+	}
+	SCB_TARGET_ITAG {
+		size	1
+	}
+	SCB_SENSE_BUSADDR {
+		/*
+		 * Only valid if CDB length is less than 13 bytes or
+		 * we are using a CDB pointer.  Otherwise contains
+		 * the last 4 bytes of embedded cdb information.
+		 */
+		size	4
+		alias	SCB_NEXT_COMPLETE
+	}
+	SCB_TAG {
+		alias	SCB_FIFO_USE_COUNT
+		size	2
+	}
+	SCB_CONTROL {
+		size	1
+		field	TARGET_SCB	0x80
+		field	DISCENB		0x40
+		field	TAG_ENB		0x20
+		field	MK_MESSAGE	0x10
+		field	STATUS_RCVD	0x08
+		field	DISCONNECTED	0x04
+		field	SCB_TAG_TYPE	0x03
+	}
+	SCB_SCSIID {
+		size	1
+		field	TID	0xF0
+		field	OID	0x0F
+	}
+	SCB_LUN {
+		size	1
+		field	LID	0xff
+	}
+	SCB_TASK_ATTRIBUTE {
+		size	1
+		/*
+		 * Overloaded field for non-packetized 
+		 * ignore wide residue message handling.
+		 */
+		field	SCB_XFERLEN_ODD	0x01
+	}
+	SCB_CDB_LEN {
+		size	1
+		field	SCB_CDB_LEN_PTR	0x80	/* CDB in host memory */
+	}
+	SCB_TASK_MANAGEMENT {
+		size	1
+	}
+	SCB_DATAPTR {
+		size	8
+	}
+	SCB_DATACNT {
+		/*
+		 * The last byte is really the high address bits for
+		 * the data address.
+		 */
+		size	4
+		field	SG_LAST_SEG		0x80	/* In the fourth byte */
+		field	SG_HIGH_ADDR_BITS	0x7F	/* In the fourth byte */
+	}
+	SCB_SGPTR {
+		size	4
+		field	SG_STATUS_VALID	0x04	/* In the first byte */
+		field	SG_FULL_RESID	0x02	/* In the first byte */
+		field	SG_LIST_NULL	0x01	/* In the first byte */
+	}
+	SCB_BUSADDR {
+		size	4
+	}
+	SCB_NEXT {
+		alias	SCB_NEXT_SCB_BUSADDR
+		size	2
+	}
+	SCB_NEXT2 {
+		size	2
+	}
+	SCB_SPARE {
+		size	8
+		alias	SCB_PKT_LUN
+	}
+	SCB_DISCONNECTED_LISTS {
+		size	8
+	}
+}
+
+/*********************************** Constants ********************************/
+const MK_MESSAGE_BIT_OFFSET	4
+const TID_SHIFT		4
+const TARGET_CMD_CMPLT	0xfe
+const INVALID_ADDR	0x80
+#define SCB_LIST_NULL	0xff
+#define QOUTFIFO_ENTRY_VALID_TOGGLE	0x80
+
+const CCSGADDR_MAX	0x80
+const CCSCBADDR_MAX	0x80
+const CCSGRAM_MAXSEGS	16
+
+/* Selection Timeout Timer Constants */
+const STIMESEL_SHIFT	3
+const STIMESEL_MIN	0x18
+const STIMESEL_BUG_ADJ	0x8
+
+/* WDTR Message values */
+const BUS_8_BIT			0x00
+const BUS_16_BIT		0x01
+const BUS_32_BIT		0x02
+
+/* Offset maximums */
+const MAX_OFFSET		0xfe
+const MAX_OFFSET_PACED		0xfe
+const MAX_OFFSET_PACED_BUG	0x7f
+/*
+ * Some 160 devices incorrectly accept 0xfe as a
+ * sync offset, but will overrun this value.  Limit
+ * to 0x7f for speeds lower than U320, which avoids
+ * persistent sync offset overruns.
+ */
+const MAX_OFFSET_NON_PACED	0x7f
+const HOST_MSG			0xff
+
+/*
+ * The size of our sense buffers.
+ * Sense buffer mapping can be handled in either of two ways.
+ * The first is to allocate a dmamap for each transaction.
+ * Depending on the architecture, dmamaps can be costly. The
+ * alternative is to statically map the buffers in much the same
+ * way we handle our scatter gather lists.  The driver implements
+ * the latter.
+ */
+const AHD_SENSE_BUFSIZE		256
+
+/* Target mode command processing constants */
+const CMD_GROUP_CODE_SHIFT	0x05
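+/*
+ * The group code is the top three bits of a CDB opcode, so shifting the
+ * opcode right by this amount yields an index into the 8-entry
+ * CMDSIZE_TABLE in scratch ram above.
+ */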
+
+const STATUS_BUSY		0x08
+const STATUS_QUEUE_FULL		0x28
+const STATUS_PKT_SENSE		0xFF
+const TARGET_DATA_IN		1
+
+const SCB_TRANSFER_SIZE_FULL_LUN	56
+const SCB_TRANSFER_SIZE_1BYTE_LUN	48
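+/*
+ * The two sizes differ by the 8 byte SCB_PKT_LUN (SCB_SPARE) field:
+ * 48 bytes covers the SCB fields through SCB_NEXT2, while the full-lun
+ * transfer also includes the packetized lun bytes.
+ */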
+/* PKT_OVERRUN_BUFSIZE must be a multiple of 256 less than 64K */
+const PKT_OVERRUN_BUFSIZE	512
+
+/*
+ * Timer parameters.
+ */
+const AHD_TIMER_US_PER_TICK	25
+const AHD_TIMER_MAX_TICKS	0xFFFF
+const AHD_TIMER_MAX_US		(AHD_TIMER_MAX_TICKS * AHD_TIMER_US_PER_TICK)
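+/* With 25us ticks, AHD_TIMER_MAX_US is 0xFFFF * 25 = 1,638,375us (~1.6 seconds). */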
+
+/*
+ * Downloaded (kernel inserted) constants
+ */
+const SG_PREFETCH_CNT download
+const SG_PREFETCH_CNT_LIMIT download
+const SG_PREFETCH_ALIGN_MASK download
+const SG_PREFETCH_ADDR_MASK download
+const SG_SIZEOF download
+const PKT_OVERRUN_BUFOFFSET download
+const SCB_TRANSFER_SIZE	download
+
+/*
+ * BIOS SCB offsets
+ */
+const NVRAM_SCB_OFFSET	0x2C
diff --git a/drivers/scsi/aic7xxx/aic79xx.seq b/drivers/scsi/aic7xxx/aic79xx.seq
new file mode 100644
index 0000000..65339bc
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx.seq
@@ -0,0 +1,2058 @@
+/*
+ * Adaptec U320 device driver firmware for Linux and FreeBSD.
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+
+VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#99 $"
+PATCH_ARG_LIST = "struct ahd_softc *ahd"
+PREFIX = "ahd_"
+
+#include "aic79xx.reg"
+#include "scsi_message.h"
+
+restart:
+if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
+	test	SEQINTCODE, 0xFF jz idle_loop;
+	SET_SEQINTCODE(NO_SEQINT)
+}
+
+idle_loop:
+
+	if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
+		/*
+		 * Convert ERROR status into a sequencer
+		 * interrupt to handle the case of an
+		 * interrupt collision on the hardware
+		 * setting of HWERR.
+		 */
+		test	ERROR, 0xFF jz no_error_set;
+		SET_SEQINTCODE(SAW_HWERR)
+no_error_set:
+	}
+	SET_MODE(M_SCSI, M_SCSI)
+	test	SCSISEQ0, ENSELO|ENARBO jnz idle_loop_checkbus;
+	test	SEQ_FLAGS2, SELECTOUT_QFROZEN jnz idle_loop_checkbus;
+	cmp	WAITING_TID_HEAD[1], SCB_LIST_NULL je idle_loop_checkbus;
+	/*
+	 * ENSELO is cleared by a SELDO, so we must test for SELDO
+	 * one last time.
+	 */
+BEGIN_CRITICAL;
+	test	SSTAT0, SELDO jnz select_out;
+END_CRITICAL;
+	call	start_selection;
+idle_loop_checkbus:
+BEGIN_CRITICAL;
+	test	SSTAT0, SELDO jnz select_out;
+END_CRITICAL;
+	test	SSTAT0, SELDI jnz select_in;
+	test	SCSIPHASE, ~DATA_PHASE_MASK jz idle_loop_check_nonpackreq;
+	test	SCSISIGO, ATNO jz idle_loop_check_nonpackreq;
+	call	unexpected_nonpkt_phase_find_ctxt;
+idle_loop_check_nonpackreq:
+	test	SSTAT2, NONPACKREQ jz . + 2;
+	call	unexpected_nonpkt_phase_find_ctxt;
+	if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) {
+		and	A, FIFO0FREE|FIFO1FREE, DFFSTAT;
+		cmp	A, FIFO0FREE|FIFO1FREE jne . + 3;
+		and	SBLKCTL, ~DIAGLEDEN|DIAGLEDON;
+		jmp	. + 2;
+		or	SBLKCTL, DIAGLEDEN|DIAGLEDON;
+	}
+	call	idle_loop_gsfifo_in_scsi_mode;
+	call	idle_loop_service_fifos;
+	call	idle_loop_cchan;
+	jmp	idle_loop;
+
+BEGIN_CRITICAL;
+idle_loop_gsfifo:
+	SET_MODE(M_SCSI, M_SCSI)
+idle_loop_gsfifo_in_scsi_mode:
+	test	LQISTAT2, LQIGSAVAIL jz return;
+	/*
+	 * We have received good status for this transaction.  There may
+	 * still be data in our FIFOs draining to the host.  Complete
+	 * the SCB only if all data has transferred to the host.
+	 */
+good_status_IU_done:
+	bmov	SCBPTR, GSFIFO, 2;
+	clr	SCB_SCSI_STATUS;
+	/*
+	 * If a command completed before an attempted task management
+	 * function completed, notify the host after disabling any
+	 * pending select-outs.
+	 */
+	test	SCB_TASK_MANAGEMENT, 0xFF jz gsfifo_complete_normally;
+	test	SSTAT0, SELDO|SELINGO jnz . + 2;
+	and	SCSISEQ0, ~ENSELO;
+	SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY)
+gsfifo_complete_normally:
+	or	SCB_CONTROL, STATUS_RCVD;
+
+	/*
+	 * Since this status did not consume a FIFO, we have to
+	 * be a bit more diligent in how we check for FIFOs pertaining
+	 * to this transaction.  There are two states that a FIFO still
+	 * transferring data may be in.
+	 *
+	 * 1) Configured and draining to the host, with a FIFO handler.
+	 * 2) Pending cfg4data, fifo not empty.
+	 *
+	 * Case 1 can be detected by noticing a non-zero FIFO active
+	 * count in the SCB.  In this case, we allow the routine servicing
+	 * the FIFO to complete the SCB.
+	 * 
+	 * Case 2 implies either a pending or yet to occur save data
+	 * pointers for this same context in the other FIFO.  So, if
+	 * we detect case 1, we will properly defer the post of the SCB
+	 * and achieve the desired result.  The pending cfg4data will
+	 * notice that status has been received and complete the SCB.
+	 */
+	test	SCB_FIFO_USE_COUNT, 0xFF jnz idle_loop_gsfifo_in_scsi_mode;
+	call	complete;
+END_CRITICAL;
+	jmp	idle_loop_gsfifo_in_scsi_mode;
+
+idle_loop_service_fifos:
+	SET_MODE(M_DFF0, M_DFF0)
+	test	LONGJMP_ADDR[1], INVALID_ADDR jnz idle_loop_next_fifo;
+	call	longjmp;
+idle_loop_next_fifo:
+	SET_MODE(M_DFF1, M_DFF1)
+	test	LONGJMP_ADDR[1], INVALID_ADDR jz longjmp;
+return:
+	ret;
+
+idle_loop_cchan:
+	SET_MODE(M_CCHAN, M_CCHAN)
+	test	QOFF_CTLSTA, HS_MAILBOX_ACT jz	hs_mailbox_empty;
+	or	QOFF_CTLSTA, HS_MAILBOX_ACT;
+	mov	LOCAL_HS_MAILBOX, HS_MAILBOX;
+hs_mailbox_empty:
+BEGIN_CRITICAL;
+	test	CCSCBCTL, CCARREN|CCSCBEN jz scbdma_idle;
+	test	CCSCBCTL, CCSCBDIR jnz fetch_new_scb_inprog;
+	test	CCSCBCTL, CCSCBDONE jz return;
+END_CRITICAL;
+	/* FALLTHROUGH */
+scbdma_tohost_done:
+	test	CCSCBCTL, CCARREN jz fill_qoutfifo_dmadone;
+	/*
+	 * An SCB has been successfully uploaded to the host.
+	 * If the SCB was uploaded for some reason other than
+	 * bad SCSI status (currently only for underruns), we
+	 * queue the SCB for normal completion.  Otherwise, we
+	 * wait until any select-out activity has halted, and
+	 * then notify the host so that the transaction can be
+	 * dealt with.
+	 */
+	test	SCB_SCSI_STATUS, 0xff jnz scbdma_notify_host;
+	and	CCSCBCTL, ~(CCARREN|CCSCBEN);
+	bmov	COMPLETE_DMA_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
+	bmov	SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
+	bmov	COMPLETE_SCB_HEAD, SCBPTR, 2 ret;
+scbdma_notify_host:
+	SET_MODE(M_SCSI, M_SCSI)
+	test	SCSISEQ0, ENSELO jnz return;
+	test	SSTAT0, (SELDO|SELINGO) jnz return;
+	SET_MODE(M_CCHAN, M_CCHAN)
+	/*
+	 * Remove SCB and notify host.
+	 */
+	and	CCSCBCTL, ~(CCARREN|CCSCBEN);
+	bmov	COMPLETE_DMA_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
+	SET_SEQINTCODE(BAD_SCB_STATUS)
+	ret;
+fill_qoutfifo_dmadone:
+	and	CCSCBCTL, ~(CCARREN|CCSCBEN);
+	call	qoutfifo_updated;
+	mvi	COMPLETE_SCB_DMAINPROG_HEAD[1], SCB_LIST_NULL;
+	bmov	QOUTFIFO_NEXT_ADDR, SCBHADDR, 4;
+	test	QOFF_CTLSTA, SDSCB_ROLLOVR jz return;
+	bmov	QOUTFIFO_NEXT_ADDR, SHARED_DATA_ADDR, 4;
+	xor	QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID_TOGGLE ret;
+
+qoutfifo_updated:
+	/*
+	 * If there are more commands waiting to be dma'ed
+	 * to the host, always coalesce.  Otherwise honor the
+	 * host's wishes.
+	 */
+	cmp	COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count;
+	cmp	COMPLETE_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count;
+	test	LOCAL_HS_MAILBOX, ENINT_COALESCE jz issue_cmdcmplt;
+
+	/*
+	 * If we have relatively few commands outstanding, don't
+	 * bother waiting for another command to complete.
+	 */
+	test	CMDS_PENDING[1], 0xFF jnz coalesce_by_count;
+	/* Add -1 so that jnc means <= not just < */
+	add	A, -1, INT_COALESCING_MINCMDS;
+	add	NONE, A, CMDS_PENDING;
+	jnc	issue_cmdcmplt;
+	
+	/*
+	 * If coalescing, only coalesce up to the limit
+	 * provided by the host driver.
+	 */
+coalesce_by_count:
+	mov	A, INT_COALESCING_MAXCMDS;
+	add	NONE, A, INT_COALESCING_CMDCOUNT;
+	jc	issue_cmdcmplt;
+	/*
+	 * If the timer is not currently active,
+	 * fire it up.
+	 */
+	test	INTCTL, SWTMINTMASK jz return;
+	bmov	SWTIMER, INT_COALESCING_TIMER, 2;
+	mvi	CLRSEQINTSTAT, CLRSEQ_SWTMRTO;
+	or	INTCTL, SWTMINTEN|SWTIMER_START;
+	and	INTCTL, ~SWTMINTMASK ret;
+
+issue_cmdcmplt:
+	mvi	INTSTAT, CMDCMPLT;
+	clr	INT_COALESCING_CMDCOUNT;
+	or	INTCTL, SWTMINTMASK ret;
+
+BEGIN_CRITICAL;
+fetch_new_scb_inprog:
+	test	CCSCBCTL, ARRDONE jz return;
+fetch_new_scb_done:
+	and	CCSCBCTL, ~(CCARREN|CCSCBEN);
+	bmov	REG0, SCBPTR, 2;
+	clr	A;
+	add	CMDS_PENDING, 1;
+	adc	CMDS_PENDING[1], A;
+	if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
+		/*
+		 * "Short Luns" are not placed into outgoing LQ
+		 * packets in the correct byte order.  Use a full
+		 * sized lun field instead and fill it with the
+		 * one byte of lun information we support.
+		 */
+		mov	SCB_PKT_LUN[6], SCB_LUN;
+	}
+	/*
+	 * The FIFO use count field is shared with the
+	 * tag set by the host so that our SCB dma engine
+	 * knows the correct location to store the SCB.
+	 * Set it to zero before processing the SCB.
+	 */
+	clr	SCB_FIFO_USE_COUNT;
+	/* Update the next SCB address to download. */
+	bmov	NEXT_QUEUED_SCB_ADDR, SCB_NEXT_SCB_BUSADDR, 4;
+	mvi	SCB_NEXT[1], SCB_LIST_NULL;
+	mvi	SCB_NEXT2[1], SCB_LIST_NULL;
+	/* Increment our position in the QINFIFO. */
+	mov	NONE, SNSCB_QOFF;
+	/*
+	 * SCBs that want to send messages are always
+	 * queued independently.  This ensures that they
+	 * are at the head of the SCB list to select out
+	 * to a target and we will see the MK_MESSAGE flag.
+	 */
+	test	SCB_CONTROL, MK_MESSAGE jnz first_new_target_scb;
+	shr	SINDEX, 3, SCB_SCSIID;
+	and	SINDEX, ~0x1;
+	mvi	SINDEX[1], (WAITING_SCB_TAILS >> 8);
+	bmov	DINDEX, SINDEX, 2;
+	bmov	SCBPTR, SINDIR, 2;
+	bmov	DINDIR, REG0, 2;
+	cmp	SCBPTR[1], SCB_LIST_NULL je first_new_target_scb;
+	bmov	SCB_NEXT, REG0, 2 ret;
+first_new_target_scb:
+	cmp	WAITING_TID_HEAD[1], SCB_LIST_NULL je first_new_scb;
+	bmov	SCBPTR, WAITING_TID_TAIL, 2;
+	bmov	SCB_NEXT2, REG0, 2;
+	bmov	WAITING_TID_TAIL, REG0, 2 ret;
+first_new_scb:
+	bmov	WAITING_TID_HEAD, REG0, 2;
+	bmov	WAITING_TID_TAIL, REG0, 2 ret;
+END_CRITICAL;
+
+scbdma_idle:
+	/*
+	 * Give precedence to downloading new SCBs to execute
+	 * unless select-outs are currently frozen.
+	 */
+	test	SEQ_FLAGS2, SELECTOUT_QFROZEN jnz . + 2;
+BEGIN_CRITICAL;
+	test	QOFF_CTLSTA, NEW_SCB_AVAIL jnz fetch_new_scb;
+	cmp	COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne dma_complete_scb;
+	cmp	COMPLETE_SCB_HEAD[1], SCB_LIST_NULL je return;
+	/* FALLTHROUGH */
+fill_qoutfifo:
+	/*
+	 * Keep track of the SCBs we are dmaing just
+	 * in case the DMA fails or is aborted.
+	 */
+	mov	A, QOUTFIFO_ENTRY_VALID_TAG;
+	bmov	COMPLETE_SCB_DMAINPROG_HEAD, COMPLETE_SCB_HEAD, 2;
+	mvi	CCSCBCTL, CCSCBRESET;
+	bmov	SCBHADDR, QOUTFIFO_NEXT_ADDR, 4;
+	bmov	SCBPTR, COMPLETE_SCB_HEAD, 2;
+fill_qoutfifo_loop:
+	mov	CCSCBRAM, SCBPTR;
+	or	CCSCBRAM, A, SCBPTR[1];
+	mov	NONE, SDSCB_QOFF;
+	inc	INT_COALESCING_CMDCOUNT;
+	add	CMDS_PENDING, -1;
+	adc	CMDS_PENDING[1], -1;
+	cmp	SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je fill_qoutfifo_done;
+	cmp	CCSCBADDR, CCSCBADDR_MAX je fill_qoutfifo_done;
+	test	QOFF_CTLSTA, SDSCB_ROLLOVR jnz fill_qoutfifo_done;
+	bmov	SCBPTR, SCB_NEXT_COMPLETE, 2;
+	jmp	fill_qoutfifo_loop;
+fill_qoutfifo_done:
+	mov	SCBHCNT, CCSCBADDR;
+	mvi	CCSCBCTL, CCSCBEN|CCSCBRESET;
+	bmov	COMPLETE_SCB_HEAD, SCB_NEXT_COMPLETE, 2;
+	mvi	SCB_NEXT_COMPLETE[1], SCB_LIST_NULL ret;
+
+fetch_new_scb:
+	bmov	SCBHADDR, NEXT_QUEUED_SCB_ADDR, 4;
+	mvi	CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET jmp dma_scb;
+dma_complete_scb:
+	bmov	SCBPTR, COMPLETE_DMA_SCB_HEAD, 2;
+	bmov	SCBHADDR, SCB_BUSADDR, 4;
+	mvi	CCARREN|CCSCBEN|CCSCBRESET jmp dma_scb;
+END_CRITICAL;
+
+/*
+ * Either post an SCB to or fetch an SCB from host memory.  The caller
+ * is responsible for polling for transfer completion.
+ *
+ * Prerequisites: Mode == M_CCHAN
+ *		 SINDEX contains CCSCBCTL flags
+ *		 SCBHADDR set to Host SCB address
+ *		 SCBPTR set to SCB src location on "push" operations
+ */
+SET_SRC_MODE	M_CCHAN;
+SET_DST_MODE	M_CCHAN;
+dma_scb:
+	mvi	SCBHCNT, SCB_TRANSFER_SIZE;
+	mov	CCSCBCTL, SINDEX ret;
+
+BEGIN_CRITICAL;
+setjmp:
+	bmov	LONGJMP_ADDR, STACK, 2 ret;
+setjmp_inline:
+	bmov	LONGJMP_ADDR, STACK, 2;
+longjmp:
+	bmov	STACK, LONGJMP_ADDR, 2 ret;
+END_CRITICAL;
+
+/*************************** Chip Bug Work Arounds ****************************/
+/*
+ * Must disable interrupts when setting the mode pointer
+ * register as an interrupt occurring mid update will
+ * fail to store the new mode value for restoration on
+ * an iret.
+ */
+if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) {
+set_mode_work_around:
+	mvi	SEQINTCTL, INTVEC1DSL;
+	mov	MODE_PTR, SINDEX;
+	clr	SEQINTCTL ret;
+
+toggle_dff_mode_work_around:
+	mvi	SEQINTCTL, INTVEC1DSL;
+	xor	MODE_PTR, MK_MODE(M_DFF1, M_DFF1);
+	clr	SEQINTCTL ret;
+}
+
+
+if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
+set_seqint_work_around:
+	mov	SEQINTCODE, SINDEX;
+	mvi	SEQINTCODE, NO_SEQINT ret;
+}
+
+/************************ Packetized LongJmp Routines *************************/
+SET_SRC_MODE	M_SCSI;
+SET_DST_MODE	M_SCSI;
+start_selection:
+BEGIN_CRITICAL;
+	if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) {
+		/*
+		 * Razor #494
+		 * Rev A hardware fails to update LAST/CURR/NEXTSCB
+		 * correctly after a packetized selection in several
+		 * situations:
+		 *
+		 * 1) If only one command existed in the queue, the
+		 *    LAST/CURR/NEXTSCB are unchanged.
+		 *
+		 * 2) In a non QAS, protocol allowed phase change,
+		 *    the queue is shifted 1 too far.  LASTSCB is
+		 *    the last SCB that was correctly processed.
+		 * 
+		 * 3) In the QAS case, if the full list of commands
+		 *    was successfully sent, NEXTSCB is NULL and neither
+		 *    CURRSCB nor LASTSCB can be trusted.  We must
+		 *    manually walk the list counting MAXCMDCNT elements
+		 *    to find the last SCB that was sent correctly.
+		 *
+		 * To simplify the workaround for this bug in SELDO
+		 * handling, we initialize LASTSCB prior to enabling
+		 * selection so we can rely on it even for case #1 above.
+		 */
+		bmov	LASTSCB, WAITING_TID_HEAD, 2;
+	}
+	bmov	CURRSCB, WAITING_TID_HEAD, 2;
+	bmov	SCBPTR, WAITING_TID_HEAD, 2;
+	shr	SELOID, 4, SCB_SCSIID;
+	/*
+	 * If we want to send a message to the device, ensure
+	 * we are selecting with atn regardless of our packetized
+	 * agreement.  Since SPI4 only allows target reset or PPR
+	 * messages if this is a packetized connection, the change
+	 * to our negotiation table entry for this selection will
+	 * be cleared when the message is acted on.
+	 */
+	test	SCB_CONTROL, MK_MESSAGE jz . + 3;
+	mov	NEGOADDR, SELOID;
+	or	NEGCONOPTS, ENAUTOATNO;
+	or	SCSISEQ0, ENSELO ret;
+END_CRITICAL;
+
+/*
+ * Allocate a FIFO for a non-packetized transaction.
+ * In RevA hardware, both FIFOs must be free before we
+ * can allocate a FIFO for a non-packetized transaction.
+ */
+allocate_fifo_loop:
+	/*
+	 * Do whatever work is required to free a FIFO.
+	 */
+	call	idle_loop_service_fifos;
+	SET_MODE(M_SCSI, M_SCSI)
+allocate_fifo:
+	if ((ahd->bugs & AHD_NONPACKFIFO_BUG) != 0) {
+		and	A, FIFO0FREE|FIFO1FREE, DFFSTAT;
+		cmp	A, FIFO0FREE|FIFO1FREE jne allocate_fifo_loop;
+	} else {
+		test	DFFSTAT, FIFO1FREE jnz allocate_fifo1;
+		test	DFFSTAT, FIFO0FREE jz allocate_fifo_loop;
+		mvi	DFFSTAT, B_CURRFIFO_0;
+		SET_MODE(M_DFF0, M_DFF0)
+		bmov	SCBPTR, ALLOCFIFO_SCBPTR, 2 ret;
+	}
+SET_SRC_MODE	M_SCSI;
+SET_DST_MODE	M_SCSI;
+allocate_fifo1:
+	mvi	DFFSTAT, CURRFIFO_1;
+	SET_MODE(M_DFF1, M_DFF1)
+	bmov	SCBPTR, ALLOCFIFO_SCBPTR, 2 ret;
+
+/*
+ * We have been reselected as an initiator
+ * or selected as a target.
+ */
+SET_SRC_MODE	M_SCSI;
+SET_DST_MODE	M_SCSI;
+select_in:
+	if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
+		/*
+		 * Test to ensure that the bus has not
+		 * already gone free prior to clearing
+		 * any stale busfree status.  This avoids
+		 * a window whereby a busfree just after
+		 * a selection could be missed.
+		 */
+		test	SCSISIGI, BSYI jz . + 2;
+		mvi	CLRSINT1,CLRBUSFREE;
+		or	SIMODE1, ENBUSFREE;
+	}
+	or	SXFRCTL0, SPIOEN;
+	and	SAVED_SCSIID, SELID_MASK, SELID;
+	and	A, OID, IOWNID;
+	or	SAVED_SCSIID, A;
+	mvi	CLRSINT0, CLRSELDI;
+	jmp	ITloop;
+
+/*
+ * We have successfully selected out.
+ *
+ * Clear SELDO.
+ * Dequeue all SCBs sent from the waiting queue
+ * Requeue all SCBs *not* sent to the tail of the waiting queue
+ * Take Razor #494 into account for above.
+ *
+ * In Packetized Mode:
+ *	Return to the idle loop.  Our interrupt handler will take
+ *	care of any incoming L_Qs.
+ *
+ * In Non-Packetized Mode:
+ *	Continue to our normal state machine.
+ */
+SET_SRC_MODE	M_SCSI;
+SET_DST_MODE	M_SCSI;
+select_out:
+BEGIN_CRITICAL;
+	/* Clear out all SCBs that have been successfully sent. */
+	if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) {
+		/*
+		 * For packetized, the LQO manager clears ENSELO on
+		 * the assertion of SELDO.  If we are non-packetized,
+		 * LASTSCB and CURRSCB are accurate.
+		 */
+		test	SCSISEQ0, ENSELO jnz use_lastscb;
+
+		/*
+		 * The update is correct for LQOSTAT1 errors.  All
+		 * but LQOBUSFREE are handled by kernel interrupts.
+		 * If we see LQOBUSFREE, return to the idle loop.
+		 * Once we are out of the select_out critical section,
+		 * the kernel will cleanup the LQOBUSFREE and we will
+		 * eventually restart the selection if appropriate.
+		 */
+		test	LQOSTAT1, LQOBUSFREE jnz idle_loop;
+
+		/*
+		 * On a phase change outside of packet boundaries,
+		 * LASTSCB points to the currently active SCB context
+		 * on the bus.
+		 */
+		test	LQOSTAT2, LQOPHACHGOUTPKT jnz use_lastscb;
+
+		/*
+		 * If the hardware has traversed the whole list, NEXTSCB
+		 * will be NULL, CURRSCB and LASTSCB cannot be trusted,
+		 * but MAXCMDCNT is accurate.  If we stop part way through
+		 * the list or only had one command to issue, NEXTSCB[1] is
+		 * not NULL and LASTSCB is the last command to go out.
+		 */
+		cmp	NEXTSCB[1], SCB_LIST_NULL jne use_lastscb;
+
+		/*
+		 * Brute force walk.
+		 */
+		bmov	SCBPTR, WAITING_TID_HEAD, 2;
+		mvi	SEQINTCTL, INTVEC1DSL;
+		mvi	MODE_PTR, MK_MODE(M_CFG, M_CFG);
+		mov	A, MAXCMDCNT;
+		mvi	MODE_PTR, MK_MODE(M_SCSI, M_SCSI);
+		clr	SEQINTCTL;
+find_lastscb_loop:
+		dec	A;
+		test	A, 0xFF jz found_last_sent_scb;
+		bmov	SCBPTR, SCB_NEXT, 2;
+		jmp	find_lastscb_loop;
+use_lastscb:
+		bmov	SCBPTR, LASTSCB, 2;
+found_last_sent_scb:
+		bmov	CURRSCB, SCBPTR, 2;
+curscb_ww_done:
+	} else {
+		bmov	SCBPTR, CURRSCB, 2;
+	}
+
+	/*
+	 * Requeue any SCBs not sent, to the tail of the waiting Q.
+	 */
+	cmp	SCB_NEXT[1], SCB_LIST_NULL je select_out_list_done;
+
+	/*
+	 * We know that neither the per-TID list nor the list of
+	 * TIDs is empty.  Use this knowledge to our advantage.
+	 */
+	bmov	REG0, SCB_NEXT, 2;
+	bmov	SCBPTR, WAITING_TID_TAIL, 2;
+	bmov	SCB_NEXT2, REG0, 2;
+	bmov	WAITING_TID_TAIL, REG0, 2;
+	jmp	select_out_inc_tid_q;
+
+select_out_list_done:
+	/*
+	 * The whole list made it.  Just clear our TID's tail pointer
+	 * unless we were queued independently due to our need to
+	 * send a message.
+	 */
+	test	SCB_CONTROL, MK_MESSAGE jnz select_out_inc_tid_q;
+	shr	DINDEX, 3, SCB_SCSIID;
+	or	DINDEX, 1;	/* Want only the second byte */
+	mvi	DINDEX[1], ((WAITING_SCB_TAILS) >> 8);
+	mvi	DINDIR, SCB_LIST_NULL;
+select_out_inc_tid_q:
+	bmov	SCBPTR, WAITING_TID_HEAD, 2;
+	bmov	WAITING_TID_HEAD, SCB_NEXT2, 2;
+	cmp	WAITING_TID_HEAD[1], SCB_LIST_NULL jne . + 2;
+	mvi	WAITING_TID_TAIL[1], SCB_LIST_NULL;
+	bmov	SCBPTR, CURRSCB, 2;
+	mvi	CLRSINT0, CLRSELDO;
+	test	LQOSTAT2, LQOPHACHGOUTPKT jnz unexpected_nonpkt_phase;
+	test	LQOSTAT1, LQOPHACHGINPKT jnz unexpected_nonpkt_phase;
+
+	/*
+	 * If this is a packetized connection, return to our
+	 * idle_loop and let our interrupt handler deal with
+	 * any connection setup/teardown issues.  The only
+	 * exceptions are the case of MK_MESSAGE and task management
+	 * SCBs.
+	 */
+	if ((ahd->bugs & AHD_LQO_ATNO_BUG) != 0) {
+		/*
+		 * On Rev A hardware, the LQO manager transitions to LQOSTOP0 even if
+		 * we have selected out with ATN asserted and the target
+		 * REQs in a non-packet phase.
+		 */
+		test 	SCB_CONTROL, MK_MESSAGE jz select_out_no_message;
+		test	SCSISIGO, ATNO jnz select_out_non_packetized;
+select_out_no_message:
+	}
+	test	LQOSTAT2, LQOSTOP0 jz select_out_non_packetized;
+	test	SCB_TASK_MANAGEMENT, 0xFF jz idle_loop;
+	SET_SEQINTCODE(TASKMGMT_FUNC_COMPLETE)
+	jmp	idle_loop;
+
+select_out_non_packetized:
+	/* Non packetized request. */
+	and     SCSISEQ0, ~ENSELO;
+	if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
+		/*
+		 * Test to ensure that the bus has not
+		 * already gone free prior to clearing
+		 * any stale busfree status.  This avoids
+		 * a window whereby a busfree just after
+		 * a selection could be missed.
+		 */
+		test	SCSISIGI, BSYI jz . + 2;
+		mvi	CLRSINT1,CLRBUSFREE;
+		or	SIMODE1, ENBUSFREE;
+	}
+	mov	SAVED_SCSIID, SCB_SCSIID;
+	mov	SAVED_LUN, SCB_LUN;
+	mvi	SEQ_FLAGS, NO_CDB_SENT;
+END_CRITICAL;
+	or	SXFRCTL0, SPIOEN;
+
+	/*
+	 * As soon as we get a successful selection, the target
+	 * should go into the message out phase since we have ATN
+	 * asserted.
+	 */
+	mvi	MSG_OUT, MSG_IDENTIFYFLAG;
+
+	/*
+	 * Main loop for information transfer phases.  Wait for the
+	 * target to assert REQ before checking MSG, C/D and I/O for
+	 * the bus phase.
+	 */
+mesgin_phasemis:
+ITloop:
+	call	phase_lock;
+
+	mov	A, LASTPHASE;
+
+	test	A, ~P_DATAIN_DT	jz p_data;
+	cmp	A,P_COMMAND	je p_command;
+	cmp	A,P_MESGOUT	je p_mesgout;
+	cmp	A,P_STATUS	je p_status;
+	cmp	A,P_MESGIN	je p_mesgin;
+
+	SET_SEQINTCODE(BAD_PHASE)
+	jmp	ITloop;			/* Try reading the bus again. */
+
+/*
+ * Command phase.  Set up the DMA registers and let 'er rip.
+ */
+p_command:
+	test	SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay;
+	SET_SEQINTCODE(PROTO_VIOLATION)
+p_command_okay:
+	test	MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
+		jnz p_command_allocate_fifo;
+	/*
+	 * Command retry.  Free our current FIFO and
+	 * re-allocate a FIFO so transfer state is
+	 * reset.
+	 */
+SET_SRC_MODE	M_DFF1;
+SET_DST_MODE	M_DFF1;
+	mvi	DFFSXFRCTL, RSTCHN|CLRSHCNT;
+	SET_MODE(M_SCSI, M_SCSI)
+p_command_allocate_fifo:
+	bmov	ALLOCFIFO_SCBPTR, SCBPTR, 2;
+	call	allocate_fifo;
+SET_SRC_MODE	M_DFF1;
+SET_DST_MODE	M_DFF1;
+	add	NONE, -17, SCB_CDB_LEN;
+	jnc	p_command_embedded;
+p_command_from_host:
+	bmov	HADDR[0], SCB_HOST_CDB_PTR, 9;
+	mvi	SG_CACHE_PRE, LAST_SEG;
+	mvi	DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
+	jmp	p_command_xfer;
+p_command_embedded:
+	bmov	SHCNT[0], SCB_CDB_LEN,  1;
+	bmov	DFDAT, SCB_CDB_STORE, 16; 
+	mvi	DFCNTRL, SCSIEN;
+p_command_xfer:
+	and	SEQ_FLAGS, ~NO_CDB_SENT;
+	if ((ahd->features & AHD_FAST_CDB_DELIVERY) != 0) {
+		/*
+		 * To speed up CDB delivery in Rev B, all CDB acks
+		 * are "released" to the output sync as soon as the
+		 * command phase starts.  There is only one problem
+		 * with this approach.  If the target changes phase
+		 * before all data are sent, we have left over acks
+		 * that can go out on the bus in a data phase.  Due
+		 * to other chip constraints, this only happens if
+		 * the target goes to data-in, but if the acks go
+		 * out before we can test SDONE, we'll think that
+		 * the transfer has completed successfully.  Work
+		 * around this by taking advantage of the 400ns or
+		 * 800ns dead time between command phase and the REQ
+		 * of the new phase.  If the transfer has completed
+		 * successfully, SCSIEN should fall *long* before we
+		 * see a phase change.  We thus treat any phasemiss
+		 * that occurs before SCSIEN falls as an incomplete
+		 * transfer.
+		 */
+		test	SSTAT1, PHASEMIS jnz p_command_xfer_failed;
+		test	DFCNTRL, SCSIEN jnz . - 1;
+	} else {
+		test	DFCNTRL, SCSIEN jnz .;
+	}
+	/*
+	 * DMA Channel automatically disabled.
+	 * Don't allow a data phase if the command
+	 * was not fully transferred.
+	 */
+	test	SSTAT2, SDONE jnz ITloop;
+p_command_xfer_failed:
+	or	SEQ_FLAGS, NO_CDB_SENT;
+	jmp	ITloop;
+
+
+/*
+ * Status phase.  Wait for the data byte to appear, then read it
+ * and store it into the SCB.
+ */
+SET_SRC_MODE	M_SCSI;
+SET_DST_MODE	M_SCSI;
+p_status:
+	test	SEQ_FLAGS,NOT_IDENTIFIED jnz mesgin_proto_violation;
+p_status_okay:
+	mov	SCB_SCSI_STATUS, SCSIDAT;
+	or	SCB_CONTROL, STATUS_RCVD;
+	jmp	ITloop;
+
+/*
+ * Message out phase.  If MSG_OUT is MSG_IDENTIFYFLAG, build a full
+ * identify message sequence and send it to the target.  The host may
+ * override this behavior by setting the MK_MESSAGE bit in the SCB
+ * control byte.  This will cause us to interrupt the host and allow
+ * it to handle the message phase completely on its own.  If the bit
+ * associated with this target is set, we will also interrupt the host,
+ * thereby allowing it to send a message on the next selection regardless
+ * of the transaction being sent.
+ * 
+ * If MSG_OUT == HOST_MSG, also interrupt the host and take a message.
+ * This is done to allow the host to send messages outside of an identify
+ * sequence while protecting the sequencer from testing the MK_MESSAGE bit
+ * on an SCB that might not be for the current nexus. (For example, a
+ * BDR message in response to a bad reselection would leave us pointed to
+ * an SCB that doesn't have anything to do with the current target).
+ *
+ * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag,
+ * bus device reset).
+ *
+ * When there are no messages to send, MSG_OUT should be set to MSG_NOOP,
+ * in case the target decides to put us in this phase for some strange
+ * reason.
+ */
+p_mesgout_retry:
+	/* Turn on ATN for the retry */
+	mvi	SCSISIGO, ATNO;
+p_mesgout:
+	mov	SINDEX, MSG_OUT;
+	cmp	SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host;
+	test	SCB_CONTROL,MK_MESSAGE	jnz host_message_loop;
+p_mesgout_identify:
+	or	SINDEX, MSG_IDENTIFYFLAG|DISCENB, SCB_LUN;
+	test	SCB_CONTROL, DISCENB jnz . + 2;
+	and	SINDEX, ~DISCENB;
+/*
+ * Send a tag message if TAG_ENB is set in the SCB control block.
+ * Use SCB_NONPACKET_TAG as the tag value.
+ */
+p_mesgout_tag:
+	test	SCB_CONTROL,TAG_ENB jz  p_mesgout_onebyte;
+	mov	SCSIDAT, SINDEX;	/* Send the identify message */
+	call	phase_lock;
+	cmp	LASTPHASE, P_MESGOUT	jne p_mesgout_done;
+	and	SCSIDAT,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL;
+	call	phase_lock;
+	cmp	LASTPHASE, P_MESGOUT	jne p_mesgout_done;
+	mov	SCBPTR jmp p_mesgout_onebyte;
+/*
+ * Interrupt the driver, and allow it to handle this message
+ * phase and any required retries.
+ */
+p_mesgout_from_host:
+	cmp	SINDEX, HOST_MSG	jne p_mesgout_onebyte;
+	jmp	host_message_loop;
+
+p_mesgout_onebyte:
+	mvi	CLRSINT1, CLRATNO;
+	mov	SCSIDAT, SINDEX;
+
+/*
+ * If the next bus phase after ATN drops is message out, it means
+ * that the target is requesting that the last message(s) be resent.
+ */
+	call	phase_lock;
+	cmp	LASTPHASE, P_MESGOUT	je p_mesgout_retry;
+
+p_mesgout_done:
+	mvi	CLRSINT1,CLRATNO;	/* Be sure to turn ATNO off */
+	mov	LAST_MSG, MSG_OUT;
+	mvi	MSG_OUT, MSG_NOOP;	/* No message left */
+	jmp	ITloop;
+
+/*
+ * Message in phase.  Bytes are read using Automatic PIO mode.
+ */
+p_mesgin:
+	/* read the 1st message byte */
+	mvi	ACCUM		call inb_first;
+
+	test	A,MSG_IDENTIFYFLAG	jnz mesgin_identify;
+	cmp	A,MSG_DISCONNECT	je mesgin_disconnect;
+	cmp	A,MSG_SAVEDATAPOINTER	je mesgin_sdptrs;
+	cmp	ALLZEROS,A		je mesgin_complete;
+	cmp	A,MSG_RESTOREPOINTERS	je mesgin_rdptrs;
+	cmp	A,MSG_IGN_WIDE_RESIDUE	je mesgin_ign_wide_residue;
+	cmp	A,MSG_NOOP		je mesgin_done;
+
+/*
+ * Pushed message loop to allow the kernel to
+ * run its own message state engine.  To avoid an
+ * extra nop instruction after signaling the kernel,
+ * we perform the phase_lock before checking to see
+ * if we should exit the loop and skip the phase_lock
+ * in the ITloop.  Performing back to back phase_locks
+ * shouldn't hurt, but why do it twice...
+ */
+host_message_loop:
+	call	phase_lock;	/* Benign the first time through. */
+	SET_SEQINTCODE(HOST_MSG_LOOP)
+	cmp	RETURN_1, EXIT_MSG_LOOP	je ITloop;
+	cmp	RETURN_1, CONT_MSG_LOOP_WRITE	jne . + 3;
+	mov	SCSIDAT, RETURN_2;
+	jmp	host_message_loop;
+	/* Must be CONT_MSG_LOOP_READ */
+	mov	NONE, SCSIDAT;	/* ACK Byte */
+	jmp	host_message_loop;
+
+mesgin_ign_wide_residue:
+	mov	SAVED_MODE, MODE_PTR;
+	SET_MODE(M_SCSI, M_SCSI)
+	shr	NEGOADDR, 4, SAVED_SCSIID;
+	mov	A, NEGCONOPTS;
+	RESTORE_MODE(SAVED_MODE)
+	test	A, WIDEXFER jz mesgin_reject;
+	/* Pull the residue byte */
+	mvi	REG0	call inb_next;
+	cmp	REG0, 0x01 jne mesgin_reject;
+	test	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2;
+	test	SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jnz mesgin_done;
+	SET_SEQINTCODE(IGN_WIDE_RES)
+	jmp	mesgin_done;
+
+mesgin_proto_violation:
+	SET_SEQINTCODE(PROTO_VIOLATION)
+	jmp	mesgin_done;
+mesgin_reject:
+	mvi	MSG_MESSAGE_REJECT	call mk_mesg;
+mesgin_done:
+	mov	NONE,SCSIDAT;		/*dummy read from latch to ACK*/
+	jmp	ITloop;
+
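+/*
+ * Index the per-device disconnected transaction storage.  SCBPTR is
+ * loaded with an index built from bits of the SCSI ID and the LUN,
+ * and SINDEX is pointed at the matching entry in the
+ * SCB_DISCONNECTED_LISTS scratch ram array.
+ */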
+#define INDEX_DISC_LIST(scsiid, lun)					\
+	and	A, 0xC0, scsiid;					\
+	or	SCBPTR, A, lun;						\
+	clr	SCBPTR[1];						\
+	and	SINDEX, 0x30, scsiid;					\
+	shr	SINDEX, 3;	/* Multiply by 2 */			\
+	add	SINDEX, (SCB_DISCONNECTED_LISTS & 0xFF);		\
+	mvi	SINDEX[1], ((SCB_DISCONNECTED_LISTS >> 8) & 0xFF)
+
+mesgin_identify:
+	/*
+	 * Determine whether a target is using tagged or non-tagged
+	 * transactions by first looking at the transaction stored in
+	 * the per-device, disconnected array.  If there is no untagged
+	 * transaction for this target, this must be a tagged transaction.
+	 */
+	and	SAVED_LUN, MSG_IDENTIFY_LUNMASK, A;
+	INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN);
+	bmov	DINDEX, SINDEX, 2;
+	bmov	REG0, SINDIR, 2;
+	cmp	REG0[1], SCB_LIST_NULL je snoop_tag;
+	/* Untagged.  Clear the busy table entry and setup the SCB. */
+	bmov	DINDIR, ALLONES, 2;
+	bmov	SCBPTR, REG0, 2;
+	jmp	setup_SCB;
+
+/*
+ * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message.
+ * If we get one, we use the tag returned to find the proper
+ * SCB.  After receiving the tag, look for the SCB at SCB locations tag and
+ * tag + 256.
+ */
+snoop_tag:
+	if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
+		or	SEQ_FLAGS, 0x80;
+	}
+	mov	NONE, SCSIDAT;		/* ACK Identify MSG */
+	call	phase_lock;
+	if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
+		or	SEQ_FLAGS, 0x1;
+	}
+	cmp	LASTPHASE, P_MESGIN	jne not_found_ITloop;
+	if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
+		or	SEQ_FLAGS, 0x2;
+	}
+	cmp	SCSIBUS, MSG_SIMPLE_Q_TAG jne not_found;
+get_tag:
+	clr	SCBPTR[1];
+	mvi	SCBPTR	call inb_next;	/* tag value */
+verify_scb:
+	test	SCB_CONTROL,DISCONNECTED jz verify_other_scb;
+	mov	A, SAVED_SCSIID;
+	cmp	SCB_SCSIID, A jne verify_other_scb;
+	mov	A, SAVED_LUN;
+	cmp	SCB_LUN, A je setup_SCB_disconnected;
+verify_other_scb:
+	xor	SCBPTR[1], 1;
+	test	SCBPTR[1], 0xFF jnz verify_scb;
+	jmp	not_found;
+
+/*
+ * Ensure that the SCB the tag points to is for
+ * an SCB transaction to the reconnecting target.
+ */
+setup_SCB:
+	if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) {
+		or	SEQ_FLAGS, 0x10;
+	}
+	test	SCB_CONTROL,DISCONNECTED jz not_found;
+setup_SCB_disconnected:
+	and	SCB_CONTROL,~DISCONNECTED;
+	clr	SEQ_FLAGS;	/* make note of IDENTIFY */
+	test	SCB_SGPTR, SG_LIST_NULL jnz . + 3;
+	bmov	ALLOCFIFO_SCBPTR, SCBPTR, 2;
+	call	allocate_fifo;
+	/* See if the host wants to send a message upon reconnection */
+	test	SCB_CONTROL, MK_MESSAGE jz mesgin_done;
+	mvi	HOST_MSG	call mk_mesg;
+	jmp	mesgin_done;
+
+not_found:
+	SET_SEQINTCODE(NO_MATCH)
+	jmp	mesgin_done;
+
+not_found_ITloop:
+	SET_SEQINTCODE(NO_MATCH)
+	jmp	ITloop;
+
+/*
+ * We received a "command complete" message.  Put the SCB on the complete
+ * queue and trigger a completion interrupt via the idle loop.  Before doing
+ * so, check to see if there
+ * is a residual or the status byte is something other than STATUS_GOOD (0).
+ * In either of these conditions, we upload the SCB back to the host so it can
+ * process this information.  In the case of a non-zero status byte, we
+ * additionally interrupt the kernel driver synchronously, allowing it to
+ * decide if sense should be retrieved.  If the kernel driver wishes to request
+ * sense, it will fill the kernel SCB with a request sense command, requeue
+ * it to the QINFIFO and tell us not to post to the QOUTFIFO by setting 
+ * RETURN_1 to SEND_SENSE.
+ */
+mesgin_complete:
+
+	/*
+	 * If ATN is raised, we still want to give the target a message.
+	 * Perhaps there was a parity error on this last message byte.
+	 * Either way, the target should take us to message out phase
+	 * and then attempt to complete the command again.  We should use a
+	 * critical section here to guard against a timeout triggering
+	 * for this command and setting ATN while we are still processing
+	 * the completion.
+	test	SCSISIGI, ATNI jnz mesgin_done;
+	 */
+
+	/*
+	 * If we are identified and have successfully sent the CDB,
+	 * any status will do.  Optimize this fast path.
+	 */
+	test	SCB_CONTROL, STATUS_RCVD jz mesgin_proto_violation;
+	test	SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz complete_accepted;
+
+	/*
+	 * If the target never sent an identify message but instead went
+	 * to mesgin to give an invalid message, let the host abort us.
+	 */
+	test	SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation;
+
+	/*
+	 * If we received good status but never successfully sent the
+	 * CDB, abort the command.
+	 */
+	test	SCB_SCSI_STATUS,0xff	jnz complete_accepted;
+	test	SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation;
+complete_accepted:
+
+	/*
+	 * See if we attempted to deliver a message but the target ignored us.
+	 */
+	test	SCB_CONTROL, MK_MESSAGE jz complete_nomsg;
+	SET_SEQINTCODE(MKMSG_FAILED)
+complete_nomsg:
+	call	queue_scb_completion;
+	jmp	await_busfree;
+
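+/*
+ * Freeze the select-out queue.  Bump the two byte QFREEZE_COUNT, note
+ * the freeze in SEQ_FLAGS2 (SELECTOUT_QFROZEN), and cancel any
+ * select-out that has not yet started.  The accumulator is preserved
+ * across this routine.
+ */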
+freeze_queue:
+	/* Cancel any pending select-out. */
+	test	SSTAT0, SELDO|SELINGO jnz . + 2;
+	and	SCSISEQ0, ~ENSELO;
+	mov	ACCUM_SAVE, A;
+	clr	A;
+	add	QFREEZE_COUNT, 1;
+	adc	QFREEZE_COUNT[1], A;
+	or	SEQ_FLAGS2, SELECTOUT_QFROZEN;
+	mov	A, ACCUM_SAVE ret;
+
+/*
+ * Complete the current FIFO's SCB if data for this same
+ * SCB is not transferring in the other FIFO.
+ */
+SET_SRC_MODE	M_DFF1;
+SET_DST_MODE	M_DFF1;
+pkt_complete_scb_if_fifos_idle:
+	bmov	ARG_1, SCBPTR, 2;
+	mvi	DFFSXFRCTL, CLRCHN;
+	SET_MODE(M_SCSI, M_SCSI)
+	bmov	SCBPTR, ARG_1, 2;
+	test	SCB_FIFO_USE_COUNT, 0xFF jnz return;
+queue_scb_completion:
+	test	SCB_SCSI_STATUS,0xff	jnz bad_status;
+	/*
+	 * Check for residuals
+	 */
+	test	SCB_SGPTR, SG_LIST_NULL jnz complete;	/* No xfer */
+	test	SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */
+	test	SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb;
+complete:
+	bmov	SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2;
+	bmov	COMPLETE_SCB_HEAD, SCBPTR, 2 ret;
+bad_status:
+	cmp	SCB_SCSI_STATUS, STATUS_PKT_SENSE je upload_scb;
+	call	freeze_queue;
+upload_scb:
+	/*
+	 * Restore SCB TAG since we reuse this field
+	 * in the sequencer.  We don't want to corrupt
+	 * it on the host.
+	 */
+	bmov	SCB_TAG, SCBPTR, 2;
+	bmov	SCB_NEXT_COMPLETE, COMPLETE_DMA_SCB_HEAD, 2;
+	bmov	COMPLETE_DMA_SCB_HEAD, SCBPTR, 2;
+	or	SCB_SGPTR, SG_STATUS_VALID ret;
+
+/*
+ * Is it a disconnect message?  Set a flag in the SCB to remind us
+ * and await the bus going free.  If this is an untagged transaction,
+ * store the SCB id for it in our untagged target table for lookup on
+ * a reselection.
+ */
+mesgin_disconnect:
+	/*
+	 * If ATN is raised, we still want to give the target a message.
+	 * Perhaps there was a parity error on this last message byte
+	 * or we want to abort this command.  Either way, the target
+	 * should take us to message out phase and then attempt to
+	 * disconnect again.
+	 * XXX - Wait for more testing.
+	test	SCSISIGI, ATNI jnz mesgin_done;
+	 */
+	test	SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT
+		jnz mesgin_proto_violation;
+	or	SCB_CONTROL,DISCONNECTED;
+	test	SCB_CONTROL, TAG_ENB jnz await_busfree;
+queue_disc_scb:
+	bmov	REG0, SCBPTR, 2;
+	INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN);
+	bmov	DINDEX, SINDEX, 2;
+	bmov	DINDIR, REG0, 2;
+	bmov	SCBPTR, REG0, 2;
+	/* FALLTHROUGH */
+await_busfree:
+	and	SIMODE1, ~ENBUSFREE;
+	if ((ahd->bugs & AHD_BUSFREEREV_BUG) == 0) {
+		/*
+		 * In the BUSFREEREV_BUG case, the
+		 * busfree status was cleared at the
+		 * beginning of the connection.
+		 */
+		mvi	CLRSINT1,CLRBUSFREE;
+	}
+	mov	NONE, SCSIDAT;		/* Ack the last byte */
+	test	MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
+		jnz await_busfree_not_m_dff;
+SET_SRC_MODE	M_DFF1;
+SET_DST_MODE	M_DFF1;
+await_busfree_clrchn:
+	mvi	DFFSXFRCTL, CLRCHN;
+await_busfree_not_m_dff:
+	call	clear_target_state;
+	test	SSTAT1,REQINIT|BUSFREE	jz .;
+	test	SSTAT1, BUSFREE jnz idle_loop;
+	SET_SEQINTCODE(MISSED_BUSFREE)
+
+
+/*
+ * Save data pointers message:
+ * Copy RAM values back to the SCB for a Save Data Pointers message, but
+ * only if we've actually been in a data phase to change them.  This
+ * protects against bogus data in scratch ram and the residual counts
+ * since they are only initialized when we go into data_in or data_out.
+ * Ack the message as soon as possible.
+ */
+SET_SRC_MODE	M_DFF1;
+SET_DST_MODE	M_DFF1;
+mesgin_sdptrs:
+	mov	NONE,SCSIDAT;		/*dummy read from latch to ACK*/
+	test	SEQ_FLAGS, DPHASE	jz ITloop;
+	call	save_pointers;
+	jmp	ITloop;
+
+save_pointers:
+	/*
+	 * If we are asked to save our position at the end of the
+	 * transfer, just mark us at the end rather than perform a
+	 * full save.
+	 */
+	test	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz save_pointers_full;
+	or	SCB_SGPTR, SG_LIST_NULL ret;
+
+save_pointers_full:
+	/*
+	 * The SCB_DATAPTR becomes the current SHADDR.
+	 * All other information comes directly from our residual
+	 * state.
+	 */
+	bmov	SCB_DATAPTR, SHADDR, 8;
+	bmov	SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8 ret;
+
+/*
+ * Restore pointers message?  Data pointers are recopied from the
+ * SCB anytime we enter a data phase for the first time, so all
+ * we need to do is clear the DPHASE flag and let the data phase
+ * code do the rest.  We also reset/reallocate the FIFO to make
+ * sure we have a clean start for the next data or command phase.
+ */
+mesgin_rdptrs:
+	and	SEQ_FLAGS, ~DPHASE;
+	test	MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz msgin_rdptrs_get_fifo;
+	mvi	DFFSXFRCTL, RSTCHN|CLRSHCNT;
+	SET_MODE(M_SCSI, M_SCSI)
+msgin_rdptrs_get_fifo:
+	call	allocate_fifo;
+	jmp	mesgin_done;
+
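+/*
+ * Reset per-connection state: record that the bus is free and rearm
+ * the NOT_IDENTIFIED/NO_CDB_SENT protocol checks for the next
+ * connection.
+ */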
+clear_target_state:
+	mvi	LASTPHASE, P_BUSFREE;
+	/* clear target specific flags */
+	mvi	SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT ret;
+
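+/*
+ * Wait for a valid bus phase.  Loop until the target asserts REQ
+ * without a pending parity error and then latch the phase bits into
+ * LASTPHASE.
+ */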
+phase_lock:     
+	if ((ahd->bugs & AHD_EARLY_REQ_BUG) != 0) {
+		/*
+		 * Don't ignore persistent REQ assertions just because
+		 * they were asserted within the bus settle delay window.
+		 * This allows us to tolerate devices like the GEM318
+		 * that violate the SCSI spec.  We are careful not to
+		 * count REQ while we are waiting for it to fall during
+		 * an async phase due to our asserted ACK.  Each
+		 * sequencer instruction takes ~25ns, so the REQ must
+		 * last at least 100ns in order to be counted as a true
+		 * REQ.
+		 */
+		test	SCSIPHASE, 0xFF jnz phase_locked;
+		test	SCSISIGI, ACKI jnz phase_lock;
+		test	SCSISIGI, REQI jz phase_lock;
+		test	SCSIPHASE, 0xFF jnz phase_locked;
+		test	SCSISIGI, ACKI jnz phase_lock;
+		test	SCSISIGI, REQI jz phase_lock;
+phase_locked:
+	} else {
+		test	SCSIPHASE, 0xFF jz .;
+	}
+	test	SSTAT1, SCSIPERR jnz phase_lock;
+phase_lock_latch_phase:
+	and	LASTPHASE, PHASE_MASK, SCSISIGI ret;
+
+/*
+ * Functions to read data in Automatic PIO mode.
+ *
+ * An ACK is not sent on input from the target until SCSIDATL is read from.
+ * So we wait until SCSIDATL is latched (the usual way), then read the data
+ * byte directly off the bus using SCSIBUSL.  When we have pulled the ATN
+ * line, or we just want to acknowledge the byte, then we do a dummy read
+ * from SCSIDATL.  The SCSI spec guarantees that the target will hold the
+ * data byte on the bus until we send our ACK.
+ *
+ * The assumption here is that these are called in a particular sequence,
+ * and that REQ is already set when inb_first is called.  inb_{first,next}
+ * use the same calling convention as inb.
+ */
+inb_next:
+	mov	NONE,SCSIDAT;		/*dummy read from latch to ACK*/
+inb_next_wait:
+	/*
+	 * If there is a parity error, wait for the kernel to
+	 * see the interrupt and prepare our message response
+	 * before continuing.
+	 */
+	test	SCSIPHASE, 0xFF jz .;
+	test	SSTAT1, SCSIPERR jnz inb_next_wait;
+inb_next_check_phase:
+	and	LASTPHASE, PHASE_MASK, SCSISIGI;
+	cmp	LASTPHASE, P_MESGIN jne mesgin_phasemis;
+inb_first:
+	clr	DINDEX[1];
+	mov	DINDEX,SINDEX;
+	mov	DINDIR,SCSIBUS	ret;		/*read byte directly from bus*/
+inb_last:
+	mov	NONE,SCSIDAT ret;		/*dummy read from latch to ACK*/
+
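+/*
+ * Assert ATN and record the message byte passed in SINDEX in MSG_OUT
+ * so that it is sent during the next message-out phase.
+ */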
+mk_mesg:
+	mvi	SCSISIGO, ATNO;
+	mov	MSG_OUT,SINDEX ret;
+
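+/*
+ * Shut down any S/G element fetch that is in progress and clear the
+ * S/G state for this FIFO.
+ */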
+SET_SRC_MODE	M_DFF1;
+SET_DST_MODE	M_DFF1;
+disable_ccsgen:
+	test	SG_STATE, FETCH_INPROG jz disable_ccsgen_fetch_done;
+	clr	CCSGCTL;
+disable_ccsgen_fetch_done:
+	clr	SG_STATE ret;
+
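+/*
+ * Keep this FIFO's S/G engine fed: fetch S/G elements into the
+ * CCSGRAM cache when needed and preload them into the DMA engine as
+ * space becomes available.
+ */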
+service_fifo:
+	/*
+	 * Do we have any prefetch left???
+	 */
+	test	SG_STATE, SEGS_AVAIL jnz idle_sg_avail;
+
+	/*
+	 * Can this FIFO have access to the S/G cache yet?
+	 */
+	test	CCSGCTL, SG_CACHE_AVAIL jz return;
+
+	/* Did we just finish fetching segs? */
+	test	CCSGCTL, CCSGDONE jnz idle_sgfetch_complete;
+
+	/* Are we actively fetching segments? */
+	test	CCSGCTL, CCSGENACK jnz return;
+
+	/*
+	 * We fetch a "cacheline aligned" and sized amount of data
+	 * so we don't end up referencing a non-existent page.
+	 * Cacheline aligned is in quotes because the kernel will
+	 * set the prefetch amount to a reasonable level if the
+	 * cacheline size is unknown.
+	 */
+	bmov	SGHADDR, SCB_RESIDUAL_SGPTR, 4;
+	mvi	SGHCNT, SG_PREFETCH_CNT;
+	if ((ahd->bugs & AHD_REG_SLOW_SETTLE_BUG) != 0) {
+		/*
+		 * Need two instruction between "touches" of SGHADDR.
+		 */
+		nop;
+	}
+	and	SGHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR;
+	mvi	CCSGCTL, CCSGEN|CCSGRESET;
+	or	SG_STATE, FETCH_INPROG ret;
+idle_sgfetch_complete:
+	/*
+	 * Guard against SG_CACHE_AVAIL activating during sg fetch
+	 * request in the other FIFO.
+	 */
+	test	SG_STATE, FETCH_INPROG jz return;
+	clr	CCSGCTL;
+	and	CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR;
+	mvi	SG_STATE, SEGS_AVAIL|LOADING_NEEDED;
+idle_sg_avail:
+	/* Does the hardware have space for another SG entry? */
+	test	DFSTATUS, PRELOAD_AVAIL jz return;
+	/*
+	 * On Rev A hardware, preloading a segment before HDMAENACK
+	 * comes true can clobber the shadow address of the
+	 * first segment in the S/G FIFO.  Wait until it is
+	 * safe to proceed.
+	 */
+	if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) == 0) {
+		test	DFCNTRL, HDMAENACK jz return;
+	}
+	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+		bmov	HADDR, CCSGRAM, 8;
+	} else {
+		bmov 	HADDR, CCSGRAM, 4;
+	}
+	bmov	HCNT, CCSGRAM, 3;
+	bmov	SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1;
+	if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
+		and	HADDR[4], SG_HIGH_ADDR_BITS, SCB_RESIDUAL_DATACNT[3];
+	}
+	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+		/* Skip 4 bytes of pad. */
+		add	CCSGADDR, 4;
+	}
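+/*
+ * Advance the residual S/G pointer by one element, record the new
+ * pointer (and last segment indication) in SG_CACHE_PRE, and start
+ * the preload of the segment loaded above.
+ */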
+sg_advance:
+	clr	A;			/* add sizeof(struct scatter) */
+	add	SCB_RESIDUAL_SGPTR[0],SG_SIZEOF;
+	adc	SCB_RESIDUAL_SGPTR[1],A;
+	adc	SCB_RESIDUAL_SGPTR[2],A;
+	adc	SCB_RESIDUAL_SGPTR[3],A;
+	mov	SINDEX, SCB_RESIDUAL_SGPTR[0];
+	test	SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 3;
+	or	SINDEX, LAST_SEG;
+	clr	SG_STATE;
+	mov	SG_CACHE_PRE, SINDEX;
+	if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
+		/*
+		 * Use SCSIENWRDIS so that SCSIEN is never
+		 * modified by this operation.
+		 */
+		or	DFCNTRL, PRELOADEN|HDMAEN|SCSIENWRDIS;
+	} else {
+		or	DFCNTRL, PRELOADEN|HDMAEN;
+	}
+	/*
+	 * Do we have another segment in the cache?
+	 */
+	add	NONE, SG_PREFETCH_CNT_LIMIT, CCSGADDR;
+	jnc	return;
+	and	SG_STATE, ~SEGS_AVAIL ret;
+
+/*
+ * Initialize the DMA address and counter from the SCB.
+ */
+load_first_seg:
+	bmov	HADDR, SCB_DATAPTR, 11;
+	and	REG_ISR, ~SG_FULL_RESID, SCB_SGPTR[0];
+	test	SCB_DATACNT[3], SG_LAST_SEG jz . + 2;
+	or	REG_ISR, LAST_SEG;
+	mov	SG_CACHE_PRE, REG_ISR;
+	mvi	DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
+	/*
+	 * Since we are entering a data phase, we will
+	 * rely on the SCB_RESID* fields.  Initialize the
+	 * residual and clear the full residual flag.
+	 */
+	and	SCB_SGPTR[0], ~SG_FULL_RESID;
+	bmov	SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5;
+	/* If we need more S/G elements, tell the idle loop */
+	test	SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz . + 2;
+	mvi	SG_STATE, LOADING_NEEDED ret;
+	clr	SG_STATE ret;
+
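+/*
+ * Register a handler for this FIFO so that the idle loop continues
+ * to feed it S/G elements, or clear the handler once no further
+ * loading is needed.
+ */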
+p_data_handle_xfer:
+	call	setjmp;
+	test	SG_STATE, LOADING_NEEDED jnz service_fifo;
+p_data_clear_handler:
+	or	LONGJMP_ADDR[1], INVALID_ADDR ret;
+
+p_data:
+	test	SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT	jz p_data_allowed;
+	SET_SEQINTCODE(PROTO_VIOLATION)
+p_data_allowed:
+ 
+	test	SEQ_FLAGS, DPHASE	jz data_phase_initialize;
+
+	/*
+	 * If we re-enter the data phase after going through another
+	 * phase, our transfer location has almost certainly been
+	 * corrupted by the intervening, non-data transfers.  Ask
+	 * the host driver to fix us up based on the transfer residual
+	 * unless we already know that we should be bitbucketing.
+	 */
+	test	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket;
+	SET_SEQINTCODE(PDATA_REINIT)
+	jmp	data_phase_inbounds;
+
+p_data_bitbucket:
+	/*
+	 * Turn on `Bit Bucket' mode, wait until the target takes
+	 * us to another phase, and then notify the host.
+	 */
+	mov	SAVED_MODE, MODE_PTR;
+	test	MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
+		jnz bitbucket_not_m_dff;
+	/*
+	 * Ensure that any FIFO contents are cleared out and the
+	 * FIFO free'd prior to starting the BITBUCKET.  BITBUCKET
+	 * doesn't discard data already in the FIFO.
+	 */
+	mvi	DFFSXFRCTL, RSTCHN|CLRSHCNT;
+	SET_MODE(M_SCSI, M_SCSI)
+bitbucket_not_m_dff:
+	or	SXFRCTL1,BITBUCKET;
+	/* Wait for non-data phase. */
+	test	SCSIPHASE, ~DATA_PHASE_MASK jz .;
+	and	SXFRCTL1, ~BITBUCKET;
+	RESTORE_MODE(SAVED_MODE)
+SET_SRC_MODE	M_DFF1;
+SET_DST_MODE	M_DFF1;
+	SET_SEQINTCODE(DATA_OVERRUN)
+	jmp	ITloop;
+
+data_phase_initialize:
+	test	SCB_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket;
+	call	load_first_seg;
+data_phase_inbounds:
+	/* We have seen a data phase at least once. */
+	or	SEQ_FLAGS, DPHASE;
+	mov	SAVED_MODE, MODE_PTR;
+	test	SG_STATE, LOADING_NEEDED jz data_group_dma_loop;
+	call	p_data_handle_xfer;
+data_group_dma_loop:
+	/*
+	 * The transfer is complete if either the last segment
+	 * completes or the target changes phase.  Both conditions
+	 * will clear SCSIEN.
+	 */
+	call	idle_loop_service_fifos;
+	call	idle_loop_cchan;
+	call	idle_loop_gsfifo;
+	RESTORE_MODE(SAVED_MODE)
+	test	DFCNTRL, SCSIEN jnz data_group_dma_loop;
+
+data_group_dmafinish:
+	/*
+	 * The transfer has terminated either due to a phase
+	 * change, and/or the completion of the last segment.
+	 * We have two goals here.  Do as much other work
+	 * as possible while the data fifo drains on a read
+	 * and respond as quickly as possible to the standard
+	 * messages (save data pointers/disconnect and command
+	 * complete) that usually follow a data phase.
+	 */
+	call	calc_residual;
+
+	/*
+	 * Go ahead and shut down the DMA engine now.
+	 */
+	test	DFCNTRL, DIRECTION jnz data_phase_finish;
+data_group_fifoflush:
+	if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
+		or	DFCNTRL, FIFOFLUSH;
+	}
+	/*
+	 * We have enabled the auto-ack feature.  This means
+	 * that the controller may have already transferred
+	 * some overrun bytes into the data FIFO and acked them
+	 * on the bus.  The only way to detect this situation is
+	 * to wait for LAST_SEG_DONE to come true on a completed
+	 * transfer and then test to see if the data FIFO is
+	 * non-empty.  We know there is more data yet to transfer
+	 * if SG_LIST_NULL is not yet set, thus there cannot be
+	 * an overrun.
+	 */
+	test	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_finish;
+	test	SG_CACHE_SHADOW, LAST_SEG_DONE jz .;
+	test	DFSTATUS, FIFOEMP jnz data_phase_finish;
+	/* Overrun */
+	jmp	p_data;
+data_phase_finish:
+	/*
+	 * If the target has left us in data phase, loop through
+	 * the dma code again.  We will only loop if there is a
+	 * data overrun.  
+	 */
+	if ((ahd->flags & AHD_TARGETROLE) != 0) {
+		test	SSTAT0, TARGET jnz data_phase_done;
+	}
+	if ((ahd->flags & AHD_INITIATORROLE) != 0) {
+		test	SSTAT1, REQINIT jz .;
+		test	SCSIPHASE, DATA_PHASE_MASK jnz p_data;
+	}
+
+data_phase_done:
+	/* Kill off any pending prefetch */
+	call	disable_ccsgen;
+	or 	LONGJMP_ADDR[1], INVALID_ADDR;
+
+	if ((ahd->flags & AHD_TARGETROLE) != 0) {
+		test	SEQ_FLAGS, DPHASE_PENDING jz ITloop;
+		/*
+		and	SEQ_FLAGS, ~DPHASE_PENDING;
+		 * For data-in phases, wait for any pending acks from the
+		 * initiator before changing phase.  We only need to
+		 * send Ignore Wide Residue messages for data-in phases.
+		test	DFCNTRL, DIRECTION jz target_ITloop;
+		test	SSTAT1, REQINIT	jnz .;
+		test	SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jz target_ITloop;
+		SET_MODE(M_SCSI, M_SCSI)
+		test	NEGCONOPTS, WIDEXFER jz target_ITloop;
+		 */
+		/*
+		 * Issue an Ignore Wide Residue Message.
+		mvi	P_MESGIN|BSYO call change_phase;
+		mvi	MSG_IGN_WIDE_RESIDUE call target_outb;
+		mvi	1 call target_outb;
+		jmp	target_ITloop;
+		 */
+	} else {
+		jmp	ITloop;
+	}
+
+/*
+ * We assume that, even though data may still be
+ * transferring to the host, that the SCSI side of
+ * the DMA engine is now in a static state.  This
+ * allows us to update our notion of where we are
+ * in this transfer.
+ *
+ * If, by chance, we stopped before being able
+ * to fetch additional segments for this transfer,
+ * yet the last S/G was completely exhausted,
+ * call our idle loop until it is able to load
+ * another segment.  This will allow us to immediately
+ * pickup on the next segment on the next data phase.
+ *
+ * If we happened to stop on the last segment, then
+ * our residual information is still correct from
+ * the idle loop and there is no need to perform
+ * any fixups.
+ */
+residual_before_last_seg:
+	test    MDFFSTAT, SHVALID	jnz sgptr_fixup;
+	/*
+	 * This can never happen from an interrupt, as the packetized
+	 * hardware will only interrupt us once SHVALID or
+	 * LAST_SEG_DONE is asserted.
+	 */
+	call	idle_loop_service_fifos;
+	RESTORE_MODE(SAVED_MODE)
+	/* FALLTHROUGH */
+calc_residual:
+	test	SG_CACHE_SHADOW, LAST_SEG jz residual_before_last_seg;
+	/* Record if we've consumed all S/G entries */
+	test	MDFFSTAT, SHVALID	jz . + 2;
+	bmov	SCB_RESIDUAL_DATACNT, SHCNT, 3 ret;
+	or	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL ret;
+
+sgptr_fixup:
+	/*
+	 * Fixup the residual next S/G pointer.  The S/G preload
+	 * feature of the chip allows us to load two elements
+	 * in addition to the currently active element.  We
+	 * store the bottom byte of the next S/G pointer in
+	 * the SG_CACHE_PTR register so we can restore the
+	 * correct value when the DMA completes.  If the next
+	 * sg ptr value has advanced to the point where higher
+	 * bytes in the address have been affected, fix them
+	 * too.
+	 */
+	test	SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done;
+	test	SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done;
+	add	SCB_RESIDUAL_SGPTR[1], -1;
+	adc	SCB_RESIDUAL_SGPTR[2], -1; 
+	adc	SCB_RESIDUAL_SGPTR[3], -1;
+sgptr_fixup_done:
+	and	SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW;
+	clr	SCB_RESIDUAL_DATACNT[3]; /* We are not the last seg */
+	bmov	SCB_RESIDUAL_DATACNT, SHCNT, 3 ret;
+
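+/*
+ * Software timer interrupt.  Post any pending command completions to
+ * the host and clear the timer timeout status before returning from
+ * the interrupt.
+ */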
+export timer_isr:
+	call	issue_cmdcmplt;
+	mvi	CLRSEQINTSTAT, CLRSEQ_SWTMRTO;
+	if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) {
+		/*
+		 * In H2A4, the mode pointer is not saved
+		 * for intvec2, but is restored on iret.
+		 * This can lead to the restoration of a
+		 * bogus mode ptr.  Manually clear the
+		 * intmask bits and do a normal return
+		 * to compensate.
+		 */
+		and	SEQINTCTL, ~(INTMASK2|INTMASK1) ret;
+	} else {
+		or	SEQINTCTL, IRET ret;
+	}
+
+export seq_isr:
+	if ((ahd->features & AHD_RTI) == 0) {
+		/*
+		 * On RevA Silicon, if the target returns us to data-out
+		 * after we have already trained for data-out, it is
+		 * possible for us to transition the free running clock to
+		 * data-valid before the required 100ns P1 setup time (8 P1
+		 * assertions in fast-160 mode).  This will only happen if
+		 * this L-Q is a continuation of a data transfer for which
+		 * we have already prefetched data into our FIFO (LQ/Data
+		 * followed by LQ/Data for the same write transaction).
+		 * This can cause some target implementations to miss the
+		 * first few data transfers on the bus.  We detect this
+		 * situation by noticing that this is the first data transfer
+		 * after an LQ (LQIWORKONLQ true), that the data transfer is
+		 * a continuation of a transfer already setup in our FIFO
+		 * (SAVEPTRS interrupt), and that the transaction is a write
+		 * (DIRECTION set in DFCNTRL). The delay is performed by
+		 * disabling SCSIEN until we see the first REQ from the
+		 * target.
+		 * 
+		 * The first instruction in an ISR cannot be a branch on
+		 * Rev A.  Snapshot LQISTAT2 so the status is not missed
+		 * and defer the test by one instruction.
+		 */
+		mov	REG_ISR, LQISTAT2;
+		test	REG_ISR, LQIWORKONLQ jz main_isr;
+		test	SEQINTSRC, SAVEPTRS  jz main_isr;
+		test	LONGJMP_ADDR[1], INVALID_ADDR jz saveptr_active_fifo;
+		/*
+		 * Switch to the active FIFO after clearing the snapshot
+		 * savepointer in the current FIFO.  We do this so that
+		 * a pending CTXTDONE or SAVEPTR is visible in the active
+		 * FIFO.  This status is the only way we can detect if we
+		 * have lost the race (e.g. host paused us) and our attempts
+		 * to disable the channel occurred after all REQs were
+		 * already seen and acked (REQINIT never comes true).
+		 */
+		mvi	DFFSXFRCTL, CLRCHN;
+		xor	MODE_PTR, MK_MODE(M_DFF1, M_DFF1);
+		test	DFCNTRL, DIRECTION jz interrupt_return;
+		and	DFCNTRL, ~SCSIEN;
+snapshot_wait_data_valid:
+		test	SEQINTSRC, (CTXTDONE|SAVEPTRS) jnz snapshot_data_valid;
+		test	SSTAT1, REQINIT	jz snapshot_wait_data_valid;
+snapshot_data_valid:
+		or	DFCNTRL, SCSIEN;
+		or	SEQINTCTL, IRET ret;
+snapshot_saveptr:
+		mvi	DFFSXFRCTL, CLRCHN;
+		or	SEQINTCTL, IRET ret;
+main_isr:
+	}
+	test	SEQINTSRC, CFG4DATA	jnz cfg4data_intr;
+	test	SEQINTSRC, CFG4ISTAT	jnz cfg4istat_intr;
+	test	SEQINTSRC, SAVEPTRS	jnz saveptr_intr;
+	test	SEQINTSRC, CFG4ICMD	jnz cfg4icmd_intr;
+	SET_SEQINTCODE(INVALID_SEQINT)
+
+/*
+ * There are two types of save pointers interrupts:
+ * The first is a snapshot save pointers where the current FIFO is not
+ * active and contains a snapshot of the current pointer information.
+ * This happens between packets in a stream for a single L_Q.  Since we
+ * are not performing a pointer save, we can safely clear the channel
+ * so it can be used for other transactions.  On RTI-capable controllers,
+ * where snapshots can be, and are, disabled, the code to handle this type
+ * of snapshot is not active.
+ *
+ * The second case is a save pointers on an active FIFO which occurs
+ * if the target changes to a new L_Q or busfrees/QASes and the transfer
+ * has a residual.  This should occur coincident with a ctxtdone.  We
+ * disable the interrupt and allow our active routine to handle the
+ * save.
+ */
+saveptr_intr:
+	if ((ahd->features & AHD_RTI) == 0) {
+		test	LONGJMP_ADDR[1], INVALID_ADDR jnz snapshot_saveptr;
+	}
+saveptr_active_fifo:
+	and	SEQIMODE, ~ENSAVEPTRS;
+	or	SEQINTCTL, IRET ret;
+
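+/*
+ * CFG4DATA interrupt: the hardware has set up a data context for a
+ * packetized transfer.  Load the first segment, run the transfer
+ * handler, and count this FIFO reference against the SCB.
+ */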
+cfg4data_intr:
+	test	SCB_SGPTR[0], SG_LIST_NULL jnz pkt_handle_overrun_inc_use_count;
+	call	load_first_seg;
+	call	pkt_handle_xfer;
+	inc	SCB_FIFO_USE_COUNT;
+interrupt_return:
+	or	SEQINTCTL, IRET ret;
+
+cfg4istat_intr:
+	call	freeze_queue;
+	add	NONE, -13, SCB_CDB_LEN;
+	jnc	cfg4istat_have_sense_addr;
+	test	SCB_CDB_LEN, SCB_CDB_LEN_PTR jnz cfg4istat_have_sense_addr;
+	/*
+	 * Host sets up address/count and enables transfer.
+	 */
+	SET_SEQINTCODE(CFG4ISTAT_INTR)
+	jmp	cfg4istat_setup_handler;
+cfg4istat_have_sense_addr:
+	bmov	HADDR, SCB_SENSE_BUSADDR, 4;
+	mvi	HCNT[1], (AHD_SENSE_BUFSIZE >> 8);
+	mvi	SG_CACHE_PRE, LAST_SEG;
+	mvi	DFCNTRL, PRELOADEN|SCSIEN|HDMAEN;
+cfg4istat_setup_handler:
+	/*
+	 * Status pkt is transferring to host.
+	 * Wait in idle loop for transfer to complete.
+	 * If a command completed before an attempted
+	 * task management function completed, notify the host.
+	 */
+	test	SCB_TASK_MANAGEMENT, 0xFF jz cfg4istat_no_taskmgmt_func;
+	SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY)
+cfg4istat_no_taskmgmt_func:
+	call	pkt_handle_status;
+	or	SEQINTCTL, IRET ret;
+
+cfg4icmd_intr:
+	/*
+	 * In the case of DMAing a CDB from the host, the normal
+	 * CDB buffer is formatted with an 8 byte address followed
+	 * by a 1 byte count.
+	 */
+	bmov	HADDR[0], SCB_HOST_CDB_PTR, 9;
+	mvi	SG_CACHE_PRE, LAST_SEG;
+	mvi	DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN);
+	call	pkt_handle_cdb;
+	or	SEQINTCTL, IRET ret;
+
+/*
+ * See if the target has gone on in this context creating an
+ * overrun condition.  For the write case, the hardware cannot
+ * ack bytes until data are provided.  So, if the target begins
+ * another  packet without changing contexts, implying we are
+ * not sitting on a packet boundary, we are in an overrun
+ * situation.  For the read case, the hardware will continue to
+ * ack bytes into the FIFO, and may even ack the last overrun packet
+ * into the FIFO.   If the FIFO should become non-empty, we are in
+ * a read overrun case.
+ */
+#define check_overrun							\
+	/* Not on a packet boundary. */					\
+	test 	MDFFSTAT, DLZERO jz pkt_handle_overrun;			\
+	test	DFSTATUS, FIFOEMP jz pkt_handle_overrun
+
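+/*
+ * Service the data portion of a packetized transfer.  Feed S/G
+ * segments to the FIFO while watching for save pointers requests,
+ * unexpected non-packet phases, and completion of the last segment.
+ */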
+pkt_handle_xfer:
+	test	SG_STATE, LOADING_NEEDED jz pkt_last_seg;
+	call	setjmp;
+	test	SEQINTSRC, SAVEPTRS jnz pkt_saveptrs;
+	test	SCSIPHASE, ~DATA_PHASE_MASK jz . + 2;
+	test	SCSISIGO, ATNO jnz . + 2;
+	test	SSTAT2, NONPACKREQ jz pkt_service_fifo;
+	/*
+	 * Defer handling of this NONPACKREQ until we
+	 * can be sure it pertains to this FIFO.  SAVEPTRS
+	 * will not be asserted if the NONPACKREQ is for us,
+	 * so we must simulate it if shadow is valid.  If
+	 * shadow is not valid, keep running this FIFO until we
+	 * have satisfied the transfer by loading segments and
+	 * waiting for either shadow valid or last_seg_done.
+	 */
+	test	MDFFSTAT, SHVALID jnz pkt_saveptrs;
+pkt_service_fifo:
+	test	SG_STATE, LOADING_NEEDED jnz service_fifo;
+pkt_last_seg:
+	call	setjmp;
+	test	SEQINTSRC, SAVEPTRS jnz pkt_saveptrs;
+	test	SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_last_seg_done;
+	test	SCSIPHASE, ~DATA_PHASE_MASK jz . + 2;
+	test	SCSISIGO, ATNO jnz . + 2;
+	test	SSTAT2, NONPACKREQ jz return;
+	test	MDFFSTAT, SHVALID jz return;
+	/* FALLTHROUGH */
+
+/*
+ * Either a SAVEPTRS interrupt condition is pending for this FIFO
+ * or we have a pending NONPACKREQ for this FIFO.  We differentiate
+ * between the two by capturing the state of the SAVEPTRS interrupt
+ * prior to clearing this status and executing the common code for
+ * these two cases.
+ */
+pkt_saveptrs:
+BEGIN_CRITICAL;
+	if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
+		or	DFCNTRL, FIFOFLUSH;
+	}
+	mov	REG0, SEQINTSRC;
+	call	calc_residual;
+	call	save_pointers;
+	mvi	CLRSEQINTSRC, CLRSAVEPTRS;
+	call	disable_ccsgen;
+	or	SEQIMODE, ENSAVEPTRS;
+	test	DFCNTRL, DIRECTION jnz pkt_saveptrs_check_status;
+	test	DFSTATUS, FIFOEMP jnz pkt_saveptrs_check_status;
+	/*
+	 * Keep a handler around for this FIFO until it drains
+	 * to the host to guarantee that we don't complete the
+	 * command to the host before the data arrives.
+	 */
+pkt_saveptrs_wait_fifoemp:
+	call	setjmp;
+	test	DFSTATUS, FIFOEMP jz return;
+pkt_saveptrs_check_status:
+	or	LONGJMP_ADDR[1], INVALID_ADDR;
+	test	REG0, SAVEPTRS jz unexpected_nonpkt_phase;
+	dec	SCB_FIFO_USE_COUNT;
+	test	SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle;
+	mvi	DFFSXFRCTL, CLRCHN ret;
+END_CRITICAL;
+
+/*
+ * LAST_SEG_DONE status has been seen in the current FIFO.
+ * This indicates that all of the allowed data for this
+ * command has transferred across the SCSI and host buses.
+ * Check for overrun and see if we can complete this command.
+ */
+pkt_last_seg_done:
+BEGIN_CRITICAL;
+	/*
+	 * Mark transfer as completed.
+	 */
+	or	SCB_SGPTR, SG_LIST_NULL;
+
+	/*
+	 * Wait for the current context to finish to verify that
+	 * no overrun condition has occurred.
+	 */
+	test	SEQINTSRC, CTXTDONE jnz pkt_ctxt_done;
+	call	setjmp;
+pkt_wait_ctxt_done_loop:
+	test	SEQINTSRC, CTXTDONE jnz pkt_ctxt_done;
+	/*
+	 * A sufficiently large overrun or a NONPACKREQ may
+	 * prevent CTXTDONE from ever asserting, so we must
+	 * poll for these statuses too.
+	 */
+	check_overrun;
+	test	SSTAT2, NONPACKREQ jz return;
+	test	SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase;
+	/* FALLTHROUGH */
+
+pkt_ctxt_done:
+	check_overrun;
+	or	LONGJMP_ADDR[1], INVALID_ADDR;
+	/*
+	 * If status has been received, it is safe to skip
+	 * the check to see if another FIFO is active because
+	 * LAST_SEG_DONE has been observed.  However, we check
+	 * the FIFO anyway since it costs us only one extra
+	 * instruction to leverage common code to perform the
+	 * SCB completion.
+	 */
+	dec	SCB_FIFO_USE_COUNT;
+	test	SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle;
+	mvi	DFFSXFRCTL, CLRCHN ret;
+END_CRITICAL;
+
+/*
+ * Must wait until CDB xfer is over before issuing the
+ * clear channel.
+ */
+pkt_handle_cdb:
+	call	setjmp;
+	test	SG_CACHE_SHADOW, LAST_SEG_DONE jz return;
+	or	LONGJMP_ADDR[1], INVALID_ADDR;
+	mvi	DFFSXFRCTL, CLRCHN ret;
+
+/*
+ * Watch over the status transfer.  Our host sense buffer is
+ * large enough to take the maximum allowed status packet.
+ * Nonetheless, we must still catch and report overruns to
+ * the host.  Additionally, properly catch unexpected non-packet
+ * phases that are typically caused by CRC errors in status packet
+ * transmission.
+ */
+pkt_handle_status:
+	call	setjmp;
+	test	SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun;
+	test	SEQINTSRC, CTXTDONE jz pkt_status_check_nonpackreq;
+	test	SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun;
+pkt_status_IU_done:
+	if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) {
+		or	DFCNTRL, FIFOFLUSH;
+	}
+	test	DFSTATUS, FIFOEMP jz return;
+BEGIN_CRITICAL;
+	or	LONGJMP_ADDR[1], INVALID_ADDR;
+	mvi	SCB_SCSI_STATUS, STATUS_PKT_SENSE;
+	or	SCB_CONTROL, STATUS_RCVD;
+	jmp	pkt_complete_scb_if_fifos_idle;
+END_CRITICAL;
+pkt_status_check_overrun:
+	/*
+	 * Status PKT overruns are unceremoniously recovered with a
+	 * bus reset.  If we've overrun, let the host know so that
+	 * recovery can be performed.
+	 *
+	 * LAST_SEG_DONE has been observed.  If either CTXTDONE or
+	 * a NONPACKREQ phase change has occurred and the FIFO is
+	 * empty, there is no overrun.
+	 */
+	test	DFSTATUS, FIFOEMP jz pkt_status_report_overrun;
+	test	SEQINTSRC, CTXTDONE jz . + 2;
+	test	DFSTATUS, FIFOEMP jnz pkt_status_IU_done;
+	test	SCSIPHASE, ~DATA_PHASE_MASK jz return;
+	test	DFSTATUS, FIFOEMP jnz pkt_status_check_nonpackreq;
+pkt_status_report_overrun:
+	SET_SEQINTCODE(STATUS_OVERRUN)
+	/* SEQUENCER RESTARTED */
+pkt_status_check_nonpackreq:
+	/*
+	 * CTXTDONE may be held off if a NONPACKREQ is associated with
+	 * the current context.  If a NONPACKREQ is observed, decide
+	 * if it is for the current context.  If it is for the current
+	 * context, we must defer NONPACKREQ processing until all data
+	 * has transferred to the host.
+	 */
+	test	SCSIPHASE, ~DATA_PHASE_MASK jz return;
+	test	SCSISIGO, ATNO jnz . + 2;
+	test	SSTAT2, NONPACKREQ jz return;
+	test	SEQINTSRC, CTXTDONE jnz pkt_status_IU_done;
+	test	DFSTATUS, FIFOEMP jz return;
+	/*
+	 * The unexpected nonpkt phase handler assumes that any
+	 * data channel use will have a FIFO reference count.  It
+	 * turns out that the status handler doesn't need a reference
+	 * count since the status received flag, and thus completion
+	 * processing, cannot be set until the handler is finished.
+	 * We increment the count here to make the nonpkt handler
+	 * happy.
+	 */
+	inc	SCB_FIFO_USE_COUNT;
+	/* FALLTHROUGH */
+
+/*
+ * Nonpackreq is a polled status.  It can come true in three situations:
+ * we have received an L_Q, we have sent one or more L_Qs, or there is no
+ * L_Q context associated with this REQ (REQ occurs immediately after a
+ * (re)selection).  Routines that know which context is responsible for this
+ * nonpackreq call directly into unexpected_nonpkt_phase.  In the case of the
+ * top level idle loop, we exhaust all active contexts prior to determining that
+ * we simply do not have the full I_T_L_Q for this phase.
+ */
+unexpected_nonpkt_phase_find_ctxt:
+	/*
+	 * This nonpackreq is most likely associated with one of the tags
+	 * in a FIFO or an outgoing LQ.  Only treat it as an I_T only
+	 * nonpackreq if we've cleared out the FIFOs and handled any
+	 * pending SELDO.
+	 */
+SET_SRC_MODE	M_SCSI;
+SET_DST_MODE	M_SCSI;
+	and	A, FIFO1FREE|FIFO0FREE, DFFSTAT;
+	cmp	A, FIFO1FREE|FIFO0FREE jne return;
+	test	SSTAT0, SELDO jnz return;
+	mvi	SCBPTR[1], SCB_LIST_NULL;
+unexpected_nonpkt_phase:
+	test	MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1))
+		jnz unexpected_nonpkt_mode_cleared;
+SET_SRC_MODE	M_DFF0;
+SET_DST_MODE	M_DFF0;
+	or	LONGJMP_ADDR[1], INVALID_ADDR;
+	dec	SCB_FIFO_USE_COUNT;
+	mvi	DFFSXFRCTL, CLRCHN;
+unexpected_nonpkt_mode_cleared:
+	mvi	CLRSINT2, CLRNONPACKREQ;
+	test	SCSIPHASE, ~(MSG_IN_PHASE|MSG_OUT_PHASE) jnz illegal_phase;
+	SET_SEQINTCODE(ENTERING_NONPACK)
+	jmp	ITloop;
+
+illegal_phase:
+	SET_SEQINTCODE(ILLEGAL_PHASE)
+	jmp	ITloop;
+
+/*
+ * We have entered an overrun situation.  If we have working
+ * BITBUCKET, flip that on and let the hardware eat any overrun
+ * data.  Otherwise use an overrun buffer in the host to simulate
+ * BITBUCKET.
+ */
+pkt_handle_overrun_inc_use_count:
+	inc	SCB_FIFO_USE_COUNT;
+pkt_handle_overrun:
+	SET_SEQINTCODE(CFG4OVERRUN)
+	call	freeze_queue;
+	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) == 0) {
+		or	DFFSXFRCTL, DFFBITBUCKET;
+SET_SRC_MODE	M_DFF1;
+SET_DST_MODE	M_DFF1;
+	} else {
+		call	load_overrun_buf;
+		mvi	DFCNTRL, (HDMAEN|SCSIEN|PRELOADEN);
+	}
+	call	setjmp;
+	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
+		test	DFSTATUS, PRELOAD_AVAIL jz overrun_load_done;
+		call	load_overrun_buf;
+		or	DFCNTRL, PRELOADEN;
+overrun_load_done:
+		test	SEQINTSRC, CTXTDONE jnz pkt_overrun_end;
+	} else {
+		test	DFFSXFRCTL, DFFBITBUCKET jz pkt_overrun_end;
+	}
+	test	SSTAT2, NONPACKREQ jz return;
+pkt_overrun_end:
+	or	SCB_RESIDUAL_SGPTR, SG_OVERRUN_RESID;
+	test	SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase;
+	dec	SCB_FIFO_USE_COUNT;
+	or	LONGJMP_ADDR[1], INVALID_ADDR;
+	test	SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle;
+	mvi	DFFSXFRCTL, CLRCHN ret;
+
+if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
+load_overrun_buf:
+	/*
+	 * Load a dummy segment if preload space is available.
+	 */
+	mov 	HADDR[0], SHARED_DATA_ADDR;
+	add	HADDR[1], PKT_OVERRUN_BUFOFFSET, SHARED_DATA_ADDR[1];
+	mov	ACCUM_SAVE, A;
+	clr	A;
+	adc	HADDR[2], A, SHARED_DATA_ADDR[2];
+	adc	HADDR[3], A, SHARED_DATA_ADDR[3];
+	mov	A, ACCUM_SAVE;
+	bmov	HADDR[4], ALLZEROS, 4;
+	/* PKT_OVERRUN_BUFSIZE is a multiple of 256 */
+	clr	HCNT[0];
+	mvi	HCNT[1], ((PKT_OVERRUN_BUFSIZE >> 8) & 0xFF);
+	clr	HCNT[2] ret;
+}
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
new file mode 100644
index 0000000..137fb1a
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -0,0 +1,9888 @@
+/*
+ * Core routines and tables shareable across OS platforms.
+ *
+ * Copyright (c) 1994-2002 Justin T. Gibbs.
+ * Copyright (c) 2000-2003 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#202 $
+ *
+ * $FreeBSD$
+ */
+
+#ifdef __linux__
+#include "aic79xx_osm.h"
+#include "aic79xx_inline.h"
+#include "aicasm/aicasm_insformat.h"
+#else
+#include <dev/aic7xxx/aic79xx_osm.h>
+#include <dev/aic7xxx/aic79xx_inline.h>
+#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
+#endif
+
+/******************************** Globals *************************************/
+struct ahd_softc_tailq ahd_tailq = TAILQ_HEAD_INITIALIZER(ahd_tailq);
+
+/***************************** Lookup Tables **********************************/
+char *ahd_chip_names[] =
+{
+	"NONE",
+	"aic7901",
+	"aic7902",
+	"aic7901A"
+};
+static const u_int num_chip_names = NUM_ELEMENTS(ahd_chip_names);
+
+/*
+ * Hardware error codes.
+ */
+struct ahd_hard_error_entry {
+        uint8_t errno;
+	char *errmesg;
+};
+
+static struct ahd_hard_error_entry ahd_hard_errors[] = {
+	{ DSCTMOUT,	"Discard Timer has timed out" },
+	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
+	{ SQPARERR,	"Sequencer Parity Error" },
+	{ DPARERR,	"Data-path Parity Error" },
+	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
+	{ CIOPARERR,	"CIOBUS Parity Error" },
+};
+static const u_int num_errors = NUM_ELEMENTS(ahd_hard_errors);
+
+static struct ahd_phase_table_entry ahd_phase_table[] =
+{
+	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
+	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
+	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
+	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
+	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
+	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
+	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
+	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
+	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
+	{ 0,		MSG_NOOP,		"in unknown phase"	}
+};
+
+/*
+ * In most cases we only wish to iterate over real phases, so
+ * exclude the last element from the count.
+ */
+static const u_int num_phases = NUM_ELEMENTS(ahd_phase_table) - 1;
+
+/* Our Sequencer Program */
+#include "aic79xx_seq.h"
+
+/**************************** Function Declarations ***************************/
+static void		ahd_handle_transmission_error(struct ahd_softc *ahd);
+static void		ahd_handle_lqiphase_error(struct ahd_softc *ahd,
+						  u_int lqistat1);
+static int		ahd_handle_pkt_busfree(struct ahd_softc *ahd,
+					       u_int busfreetime);
+static int		ahd_handle_nonpkt_busfree(struct ahd_softc *ahd);
+static void		ahd_handle_proto_violation(struct ahd_softc *ahd);
+static void		ahd_force_renegotiation(struct ahd_softc *ahd,
+						struct ahd_devinfo *devinfo);
+
+static struct ahd_tmode_tstate*
+			ahd_alloc_tstate(struct ahd_softc *ahd,
+					 u_int scsi_id, char channel);
+#ifdef AHD_TARGET_MODE
+static void		ahd_free_tstate(struct ahd_softc *ahd,
+					u_int scsi_id, char channel, int force);
+#endif
+static void		ahd_devlimited_syncrate(struct ahd_softc *ahd,
+					        struct ahd_initiator_tinfo *,
+						u_int *period,
+						u_int *ppr_options,
+						role_t role);
+static void		ahd_update_neg_table(struct ahd_softc *ahd,
+					     struct ahd_devinfo *devinfo,
+					     struct ahd_transinfo *tinfo);
+static void		ahd_update_pending_scbs(struct ahd_softc *ahd);
+static void		ahd_fetch_devinfo(struct ahd_softc *ahd,
+					  struct ahd_devinfo *devinfo);
+static void		ahd_scb_devinfo(struct ahd_softc *ahd,
+					struct ahd_devinfo *devinfo,
+					struct scb *scb);
+static void		ahd_setup_initiator_msgout(struct ahd_softc *ahd,
+						   struct ahd_devinfo *devinfo,
+						   struct scb *scb);
+static void		ahd_build_transfer_msg(struct ahd_softc *ahd,
+					       struct ahd_devinfo *devinfo);
+static void		ahd_construct_sdtr(struct ahd_softc *ahd,
+					   struct ahd_devinfo *devinfo,
+					   u_int period, u_int offset);
+static void		ahd_construct_wdtr(struct ahd_softc *ahd,
+					   struct ahd_devinfo *devinfo,
+					   u_int bus_width);
+static void		ahd_construct_ppr(struct ahd_softc *ahd,
+					  struct ahd_devinfo *devinfo,
+					  u_int period, u_int offset,
+					  u_int bus_width, u_int ppr_options);
+static void		ahd_clear_msg_state(struct ahd_softc *ahd);
+static void		ahd_handle_message_phase(struct ahd_softc *ahd);
+typedef enum {
+	AHDMSG_1B,
+	AHDMSG_2B,
+	AHDMSG_EXT
+} ahd_msgtype;
+static int		ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type,
+				     u_int msgval, int full);
+static int		ahd_parse_msg(struct ahd_softc *ahd,
+				      struct ahd_devinfo *devinfo);
+static int		ahd_handle_msg_reject(struct ahd_softc *ahd,
+					      struct ahd_devinfo *devinfo);
+static void		ahd_handle_ign_wide_residue(struct ahd_softc *ahd,
+						struct ahd_devinfo *devinfo);
+static void		ahd_reinitialize_dataptrs(struct ahd_softc *ahd);
+static void		ahd_handle_devreset(struct ahd_softc *ahd,
+					    struct ahd_devinfo *devinfo,
+					    u_int lun, cam_status status,
+					    char *message, int verbose_level);
+#ifdef AHD_TARGET_MODE
+static void		ahd_setup_target_msgin(struct ahd_softc *ahd,
+					       struct ahd_devinfo *devinfo,
+					       struct scb *scb);
+#endif
+
+static u_int		ahd_sglist_size(struct ahd_softc *ahd);
+static u_int		ahd_sglist_allocsize(struct ahd_softc *ahd);
+static bus_dmamap_callback_t
+			ahd_dmamap_cb; 
+static void		ahd_initialize_hscbs(struct ahd_softc *ahd);
+static int		ahd_init_scbdata(struct ahd_softc *ahd);
+static void		ahd_fini_scbdata(struct ahd_softc *ahd);
+static void		ahd_setup_iocell_workaround(struct ahd_softc *ahd);
+static void		ahd_iocell_first_selection(struct ahd_softc *ahd);
+static void		ahd_add_col_list(struct ahd_softc *ahd,
+					 struct scb *scb, u_int col_idx);
+static void		ahd_rem_col_list(struct ahd_softc *ahd,
+					 struct scb *scb);
+static void		ahd_chip_init(struct ahd_softc *ahd);
+static void		ahd_qinfifo_requeue(struct ahd_softc *ahd,
+					    struct scb *prev_scb,
+					    struct scb *scb);
+static int		ahd_qinfifo_count(struct ahd_softc *ahd);
+static int		ahd_search_scb_list(struct ahd_softc *ahd, int target,
+					    char channel, int lun, u_int tag,
+					    role_t role, uint32_t status,
+					    ahd_search_action action,
+					    u_int *list_head, u_int tid);
+static void		ahd_stitch_tid_list(struct ahd_softc *ahd,
+					    u_int tid_prev, u_int tid_cur,
+					    u_int tid_next);
+static void		ahd_add_scb_to_free_list(struct ahd_softc *ahd,
+						 u_int scbid);
+static u_int		ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
+				     u_int prev, u_int next, u_int tid);
+static void		ahd_reset_current_bus(struct ahd_softc *ahd);
+static ahd_callback_t	ahd_reset_poll;
+static ahd_callback_t	ahd_stat_timer;
+#ifdef AHD_DUMP_SEQ
+static void		ahd_dumpseq(struct ahd_softc *ahd);
+#endif
+static void		ahd_loadseq(struct ahd_softc *ahd);
+static int		ahd_check_patch(struct ahd_softc *ahd,
+					struct patch **start_patch,
+					u_int start_instr, u_int *skip_addr);
+static u_int		ahd_resolve_seqaddr(struct ahd_softc *ahd,
+					    u_int address);
+static void		ahd_download_instr(struct ahd_softc *ahd,
+					   u_int instrptr, uint8_t *dconsts);
+static int		ahd_probe_stack_size(struct ahd_softc *ahd);
+static int		ahd_scb_active_in_fifo(struct ahd_softc *ahd,
+					       struct scb *scb);
+static void		ahd_run_data_fifo(struct ahd_softc *ahd,
+					  struct scb *scb);
+
+#ifdef AHD_TARGET_MODE
+static void		ahd_queue_lstate_event(struct ahd_softc *ahd,
+					       struct ahd_tmode_lstate *lstate,
+					       u_int initiator_id,
+					       u_int event_type,
+					       u_int event_arg);
+static void		ahd_update_scsiid(struct ahd_softc *ahd,
+					  u_int targid_mask);
+static int		ahd_handle_target_cmd(struct ahd_softc *ahd,
+					      struct target_cmd *cmd);
+#endif
+
+/******************************** Private Inlines *****************************/
+static __inline void	ahd_assert_atn(struct ahd_softc *ahd);
+static __inline int	ahd_currently_packetized(struct ahd_softc *ahd);
+static __inline int	ahd_set_active_fifo(struct ahd_softc *ahd);
+
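+/*
+ * Assert ATN on the SCSI bus so the target will transition to a
+ * message-out phase.
+ */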
+static __inline void
+ahd_assert_atn(struct ahd_softc *ahd)
+{
+	ahd_outb(ahd, SCSISIGO, ATNO);
+}
+
+/*
+ * Determine if the current connection has a packetized
+ * agreement.  This does not necessarily mean that we
+ * are currently in a packetized transfer.  We could
+ * just as easily be sending or receiving a message.
+ */
+static __inline int
+ahd_currently_packetized(struct ahd_softc *ahd)
+{
+	ahd_mode_state	 saved_modes;
+	int		 packetized;
+
+	saved_modes = ahd_save_modes(ahd);
+	if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
+		/*
+		 * The packetized bit refers to the last
+		 * connection, not the current one.  Check
+		 * for non-zero LQISTATE instead.
+		 */
+		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+		packetized = ahd_inb(ahd, LQISTATE) != 0;
+	} else {
+		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+		packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
+	}
+	ahd_restore_modes(ahd, saved_modes);
+	return (packetized);
+}
+
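+/*
+ * Switch to the mode of the data FIFO that is currently active,
+ * as reported by the CURRFIFO field of DFFSTAT.  Returns non-zero
+ * if a FIFO was active and our mode now references it, and zero
+ * otherwise.
+ */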
+static __inline int
+ahd_set_active_fifo(struct ahd_softc *ahd)
+{
+	u_int active_fifo;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+	active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
+	switch (active_fifo) {
+	case 0:
+	case 1:
+		ahd_set_modes(ahd, active_fifo, active_fifo);
+		return (1);
+	default:
+		return (0);
+	}
+}
+
+/************************* Sequencer Execution Control ************************/
+/*
+ * Restart the sequencer program from address zero
+ */
+void
+ahd_restart(struct ahd_softc *ahd)
+{
+
+	ahd_pause(ahd);
+
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+	/* No more pending messages */
+	ahd_clear_msg_state(ahd);
+	ahd_outb(ahd, SCSISIGO, 0);		/* De-assert BSY */
+	ahd_outb(ahd, MSG_OUT, MSG_NOOP);	/* No message to send */
+	ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET);
+	ahd_outb(ahd, SEQINTCTL, 0);
+	ahd_outb(ahd, LASTPHASE, P_BUSFREE);
+	ahd_outb(ahd, SEQ_FLAGS, 0);
+	ahd_outb(ahd, SAVED_SCSIID, 0xFF);
+	ahd_outb(ahd, SAVED_LUN, 0xFF);
+
+	/*
+	 * Ensure that the sequencer's idea of TQINPOS
+	 * matches our own.  The sequencer increments TQINPOS
+	 * only after it sees a DMA complete, and a reset could
+	 * occur before the increment, leaving the kernel to believe
+	 * the command arrived while the sequencer remains unaware of it.
+	 */
+	ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);
+
+	/* Always allow reselection */
+	ahd_outb(ahd, SCSISEQ1,
+		 ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
+	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+	ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
+	ahd_unpause(ahd);
+}
+
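+/*
+ * Reset the state of the indicated data FIFO.  The channel is
+ * reset, any in-progress S/G fetch is cancelled, and the FIFO's
+ * sequencer handler is invalidated.  The caller's mode settings
+ * are preserved.
+ */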
+void
+ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo)
+{
+	ahd_mode_state	 saved_modes;
+
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_FIFOS) != 0)
+		printf("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo);
+#endif
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, fifo, fifo);
+	ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
+	if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
+		ahd_outb(ahd, CCSGCTL, CCSGRESET);
+	ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
+	ahd_outb(ahd, SG_STATE, 0);
+	ahd_restore_modes(ahd, saved_modes);
+}
+
+/************************* Input/Output Queues ********************************/
+/*
+ * Flush any completed commands that are sitting in the command
+ * complete queues down on the chip but have yet to be DMA'ed back up.
+ */
+void
+ahd_flush_qoutfifo(struct ahd_softc *ahd)
+{
+	struct		scb *scb;
+	ahd_mode_state	saved_modes;
+	u_int		saved_scbptr;
+	u_int		ccscbctl;
+	u_int		scbid;
+	u_int		next_scbid;
+
+	saved_modes = ahd_save_modes(ahd);
+
+	/*
+	 * Complete any SCBs that just finished being
+	 * DMA'ed into the qoutfifo.
+	 */
+	ahd_run_qoutfifo(ahd);
+
+	/*
+	 * Flush the good status FIFO for completed packetized commands.
+	 */
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	saved_scbptr = ahd_get_scbptr(ahd);
+	while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
+		u_int fifo_mode;
+		u_int i;
+		
+		scbid = (ahd_inb(ahd, GSFIFO+1) << 8)
+		      | ahd_inb(ahd, GSFIFO);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb == NULL) {
+			printf("%s: Warning - GSFIFO SCB %d invalid\n",
+			       ahd_name(ahd), scbid);
+			continue;
+		}
+		/*
+		 * Determine if this transaction is still active in
+		 * any FIFO.  If it is, we must flush that FIFO to
+		 * the host before completing the command.
+		 */
+		fifo_mode = 0;
+		for (i = 0; i < 2; i++) {
+			/* Toggle to the other mode. */
+			fifo_mode ^= 1;
+			ahd_set_modes(ahd, fifo_mode, fifo_mode);
+			if (ahd_scb_active_in_fifo(ahd, scb) == 0)
+				continue;
+
+			ahd_run_data_fifo(ahd, scb);
+
+			/*
+			 * Clearing this transaction in this FIFO may
+			 * cause a CFG4DATA for this same transaction
+			 * to assert in the other FIFO.  Make sure we
+			 * loop one more time and check the other FIFO.
+			 */
+			i = 0;
+		}
+		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+		ahd_set_scbptr(ahd, scbid);
+		if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0
+		 && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0
+		  || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR)
+		      & SG_LIST_NULL) != 0)) {
+			u_int comp_head;
+
+			/*
+			 * The transfer completed with a residual.
+			 * Place this SCB on the complete DMA list
+			 * so that we update our in-core copy of the
+			 * SCB before completing the command.
+			 */
+			ahd_outb(ahd, SCB_SCSI_STATUS, 0);
+			ahd_outb(ahd, SCB_SGPTR,
+				 ahd_inb_scbram(ahd, SCB_SGPTR)
+				 | SG_STATUS_VALID);
+			ahd_outw(ahd, SCB_TAG, SCB_GET_TAG(scb));
+			comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
+			ahd_outw(ahd, SCB_NEXT_COMPLETE, comp_head);
+			if (SCBID_IS_NULL(comp_head))
+				ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD,
+					 SCB_GET_TAG(scb));
+		} else
+			ahd_complete_scb(ahd, scb);
+	}
+	ahd_set_scbptr(ahd, saved_scbptr);
+
+	/*
+	 * Setup for command channel portion of flush.
+	 */
+	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+
+	/*
+	 * Wait for any in-progress DMA to complete and clear DMA state
+	 * if this is for an SCB in the qinfifo.
+	 */
+	while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) {
+
+		if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) {
+			if ((ccscbctl & ARRDONE) != 0)
+				break;
+		} else if ((ccscbctl & CCSCBDONE) != 0)
+			break;
+		ahd_delay(200);
+	}
+	if ((ccscbctl & CCSCBDIR) != 0)
+		ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN));
+
+	saved_scbptr = ahd_get_scbptr(ahd);
+	/*
+	 * Manually update/complete any completed SCBs that are waiting to be
+	 * DMA'ed back up to the host.
+	 */
+	scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
+	while (!SCBID_IS_NULL(scbid)) {
+		uint8_t *hscb_ptr;
+		u_int	 i;
+		
+		ahd_set_scbptr(ahd, scbid);
+		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb == NULL) {
+			printf("%s: Warning - DMA-up and complete "
+			       "SCB %d invalid\n", ahd_name(ahd), scbid);
+			continue;
+		}
+		hscb_ptr = (uint8_t *)scb->hscb;
+		for (i = 0; i < sizeof(struct hardware_scb); i++)
+			*hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i);
+
+		ahd_complete_scb(ahd, scb);
+		scbid = next_scbid;
+	}
+	ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
+
+	scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD);
+	while (!SCBID_IS_NULL(scbid)) {
+
+		ahd_set_scbptr(ahd, scbid);
+		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb == NULL) {
+			printf("%s: Warning - Complete SCB %d invalid\n",
+			       ahd_name(ahd), scbid);
+			continue;
+		}
+
+		ahd_complete_scb(ahd, scb);
+		scbid = next_scbid;
+	}
+	ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
+
+	/*
+	 * Restore state.
+	 */
+	ahd_set_scbptr(ahd, saved_scbptr);
+	ahd_restore_modes(ahd, saved_modes);
+	ahd->flags |= AHD_UPDATE_PEND_CMDS;
+}
+
+/*
+ * Determine if an SCB for a packetized transaction
+ * is active in a FIFO.
+ */
+static int
+ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb)
+{
+
+	/*
+	 * The FIFO is only active for our transaction if
+	 * the SCBPTR matches the SCB's ID and the firmware
+	 * has installed a handler for the FIFO or we have
+	 * a pending SAVEPTRS or CFG4DATA interrupt.
+	 */
+	if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb)
+	 || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0
+	  && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0))
+		return (0);
+
+	return (1);
+}
+
+/*
+ * Run a data fifo to completion for a transaction we know
+ * has completed across the SCSI bus (good status has been
+ * received).  We are already set to the correct FIFO mode
+ * on entry to this routine.
+ *
+ * This function attempts to operate exactly as the firmware
+ * would when running this FIFO.  Care must be taken to update
+ * this routine any time the firmware's FIFO algorithm is
+ * changed.
+ */
+static void
+ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb)
+{
+	u_int seqintsrc;
+
+	while (1) {
+		seqintsrc = ahd_inb(ahd, SEQINTSRC);
+		if ((seqintsrc & CFG4DATA) != 0) {
+			uint32_t datacnt;
+			uint32_t sgptr;
+
+			/*
+			 * Clear full residual flag.
+			 */
+			sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID;
+			ahd_outb(ahd, SCB_SGPTR, sgptr);
+
+			/*
+			 * Load datacnt and address.
+			 */
+			datacnt = ahd_inl_scbram(ahd, SCB_DATACNT);
+			if ((datacnt & AHD_DMA_LAST_SEG) != 0) {
+				sgptr |= LAST_SEG;
+				ahd_outb(ahd, SG_STATE, 0);
+			} else
+				ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
+			ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR));
+			ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK);
+			ahd_outb(ahd, SG_CACHE_PRE, sgptr);
+			ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
+
+			/*
+			 * Initialize Residual Fields.
+			 */
+			ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24);
+			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK);
+
+			/*
+			 * Mark the SCB as having a FIFO in use.
+			 */
+			ahd_outb(ahd, SCB_FIFO_USE_COUNT,
+				 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1);
+
+			/*
+			 * Install a "fake" handler for this FIFO.
+			 */
+			ahd_outw(ahd, LONGJMP_ADDR, 0);
+
+			/*
+			 * Notify the hardware that we have satisfied
+			 * this sequencer interrupt.
+			 */
+			ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA);
+		} else if ((seqintsrc & SAVEPTRS) != 0) {
+			uint32_t sgptr;
+			uint32_t resid;
+
+			if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) {
+				/*
+				 * Snapshot Save Pointers.  Clear
+				 * the snapshot and continue.
+				 */
+				ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
+				continue;
+			}
+
+			/*
+			 * Disable S/G fetch so the DMA engine
+			 * is available to future users.
+			 */
+			if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
+				ahd_outb(ahd, CCSGCTL, 0);
+			ahd_outb(ahd, SG_STATE, 0);
+
+			/*
+			 * Flush the data FIFO.  Strictly only
+			 * necessary for Rev A parts.
+			 */
+			ahd_outb(ahd, DFCNTRL,
+				 ahd_inb(ahd, DFCNTRL) | FIFOFLUSH);
+
+			/*
+			 * Calculate residual.
+			 */
+			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
+			resid = ahd_inl(ahd, SHCNT);
+			resid |=
+			    ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24;
+			ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid);
+			if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) {
+				/*
+				 * Must back up to the correct S/G element.
+				 * Typically this just means resetting our
+				 * low byte to the offset in the SG_CACHE,
+				 * but if we wrapped, we have to correct
+				 * the other bytes of the sgptr too.
+				 */
+				if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0
+				 && (sgptr & 0x80) == 0)
+					sgptr -= 0x100;
+				sgptr &= ~0xFF;
+				sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW)
+				       & SG_ADDR_MASK;
+				ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
+				ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0);
+			} else if ((resid & AHD_SG_LEN_MASK) == 0) {
+				ahd_outb(ahd, SCB_RESIDUAL_SGPTR,
+					 sgptr | SG_LIST_NULL);
+			}
+			/*
+			 * Save Pointers.
+			 */
+			ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR));
+			ahd_outl(ahd, SCB_DATACNT, resid);
+			ahd_outl(ahd, SCB_SGPTR, sgptr);
+			ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS);
+			ahd_outb(ahd, SEQIMODE,
+				 ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS);
+			/*
+			 * If the data is to the SCSI bus, we are
+			 * done, otherwise wait for FIFOEMP.
+			 */
+			if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0)
+				break;
+		} else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) {
+			uint32_t sgptr;
+			uint64_t data_addr;
+			uint32_t data_len;
+			u_int	 dfcntrl;
+
+			/*
+			 * Disable S/G fetch so the DMA engine
+			 * is available to future users.
+			 */
+			if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) {
+				ahd_outb(ahd, CCSGCTL, 0);
+				ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
+			}
+
+			/*
+			 * Wait for the DMA engine to notice that the
+			 * host transfer is enabled and that there is
+			 * space in the S/G FIFO for new segments before
+			 * loading more segments.
+			 */
+			if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) == 0)
+				continue;
+			if ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) == 0)
+				continue;
+
+			/*
+			 * Determine the offset of the next S/G
+			 * element to load.
+			 */
+			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
+			sgptr &= SG_PTR_MASK;
+			if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+				struct ahd_dma64_seg *sg;
+
+				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+				data_addr = sg->addr;
+				data_len = sg->len;
+				sgptr += sizeof(*sg);
+			} else {
+				struct	ahd_dma_seg *sg;
+
+				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+				data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK;
+				data_addr <<= 8;
+				data_addr |= sg->addr;
+				data_len = sg->len;
+				sgptr += sizeof(*sg);
+			}
+
+			/*
+			 * Update residual information.
+			 */
+			ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24);
+			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
+
+			/*
+			 * Load the S/G.
+			 */
+			if (data_len & AHD_DMA_LAST_SEG) {
+				sgptr |= LAST_SEG;
+				ahd_outb(ahd, SG_STATE, 0);
+			}
+			ahd_outq(ahd, HADDR, data_addr);
+			ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK);
+			ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF);
+
+			/*
+			 * Advertise the segment to the hardware.
+			 */
+			dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN;
+			if ((ahd->features & AHD_NEW_DFCNTRL_OPTS)!=0) {
+				/*
+				 * Use SCSIENWRDIS so that SCSIEN
+				 * is never modified by this
+				 * operation.
+				 */
+				dfcntrl |= SCSIENWRDIS;
+			}
+			ahd_outb(ahd, DFCNTRL, dfcntrl);
+		} else if ((ahd_inb(ahd, SG_CACHE_SHADOW)
+			 & LAST_SEG_DONE) != 0) {
+
+			/*
+			 * Transfer completed to the end of SG list
+			 * and has flushed to the host.
+			 */
+			ahd_outb(ahd, SCB_SGPTR,
+				 ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL);
+			break;
+		} else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) {
+			break;
+		}
+		ahd_delay(200);
+	}
+	/*
+	 * Clear any handler for this FIFO, decrement
+	 * the FIFO use count for the SCB, and release
+	 * the FIFO.
+	 */
+	ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
+	ahd_outb(ahd, SCB_FIFO_USE_COUNT,
+		 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1);
+	ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
+}
+
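+/*
+ * Walk the entries that the controller has DMA'ed into the
+ * qoutfifo and complete each referenced SCB.  The valid tag is
+ * toggled each time the fifo index wraps so that stale entries
+ * are never mistaken for new completions.
+ */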
+void
+ahd_run_qoutfifo(struct ahd_softc *ahd)
+{
+	struct scb *scb;
+	u_int  scb_index;
+
+	if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0)
+		panic("ahd_run_qoutfifo recursion");
+	ahd->flags |= AHD_RUNNING_QOUTFIFO;
+	ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD);
+	while ((ahd->qoutfifo[ahd->qoutfifonext]
+	     & QOUTFIFO_ENTRY_VALID_LE) == ahd->qoutfifonext_valid_tag) {
+
+		scb_index = ahd_le16toh(ahd->qoutfifo[ahd->qoutfifonext]
+				      & ~QOUTFIFO_ENTRY_VALID_LE);
+		scb = ahd_lookup_scb(ahd, scb_index);
+		if (scb == NULL) {
+			printf("%s: WARNING no command for scb %d "
+			       "(cmdcmplt)\nQOUTPOS = %d\n",
+			       ahd_name(ahd), scb_index,
+			       ahd->qoutfifonext);
+			ahd_dump_card_state(ahd);
+		} else
+			ahd_complete_scb(ahd, scb);
+
+		ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1);
+		if (ahd->qoutfifonext == 0)
+			ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID_LE;
+	}
+	ahd->flags &= ~AHD_RUNNING_QOUTFIFO;
+}
+
+/************************* Interrupt Handling *********************************/
+void
+ahd_handle_hwerrint(struct ahd_softc *ahd)
+{
+	/*
+	 * Some catastrophic hardware error has occurred.
+	 * Print it for the user and disable the controller.
+	 */
+	int i;
+	int error;
+
+	error = ahd_inb(ahd, ERROR);
+	for (i = 0; i < num_errors; i++) {
+		if ((error & ahd_hard_errors[i].errno) != 0)
+			printf("%s: hwerrint, %s\n",
+			       ahd_name(ahd), ahd_hard_errors[i].errmesg);
+	}
+
+	ahd_dump_card_state(ahd);
+	panic("BRKADRINT");
+
+	/* Tell everyone that this HBA is no longer available */
+	ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
+		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
+		       CAM_NO_HBA);
+
+	/* Tell the system that this controller has gone away. */
+	ahd_free(ahd);
+}
+
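+/*
+ * Service a sequencer interrupt (SEQINT).  The sequencer pauses
+ * itself when posting a SEQINT, so this routine handles the
+ * interrupt code and unpauses the sequencer before returning
+ * unless the handler has already restarted it.
+ */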
+void
+ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
+{
+	u_int seqintcode;
+
+	/*
+	 * Save the sequencer interrupt code and clear the SEQINT
+	 * bit. We will unpause the sequencer, if appropriate,
+	 * after servicing the request.
+	 */
+	seqintcode = ahd_inb(ahd, SEQINTCODE);
+	ahd_outb(ahd, CLRINT, CLRSEQINT);
+	if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
+		/*
+		 * Unpause the sequencer and let it clear
+		 * SEQINT by writing NO_SEQINT to it.  This
+		 * will cause the sequencer to be paused again,
+		 * which is the expected state of this routine.
+		 */
+		ahd_unpause(ahd);
+		while (!ahd_is_paused(ahd))
+			;
+		ahd_outb(ahd, CLRINT, CLRSEQINT);
+	}
+	ahd_update_modes(ahd);
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_MISC) != 0)
+		printf("%s: Handle Seqint Called for code %d\n",
+		       ahd_name(ahd), seqintcode);
+#endif
+	switch (seqintcode) {
+	case BAD_SCB_STATUS:
+	{
+		struct	scb *scb;
+		u_int	scbid;
+		int	cmds_pending;
+
+		scbid = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb != NULL) {
+			ahd_complete_scb(ahd, scb);
+		} else {
+			printf("%s: WARNING no command for scb %d "
+			       "(bad status)\n", ahd_name(ahd), scbid);
+			ahd_dump_card_state(ahd);
+		}
+		cmds_pending = ahd_inw(ahd, CMDS_PENDING);
+		if (cmds_pending > 0)
+			ahd_outw(ahd, CMDS_PENDING, cmds_pending - 1);
+		break;
+	}
+	case ENTERING_NONPACK:
+	{
+		struct	scb *scb;
+		u_int	scbid;
+
+		AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+				 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+		scbid = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb == NULL) {
+			/*
+			 * Somehow need to know if this
+			 * is from a selection or reselection.
+			 * From that, we can determine target
+			 * ID so we at least have an I_T nexus.
+			 */
+		} else {
+			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
+			ahd_outb(ahd, SAVED_LUN, scb->hscb->lun);
+			ahd_outb(ahd, SEQ_FLAGS, 0x0);
+		}
+		if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0
+		 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
+			/*
+			 * Phase change after read stream with
+			 * CRC error with P0 asserted on last
+			 * packet.
+			 */
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
+				printf("%s: Assuming LQIPHASE_NLQ with "
+				       "P0 assertion\n", ahd_name(ahd));
+#endif
+		}
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
+			printf("%s: Entering NONPACK\n", ahd_name(ahd));
+#endif
+		break;
+	}
+	case INVALID_SEQINT:
+		printf("%s: Invalid Sequencer interrupt occurred.\n",
+		       ahd_name(ahd));
+		ahd_dump_card_state(ahd);
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+		break;
+	case STATUS_OVERRUN:
+	{
+		struct	scb *scb;
+		u_int	scbid;
+
+		scbid = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb != NULL)
+			ahd_print_path(ahd, scb);
+		else
+			printf("%s: ", ahd_name(ahd));
+		printf("SCB %d Packetized Status Overrun", scbid);
+		ahd_dump_card_state(ahd);
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+		break;
+	}
+	case CFG4ISTAT_INTR:
+	{
+		struct	scb *scb;
+		u_int	scbid;
+
+		scbid = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb == NULL) {
+			ahd_dump_card_state(ahd);
+			printf("CFG4ISTAT: Free SCB %d referenced", scbid);
+			panic("For safety");
+		}
+		ahd_outq(ahd, HADDR, scb->sense_busaddr);
+		ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE);
+		ahd_outb(ahd, HCNT + 2, 0);
+		ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG);
+		ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
+		break;
+	}
+	case ILLEGAL_PHASE:
+	{
+		u_int bus_phase;
+
+		bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
+		printf("%s: ILLEGAL_PHASE 0x%x\n",
+		       ahd_name(ahd), bus_phase);
+
+		switch (bus_phase) {
+		case P_DATAOUT:
+		case P_DATAIN:
+		case P_DATAOUT_DT:
+		case P_DATAIN_DT:
+		case P_MESGOUT:
+		case P_STATUS:
+		case P_MESGIN:
+			ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+			printf("%s: Issued Bus Reset.\n", ahd_name(ahd));
+			break;
+		case P_COMMAND:
+		{
+			struct	ahd_devinfo devinfo;
+			struct	scb *scb;
+			struct	ahd_initiator_tinfo *targ_info;
+			struct	ahd_tmode_tstate *tstate;
+			struct	ahd_transinfo *tinfo;
+			u_int	scbid;
+
+			/*
+			 * If a target takes us into the command phase
+			 * assume that it has been externally reset and
+			 * has thus lost our previous packetized negotiation
+			 * agreement.  Since we have not sent an identify
+			 * message and may not have fully qualified the
+			 * connection, we change our command to TUR, assert
+			 * ATN and ABORT the task when we go to message in
+			 * phase.  The OSM will see the REQUEUE_REQUEST
+			 * status and retry the command.
+			 */
+			scbid = ahd_get_scbptr(ahd);
+			scb = ahd_lookup_scb(ahd, scbid);
+			if (scb == NULL) {
+				printf("Invalid phase with no valid SCB.  "
+				       "Resetting bus.\n");
+				ahd_reset_channel(ahd, 'A',
+						  /*Initiate Reset*/TRUE);
+				break;
+			}
+			ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
+					    SCB_GET_TARGET(ahd, scb),
+					    SCB_GET_LUN(scb),
+					    SCB_GET_CHANNEL(ahd, scb),
+					    ROLE_INITIATOR);
+			targ_info = ahd_fetch_transinfo(ahd,
+							devinfo.channel,
+							devinfo.our_scsiid,
+							devinfo.target,
+							&tstate);
+			tinfo = &targ_info->curr;
+			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+				      AHD_TRANS_ACTIVE, /*paused*/TRUE);
+			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
+					 /*offset*/0, /*ppr_options*/0,
+					 AHD_TRANS_ACTIVE, /*paused*/TRUE);
+			ahd_outb(ahd, SCB_CDB_STORE, 0);
+			ahd_outb(ahd, SCB_CDB_STORE+1, 0);
+			ahd_outb(ahd, SCB_CDB_STORE+2, 0);
+			ahd_outb(ahd, SCB_CDB_STORE+3, 0);
+			ahd_outb(ahd, SCB_CDB_STORE+4, 0);
+			ahd_outb(ahd, SCB_CDB_STORE+5, 0);
+			ahd_outb(ahd, SCB_CDB_LEN, 6);
+			scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
+			scb->hscb->control |= MK_MESSAGE;
+			ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
+			ahd_outb(ahd, MSG_OUT, HOST_MSG);
+			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
+			/*
+			 * The lun is 0, regardless of the SCB's lun
+			 * as we have not sent an identify message.
+			 */
+			ahd_outb(ahd, SAVED_LUN, 0);
+			ahd_outb(ahd, SEQ_FLAGS, 0);
+			ahd_assert_atn(ahd);
+			scb->flags &= ~(SCB_PACKETIZED);
+			scb->flags |= SCB_ABORT|SCB_CMDPHASE_ABORT;
+			ahd_freeze_devq(ahd, scb);
+			ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
+			ahd_freeze_scb(scb);
+
+			/*
+			 * Allow the sequencer to continue with
+			 * non-pack processing.
+			 */
+			ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+			ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT);
+			if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
+				ahd_outb(ahd, CLRLQOINT1, 0);
+			}
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
+				ahd_print_path(ahd, scb);
+				printf("Unexpected command phase from "
+				       "packetized target\n");
+			}
+#endif
+			break;
+		}
+		}
+		break;
+	}
+	case CFG4OVERRUN:
+	{
+		struct	scb *scb;
+		u_int	scb_index;
+		
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
+			printf("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
+			       ahd_inb(ahd, MODE_PTR));
+		}
+#endif
+		scb_index = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scb_index);
+		if (scb == NULL) {
+			/*
+			 * Attempt to transfer to an SCB that is
+			 * not outstanding.
+			 */
+			ahd_assert_atn(ahd);
+			ahd_outb(ahd, MSG_OUT, HOST_MSG);
+			ahd->msgout_buf[0] = MSG_ABORT_TASK;
+			ahd->msgout_len = 1;
+			ahd->msgout_index = 0;
+			ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+			/*
+			 * Clear status received flag to prevent any
+			 * attempt to complete this bogus SCB.
+			 */
+			ahd_outb(ahd, SCB_CONTROL,
+				 ahd_inb_scbram(ahd, SCB_CONTROL)
+				 & ~STATUS_RCVD);
+		}
+		break;
+	}
+	case DUMP_CARD_STATE:
+	{
+		ahd_dump_card_state(ahd);
+		break;
+	}
+	case PDATA_REINIT:
+	{
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
+			printf("%s: PDATA_REINIT - DFCNTRL = 0x%x "
+			       "SG_CACHE_SHADOW = 0x%x\n",
+			       ahd_name(ahd), ahd_inb(ahd, DFCNTRL),
+			       ahd_inb(ahd, SG_CACHE_SHADOW));
+		}
+#endif
+		ahd_reinitialize_dataptrs(ahd);
+		break;
+	}
+	case HOST_MSG_LOOP:
+	{
+		struct ahd_devinfo devinfo;
+
+		/*
+		 * The sequencer has encountered a message phase
+		 * that requires host assistance for completion.
+		 * While handling the message phase(s), we will be
+		 * notified by the sequencer after each byte is
+		 * transferred so we can track bus phase changes.
+		 *
+		 * If this is the first time we've seen a HOST_MSG_LOOP
+		 * interrupt, initialize the state of the host message
+		 * loop.
+		 */
+		ahd_fetch_devinfo(ahd, &devinfo);
+		if (ahd->msg_type == MSG_TYPE_NONE) {
+			struct scb *scb;
+			u_int scb_index;
+			u_int bus_phase;
+
+			bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
+			if (bus_phase != P_MESGIN
+			 && bus_phase != P_MESGOUT) {
+				printf("ahd_intr: HOST_MSG_LOOP bad "
+				       "phase 0x%x\n", bus_phase);
+				/*
+				 * Probably transitioned to bus free before
+				 * we got here.  Just punt the message.
+				 */
+				ahd_dump_card_state(ahd);
+				ahd_clear_intstat(ahd);
+				ahd_restart(ahd);
+				return;
+			}
+
+			scb_index = ahd_get_scbptr(ahd);
+			scb = ahd_lookup_scb(ahd, scb_index);
+			if (devinfo.role == ROLE_INITIATOR) {
+				if (bus_phase == P_MESGOUT)
+					ahd_setup_initiator_msgout(ahd,
+								   &devinfo,
+								   scb);
+				else {
+					ahd->msg_type =
+					    MSG_TYPE_INITIATOR_MSGIN;
+					ahd->msgin_index = 0;
+				}
+			}
+#ifdef AHD_TARGET_MODE
+			else {
+				if (bus_phase == P_MESGOUT) {
+					ahd->msg_type =
+					    MSG_TYPE_TARGET_MSGOUT;
+					ahd->msgin_index = 0;
+				}
+				else 
+					ahd_setup_target_msgin(ahd,
+							       &devinfo,
+							       scb);
+			}
+#endif
+		}
+
+		ahd_handle_message_phase(ahd);
+		break;
+	}
+	case NO_MATCH:
+	{
+		/* Ensure we don't leave the selection hardware on */
+		AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
+
+		printf("%s:%c:%d: no active SCB for reconnecting "
+		       "target - issuing BUS DEVICE RESET\n",
+		       ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4);
+		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
+		       "REG0 == 0x%x ACCUM = 0x%x\n",
+		       ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN),
+		       ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM));
+		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
+		       "SINDEX == 0x%x\n",
+		       ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd),
+		       ahd_find_busy_tcl(ahd,
+					 BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID),
+						   ahd_inb(ahd, SAVED_LUN))),
+		       ahd_inw(ahd, SINDEX));
+		printf("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
+		       "SCB_CONTROL == 0x%x\n",
+		       ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID),
+		       ahd_inb_scbram(ahd, SCB_LUN),
+		       ahd_inb_scbram(ahd, SCB_CONTROL));
+		printf("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n",
+		       ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI));
+		printf("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0));
+		printf("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0));
+		ahd_dump_card_state(ahd);
+		ahd->msgout_buf[0] = MSG_BUS_DEV_RESET;
+		ahd->msgout_len = 1;
+		ahd->msgout_index = 0;
+		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+		ahd_outb(ahd, MSG_OUT, HOST_MSG);
+		ahd_assert_atn(ahd);
+		break;
+	}
+	case PROTO_VIOLATION:
+	{
+		ahd_handle_proto_violation(ahd);
+		break;
+	}
+	case IGN_WIDE_RES:
+	{
+		struct ahd_devinfo devinfo;
+
+		ahd_fetch_devinfo(ahd, &devinfo);
+		ahd_handle_ign_wide_residue(ahd, &devinfo);
+		break;
+	}
+	case BAD_PHASE:
+	{
+		u_int lastphase;
+
+		lastphase = ahd_inb(ahd, LASTPHASE);
+		printf("%s:%c:%d: unknown scsi bus phase %x, "
+		       "lastphase = 0x%x.  Attempting to continue\n",
+		       ahd_name(ahd), 'A',
+		       SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
+		       lastphase, ahd_inb(ahd, SCSISIGI));
+		break;
+	}
+	case MISSED_BUSFREE:
+	{
+		u_int lastphase;
+
+		lastphase = ahd_inb(ahd, LASTPHASE);
+		printf("%s:%c:%d: Missed busfree. "
+		       "Lastphase = 0x%x, Curphase = 0x%x\n",
+		       ahd_name(ahd), 'A',
+		       SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
+		       lastphase, ahd_inb(ahd, SCSISIGI));
+		ahd_restart(ahd);
+		return;
+	}
+	case DATA_OVERRUN:
+	{
+		/*
+		 * When the sequencer detects an overrun, it
+		 * places the controller in "BITBUCKET" mode
+		 * and allows the target to complete its transfer.
+		 * Unfortunately, none of the counters get updated
+		 * when the controller is in this mode, so we have
+		 * no way of knowing how large the overrun was.
+		 */
+		struct	scb *scb;
+		u_int	scbindex;
+#ifdef AHD_DEBUG
+		u_int	lastphase;
+#endif
+
+		scbindex = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbindex);
+#ifdef AHD_DEBUG
+		lastphase = ahd_inb(ahd, LASTPHASE);
+		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
+			ahd_print_path(ahd, scb);
+			printf("data overrun detected %s.  Tag == 0x%x.\n",
+			       ahd_lookup_phase_entry(lastphase)->phasemsg,
+			       SCB_GET_TAG(scb));
+			ahd_print_path(ahd, scb);
+			printf("%s seen Data Phase.  Length = %ld.  "
+			       "NumSGs = %d.\n",
+			       ahd_inb(ahd, SEQ_FLAGS) & DPHASE
+			       ? "Have" : "Haven't",
+			       ahd_get_transfer_length(scb), scb->sg_count);
+			ahd_dump_sglist(scb);
+		}
+#endif
+
+		/*
+		 * Set this and it will take effect when the
+		 * target does a command complete.
+		 */
+		ahd_freeze_devq(ahd, scb);
+		ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
+		ahd_freeze_scb(scb);
+		break;
+	}
+	case MKMSG_FAILED:
+	{
+		struct ahd_devinfo devinfo;
+		struct scb *scb;
+		u_int scbid;
+
+		ahd_fetch_devinfo(ahd, &devinfo);
+		printf("%s:%c:%d:%d: Attempt to issue message failed\n",
+		       ahd_name(ahd), devinfo.channel, devinfo.target,
+		       devinfo.lun);
+		scbid = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb != NULL
+		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
+			/*
+			 * Ensure that we didn't put a second instance of this
+			 * SCB into the QINFIFO.
+			 */
+			ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
+					   SCB_GET_CHANNEL(ahd, scb),
+					   SCB_GET_LUN(scb), SCB_GET_TAG(scb),
+					   ROLE_INITIATOR, /*status*/0,
+					   SEARCH_REMOVE);
+		ahd_outb(ahd, SCB_CONTROL,
+			 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
+		break;
+	}
+	case TASKMGMT_FUNC_COMPLETE:
+	{
+		u_int	scbid;
+		struct	scb *scb;
+
+		scbid = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb != NULL) {
+			u_int	   lun;
+			u_int	   tag;
+			cam_status error;
+
+			ahd_print_path(ahd, scb);
+			printf("Task Management Func 0x%x Complete\n",
+			       scb->hscb->task_management);
+			lun = CAM_LUN_WILDCARD;
+			tag = SCB_LIST_NULL;
+
+			switch (scb->hscb->task_management) {
+			case SIU_TASKMGMT_ABORT_TASK:
+				tag = SCB_GET_TAG(scb);
+			case SIU_TASKMGMT_ABORT_TASK_SET:
+			case SIU_TASKMGMT_CLEAR_TASK_SET:
+				lun = scb->hscb->lun;
+				error = CAM_REQ_ABORTED;
+				ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
+					       'A', lun, tag, ROLE_INITIATOR,
+					       error);
+				break;
+			case SIU_TASKMGMT_LUN_RESET:
+				lun = scb->hscb->lun;
+			case SIU_TASKMGMT_TARGET_RESET:
+			{
+				struct ahd_devinfo devinfo;
+
+				ahd_scb_devinfo(ahd, &devinfo, scb);
+				error = CAM_BDR_SENT;
+				ahd_handle_devreset(ahd, &devinfo, lun,
+						    CAM_BDR_SENT,
+						    lun != CAM_LUN_WILDCARD
+						    ? "Lun Reset"
+						    : "Target Reset",
+						    /*verbose_level*/0);
+				break;
+			}
+			default:
+				panic("Unexpected TaskMgmt Func\n");
+				break;
+			}
+		}
+		break;
+	}
+	case TASKMGMT_CMD_CMPLT_OKAY:
+	{
+		u_int	scbid;
+		struct	scb *scb;
+
+		/*
+		 * An ABORT TASK TMF failed to be delivered before
+		 * the targeted command completed normally.
+		 */
+		scbid = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb != NULL) {
+			/*
+			 * Remove the second instance of this SCB from
+			 * the QINFIFO if it is still there.
+			 */
+			ahd_print_path(ahd, scb);
+			printf("SCB completes before TMF\n");
+			/*
+			 * Handle losing the race.  Wait until any
+			 * current selection completes.  We will then
+			 * set the TMF back to zero in this SCB so that
+			 * the sequencer doesn't bother to issue another
+			 * sequencer interrupt for its completion.
+			 */
+			while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
+			    && (ahd_inb(ahd, SSTAT0) & SELDO) == 0
+			    && (ahd_inb(ahd, SSTAT1) & SELTO) == 0)
+				;
+			ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0);
+			ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
+					   SCB_GET_CHANNEL(ahd, scb),  
+					   SCB_GET_LUN(scb), SCB_GET_TAG(scb), 
+					   ROLE_INITIATOR, /*status*/0,   
+					   SEARCH_REMOVE);
+		}
+		break;
+	}
+	case TRACEPOINT0:
+	case TRACEPOINT1:
+	case TRACEPOINT2:
+	case TRACEPOINT3:
+		printf("%s: Tracepoint %d\n", ahd_name(ahd),
+		       seqintcode - TRACEPOINT0);
+		break;
+	case NO_SEQINT:
+		break;
+	case SAW_HWERR:
+		ahd_handle_hwerrint(ahd);
+		break;
+	default:
+		printf("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd),
+		       seqintcode);
+		break;
+	}
+	/*
+	 *  The sequencer is paused immediately on
+	 *  a SEQINT, so we should restart it when
+	 *  we're done.
+	 */
+	ahd_unpause(ahd);
+}
+
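+/*
+ * Service a SCSI interrupt.  Collect the relevant status from
+ * SSTAT0/1/3 and the LQI/LQO engines and dispatch to the
+ * appropriate handler for transceiver changes, resets, parity
+ * or CRC errors, selection timeouts, and busfree conditions.
+ */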
+void
+ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
+{
+	struct scb	*scb;
+	u_int		 status0;
+	u_int		 status3;
+	u_int		 status;
+	u_int		 lqistat1;
+	u_int		 lqostat0;
+	u_int		 scbid;
+	u_int		 busfreetime;
+
+	ahd_update_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+	status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR);
+	status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO);
+	status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
+	lqistat1 = ahd_inb(ahd, LQISTAT1);
+	lqostat0 = ahd_inb(ahd, LQOSTAT0);
+	busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
+	if ((status0 & (SELDI|SELDO)) != 0) {
+		u_int simode0;
+
+		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+		simode0 = ahd_inb(ahd, SIMODE0);
+		status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO);
+		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	}
+	scbid = ahd_get_scbptr(ahd);
+	scb = ahd_lookup_scb(ahd, scbid);
+	if (scb != NULL
+	 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
+		scb = NULL;
+
+	/* Make sure the sequencer is in a safe location. */
+	ahd_clear_critical_section(ahd);
+
+	if ((status0 & IOERR) != 0) {
+		u_int now_lvd;
+
+		now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40;
+		printf("%s: Transceiver State Has Changed to %s mode\n",
+		       ahd_name(ahd), now_lvd ? "LVD" : "SE");
+		ahd_outb(ahd, CLRSINT0, CLRIOERR);
+		/*
+		 * A change in I/O mode is equivalent to a bus reset.
+		 */
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+		ahd_pause(ahd);
+		ahd_setup_iocell_workaround(ahd);
+		ahd_unpause(ahd);
+	} else if ((status0 & OVERRUN) != 0) {
+		printf("%s: SCSI offset overrun detected.  Resetting bus.\n",
+		       ahd_name(ahd));
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+	} else if ((status & SCSIRSTI) != 0) {
+		printf("%s: Someone reset channel A\n", ahd_name(ahd));
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE);
+	} else if ((status & SCSIPERR) != 0) {
+		ahd_handle_transmission_error(ahd);
+	} else if (lqostat0 != 0) {
+		printf("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0);
+		ahd_outb(ahd, CLRLQOINT0, lqostat0);
+		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
+			ahd_outb(ahd, CLRLQOINT1, 0);
+		}
+	} else if ((status & SELTO) != 0) {
+		u_int  scbid;
+
+		/* Stop the selection */
+		ahd_outb(ahd, SCSISEQ0, 0);
+
+		/* No more pending messages */
+		ahd_clear_msg_state(ahd);
+
+		/* Clear interrupt state */
+		ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
+
+		/*
+		 * Although the driver does not care about the
+		 * 'Selection in Progress' status bit, the busy
+		 * LED does.  SELINGO is only cleared by a successful
+		 * selection, so we must manually clear it to ensure
+		 * the LED turns off just in case no future successful
+		 * selections occur (e.g. no devices on the bus).
+		 */
+		ahd_outb(ahd, CLRSINT0, CLRSELINGO);
+
+		scbid = ahd_inw(ahd, WAITING_TID_HEAD);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb == NULL) {
+			printf("%s: ahd_intr - referenced scb not "
+			       "valid during SELTO scb(0x%x)\n",
+			       ahd_name(ahd), scbid);
+			ahd_dump_card_state(ahd);
+		} else {
+			struct ahd_devinfo devinfo;
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_SELTO) != 0) {
+				ahd_print_path(ahd, scb);
+				printf("Saw Selection Timeout for SCB 0x%x\n",
+				       scbid);
+			}
+#endif
+			/*
+			 * Force a renegotiation with this target just in
+			 * case the cable was pulled and will later be
+			 * re-attached.  The target may forget its negotiation
+			 * settings with us should it attempt to reselect
+			 * during the interruption.  The target will not issue
+			 * a unit attention in this case, so we must always
+			 * renegotiate.
+			 */
+			ahd_scb_devinfo(ahd, &devinfo, scb);
+			ahd_force_renegotiation(ahd, &devinfo);
+			ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT);
+			ahd_freeze_devq(ahd, scb);
+		}
+		ahd_outb(ahd, CLRINT, CLRSCSIINT);
+		ahd_iocell_first_selection(ahd);
+		ahd_unpause(ahd);
+	} else if ((status0 & (SELDI|SELDO)) != 0) {
+		ahd_iocell_first_selection(ahd);
+		ahd_unpause(ahd);
+	} else if (status3 != 0) {
+		printf("%s: SCSI Cell parity error SSTAT3 == 0x%x\n",
+		       ahd_name(ahd), status3);
+		ahd_outb(ahd, CLRSINT3, status3);
+	} else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) {
+		ahd_handle_lqiphase_error(ahd, lqistat1);
+	} else if ((lqistat1 & LQICRCI_NLQ) != 0) {
+		/*
+		 * This status can be delayed during some
+		 * streaming operations.  The SCSIPHASE
+		 * handler has already dealt with this case
+		 * so just clear the error.
+		 */
+		ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ);
+	} else if ((status & BUSFREE) != 0) {
+		u_int lqostat1;
+		int   restart;
+		int   clear_fifo;
+		int   packetized;
+		u_int mode;
+
+		/*
+		 * Clear our selection hardware as soon as possible.
+		 * We may have an entry in the waiting Q for this target
+		 * that is affected by this busfree, and we don't want to
+		 * go about selecting the target while we handle the event.
+		 */
+		ahd_outb(ahd, SCSISEQ0, 0);
+
+		/*
+		 * Determine what we were up to at the time of
+		 * the busfree.
+		 */
+		mode = AHD_MODE_SCSI;
+		busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
+		lqostat1 = ahd_inb(ahd, LQOSTAT1);
+		switch (busfreetime) {
+		case BUSFREE_DFF0:
+		case BUSFREE_DFF1:
+		{
+			u_int	scbid;
+			struct	scb *scb;
+
+			mode = busfreetime == BUSFREE_DFF0
+			     ? AHD_MODE_DFF0 : AHD_MODE_DFF1;
+			ahd_set_modes(ahd, mode, mode);
+			scbid = ahd_get_scbptr(ahd);
+			scb = ahd_lookup_scb(ahd, scbid);
+			if (scb == NULL) {
+				printf("%s: Invalid SCB %d in DFF%d "
+				       "during unexpected busfree\n",
+				       ahd_name(ahd), scbid, mode);
+				packetized = 0;
+			} else
+				packetized = (scb->flags & SCB_PACKETIZED) != 0;
+			clear_fifo = 1;
+			break;
+		}
+		case BUSFREE_LQO:
+			clear_fifo = 0;
+			packetized = 1;
+			break;
+		default:
+			clear_fifo = 0;
+			packetized =  (lqostat1 & LQOBUSFREE) != 0;
+			if (!packetized
+			 && ahd_inb(ahd, LASTPHASE) == P_BUSFREE)
+				packetized = 1;
+			break;
+		}
+
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_MISC) != 0)
+			printf("Saw Busfree.  Busfreetime = 0x%x.\n",
+			       busfreetime);
+#endif
+		/*
+		 * Busfrees that occur in non-packetized phases are
+		 * handled by the nonpkt_busfree handler.
+		 */
+		if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) {
+			restart = ahd_handle_pkt_busfree(ahd, busfreetime);
+		} else {
+			packetized = 0;
+			restart = ahd_handle_nonpkt_busfree(ahd);
+		}
+		/*
+		 * Clear the busfree interrupt status.  The setting of
+		 * the interrupt is a pulse, so in a perfect world, we
+		 * would not need to muck with the ENBUSFREE logic.  This
+		 * would ensure that if the bus moves on to another
+		 * connection, busfree protection is still in force.  If
+		 * BUSFREEREV is broken, however, we must manually clear
+		 * the ENBUSFREE if the busfree occurred during a non-pack
+		 * connection so that we don't get false positives during
+		 * future, packetized, connections.
+		 */
+		ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
+		if (packetized == 0
+		 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0)
+			ahd_outb(ahd, SIMODE1,
+				 ahd_inb(ahd, SIMODE1) & ~ENBUSFREE);
+
+		if (clear_fifo)
+			ahd_clear_fifo(ahd, mode);
+
+		ahd_clear_msg_state(ahd);
+		ahd_outb(ahd, CLRINT, CLRSCSIINT);
+		if (restart) {
+			ahd_restart(ahd);
+		} else {
+			ahd_unpause(ahd);
+		}
+	} else {
+		printf("%s: Missing case in ahd_handle_scsiint. status = %x\n",
+		       ahd_name(ahd), status);
+		ahd_dump_card_state(ahd);
+		ahd_clear_intstat(ahd);
+		ahd_unpause(ahd);
+	}
+}
+
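+/*
+ * Handle a parity or CRC error reported during an information
+ * transfer.  Identify the affected SCB where possible, log the
+ * error state, and either retry via LQIRETRY, reset the bus,
+ * or queue the appropriate error message for the target.
+ */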
+static void
+ahd_handle_transmission_error(struct ahd_softc *ahd)
+{
+	struct	scb *scb;
+	u_int	scbid;
+	u_int	lqistat1;
+	u_int	lqistat2;
+	u_int	msg_out;
+	u_int	curphase;
+	u_int	lastphase;
+	u_int	perrdiag;
+	u_int	cur_col;
+	int	silent;
+
+	scb = NULL;
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ);
+	lqistat2 = ahd_inb(ahd, LQISTAT2);
+	if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0
+	 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) {
+		u_int lqistate;
+
+		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+		lqistate = ahd_inb(ahd, LQISTATE);
+		if ((lqistate >= 0x1E && lqistate <= 0x24)
+		 || (lqistate == 0x29)) {
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
+				printf("%s: NLQCRC found via LQISTATE\n",
+				       ahd_name(ahd));
+			}
+#endif
+			lqistat1 |= LQICRCI_NLQ;
+		}
+		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	}
+
+	ahd_outb(ahd, CLRLQIINT1, lqistat1);
+	lastphase = ahd_inb(ahd, LASTPHASE);
+	curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
+	perrdiag = ahd_inb(ahd, PERRDIAG);
+	msg_out = MSG_INITIATOR_DET_ERR;
+	ahd_outb(ahd, CLRSINT1, CLRSCSIPERR);
+	
+	/*
+	 * Try to find the SCB associated with this error.
+	 */
+	silent = FALSE;
+	if (lqistat1 == 0
+	 || (lqistat1 & LQICRCI_NLQ) != 0) {
+	 	if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
+			ahd_set_active_fifo(ahd);
+		scbid = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb != NULL && SCB_IS_SILENT(scb))
+			silent = TRUE;
+	}
+
+	cur_col = 0;
+	if (silent == FALSE) {
+		printf("%s: Transmission error detected\n", ahd_name(ahd));
+		ahd_lqistat1_print(lqistat1, &cur_col, 50);
+		ahd_lastphase_print(lastphase, &cur_col, 50);
+		ahd_scsisigi_print(curphase, &cur_col, 50);
+		ahd_perrdiag_print(perrdiag, &cur_col, 50);
+		printf("\n");
+		ahd_dump_card_state(ahd);
+	}
+
+	if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) {
+		if (silent == FALSE) {
+			printf("%s: Gross protocol error during incoming "
+			       "packet.  lqistat1 == 0x%x.  Resetting bus.\n",
+			       ahd_name(ahd), lqistat1);
+		}
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+		return;
+	} else if ((lqistat1 & LQICRCI_LQ) != 0) {
+		/*
+		 * A CRC error has been detected on an incoming LQ.
+		 * The bus is currently hung on the last ACK.
+		 * Hit LQIRETRY to release the last ack, and
+		 * wait for the sequencer to determine that ATNO
+		 * is asserted while in message out to take us
+		 * to our host message loop.  No NONPACKREQ or
+		 * LQIPHASE type errors will occur in this
+		 * scenario.  After this first LQIRETRY, the LQI
+		 * manager will be in ISELO where it will
+		 * happily sit until another packet phase begins.
+		 * Unexpected bus free detection is enabled
+		 * through any phases that occur after we release
+		 * this last ack until the LQI manager sees a
+		 * packet phase.  This implies we may have to
+		 * ignore a perfectly valid "unexpected busfree"
+		 * after our "initiator detected error" message is
+		 * sent.  A busfree is the expected response after
+		 * we tell the target that its L_Q was corrupted.
+		 * (SPI4R09 10.7.3.3.3)
+		 */
+		ahd_outb(ahd, LQCTL2, LQIRETRY);
+		printf("LQIRetry for LQICRCI_LQ to release ACK\n");
+	} else if ((lqistat1 & LQICRCI_NLQ) != 0) {
+		/*
+		 * We detected a CRC error in a NON-LQ packet.
+		 * The hardware has varying behavior in this situation
+		 * depending on whether this packet was part of a
+		 * stream or not.
+		 *
+		 * PKT by PKT mode:
+		 * The hardware has already acked the complete packet.
+		 * If the target honors our outstanding ATN condition,
+		 * we should be (or soon will be) in MSGOUT phase.
+		 * This will trigger the LQIPHASE_LQ status bit as the
+		 * hardware was expecting another LQ.  Unexpected
+		 * busfree detection is enabled.  Once LQIPHASE_LQ is
+		 * true (first entry into host message loop is much
+		 * the same), we must clear LQIPHASE_LQ and hit
+		 * LQIRETRY so the hardware is ready to handle
+		 * a future LQ.  NONPACKREQ will not be asserted again
+		 * once we hit LQIRETRY until another packet is
+		 * processed.  The target may either go busfree
+		 * or start another packet in response to our message.
+		 *
+		 * Read Streaming P0 asserted:
+		 * If we raise ATN and the target completes the entire
+		 * stream (P0 asserted during the last packet), the
+		 * hardware will ack all data and return to the ISTART
+		 * state.  When the target responds to our ATN condition,
+		 * LQIPHASE_LQ will be asserted.  We should respond to
+		 * this with an LQIRETRY to prepare for any future
+		 * packets.  NONPACKREQ will not be asserted again
+		 * once we hit LQIRETRY until another packet is
+		 * processed.  The target may either go busfree or
+		 * start another packet in response to our message.
+		 * Busfree detection is enabled.
+		 *
+		 * Read Streaming P0 not asserted:
+		 * If we raise ATN and the target transitions to
+		 * MSGOUT in or after a packet where P0 is not
+		 * asserted, the hardware will assert LQIPHASE_NLQ.
+		 * We should respond to the LQIPHASE_NLQ with an
+		 * LQIRETRY.  Should the target stay in a non-pkt
+		 * phase after we send our message, the hardware
+		 * will assert LQIPHASE_LQ.  Recovery is then just as
+		 * listed above for the read streaming with P0 asserted.
+		 * Busfree detection is enabled.
+		 */
+		if (silent == FALSE)
+			printf("LQICRC_NLQ\n");
+		if (scb == NULL) {
+			printf("%s: No SCB valid for LQICRC_NLQ.  "
+			       "Resetting bus\n", ahd_name(ahd));
+			ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+			return;
+		}
+	} else if ((lqistat1 & LQIBADLQI) != 0) {
+		printf("Need to handle BADLQI!\n");
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+		return;
+	} else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) {
+		if ((curphase & ~P_DATAIN_DT) != 0) {
+			/* Ack the byte.  So we can continue. */
+			if (silent == FALSE)
+				printf("Acking %s to clear perror\n",
+				    ahd_lookup_phase_entry(curphase)->phasemsg);
+			ahd_inb(ahd, SCSIDAT);
+		}
+	
+		if (curphase == P_MESGIN)
+			msg_out = MSG_PARITY_ERROR;
+	}
+
+	/*
+	 * We've set the hardware to assert ATN if we 
+	 * get a parity error on "in" phases, so all we
+	 * need to do is stuff the message buffer with
+	 * the appropriate message.  "In" phases have set
+	 * mesg_out to something other than MSG_NOP.
+	 */
+	ahd->send_msg_perror = msg_out;
+	if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR)
+		scb->flags |= SCB_TRANSMISSION_ERROR;
+	ahd_outb(ahd, MSG_OUT, HOST_MSG);
+	ahd_outb(ahd, CLRINT, CLRSCSIINT);
+	ahd_unpause(ahd);
+}
+
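+/*
+ * Recover from an LQI manager phase error (LQIPHASE_LQ or
+ * LQIPHASE_NLQ).  If the phase change was in response to our ATN
+ * on a packet boundary, retry via LQIRETRY; otherwise reset the
+ * bus.
+ */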
+static void
+ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
+{
+	/*
+	 * Clear the sources of the interrupts.
+	 */
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	ahd_outb(ahd, CLRLQIINT1, lqistat1);
+
+	/*
+	 * If the "illegal" phase changes were in response
+	 * to our ATN to flag a CRC error, AND we ended up
+	 * on packet boundaries, clear the error, restart the
+	 * LQI manager as appropriate, and go on our merry
+	 * way toward sending the message.  Otherwise, reset
+	 * the bus to clear the error.
+	 */
+	ahd_set_active_fifo(ahd);
+	if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0
+	 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) {
+		if ((lqistat1 & LQIPHASE_LQ) != 0) {
+			printf("LQIRETRY for LQIPHASE_LQ\n");
+			ahd_outb(ahd, LQCTL2, LQIRETRY);
+		} else if ((lqistat1 & LQIPHASE_NLQ) != 0) {
+			printf("LQIRETRY for LQIPHASE_NLQ\n");
+			ahd_outb(ahd, LQCTL2, LQIRETRY);
+		} else
+			panic("ahd_handle_lqiphase_error: No phase errors\n");
+		ahd_dump_card_state(ahd);
+		ahd_outb(ahd, CLRINT, CLRSCSIINT);
+		ahd_unpause(ahd);
+	} else {
+		printf("Reseting Channel for LQI Phase error\n");
+		ahd_dump_card_state(ahd);
+		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
+	}
+}
+
+/*
+ * Packetized unexpected or expected busfree.
+ * Entered in mode based on busfreetime.
+ */
+static int
+ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
+{
+	u_int lqostat1;
+
+	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+	lqostat1 = ahd_inb(ahd, LQOSTAT1);
+	if ((lqostat1 & LQOBUSFREE) != 0) {
+		struct scb *scb;
+		u_int scbid;
+		u_int saved_scbptr;
+		u_int waiting_h;
+		u_int waiting_t;
+		u_int next;
+
+		if ((busfreetime & BUSFREE_LQO) == 0)
+			printf("%s: Warning, BUSFREE time is 0x%x.  "
+			       "Expected BUSFREE_LQO.\n",
+			       ahd_name(ahd), busfreetime);
+		/*
+		 * The LQO manager detected an unexpected busfree
+		 * either:
+		 *
+		 * 1) During an outgoing LQ.
+		 * 2) After an outgoing LQ but before the first
+		 *    REQ of the command packet.
+		 * 3) During an outgoing command packet.
+		 *
+		 * In all cases, CURRSCB is pointing to the
+		 * SCB that encountered the failure.  Clean
+		 * up the queue, clear SELDO and LQOBUSFREE,
+		 * and allow the sequencer to restart the select
+		 * out at its leisure.
+		 */
+		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+		scbid = ahd_inw(ahd, CURRSCB);
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb == NULL)
+		       panic("SCB not valid during LQOBUSFREE");
+		/*
+		 * Clear the status.
+		 */
+		ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE);
+		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
+			ahd_outb(ahd, CLRLQOINT1, 0);
+		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
+		ahd_flush_device_writes(ahd);
+		ahd_outb(ahd, CLRSINT0, CLRSELDO);
+
+		/*
+		 * Return the LQO manager to its idle loop.  It will
+		 * not do this automatically if the busfree occurs
+		 * after the first REQ of either the LQ or command
+		 * packet or between the LQ and command packet.
+		 */
+		ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE);
+
+		/*
+		 * Update the waiting for selection queue so
+		 * we restart on the correct SCB.
+		 */
+		waiting_h = ahd_inw(ahd, WAITING_TID_HEAD);
+		saved_scbptr = ahd_get_scbptr(ahd);
+		if (waiting_h != scbid) {
+
+			ahd_outw(ahd, WAITING_TID_HEAD, scbid);
+			waiting_t = ahd_inw(ahd, WAITING_TID_TAIL);
+			if (waiting_t == waiting_h) {
+				ahd_outw(ahd, WAITING_TID_TAIL, scbid);
+				next = SCB_LIST_NULL;
+			} else {
+				ahd_set_scbptr(ahd, waiting_h);
+				next = ahd_inw_scbram(ahd, SCB_NEXT2);
+			}
+			ahd_set_scbptr(ahd, scbid);
+			ahd_outw(ahd, SCB_NEXT2, next);
+		}
+		ahd_set_scbptr(ahd, saved_scbptr);
+		if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) {
+			if (SCB_IS_SILENT(scb) == FALSE) {
+				ahd_print_path(ahd, scb);
+				printf("Probable outgoing LQ CRC error.  "
+				       "Retrying command\n");
+			}
+			scb->crc_retry_count++;
+		} else {
+			ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
+			ahd_freeze_scb(scb);
+			ahd_freeze_devq(ahd, scb);
+		}
+		/* Return unpausing the sequencer. */
+		return (0);
+	} else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) {
+		/*
+		 * Ignore what are really parity errors that
+		 * occur on the last REQ of a free running
+		 * clock prior to going busfree.  Some drives
+		 * do not properly active negate just before
+		 * going busfree, resulting in a parity glitch.
+		 */
+		ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE);
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0)
+			printf("%s: Parity on last REQ detected "
+			       "during busfree phase.\n",
+			       ahd_name(ahd));
+#endif
+		/* Return unpausing the sequencer. */
+		return (0);
+	}
+	if (ahd->src_mode != AHD_MODE_SCSI) {
+		u_int	scbid;
+		struct	scb *scb;
+
+		scbid = ahd_get_scbptr(ahd);
+		scb = ahd_lookup_scb(ahd, scbid);
+		ahd_print_path(ahd, scb);
+		printf("Unexpected PKT busfree condition\n");
+		ahd_dump_card_state(ahd);
+		ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A',
+			       SCB_GET_LUN(scb), SCB_GET_TAG(scb),
+			       ROLE_INITIATOR, CAM_UNEXP_BUSFREE);
+
+		/* Return restarting the sequencer. */
+		return (1);
+	}
+	printf("%s: Unexpected PKT busfree condition\n", ahd_name(ahd));
+	ahd_dump_card_state(ahd);
+	/* Restart the sequencer. */
+	return (1);
+}
+
+/*
+ * Non-packetized unexpected or expected busfree.
+ */
+static int
+ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
+{
+	struct	ahd_devinfo devinfo;
+	struct	scb *scb;
+	u_int	lastphase;
+	u_int	saved_scsiid;
+	u_int	saved_lun;
+	u_int	target;
+	u_int	initiator_role_id;
+	u_int	scbid;
+	u_int	ppr_busfree;
+	int	printerror;
+
+	/*
+	 * Look at what phase we were last in.  If its message out,
+	 * chances are pretty good that the busfree was in response
+	 * to one of our abort requests.
+	 */
+	lastphase = ahd_inb(ahd, LASTPHASE);
+	saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
+	saved_lun = ahd_inb(ahd, SAVED_LUN);
+	target = SCSIID_TARGET(ahd, saved_scsiid);
+	initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
+	ahd_compile_devinfo(&devinfo, initiator_role_id,
+			    target, saved_lun, 'A', ROLE_INITIATOR);
+	printerror = 1;
+
+	scbid = ahd_get_scbptr(ahd);
+	scb = ahd_lookup_scb(ahd, scbid);
+	if (scb != NULL
+	 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
+		scb = NULL;
+
+	ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0;
+	if (lastphase == P_MESGOUT) {
+		u_int tag;
+
+		tag = SCB_LIST_NULL;
+		if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE)
+		 || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) {
+			int found;
+			int sent_msg;
+
+			if (scb == NULL) {
+				ahd_print_devinfo(ahd, &devinfo);
+				printf("Abort for unidentified "
+				       "connection completed.\n");
+				/* restart the sequencer. */
+				return (1);
+			}
+			sent_msg = ahd->msgout_buf[ahd->msgout_index - 1];
+			ahd_print_path(ahd, scb);
+			printf("SCB %d - Abort%s Completed.\n",
+			       SCB_GET_TAG(scb),
+			       sent_msg == MSG_ABORT_TAG ? "" : " Tag");
+
+			if (sent_msg == MSG_ABORT_TAG)
+				tag = SCB_GET_TAG(scb);
+
+			if ((scb->flags & SCB_CMDPHASE_ABORT) != 0) {
+				/*
+				 * This abort is in response to an
+				 * unexpected switch to command phase
+				 * for a packetized connection.  Since
+				 * the identify message was never sent,
+				 * "saved lun" is 0.  We really want to
+				 * abort only the SCB that encountered
+				 * this error, which could have a different
+				 * lun.  The SCB will be retried so the OS
+				 * will see the UA after renegotiating to
+				 * packetized.
+				 */
+				tag = SCB_GET_TAG(scb);
+				saved_lun = scb->hscb->lun;
+			}
+			found = ahd_abort_scbs(ahd, target, 'A', saved_lun,
+					       tag, ROLE_INITIATOR,
+					       CAM_REQ_ABORTED);
+			printf("found == 0x%x\n", found);
+			printerror = 0;
+		} else if (ahd_sent_msg(ahd, AHDMSG_1B,
+					MSG_BUS_DEV_RESET, TRUE)) {
+#ifdef __FreeBSD__
+			/*
+			 * Don't mark the user's request for this BDR
+			 * as completing with CAM_BDR_SENT.  CAM3
+			 * specifies CAM_REQ_CMP.
+			 */
+			if (scb != NULL
+			 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
+			 && ahd_match_scb(ahd, scb, target, 'A',
+					  CAM_LUN_WILDCARD, SCB_LIST_NULL,
+					  ROLE_INITIATOR))
+				ahd_set_transaction_status(scb, CAM_REQ_CMP);
+#endif
+			ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD,
+					    CAM_BDR_SENT, "Bus Device Reset",
+					    /*verbose_level*/0);
+			printerror = 0;
+		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE)
+			&& ppr_busfree == 0) {
+			struct ahd_initiator_tinfo *tinfo;
+			struct ahd_tmode_tstate *tstate;
+
+			/*
+			 * PPR Rejected.  Try non-ppr negotiation
+			 * and retry command.
+			 */
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+				printf("PPR negotiation rejected busfree.\n");
+#endif
+			tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
+						    devinfo.our_scsiid,
+						    devinfo.target, &tstate);
+			tinfo->curr.transport_version = 2;
+			tinfo->goal.transport_version = 2;
+			tinfo->goal.ppr_options = 0;
+			ahd_qinfifo_requeue_tail(ahd, scb);
+			printerror = 0;
+		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
+			&& ppr_busfree == 0) {
+			/*
+			 * Negotiation Rejected.  Go-narrow and
+			 * retry command.
+			 */
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+				printf("WDTR negotiation rejected busfree.\n");
+#endif
+			ahd_set_width(ahd, &devinfo,
+				      MSG_EXT_WDTR_BUS_8_BIT,
+				      AHD_TRANS_CUR|AHD_TRANS_GOAL,
+				      /*paused*/TRUE);
+			ahd_qinfifo_requeue_tail(ahd, scb);
+			printerror = 0;
+		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
+			&& ppr_busfree == 0) {
+			/*
+			 * Negotiation Rejected.  Go-async and
+			 * retry command.
+			 */
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+				printf("SDTR negotiation rejected busfree.\n");
+#endif
+			ahd_set_syncrate(ahd, &devinfo,
+					/*period*/0, /*offset*/0,
+					/*ppr_options*/0,
+					AHD_TRANS_CUR|AHD_TRANS_GOAL,
+					/*paused*/TRUE);
+			ahd_qinfifo_requeue_tail(ahd, scb);
+			printerror = 0;
+		} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
+			&& ahd_sent_msg(ahd, AHDMSG_1B,
+					 MSG_INITIATOR_DET_ERR, TRUE)) {
+
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+				printf("Expected IDE Busfree\n");
+#endif
+			printerror = 0;
+		} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE)
+			&& ahd_sent_msg(ahd, AHDMSG_1B,
+					MSG_MESSAGE_REJECT, TRUE)) {
+
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+				printf("Expected QAS Reject Busfree\n");
+#endif
+			printerror = 0;
+		}
+	}
+
+	/*
+	 * The busfree required flag is honored at the end of
+	 * the message phases.  We check it last in case we
+	 * had to send some other message that caused a busfree.
+	 */
+	if (printerror != 0
+	 && (lastphase == P_MESGIN || lastphase == P_MESGOUT)
+	 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {
+
+		ahd_freeze_devq(ahd, scb);
+		ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
+		ahd_freeze_scb(scb);
+		if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) {
+			ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
+				       SCB_GET_CHANNEL(ahd, scb),
+				       SCB_GET_LUN(scb), SCB_LIST_NULL,
+				       ROLE_INITIATOR, CAM_REQ_ABORTED);
+		} else {
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+				printf("PPR Negotiation Busfree.\n");
+#endif
+			ahd_done(ahd, scb);
+		}
+		printerror = 0;
+	}
+	if (printerror != 0) {
+		int aborted;
+
+		aborted = 0;
+		if (scb != NULL) {
+			u_int tag;
+
+			if ((scb->hscb->control & TAG_ENB) != 0)
+				tag = SCB_GET_TAG(scb);
+			else
+				tag = SCB_LIST_NULL;
+			ahd_print_path(ahd, scb);
+			aborted = ahd_abort_scbs(ahd, target, 'A',
+				       SCB_GET_LUN(scb), tag,
+				       ROLE_INITIATOR,
+				       CAM_UNEXP_BUSFREE);
+		} else {
+			/*
+			 * We had not fully identified this connection,
+			 * so we cannot abort anything.
+			 */
+			printf("%s: ", ahd_name(ahd));
+		}
+		if (lastphase != P_BUSFREE)
+			ahd_force_renegotiation(ahd, &devinfo);
+		printf("Unexpected busfree %s, %d SCBs aborted, "
+		       "PRGMCNT == 0x%x\n",
+		       ahd_lookup_phase_entry(lastphase)->phasemsg,
+		       aborted,
+		       ahd_inb(ahd, PRGMCNT)
+			| (ahd_inb(ahd, PRGMCNT+1) << 8));
+		ahd_dump_card_state(ahd);
+	}
+	/* Always restart the sequencer. */
+	return (1);
+}
+
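+/*
+ * Recover from a protocol violation reported by the sequencer.  If the
+ * connection has progressed far enough for us to message the target,
+ * attempt to abort the offending command; otherwise the only safe
+ * recovery is a bus reset.
+ */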
+static void
+ahd_handle_proto_violation(struct ahd_softc *ahd)
+{
+	struct	ahd_devinfo devinfo;
+	struct	scb *scb;
+	u_int	scbid;
+	u_int	seq_flags;
+	u_int	curphase;
+	u_int	lastphase;
+	int	found;
+
+	ahd_fetch_devinfo(ahd, &devinfo);
+	scbid = ahd_get_scbptr(ahd);
+	scb = ahd_lookup_scb(ahd, scbid);
+	seq_flags = ahd_inb(ahd, SEQ_FLAGS);
+	curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
+	lastphase = ahd_inb(ahd, LASTPHASE);
+	if ((seq_flags & NOT_IDENTIFIED) != 0) {
+
+		/*
+		 * The reconnecting target either did not send an
+		 * identify message, or did, but we didn't find an SCB
+		 * to match.
+		 */
+		ahd_print_devinfo(ahd, &devinfo);
+		printf("Target did not send an IDENTIFY message. "
+		       "LASTPHASE = 0x%x.\n", lastphase);
+		scb = NULL;
+	} else if (scb == NULL) {
+		/*
+		 * We don't seem to have an SCB active for this
+		 * transaction.  Print an error and reset the bus.
+		 */
+		ahd_print_devinfo(ahd, &devinfo);
+		printf("No SCB found during protocol violation\n");
+		goto proto_violation_reset;
+	} else {
+		ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
+		if ((seq_flags & NO_CDB_SENT) != 0) {
+			ahd_print_path(ahd, scb);
+			printf("No or incomplete CDB sent to device.\n");
+		} else if ((ahd_inb_scbram(ahd, SCB_CONTROL)
+			  & STATUS_RCVD) == 0) {
+			/*
+			 * The target never bothered to provide status to
+			 * us prior to completing the command.  Since we don't
+			 * know the disposition of this command, we must attempt
+			 * to abort it.  Assert ATN and prepare to send an abort
+			 * message.
+			 */
+			ahd_print_path(ahd, scb);
+			printf("Completed command without status.\n");
+		} else {
+			ahd_print_path(ahd, scb);
+			printf("Unknown protocol violation.\n");
+			ahd_dump_card_state(ahd);
+		}
+	}
+	if ((lastphase & ~P_DATAIN_DT) == 0
+	 || lastphase == P_COMMAND) {
+proto_violation_reset:
+		/*
+		 * Target either went directly to data
+		 * phase or didn't respond to our ATN.
+		 * The only safe thing to do is to blow
+		 * it away with a bus reset.
+		 */
+		found = ahd_reset_channel(ahd, 'A', TRUE);
+		printf("%s: Issued Channel %c Bus Reset. "
+		       "%d SCBs aborted\n", ahd_name(ahd), 'A', found);
+	} else {
+		/*
+		 * Leave the selection hardware off in case
+		 * this abort attempt will affect yet to
+		 * be sent commands.
+		 */
+		ahd_outb(ahd, SCSISEQ0,
+			 ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
+		ahd_assert_atn(ahd);
+		ahd_outb(ahd, MSG_OUT, HOST_MSG);
+		if (scb == NULL) {
+			ahd_print_devinfo(ahd, &devinfo);
+			ahd->msgout_buf[0] = MSG_ABORT_TASK;
+			ahd->msgout_len = 1;
+			ahd->msgout_index = 0;
+			ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+		} else {
+			ahd_print_path(ahd, scb);
+			scb->flags |= SCB_ABORT;
+		}
+		printf("Protocol violation %s.  Attempting to abort.\n",
+		       ahd_lookup_phase_entry(curphase)->phasemsg);
+	}
+}
+
+/*
+ * Force renegotiation to occur the next time we initiate
+ * a command to the current device.
+ */
+static void
+ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+	struct	ahd_initiator_tinfo *targ_info;
+	struct	ahd_tmode_tstate *tstate;
+
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+		ahd_print_devinfo(ahd, devinfo);
+		printf("Forcing renegotiation\n");
+	}
+#endif
+	targ_info = ahd_fetch_transinfo(ahd,
+					devinfo->channel,
+					devinfo->our_scsiid,
+					devinfo->target,
+					&tstate);
+	ahd_update_neg_request(ahd, devinfo, tstate,
+			       targ_info, AHD_NEG_IF_NON_ASYNC);
+}
+
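+/*
+ * Single step the sequencer until its program counter lies outside of
+ * all firmware critical sections.  Interrupt sources are masked while
+ * stepping (busfree detection excepted) and restored once stepping is
+ * complete.
+ */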
+#define AHD_MAX_STEPS 2000
+void
+ahd_clear_critical_section(struct ahd_softc *ahd)
+{
+	ahd_mode_state	saved_modes;
+	int		stepping;
+	int		steps;
+	int		first_instr;
+	u_int		simode0;
+	u_int		simode1;
+	u_int		simode3;
+	u_int		lqimode0;
+	u_int		lqimode1;
+	u_int		lqomode0;
+	u_int		lqomode1;
+
+	if (ahd->num_critical_sections == 0)
+		return;
+
+	stepping = FALSE;
+	steps = 0;
+	first_instr = 0;
+	simode0 = 0;
+	simode1 = 0;
+	simode3 = 0;
+	lqimode0 = 0;
+	lqimode1 = 0;
+	lqomode0 = 0;
+	lqomode1 = 0;
+	saved_modes = ahd_save_modes(ahd);
+	for (;;) {
+		struct	cs *cs;
+		u_int	seqaddr;
+		u_int	i;
+
+		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+		seqaddr = ahd_inb(ahd, CURADDR)
+			| (ahd_inb(ahd, CURADDR+1) << 8);
+
+		cs = ahd->critical_sections;
+		for (i = 0; i < ahd->num_critical_sections; i++, cs++) {
+			
+			if (cs->begin < seqaddr && cs->end >= seqaddr)
+				break;
+		}
+
+		if (i == ahd->num_critical_sections)
+			break;
+
+		if (steps > AHD_MAX_STEPS) {
+			printf("%s: Infinite loop in critical section\n"
+			       "%s: First Instruction 0x%x now 0x%x\n",
+			       ahd_name(ahd), ahd_name(ahd), first_instr,
+			       seqaddr);
+			ahd_dump_card_state(ahd);
+			panic("critical section loop");
+		}
+
+		steps++;
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_MISC) != 0)
+			printf("%s: Single stepping at 0x%x\n", ahd_name(ahd),
+			       seqaddr);
+#endif
+		if (stepping == FALSE) {
+
+			first_instr = seqaddr;
+  			ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+  			simode0 = ahd_inb(ahd, SIMODE0);
+			simode3 = ahd_inb(ahd, SIMODE3);
+			lqimode0 = ahd_inb(ahd, LQIMODE0);
+			lqimode1 = ahd_inb(ahd, LQIMODE1);
+			lqomode0 = ahd_inb(ahd, LQOMODE0);
+			lqomode1 = ahd_inb(ahd, LQOMODE1);
+			ahd_outb(ahd, SIMODE0, 0);
+			ahd_outb(ahd, SIMODE3, 0);
+			ahd_outb(ahd, LQIMODE0, 0);
+			ahd_outb(ahd, LQIMODE1, 0);
+			ahd_outb(ahd, LQOMODE0, 0);
+			ahd_outb(ahd, LQOMODE1, 0);
+			ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+			simode1 = ahd_inb(ahd, SIMODE1);
+			/*
+			 * We don't clear ENBUSFREE.  Unfortunately
+			 * we cannot re-enable busfree detection within
+			 * the current connection, so we must leave it
+			 * on while single stepping.
+			 */
+			ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE);
+			ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP);
+			stepping = TRUE;
+		}
+		ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
+		ahd_outb(ahd, CLRINT, CLRSCSIINT);
+		ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
+		ahd_outb(ahd, HCNTRL, ahd->unpause);
+		while (!ahd_is_paused(ahd))
+			ahd_delay(200);
+		ahd_update_modes(ahd);
+	}
+	if (stepping) {
+		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+		ahd_outb(ahd, SIMODE0, simode0);
+		ahd_outb(ahd, SIMODE3, simode3);
+		ahd_outb(ahd, LQIMODE0, lqimode0);
+		ahd_outb(ahd, LQIMODE1, lqimode1);
+		ahd_outb(ahd, LQOMODE0, lqomode0);
+		ahd_outb(ahd, LQOMODE1, lqomode1);
+		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+		ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP);
+  		ahd_outb(ahd, SIMODE1, simode1);
+		/*
+		 * SCSIINT seems to glitch occasionally when
+		 * the interrupt masks are restored.  Clear SCSIINT
+		 * one more time so that only persistent errors
+		 * are seen as a real interrupt.
+		 */
+		ahd_outb(ahd, CLRINT, CLRSCSIINT);
+	}
+	ahd_restore_modes(ahd, saved_modes);
+}
+
+/*
+ * Clear any pending interrupt status.
+ */
+void
+ahd_clear_intstat(struct ahd_softc *ahd)
+{
+	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+	/* Clear any interrupt conditions this may have caused */
+	ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2
+				 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD);
+	ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT
+				 |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI
+				 |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ);
+	ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ
+				 |CLRLQOATNPKT|CLRLQOTCRC);
+	ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS
+				 |CLRLQOBUSFREE|CLRLQOPHACHGINPKT);
+	if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
+		ahd_outb(ahd, CLRLQOINT0, 0);
+		ahd_outb(ahd, CLRLQOINT1, 0);
+	}
+	ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR);
+	ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
+				|CLRBUSFREE|CLRSCSIPERR|CLRREQINIT);
+	ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO
+			        |CLRIOERR|CLROVERRUN);
+	ahd_outb(ahd, CLRINT, CLRSCSIINT);
+}
+
+/**************************** Debugging Routines ******************************/
+#ifdef AHD_DEBUG
+uint32_t ahd_debug = AHD_DEBUG_OPTS;
+#endif
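+/*
+ * Print the hardware SCB contents, including the CDB and S/G list,
+ * for debugging.
+ */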
+void
+ahd_print_scb(struct scb *scb)
+{
+	struct hardware_scb *hscb;
+	int i;
+
+	hscb = scb->hscb;
+	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
+	       (void *)scb,
+	       hscb->control,
+	       hscb->scsiid,
+	       hscb->lun,
+	       hscb->cdb_len);
+	printf("Shared Data: ");
+	for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++)
+		printf("%#02x", hscb->shared_data.idata.cdb[i]);
+	printf("        dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n",
+	       (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF),
+	       (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF),
+	       ahd_le32toh(hscb->datacnt),
+	       ahd_le32toh(hscb->sgptr),
+	       SCB_GET_TAG(scb));
+	ahd_dump_sglist(scb);
+}
+
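+/*
+ * Print the S/G list for an SCB, handling both the 32bit and 64bit
+ * S/G element formats.
+ */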
+void
+ahd_dump_sglist(struct scb *scb)
+{
+	int i;
+
+	if (scb->sg_count > 0) {
+		if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) {
+			struct ahd_dma64_seg *sg_list;
+
+			sg_list = (struct ahd_dma64_seg*)scb->sg_list;
+			for (i = 0; i < scb->sg_count; i++) {
+				uint64_t addr;
+				uint32_t len;
+
+				addr = ahd_le64toh(sg_list[i].addr);
+				len = ahd_le32toh(sg_list[i].len);
+				printf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
+				       i,
+				       (uint32_t)((addr >> 32) & 0xFFFFFFFF),
+				       (uint32_t)(addr & 0xFFFFFFFF),
+				       sg_list[i].len & AHD_SG_LEN_MASK,
+				       (sg_list[i].len & AHD_DMA_LAST_SEG)
+				     ? " Last" : "");
+			}
+		} else {
+			struct ahd_dma_seg *sg_list;
+
+			sg_list = (struct ahd_dma_seg*)scb->sg_list;
+			for (i = 0; i < scb->sg_count; i++) {
+				uint32_t len;
+
+				len = ahd_le32toh(sg_list[i].len);
+				printf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
+				       i,
+				       (len & AHD_SG_HIGH_ADDR_MASK) >> 24,
+				       ahd_le32toh(sg_list[i].addr),
+				       len & AHD_SG_LEN_MASK,
+				       len & AHD_DMA_LAST_SEG ? " Last" : "");
+			}
+		}
+	}
+}
+
+/************************* Transfer Negotiation *******************************/
+/*
+ * Allocate per target mode instance (ID we respond to as a target)
+ * transfer negotiation data structures.
+ */
+static struct ahd_tmode_tstate *
+ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel)
+{
+	struct ahd_tmode_tstate *master_tstate;
+	struct ahd_tmode_tstate *tstate;
+	int i;
+
+	master_tstate = ahd->enabled_targets[ahd->our_id];
+	if (ahd->enabled_targets[scsi_id] != NULL
+	 && ahd->enabled_targets[scsi_id] != master_tstate)
+		panic("%s: ahd_alloc_tstate - Target already allocated",
+		      ahd_name(ahd));
+	tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT);
+	if (tstate == NULL)
+		return (NULL);
+
+	/*
+	 * If we have allocated a master tstate, copy user settings from
+	 * the master tstate (taken from SRAM or the EEPROM) for this
+	 * channel, but reset our current and goal settings to async/narrow
+	 * until an initiator talks to us.
+	 */
+	if (master_tstate != NULL) {
+		memcpy(tstate, master_tstate, sizeof(*tstate));
+		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
+		for (i = 0; i < 16; i++) {
+			memset(&tstate->transinfo[i].curr, 0,
+			      sizeof(tstate->transinfo[i].curr));
+			memset(&tstate->transinfo[i].goal, 0,
+			      sizeof(tstate->transinfo[i].goal));
+		}
+	} else
+		memset(tstate, 0, sizeof(*tstate));
+	ahd->enabled_targets[scsi_id] = tstate;
+	return (tstate);
+}
+
+#ifdef AHD_TARGET_MODE
+/*
+ * Free per target mode instance (ID we respond to as a target)
+ * transfer negotiation data structures.
+ */
+static void
+ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
+{
+	struct ahd_tmode_tstate *tstate;
+
+	/*
+	 * Don't clean up our "master" tstate.
+	 * It has our default user settings.
+	 */
+	if (scsi_id == ahd->our_id
+	 && force == FALSE)
+		return;
+
+	tstate = ahd->enabled_targets[scsi_id];
+	if (tstate != NULL)
+		free(tstate, M_DEVBUF);
+	ahd->enabled_targets[scsi_id] = NULL;
+}
+#endif
+
+/*
+ * Called when we have an active connection to a target on the bus,
+ * this function finds the nearest period to the input period limited
+ * by the capabilities of the bus connectivity and the sync settings for
+ * the target.
+ */
+void
+ahd_devlimited_syncrate(struct ahd_softc *ahd,
+			struct ahd_initiator_tinfo *tinfo,
+			u_int *period, u_int *ppr_options, role_t role)
+{
+	struct	ahd_transinfo *transinfo;
+	u_int	maxsync;
+
+	if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0
+	 && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) {
+		maxsync = AHD_SYNCRATE_PACED;
+	} else {
+		maxsync = AHD_SYNCRATE_ULTRA;
+		/* Can't do DT related options on an SE bus */
+		*ppr_options &= MSG_EXT_PPR_QAS_REQ;
+	}
+	/*
+	 * Never allow a value higher than our current goal
+	 * period otherwise we may allow a target initiated
+	 * negotiation to go above the limit as set by the
+	 * user.  In the case of an initiator initiated
+	 * sync negotiation, we limit based on the user
+	 * setting.  This allows the system to still accept
+	 * incoming negotiations even if target initiated
+	 * negotiation is not performed.
+	 */
+	if (role == ROLE_TARGET)
+		transinfo = &tinfo->user;
+	else 
+		transinfo = &tinfo->goal;
+	*ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN);
+	if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
+		maxsync = MAX(maxsync, AHD_SYNCRATE_ULTRA2);
+		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+	}
+	if (transinfo->period == 0) {
+		*period = 0;
+		*ppr_options = 0;
+	} else {
+		*period = MAX(*period, transinfo->period);
+		ahd_find_syncrate(ahd, period, ppr_options, maxsync);
+	}
+}
+
+/*
+ * Look up the valid period to SCSIRATE conversion in our table.
+ * Return the period and offset that should be sent to the target
+ * if this was the beginning of an SDTR.
+ */
+void
+ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
+		  u_int *ppr_options, u_int maxsync)
+{
+	if (*period < maxsync)
+		*period = maxsync;
+
+	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0
+	 && *period > AHD_SYNCRATE_MIN_DT)
+		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+		
+	if (*period > AHD_SYNCRATE_MIN)
+		*period = 0;
+
+	/* Honor PPR option conformance rules. */
+	if (*period > AHD_SYNCRATE_PACED)
+		*ppr_options &= ~MSG_EXT_PPR_RTI;
+
+	if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
+		*ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ);
+
+	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0)
+		*ppr_options &= MSG_EXT_PPR_QAS_REQ;
+
+	/* Skip all PACED only entries if IU is not available */
+	if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0
+	 && *period < AHD_SYNCRATE_DT)
+		*period = AHD_SYNCRATE_DT;
+
+	/* Skip all DT only entries if DT is not available */
+	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
+	 && *period < AHD_SYNCRATE_ULTRA2)
+		*period = AHD_SYNCRATE_ULTRA2;
+}
+
+/*
+ * Truncate the given synchronous offset to a value the
+ * current adapter type and syncrate are capable of.
+ */
+void
+ahd_validate_offset(struct ahd_softc *ahd,
+		    struct ahd_initiator_tinfo *tinfo,
+		    u_int period, u_int *offset, int wide,
+		    role_t role)
+{
+	u_int maxoffset;
+
+	/* Limit offset to what we can do */
+	if (period == 0)
+		maxoffset = 0;
+	else if (period <= AHD_SYNCRATE_PACED) {
+		if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0)
+			maxoffset = MAX_OFFSET_PACED_BUG;
+		else
+			maxoffset = MAX_OFFSET_PACED;
+	} else
+		maxoffset = MAX_OFFSET_NON_PACED;
+	*offset = MIN(*offset, maxoffset);
+	if (tinfo != NULL) {
+		if (role == ROLE_TARGET)
+			*offset = MIN(*offset, tinfo->user.offset);
+		else
+			*offset = MIN(*offset, tinfo->goal.offset);
+	}
+}
+
+/*
+ * Truncate the given transfer width parameter to a value the
+ * current adapter type is capable of.
+ */
+void
+ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo,
+		   u_int *bus_width, role_t role)
+{
+	switch (*bus_width) {
+	default:
+		if (ahd->features & AHD_WIDE) {
+			/* Respond Wide */
+			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
+			break;
+		}
+		/* FALLTHROUGH */
+	case MSG_EXT_WDTR_BUS_8_BIT:
+		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+		break;
+	}
+	if (tinfo != NULL) {
+		if (role == ROLE_TARGET)
+			*bus_width = MIN(tinfo->user.width, *bus_width);
+		else
+			*bus_width = MIN(tinfo->goal.width, *bus_width);
+	}
+}
+
+/*
+ * Update the bitmask of targets with which the controller should
+ * negotiate at the next convenient opportunity.  This currently
+ * means the next time we send the initial identify messages for
+ * a new transaction.
+ */
+int
+ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+		       struct ahd_tmode_tstate *tstate,
+		       struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type)
+{
+	u_int auto_negotiate_orig;
+
+	auto_negotiate_orig = tstate->auto_negotiate;
+	if (neg_type == AHD_NEG_ALWAYS) {
+		/*
+		 * Force our "current" settings to be
+		 * unknown so that unless a bus reset
+		 * occurs the need to renegotiate is
+		 * recorded persistently.
+		 */
+		if ((ahd->features & AHD_WIDE) != 0)
+			tinfo->curr.width = AHD_WIDTH_UNKNOWN;
+		tinfo->curr.period = AHD_PERIOD_UNKNOWN;
+		tinfo->curr.offset = AHD_OFFSET_UNKNOWN;
+	}
+	if (tinfo->curr.period != tinfo->goal.period
+	 || tinfo->curr.width != tinfo->goal.width
+	 || tinfo->curr.offset != tinfo->goal.offset
+	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
+	 || (neg_type == AHD_NEG_IF_NON_ASYNC
+	  && (tinfo->goal.offset != 0
+	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
+	   || tinfo->goal.ppr_options != 0)))
+		tstate->auto_negotiate |= devinfo->target_mask;
+	else
+		tstate->auto_negotiate &= ~devinfo->target_mask;
+
+	return (auto_negotiate_orig != tstate->auto_negotiate);
+}
+
+/*
+ * Update the user/goal/curr tables of synchronous negotiation
+ * parameters as well as, in the case of a current or active update,
+ * any data structures on the host controller.  In the case of an
+ * active update, the specified target is currently talking to us on
+ * the bus, so the transfer parameter update must take effect
+ * immediately.
+ */
+void
+ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+		 u_int period, u_int offset, u_int ppr_options,
+		 u_int type, int paused)
+{
+	struct	ahd_initiator_tinfo *tinfo;
+	struct	ahd_tmode_tstate *tstate;
+	u_int	old_period;
+	u_int	old_offset;
+	u_int	old_ppr;
+	int	active;
+	int	update_needed;
+
+	active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
+	update_needed = 0;
+
+	if (period == 0 || offset == 0) {
+		period = 0;
+		offset = 0;
+	}
+
+	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+
+	if ((type & AHD_TRANS_USER) != 0) {
+		tinfo->user.period = period;
+		tinfo->user.offset = offset;
+		tinfo->user.ppr_options = ppr_options;
+	}
+
+	if ((type & AHD_TRANS_GOAL) != 0) {
+		tinfo->goal.period = period;
+		tinfo->goal.offset = offset;
+		tinfo->goal.ppr_options = ppr_options;
+	}
+
+	old_period = tinfo->curr.period;
+	old_offset = tinfo->curr.offset;
+	old_ppr	   = tinfo->curr.ppr_options;
+
+	if ((type & AHD_TRANS_CUR) != 0
+	 && (old_period != period
+	  || old_offset != offset
+	  || old_ppr != ppr_options)) {
+
+		update_needed++;
+
+		tinfo->curr.period = period;
+		tinfo->curr.offset = offset;
+		tinfo->curr.ppr_options = ppr_options;
+
+		ahd_send_async(ahd, devinfo->channel, devinfo->target,
+			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
+		if (bootverbose) {
+			if (offset != 0) {
+				int options;
+
+				printf("%s: target %d synchronous with "
+				       "period = 0x%x, offset = 0x%x",
+				       ahd_name(ahd), devinfo->target,
+				       period, offset);
+				options = 0;
+				if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
+					printf("(RDSTRM");
+					options++;
+				}
+				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
+					printf("%s", options ? "|DT" : "(DT");
+					options++;
+				}
+				if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
+					printf("%s", options ? "|IU" : "(IU");
+					options++;
+				}
+				if ((ppr_options & MSG_EXT_PPR_RTI) != 0) {
+					printf("%s", options ? "|RTI" : "(RTI");
+					options++;
+				}
+				if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
+					printf("%s", options ? "|QAS" : "(QAS");
+					options++;
+				}
+				if (options != 0)
+					printf(")\n");
+				else
+					printf("\n");
+			} else {
+				printf("%s: target %d using "
+				       "asynchronous transfers%s\n",
+				       ahd_name(ahd), devinfo->target,
+				       (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0
+				     ?  "(QAS)" : "");
+			}
+		}
+	}
+	/*
+	 * Always refresh the neg-table to handle the case of the
+	 * sequencer setting the ENATNO bit for a MK_MESSAGE request.
+	 * We will always renegotiate in that case if this is a
+	 * packetized request.  Also manage the busfree expected flag
+	 * from this common routine so that we catch changes due to
+	 * WDTR or SDTR messages.
+	 */
+	if ((type & AHD_TRANS_CUR) != 0) {
+		if (!paused)
+			ahd_pause(ahd);
+		ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
+		if (!paused)
+			ahd_unpause(ahd);
+		if (ahd->msg_type != MSG_TYPE_NONE) {
+			if ((old_ppr & MSG_EXT_PPR_IU_REQ)
+			 != (ppr_options & MSG_EXT_PPR_IU_REQ)) {
+#ifdef AHD_DEBUG
+				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+					ahd_print_devinfo(ahd, devinfo);
+					printf("Expecting IU Change busfree\n");
+				}
+#endif
+				ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
+					       |  MSG_FLAG_IU_REQ_CHANGED;
+			}
+			if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) {
+#ifdef AHD_DEBUG
+				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+					printf("PPR with IU_REQ outstanding\n");
+#endif
+				ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE;
+			}
+		}
+	}
+
+	update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
+						tinfo, AHD_NEG_TO_GOAL);
+
+	if (update_needed && active)
+		ahd_update_pending_scbs(ahd);
+}
+
+/*
+ * Update the user/goal/curr tables of wide negotiation
+ * parameters as well as, in the case of a current or active update,
+ * any data structures on the host controller.  In the case of an
+ * active update, the specified target is currently talking to us on
+ * the bus, so the transfer parameter update must take effect
+ * immediately.
+ */
+void
+ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+	      u_int width, u_int type, int paused)
+{
+	struct	ahd_initiator_tinfo *tinfo;
+	struct	ahd_tmode_tstate *tstate;
+	u_int	oldwidth;
+	int	active;
+	int	update_needed;
+
+	active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
+	update_needed = 0;
+	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+
+	if ((type & AHD_TRANS_USER) != 0)
+		tinfo->user.width = width;
+
+	if ((type & AHD_TRANS_GOAL) != 0)
+		tinfo->goal.width = width;
+
+	oldwidth = tinfo->curr.width;
+	if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) {
+
+		update_needed++;
+
+		tinfo->curr.width = width;
+		ahd_send_async(ahd, devinfo->channel, devinfo->target,
+			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
+		if (bootverbose) {
+			printf("%s: target %d using %dbit transfers\n",
+			       ahd_name(ahd), devinfo->target,
+			       8 * (0x01 << width));
+		}
+	}
+
+	if ((type & AHD_TRANS_CUR) != 0) {
+		if (!paused)
+			ahd_pause(ahd);
+		ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
+		if (!paused)
+			ahd_unpause(ahd);
+	}
+
+	update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
+						tinfo, AHD_NEG_TO_GOAL);
+	if (update_needed && active)
+		ahd_update_pending_scbs(ahd);
+
+}
+
+/*
+ * Update the current state of tagged queuing for a given target.
+ */
+void
+ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+	     ahd_queue_alg alg)
+{
+	ahd_platform_set_tags(ahd, devinfo, alg);
+	ahd_send_async(ahd, devinfo->channel, devinfo->target,
+		       devinfo->lun, AC_TRANSFER_NEG, &alg);
+}
+
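+/*
+ * Push the specified transfer parameters (period, offset, PPR options,
+ * width, and I/O cell settings) into the controller's per-target
+ * negotiation table.
+ */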
+static void
+ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+		     struct ahd_transinfo *tinfo)
+{
+	ahd_mode_state	saved_modes;
+	u_int		period;
+	u_int		ppr_opts;
+	u_int		con_opts;
+	u_int		offset;
+	u_int		saved_negoaddr;
+	uint8_t		iocell_opts[sizeof(ahd->iocell_opts)];
+
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+	saved_negoaddr = ahd_inb(ahd, NEGOADDR);
+	ahd_outb(ahd, NEGOADDR, devinfo->target);
+	period = tinfo->period;
+	offset = tinfo->offset;
+	memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts)); 
+	ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ
+					|MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI);
+	con_opts = 0;
+	if (period == 0)
+		period = AHD_SYNCRATE_ASYNC;
+	if (period == AHD_SYNCRATE_160) {
+
+		if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
+			/*
+			 * When the SPI4 spec was finalized, PACE transfers
+			 * was not made a configurable option in the PPR
+			 * message.  Instead it is assumed to be enabled for
+			 * any syncrate faster than 80MHz.  Nevertheless,
+			 * Harpoon2A4 allows this to be configurable.
+			 *
+			 * Harpoon2A4 also assumes at most 2 data bytes per
+			 * negotiated REQ/ACK offset.  Paced transfers take
+			 * 4, so we must adjust our offset.
+			 */
+			ppr_opts |= PPROPT_PACE;
+			offset *= 2;
+
+			/*
+			 * Harpoon2A assumed that there would be a
+			 * fallback rate between 160MHz and 80MHz,
+			 * so 7 is used as the period factor rather
+			 * than 8 for 160MHz.
+			 */
+			period = AHD_SYNCRATE_REVA_160;
+		}
+		if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0)
+			iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
+			    ~AHD_PRECOMP_MASK;
+	} else {
+		/*
+		 * Precomp should be disabled for non-paced transfers.
+		 */
+		iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK;
+
+		if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0
+		 && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0) {
+			/*
+			 * Slow down our CRC interval to be
+			 * compatible with devices that can't
+			 * handle a CRC at full speed.
+			 */
+			con_opts |= ENSLOWCRC;
+		}
+	}
+
+	ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW);
+	ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]);
+	ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE);
+	ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]);
+
+	ahd_outb(ahd, NEGPERIOD, period);
+	ahd_outb(ahd, NEGPPROPTS, ppr_opts);
+	ahd_outb(ahd, NEGOFFSET, offset);
+
+	if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT)
+		con_opts |= WIDEXFER;
+
+	/*
+	 * During packetized transfers, the target will
+	 * give us the opportunity to send command packets
+	 * without us asserting attention.
+	 */
+	if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
+		con_opts |= ENAUTOATNO;
+	ahd_outb(ahd, NEGCONOPTS, con_opts);
+	ahd_outb(ahd, NEGOADDR, saved_negoaddr);
+	ahd_restore_modes(ahd, saved_modes);
+}
+
+/*
+ * When the transfer settings for a connection change, setup for
+ * negotiation in pending SCBs to effect the change as quickly as
+ * possible.  We also cancel any negotiations that are scheduled
+ * for inflight SCBs that have not been started yet.
+ */
+static void
+ahd_update_pending_scbs(struct ahd_softc *ahd)
+{
+	struct		scb *pending_scb;
+	int		pending_scb_count;
+	u_int		scb_tag;
+	int		paused;
+	u_int		saved_scbptr;
+	ahd_mode_state	saved_modes;
+
+	/*
+	 * Traverse the pending SCB list and ensure that all of the
+	 * SCBs there have the proper settings.  We can only safely
+	 * clear the negotiation required flag (setting requires the
+	 * execution queue to be modified) and this is only possible
+	 * if we are not already attempting to select out for this
+	 * SCB.  For this reason, all callers only call this routine
+	 * if we are changing the negotiation settings for the currently
+	 * active transaction on the bus.
+	 */
+	pending_scb_count = 0;
+	LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
+		struct ahd_devinfo devinfo;
+		struct hardware_scb *pending_hscb;
+		struct ahd_initiator_tinfo *tinfo;
+		struct ahd_tmode_tstate *tstate;
+
+		ahd_scb_devinfo(ahd, &devinfo, pending_scb);
+		tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
+					    devinfo.our_scsiid,
+					    devinfo.target, &tstate);
+		pending_hscb = pending_scb->hscb;
+		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
+		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
+			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
+			pending_hscb->control &= ~MK_MESSAGE;
+		}
+		ahd_sync_scb(ahd, pending_scb,
+			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+		pending_scb_count++;
+	}
+
+	if (pending_scb_count == 0)
+		return;
+
+	if (ahd_is_paused(ahd)) {
+		paused = 1;
+	} else {
+		paused = 0;
+		ahd_pause(ahd);
+	}
+
+	/*
+	 * Force the sequencer to reinitialize the selection for
+	 * the command at the head of the execution queue if it
+	 * has already been set up.  The negotiation changes may
+	 * affect whether we select-out with ATN.
+	 */
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
+	saved_scbptr = ahd_get_scbptr(ahd);
+	/* Ensure that the hscbs down on the card match the new information */
+	for (scb_tag = 0; scb_tag < ahd->scb_data.maxhscbs; scb_tag++) {
+		struct	hardware_scb *pending_hscb;
+		u_int	control;
+
+		pending_scb = ahd_lookup_scb(ahd, scb_tag);
+		if (pending_scb == NULL)
+			continue;
+		ahd_set_scbptr(ahd, scb_tag);
+		pending_hscb = pending_scb->hscb;
+		control = ahd_inb_scbram(ahd, SCB_CONTROL);
+		control &= ~MK_MESSAGE;
+		control |= pending_hscb->control & MK_MESSAGE;
+		ahd_outb(ahd, SCB_CONTROL, control);
+	}
+	ahd_set_scbptr(ahd, saved_scbptr);
+	ahd_restore_modes(ahd, saved_modes);
+
+	if (paused == 0)
+		ahd_unpause(ahd);
+}
+
+/**************************** Pathing Information *****************************/
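+/*
+ * Determine the identity of the connection currently being serviced
+ * from controller registers and record it in the supplied devinfo
+ * structure.
+ */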
+static void
+ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+	ahd_mode_state	saved_modes;
+	u_int		saved_scsiid;
+	role_t		role;
+	int		our_id;
+
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+	if (ahd_inb(ahd, SSTAT0) & TARGET)
+		role = ROLE_TARGET;
+	else
+		role = ROLE_INITIATOR;
+
+	if (role == ROLE_TARGET
+	 && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
+		/* We were selected, so pull our id from TARGIDIN */
+		our_id = ahd_inb(ahd, TARGIDIN) & OID;
+	} else if (role == ROLE_TARGET)
+		our_id = ahd_inb(ahd, TOWNID);
+	else
+		our_id = ahd_inb(ahd, IOWNID);
+
+	saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
+	ahd_compile_devinfo(devinfo,
+			    our_id,
+			    SCSIID_TARGET(ahd, saved_scsiid),
+			    ahd_inb(ahd, SAVED_LUN),
+			    SCSIID_CHANNEL(ahd, saved_scsiid),
+			    role);
+	ahd_restore_modes(ahd, saved_modes);
+}
+
+void
+ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+	printf("%s:%c:%d:%d: ", ahd_name(ahd), 'A',
+	       devinfo->target, devinfo->lun);
+}
+
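+/*
+ * Return the phase table entry matching the given bus phase, or the
+ * table's default entry if the phase is not recognized.
+ */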
+struct ahd_phase_table_entry*
+ahd_lookup_phase_entry(int phase)
+{
+	struct ahd_phase_table_entry *entry;
+	struct ahd_phase_table_entry *last_entry;
+
+	/*
+	 * num_phases doesn't include the default entry which
+	 * will be returned if the phase doesn't match.
+	 */
+	last_entry = &ahd_phase_table[num_phases];
+	for (entry = ahd_phase_table; entry < last_entry; entry++) {
+		if (phase == entry->phase)
+			break;
+	}
+	return (entry);
+}
+
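+/*
+ * Initialize a devinfo structure from its constituent parts.
+ */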
+void
+ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target,
+		    u_int lun, char channel, role_t role)
+{
+	devinfo->our_scsiid = our_id;
+	devinfo->target = target;
+	devinfo->lun = lun;
+	devinfo->target_offset = target;
+	devinfo->channel = channel;
+	devinfo->role = role;
+	if (channel == 'B')
+		devinfo->target_offset += 8;
+	devinfo->target_mask = (0x01 << devinfo->target_offset);
+}
+
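+/*
+ * Build a devinfo structure describing the connection associated
+ * with the given SCB.
+ */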
+static void
+ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+		struct scb *scb)
+{
+	role_t	role;
+	int	our_id;
+
+	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
+	role = ROLE_INITIATOR;
+	if ((scb->hscb->control & TARGET_SCB) != 0)
+		role = ROLE_TARGET;
+	ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb),
+			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role);
+}
+
+
+/************************ Message Phase Processing ****************************/
+/*
+ * When an initiator transaction with the MK_MESSAGE flag either reconnects
+ * or enters the initial message out phase, we are interrupted.  Fill our
+ * outgoing message buffer with the appropriate message and begin handling
+ * the message phase(s) manually.
+ */
+static void
+ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+			   struct scb *scb)
+{
+	/*
+	 * To facilitate adding multiple messages together,
+	 * each routine should increment the index and len
+	 * variables instead of setting them explicitly.
+	 */
+	ahd->msgout_index = 0;
+	ahd->msgout_len = 0;
+
+	if (ahd_currently_packetized(ahd))
+		ahd->msg_flags |= MSG_FLAG_PACKETIZED;
+
+	if (ahd->send_msg_perror
+	 && ahd_inb(ahd, MSG_OUT) == HOST_MSG) {
+		ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror;
+		ahd->msgout_len++;
+		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+			printf("Setting up for Parity Error delivery\n");
+#endif
+		return;
+	} else if (scb == NULL) {
+		printf("%s: WARNING. No pending message for "
+		       "I_T msgin.  Issuing NO-OP\n", ahd_name(ahd));
+		ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
+		ahd->msgout_len++;
+		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+		return;
+	}
+
+	if ((scb->flags & SCB_DEVICE_RESET) == 0
+	 && (scb->flags & SCB_PACKETIZED) == 0
+	 && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) {
+		u_int identify_msg;
+
+		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
+		if ((scb->hscb->control & DISCENB) != 0)
+			identify_msg |= MSG_IDENTIFY_DISCFLAG;
+		ahd->msgout_buf[ahd->msgout_index++] = identify_msg;
+		ahd->msgout_len++;
+
+		if ((scb->hscb->control & TAG_ENB) != 0) {
+			ahd->msgout_buf[ahd->msgout_index++] =
+			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
+			ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb);
+			ahd->msgout_len += 2;
+		}
+	}
+
+	if (scb->flags & SCB_DEVICE_RESET) {
+		ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
+		ahd->msgout_len++;
+		ahd_print_path(ahd, scb);
+		printf("Bus Device Reset Message Sent\n");
+		/*
+		 * Clear our selection hardware in advance of
+		 * the busfree.  We may have an entry in the waiting
+		 * Q for this target, and we don't want to go about
+		 * selecting while we handle the busfree and blow it
+		 * away.
+		 */
+		ahd_outb(ahd, SCSISEQ0, 0);
+	} else if ((scb->flags & SCB_ABORT) != 0) {
+
+		if ((scb->hscb->control & TAG_ENB) != 0) {
+			ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG;
+		} else {
+			ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT;
+		}
+		ahd->msgout_len++;
+		ahd_print_path(ahd, scb);
+		printf("Abort%s Message Sent\n",
+		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
+		/*
+		 * Clear our selection hardware in advance of
+		 * the busfree.  We may have an entry in the waiting
+		 * Q for this target, and we don't want to go about
+		 * selecting while we handle the busfree and blow it
+		 * away.
+		 */
+		ahd_outb(ahd, SCSISEQ0, 0);
+	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
+		ahd_build_transfer_msg(ahd, devinfo);
+		/*
+		 * Clear our selection hardware in advance of potential
+		 * PPR IU status change busfree.  We may have an entry in
+		 * the waiting Q for this target, and we don't want to go
+		 * about selecting while we handle the busfree and blow
+		 * it away.
+		 */
+		ahd_outb(ahd, SCSISEQ0, 0);
+	} else {
+		printf("ahd_intr: AWAITING_MSG for an SCB that "
+		       "does not have a waiting message\n");
+		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
+		       devinfo->target_mask);
+		panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x "
+		      "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control,
+		      ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT),
+		      scb->flags);
+	}
+
+	/*
+	 * Clear the MK_MESSAGE flag from the SCB so we aren't
+	 * asked to send this message again.
+	 */
+	ahd_outb(ahd, SCB_CONTROL,
+		 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
+	scb->hscb->control &= ~MK_MESSAGE;
+	ahd->msgout_index = 0;
+	ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+}
+
+/*
+ * Build an appropriate transfer negotiation message for the
+ * currently active target.
+ */
+static void
+ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+	/*
+	 * We need to initiate transfer negotiations.
+	 * If our current and goal settings are identical,
+	 * we want to renegotiate due to a check condition.
+	 */
+	struct	ahd_initiator_tinfo *tinfo;
+	struct	ahd_tmode_tstate *tstate;
+	int	dowide;
+	int	dosync;
+	int	doppr;
+	u_int	period;
+	u_int	ppr_options;
+	u_int	offset;
+
+	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+	/*
+	 * Filter our period based on the current connection.
+	 * If we can't perform DT transfers on this segment (not in LVD
+	 * mode for instance), then our decision to issue a PPR message
+	 * may change.
+	 */
+	period = tinfo->goal.period;
+	offset = tinfo->goal.offset;
+	ppr_options = tinfo->goal.ppr_options;
+	/* Target initiated PPR is not allowed in the SCSI spec */
+	if (devinfo->role == ROLE_TARGET)
+		ppr_options = 0;
+	ahd_devlimited_syncrate(ahd, tinfo, &period,
+				&ppr_options, devinfo->role);
+	dowide = tinfo->curr.width != tinfo->goal.width;
+	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
+	/*
+	 * Only use PPR if we have options that need it, even if the device
+	 * claims to support it.  There might be an expander in the way
+	 * that doesn't.
+	 */
+	doppr = ppr_options != 0;
+
+	if (!dowide && !dosync && !doppr) {
+		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
+		dosync = tinfo->goal.offset != 0;
+	}
+
+	if (!dowide && !dosync && !doppr) {
+		/*
+		 * Force async with a WDTR message if we have a wide bus,
+		 * or just issue an SDTR with a 0 offset.
+		 */
+		if ((ahd->features & AHD_WIDE) != 0)
+			dowide = 1;
+		else
+			dosync = 1;
+
+		if (bootverbose) {
+			ahd_print_devinfo(ahd, devinfo);
+			printf("Ensuring async\n");
+		}
+	}
+	/* Target initiated PPR is not allowed in the SCSI spec */
+	if (devinfo->role == ROLE_TARGET)
+		doppr = 0;
+
+	/*
+	 * Both the PPR message and SDTR message require the
+	 * goal syncrate to be limited to what the target device
+	 * is capable of handling (based on whether an LVD->SE
+	 * expander is on the bus), so combine these two cases.
+	 * Regardless, guarantee that if we are using WDTR and SDTR
+	 * messages, WDTR comes first.
+	 */
+	if (doppr || (dosync && !dowide)) {
+
+		offset = tinfo->goal.offset;
+		ahd_validate_offset(ahd, tinfo, period, &offset,
+				    doppr ? tinfo->goal.width
+					  : tinfo->curr.width,
+				    devinfo->role);
+		if (doppr) {
+			ahd_construct_ppr(ahd, devinfo, period, offset,
+					  tinfo->goal.width, ppr_options);
+		} else {
+			ahd_construct_sdtr(ahd, devinfo, period, offset);
+		}
+	} else {
+		ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width);
+	}
+}
+
+/*
+ * Build a synchronous negotiation message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+		   u_int period, u_int offset)
+{
+	if (offset == 0)
+		period = AHD_ASYNC_XFER_PERIOD;
+	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
+	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR_LEN;
+	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR;
+	ahd->msgout_buf[ahd->msgout_index++] = period;
+	ahd->msgout_buf[ahd->msgout_index++] = offset;
+	ahd->msgout_len += 5;
+	if (bootverbose) {
+		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
+		       ahd_name(ahd), devinfo->channel, devinfo->target,
+		       devinfo->lun, period, offset);
+	}
+}
+
+/*
+ * Build a wide negotiation message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+		   u_int bus_width)
+{
+	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
+	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR_LEN;
+	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR;
+	ahd->msgout_buf[ahd->msgout_index++] = bus_width;
+	ahd->msgout_len += 4;
+	if (bootverbose) {
+		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
+		       ahd_name(ahd), devinfo->channel, devinfo->target,
+		       devinfo->lun, bus_width);
+	}
+}
+
+/*
+ * Build a parallel protocol request message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+		  u_int period, u_int offset, u_int bus_width,
+		  u_int ppr_options)
+{
+	/*
+	 * Always request precompensation from
+	 * the other target if we are running
+	 * at paced syncrates.
+	 */
+	if (period <= AHD_SYNCRATE_PACED)
+		ppr_options |= MSG_EXT_PPR_PCOMP_EN;
+	if (offset == 0)
+		period = AHD_ASYNC_XFER_PERIOD;
+	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
+	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR_LEN;
+	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR;
+	ahd->msgout_buf[ahd->msgout_index++] = period;
+	ahd->msgout_buf[ahd->msgout_index++] = 0;
+	ahd->msgout_buf[ahd->msgout_index++] = offset;
+	ahd->msgout_buf[ahd->msgout_index++] = bus_width;
+	ahd->msgout_buf[ahd->msgout_index++] = ppr_options;
+	ahd->msgout_len += 8;
+	if (bootverbose) {
+		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
+		       "offset %x, ppr_options %x\n", ahd_name(ahd),
+		       devinfo->channel, devinfo->target, devinfo->lun,
+		       bus_width, period, offset, ppr_options);
+	}
+}
+
+/*
+ * Clear any active message state.
+ */
+static void
+ahd_clear_msg_state(struct ahd_softc *ahd)
+{
+	ahd_mode_state saved_modes;
+
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	ahd->send_msg_perror = 0;
+	ahd->msg_flags = MSG_FLAG_NONE;
+	ahd->msgout_len = 0;
+	ahd->msgin_index = 0;
+	ahd->msg_type = MSG_TYPE_NONE;
+	if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
+		/*
+		 * The target didn't care to respond to our
+		 * message request, so clear ATN.
+		 */
+		ahd_outb(ahd, CLRSINT1, CLRATNO);
+	}
+	ahd_outb(ahd, MSG_OUT, MSG_NOOP);
+	ahd_outb(ahd, SEQ_FLAGS2,
+		 ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
+	ahd_restore_modes(ahd, saved_modes);
+}
+
+/*
+ * Manual message loop handler.
+ */
+static void
+ahd_handle_message_phase(struct ahd_softc *ahd)
+{ 
+	struct	ahd_devinfo devinfo;
+	u_int	bus_phase;
+	int	end_session;
+
+	ahd_fetch_devinfo(ahd, &devinfo);
+	end_session = FALSE;
+	bus_phase = ahd_inb(ahd, LASTPHASE);
+
+	if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) {
+		printf("LQIRETRY for LQIPHASE_OUTPKT\n");
+		ahd_outb(ahd, LQCTL2, LQIRETRY);
+	}
+reswitch:
+	switch (ahd->msg_type) {
+	case MSG_TYPE_INITIATOR_MSGOUT:
+	{
+		int lastbyte;
+		int phasemis;
+		int msgdone;
+
+		if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0)
+			panic("HOST_MSG_LOOP interrupt with no active message");
+
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+			ahd_print_devinfo(ahd, &devinfo);
+			printf("INITIATOR_MSG_OUT");
+		}
+#endif
+		phasemis = bus_phase != P_MESGOUT;
+		if (phasemis) {
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+				printf(" PHASEMIS %s\n",
+				       ahd_lookup_phase_entry(bus_phase)
+							     ->phasemsg);
+			}
+#endif
+			if (bus_phase == P_MESGIN) {
+				/*
+				 * Change gears and see if
+				 * this message is of interest to
+				 * us or should be passed back to
+				 * the sequencer.
+				 */
+				ahd_outb(ahd, CLRSINT1, CLRATNO);
+				ahd->send_msg_perror = 0;
+				ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN;
+				ahd->msgin_index = 0;
+				goto reswitch;
+			}
+			end_session = TRUE;
+			break;
+		}
+
+		if (ahd->send_msg_perror) {
+			ahd_outb(ahd, CLRSINT1, CLRATNO);
+			ahd_outb(ahd, CLRSINT1, CLRREQINIT);
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+				printf(" byte 0x%x\n", ahd->send_msg_perror);
+#endif
+			/*
+			 * If we are notifying the target of a CRC error
+			 * during packetized operations, the target is
+			 * within its rights to acknowledge our message
+			 * with a busfree.
+			 */
+			if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0
+			 && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR)
+				ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE;
+
+			ahd_outb(ahd, RETURN_2, ahd->send_msg_perror);
+			ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
+			break;
+		}
+
+		msgdone	= ahd->msgout_index == ahd->msgout_len;
+		if (msgdone) {
+			/*
+			 * The target has requested a retry.
+			 * Re-assert ATN, reset our message index to
+			 * 0, and try again.
+			 */
+			ahd->msgout_index = 0;
+			ahd_assert_atn(ahd);
+		}
+
+		lastbyte = ahd->msgout_index == (ahd->msgout_len - 1);
+		if (lastbyte) {
+			/* Last byte is signified by dropping ATN */
+			ahd_outb(ahd, CLRSINT1, CLRATNO);
+		}
+
+		/*
+		 * Clear our interrupt status and present
+		 * the next byte on the bus.
+		 */
+		ahd_outb(ahd, CLRSINT1, CLRREQINIT);
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+			printf(" byte 0x%x\n",
+			       ahd->msgout_buf[ahd->msgout_index]);
+#endif
+		ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]);
+		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
+		break;
+	}
+	case MSG_TYPE_INITIATOR_MSGIN:
+	{
+		int phasemis;
+		int message_done;
+
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+			ahd_print_devinfo(ahd, &devinfo);
+			printf("INITIATOR_MSG_IN");
+		}
+#endif
+		phasemis = bus_phase != P_MESGIN;
+		if (phasemis) {
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+				printf(" PHASEMIS %s\n",
+				       ahd_lookup_phase_entry(bus_phase)
+							     ->phasemsg);
+			}
+#endif
+			ahd->msgin_index = 0;
+			if (bus_phase == P_MESGOUT
+			 && (ahd->send_msg_perror != 0
+			  || (ahd->msgout_len != 0
+			   && ahd->msgout_index == 0))) {
+				ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+				goto reswitch;
+			}
+			end_session = TRUE;
+			break;
+		}
+
+		/* Pull the byte in without acking it */
+		ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS);
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+			printf(" byte 0x%x\n",
+			       ahd->msgin_buf[ahd->msgin_index]);
+#endif
+
+		message_done = ahd_parse_msg(ahd, &devinfo);
+
+		if (message_done) {
+			/*
+			 * Clear our incoming message buffer in case there
+			 * is another message following this one.
+			 */
+			ahd->msgin_index = 0;
+
+			/*
+			 * If this message elicited a response,
+			 * assert ATN so the target takes us to the
+			 * message out phase.
+			 */
+			if (ahd->msgout_len != 0) {
+#ifdef AHD_DEBUG
+				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
+					ahd_print_devinfo(ahd, &devinfo);
+					printf("Asserting ATN for response\n");
+				}
+#endif
+				ahd_assert_atn(ahd);
+			}
+		} else 
+			ahd->msgin_index++;
+
+		if (message_done == MSGLOOP_TERMINATED) {
+			end_session = TRUE;
+		} else {
+			/* Ack the byte */
+			ahd_outb(ahd, CLRSINT1, CLRREQINIT);
+			ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ);
+		}
+		break;
+	}
+	case MSG_TYPE_TARGET_MSGIN:
+	{
+		int msgdone;
+		int msgout_request;
+
+		/*
+		 * By default, the message loop will continue.
+		 */
+		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
+
+		if (ahd->msgout_len == 0)
+			panic("Target MSGIN with no active message");
+
+		/*
+		 * If we interrupted a mesgout session, the initiator
+		 * will not know this until our first REQ.  So, we
+		 * only honor mesgout requests after we've sent our
+		 * first byte.
+		 */
+		if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0
+		 && ahd->msgout_index > 0)
+			msgout_request = TRUE;
+		else
+			msgout_request = FALSE;
+
+		if (msgout_request) {
+
+			/*
+			 * Change gears and see if
+			 * this message is of interest to
+			 * us or should be passed back to
+			 * the sequencer.
+			 */
+			ahd->msg_type = MSG_TYPE_TARGET_MSGOUT;
+			ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO);
+			ahd->msgin_index = 0;
+			/* Dummy read to REQ for first byte */
+			ahd_inb(ahd, SCSIDAT);
+			ahd_outb(ahd, SXFRCTL0,
+				 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
+			break;
+		}
+
+		msgdone = ahd->msgout_index == ahd->msgout_len;
+		if (msgdone) {
+			ahd_outb(ahd, SXFRCTL0,
+				 ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
+			end_session = TRUE;
+			break;
+		}
+
+		/*
+		 * Present the next byte on the bus.
+		 */
+		ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN);
+		ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]);
+		break;
+	}
+	case MSG_TYPE_TARGET_MSGOUT:
+	{
+		int lastbyte;
+		int msgdone;
+
+		/*
+		 * By default, the message loop will continue.
+		 */
+		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
+
+		/*
+		 * The initiator signals that this is
+		 * the last byte by dropping ATN.
+		 */
+		lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0;
+
+		/*
+		 * Read the latched byte, but turn off SPIOEN first
+		 * so that we don't inadvertently cause a REQ for the
+		 * next byte.
+		 */
+		ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
+		ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT);
+		msgdone = ahd_parse_msg(ahd, &devinfo);
+		if (msgdone == MSGLOOP_TERMINATED) {
+			/*
+			 * The message is *really* done in that it caused
+			 * us to go to bus free.  The sequencer has already
+			 * been reset at this point, so pull the ejection
+			 * handle.
+			 */
+			return;
+		}
+		
+		ahd->msgin_index++;
+
+		/*
+		 * XXX Read spec about initiator dropping ATN too soon
+		 *     and use msgdone to detect it.
+		 */
+		if (msgdone == MSGLOOP_MSGCOMPLETE) {
+			ahd->msgin_index = 0;
+
+			/*
+			 * If this message elicited a response, transition
+			 * to the Message in phase and send it.
+			 */
+			if (ahd->msgout_len != 0) {
+				ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO);
+				ahd_outb(ahd, SXFRCTL0,
+					 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
+				ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
+				ahd->msgin_index = 0;
+				break;
+			}
+		}
+
+		if (lastbyte)
+			end_session = TRUE;
+		else {
+			/* Ask for the next byte. */
+			ahd_outb(ahd, SXFRCTL0,
+				 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
+		}
+
+		break;
+	}
+	default:
+		panic("Unknown REQINIT message type");
+	}
+
+	if (end_session) {
+		if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) {
+			printf("%s: Returning to Idle Loop\n",
+			       ahd_name(ahd));
+			ahd_clear_msg_state(ahd);
+
+			/*
+			 * Perform the equivalent of a clear_target_state.
+			 */
+			ahd_outb(ahd, LASTPHASE, P_BUSFREE);
+			ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT);
+			ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
+		} else {
+			ahd_clear_msg_state(ahd);
+			ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP);
+		}
+	}
+}
+
+/*
+ * See if we sent a particular extended message to the target.
+ * If "full" is true, return true only if the target saw the full
+ * message.  If "full" is false, return true if the target saw at
+ * least the first byte of the message.
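+ *
+ * For illustration (standard SCSI extended-message layout): an SDTR
+ * message is { MSG_EXTENDED, length, MSG_EXT_SDTR, period, offset }.
+ * The length byte at index+1 counts the bytes that follow it, so
+ * end_index below lands on the last byte of the message; "full" then
+ * requires msgout_index to have advanced past that byte.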
+ */
+static int
+ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
+{
+	int found;
+	u_int index;
+
+	found = FALSE;
+	index = 0;
+
+	while (index < ahd->msgout_len) {
+		if (ahd->msgout_buf[index] == MSG_EXTENDED) {
+			u_int end_index;
+
+			end_index = index + 1 + ahd->msgout_buf[index + 1];
+			if (ahd->msgout_buf[index+2] == msgval
+			 && type == AHDMSG_EXT) {
+
+				if (full) {
+					if (ahd->msgout_index > end_index)
+						found = TRUE;
+				} else if (ahd->msgout_index > index)
+					found = TRUE;
+			}
+			index = end_index;
+		} else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK
+			&& ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
+
+			/* Skip tag type and tag id or residue param */
+			index += 2;
+		} else {
+			/* Single byte message */
+			if (type == AHDMSG_1B
+			 && ahd->msgout_index > index
+			 && (ahd->msgout_buf[index] == msgval
+			  || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
+			   && msgval == MSG_IDENTIFYFLAG)))
+				found = TRUE;
+			index++;
+		}
+
+		if (found)
+			break;
+	}
+	return (found);
+}
+
+/*
+ * Wait for a complete incoming message, parse it, and respond accordingly.
+ */
+static int
+ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+	struct	ahd_initiator_tinfo *tinfo;
+	struct	ahd_tmode_tstate *tstate;
+	int	reject;
+	int	done;
+	int	response;
+
+	done = MSGLOOP_IN_PROG;
+	response = FALSE;
+	reject = FALSE;
+	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+
+	/*
+	 * Parse as much of the message as is available,
+	 * rejecting it if we don't support it.  When
+	 * the entire message is available and has been
+	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
+	 * that we have parsed an entire message.
+	 *
+	 * In the case of extended messages, we accept the length
+	 * byte outright and perform more checking once we know the
+	 * extended message type.
+	 */
+	switch (ahd->msgin_buf[0]) {
+	case MSG_DISCONNECT:
+	case MSG_SAVEDATAPOINTER:
+	case MSG_CMDCOMPLETE:
+	case MSG_RESTOREPOINTERS:
+	case MSG_IGN_WIDE_RESIDUE:
+		/*
+		 * End our message loop as these are messages
+		 * the sequencer handles on its own.
+		 */
+		done = MSGLOOP_TERMINATED;
+		break;
+	case MSG_MESSAGE_REJECT:
+		response = ahd_handle_msg_reject(ahd, devinfo);
+		/* FALLTHROUGH */
+	case MSG_NOOP:
+		done = MSGLOOP_MSGCOMPLETE;
+		break;
+	case MSG_EXTENDED:
+	{
+		/* Wait for enough of the message to begin validation */
+		if (ahd->msgin_index < 2)
+			break;
+		switch (ahd->msgin_buf[2]) {
+		case MSG_EXT_SDTR:
+		{
+			u_int	 period;
+			u_int	 ppr_options;
+			u_int	 offset;
+			u_int	 saved_offset;
+			
+			if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
+				reject = TRUE;
+				break;
+			}
+
+			/*
+			 * Wait until we have both args before validating
+			 * and acting on this message.
+			 *
+			 * Add one to MSG_EXT_SDTR_LEN to account for
+			 * the extended message preamble.
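+			 *
+			 * (MSG_EXT_SDTR_LEN is 3, so this waits until
+			 * the offset byte at msgin_buf[4] has been
+			 * latched.)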
+			 */
+			if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
+				break;
+
+			period = ahd->msgin_buf[3];
+			ppr_options = 0;
+			saved_offset = offset = ahd->msgin_buf[4];
+			ahd_devlimited_syncrate(ahd, tinfo, &period,
+						&ppr_options, devinfo->role);
+			ahd_validate_offset(ahd, tinfo, period, &offset,
+					    tinfo->curr.width, devinfo->role);
+			if (bootverbose) {
+				printf("(%s:%c:%d:%d): Received "
+				       "SDTR period %x, offset %x\n\t"
+				       "Filtered to period %x, offset %x\n",
+				       ahd_name(ahd), devinfo->channel,
+				       devinfo->target, devinfo->lun,
+				       ahd->msgin_buf[3], saved_offset,
+				       period, offset);
+			}
+			ahd_set_syncrate(ahd, devinfo, period,
+					 offset, ppr_options,
+					 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+					 /*paused*/TRUE);
+
+			/*
+			 * See if we initiated Sync Negotiation
+			 * and didn't have to fall down to async
+			 * transfers.
+			 */
+			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) {
+				/* We started it */
+				if (saved_offset != offset) {
+					/* Went too low - force async */
+					reject = TRUE;
+				}
+			} else {
+				/*
+				 * Send our own SDTR in reply
+				 */
+				if (bootverbose
+				 && devinfo->role == ROLE_INITIATOR) {
+					printf("(%s:%c:%d:%d): Target "
+					       "Initiated SDTR\n",
+					       ahd_name(ahd), devinfo->channel,
+					       devinfo->target, devinfo->lun);
+				}
+				ahd->msgout_index = 0;
+				ahd->msgout_len = 0;
+				ahd_construct_sdtr(ahd, devinfo,
+						   period, offset);
+				ahd->msgout_index = 0;
+				response = TRUE;
+			}
+			done = MSGLOOP_MSGCOMPLETE;
+			break;
+		}
+		case MSG_EXT_WDTR:
+		{
+			u_int bus_width;
+			u_int saved_width;
+			u_int sending_reply;
+
+			sending_reply = FALSE;
+			if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
+				reject = TRUE;
+				break;
+			}
+
+			/*
+			 * Wait until we have our arg before validating
+			 * and acting on this message.
+			 *
+			 * Add one to MSG_EXT_WDTR_LEN to account for
+			 * the extended message preamble.
+			 */
+			if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1))
+				break;
+
+			bus_width = ahd->msgin_buf[3];
+			saved_width = bus_width;
+			ahd_validate_width(ahd, tinfo, &bus_width,
+					   devinfo->role);
+			if (bootverbose) {
+				printf("(%s:%c:%d:%d): Received WDTR "
+				       "%x filtered to %x\n",
+				       ahd_name(ahd), devinfo->channel,
+				       devinfo->target, devinfo->lun,
+				       saved_width, bus_width);
+			}
+
+			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) {
+				/*
+				 * Don't send a WDTR back to the
+				 * target, since we asked first.
+				 * If the width went higher than our
+				 * request, reject it.
+				 */
+				if (saved_width > bus_width) {
+					reject = TRUE;
+					printf("(%s:%c:%d:%d): requested %dBit "
+					       "transfers.  Rejecting...\n",
+					       ahd_name(ahd), devinfo->channel,
+					       devinfo->target, devinfo->lun,
+					       8 * (0x01 << bus_width));
+					bus_width = 0;
+				}
+			} else {
+				/*
+				 * Send our own WDTR in reply
+				 */
+				if (bootverbose
+				 && devinfo->role == ROLE_INITIATOR) {
+					printf("(%s:%c:%d:%d): Target "
+					       "Initiated WDTR\n",
+					       ahd_name(ahd), devinfo->channel,
+					       devinfo->target, devinfo->lun);
+				}
+				ahd->msgout_index = 0;
+				ahd->msgout_len = 0;
+				ahd_construct_wdtr(ahd, devinfo, bus_width);
+				ahd->msgout_index = 0;
+				response = TRUE;
+				sending_reply = TRUE;
+			}
+			/*
+			 * After a wide message, we are async, but
+			 * some devices don't seem to honor this portion
+			 * of the spec.  Force a renegotiation of the
+			 * sync component of our transfer agreement even
+			 * if our goal is async.  By updating our width
+			 * after forcing the negotiation, we avoid
+			 * renegotiating for width.
+			 */
+			ahd_update_neg_request(ahd, devinfo, tstate,
+					       tinfo, AHD_NEG_ALWAYS);
+			ahd_set_width(ahd, devinfo, bus_width,
+				      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+				      /*paused*/TRUE);
+			if (sending_reply == FALSE && reject == FALSE) {
+
+				/*
+				 * We will always have an SDTR to send.
+				 */
+				ahd->msgout_index = 0;
+				ahd->msgout_len = 0;
+				ahd_build_transfer_msg(ahd, devinfo);
+				ahd->msgout_index = 0;
+				response = TRUE;
+			}
+			done = MSGLOOP_MSGCOMPLETE;
+			break;
+		}
+		case MSG_EXT_PPR:
+		{
+			u_int	period;
+			u_int	offset;
+			u_int	bus_width;
+			u_int	ppr_options;
+			u_int	saved_width;
+			u_int	saved_offset;
+			u_int	saved_ppr_options;
+
+			if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) {
+				reject = TRUE;
+				break;
+			}
+
+			/*
+			 * Wait until we have all args before validating
+			 * and acting on this message.
+			 *
+			 * Add one to MSG_EXT_PPR_LEN to account for
+			 * the extended message preamble.
+			 */
+			if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1))
+				break;
+
+			period = ahd->msgin_buf[3];
+			offset = ahd->msgin_buf[5];
+			bus_width = ahd->msgin_buf[6];
+			saved_width = bus_width;
+			ppr_options = ahd->msgin_buf[7];
+			/*
+			 * According to the spec, a DT only
+			 * period factor with no DT option
+			 * set implies async.
+			 */
+			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
+			 && period <= 9)
+				offset = 0;
+			saved_ppr_options = ppr_options;
+			saved_offset = offset;
+
+			/*
+			 * Transfer options are only available if we
+			 * are negotiating wide.
+			 */
+			if (bus_width == 0)
+				ppr_options &= MSG_EXT_PPR_QAS_REQ;
+
+			ahd_validate_width(ahd, tinfo, &bus_width,
+					   devinfo->role);
+			ahd_devlimited_syncrate(ahd, tinfo, &period,
+						&ppr_options, devinfo->role);
+			ahd_validate_offset(ahd, tinfo, period, &offset,
+					    bus_width, devinfo->role);
+
+			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) {
+				/*
+				 * If we are unable to do any of the
+				 * requested options (we went too low),
+				 * then we'll have to reject the message.
+				 */
+				if (saved_width > bus_width
+				 || saved_offset != offset
+				 || saved_ppr_options != ppr_options) {
+					reject = TRUE;
+					period = 0;
+					offset = 0;
+					bus_width = 0;
+					ppr_options = 0;
+				}
+			} else {
+				if (devinfo->role != ROLE_TARGET)
+					printf("(%s:%c:%d:%d): Target "
+					       "Initiated PPR\n",
+					       ahd_name(ahd), devinfo->channel,
+					       devinfo->target, devinfo->lun);
+				else
+					printf("(%s:%c:%d:%d): Initiator "
+					       "Initiated PPR\n",
+					       ahd_name(ahd), devinfo->channel,
+					       devinfo->target, devinfo->lun);
+				ahd->msgout_index = 0;
+				ahd->msgout_len = 0;
+				ahd_construct_ppr(ahd, devinfo, period, offset,
+						  bus_width, ppr_options);
+				ahd->msgout_index = 0;
+				response = TRUE;
+			}
+			if (bootverbose) {
+				printf("(%s:%c:%d:%d): Received PPR width %x, "
+				       "period %x, offset %x, options %x\n"
+				       "\tFiltered to width %x, period %x, "
+				       "offset %x, options %x\n",
+				       ahd_name(ahd), devinfo->channel,
+				       devinfo->target, devinfo->lun,
+				       saved_width, ahd->msgin_buf[3],
+				       saved_offset, saved_ppr_options,
+				       bus_width, period, offset, ppr_options);
+			}
+			ahd_set_width(ahd, devinfo, bus_width,
+				      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+				      /*paused*/TRUE);
+			ahd_set_syncrate(ahd, devinfo, period,
+					 offset, ppr_options,
+					 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+					 /*paused*/TRUE);
+
+			done = MSGLOOP_MSGCOMPLETE;
+			break;
+		}
+		default:
+			/* Unknown extended message.  Reject it. */
+			reject = TRUE;
+			break;
+		}
+		break;
+	}
+#ifdef AHD_TARGET_MODE
+	case MSG_BUS_DEV_RESET:
+		ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD,
+				    CAM_BDR_SENT,
+				    "Bus Device Reset Received",
+				    /*verbose_level*/0);
+		ahd_restart(ahd);
+		done = MSGLOOP_TERMINATED;
+		break;
+	case MSG_ABORT_TAG:
+	case MSG_ABORT:
+	case MSG_CLEAR_QUEUE:
+	{
+		int tag;
+
+		/* Target mode messages */
+		if (devinfo->role != ROLE_TARGET) {
+			reject = TRUE;
+			break;
+		}
+		tag = SCB_LIST_NULL;
+		if (ahd->msgin_buf[0] == MSG_ABORT_TAG)
+			tag = ahd_inb(ahd, INITIATOR_TAG);
+		ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
+			       devinfo->lun, tag, ROLE_TARGET,
+			       CAM_REQ_ABORTED);
+
+		tstate = ahd->enabled_targets[devinfo->our_scsiid];
+		if (tstate != NULL) {
+			struct ahd_tmode_lstate* lstate;
+
+			lstate = tstate->enabled_luns[devinfo->lun];
+			if (lstate != NULL) {
+				ahd_queue_lstate_event(ahd, lstate,
+						       devinfo->our_scsiid,
+						       ahd->msgin_buf[0],
+						       /*arg*/tag);
+				ahd_send_lstate_events(ahd, lstate);
+			}
+		}
+		ahd_restart(ahd);
+		done = MSGLOOP_TERMINATED;
+		break;
+	}
+#endif
+	case MSG_QAS_REQUEST:
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+			printf("%s: QAS request.  SCSISIGI == 0x%x\n",
+			       ahd_name(ahd), ahd_inb(ahd, SCSISIGI));
+#endif
+		ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE;
+		/* FALLTHROUGH */
+	case MSG_TERM_IO_PROC:
+	default:
+		reject = TRUE;
+		break;
+	}
+
+	if (reject) {
+		/*
+		 * Setup to reject the message.
+		 */
+		ahd->msgout_index = 0;
+		ahd->msgout_len = 1;
+		ahd->msgout_buf[0] = MSG_MESSAGE_REJECT;
+		done = MSGLOOP_MSGCOMPLETE;
+		response = TRUE;
+	}
+
+	if (done != MSGLOOP_IN_PROG && !response)
+		/* Clear the outgoing message buffer */
+		ahd->msgout_len = 0;
+
+	return (done);
+}
+
+/*
+ * Process a message reject message.
+ */
+static int
+ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+	/*
+	 * What we care about here is if we had an
+	 * outstanding SDTR or WDTR message for this
+	 * target.  If we did, this is a signal that
+	 * the target is refusing negotiation.
+	 */
+	struct scb *scb;
+	struct ahd_initiator_tinfo *tinfo;
+	struct ahd_tmode_tstate *tstate;
+	u_int scb_index;
+	u_int last_msg;
+	int   response = 0;
+
+	scb_index = ahd_get_scbptr(ahd);
+	scb = ahd_lookup_scb(ahd, scb_index);
+	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
+				    devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+	/* Might be necessary */
+	last_msg = ahd_inb(ahd, LAST_MSG);
+
+	if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
+		if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE)
+		 && tinfo->goal.period <= AHD_SYNCRATE_PACED) {
+			/*
+			 * Target may not like our SPI-4 PPR Options.
+			 * Attempt to negotiate 80MHz which will turn
+			 * off these options.
+			 */
+			if (bootverbose) {
+				printf("(%s:%c:%d:%d): PPR Rejected. "
+				       "Trying simple U160 PPR\n",
+				       ahd_name(ahd), devinfo->channel,
+				       devinfo->target, devinfo->lun);
+			}
+			tinfo->goal.period = AHD_SYNCRATE_DT;
+			tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ
+						|  MSG_EXT_PPR_QAS_REQ
+						|  MSG_EXT_PPR_DT_REQ;
+		} else {
+			/*
+			 * Target does not support the PPR message.
+			 * Attempt to negotiate SPI-2 style.
+			 */
+			if (bootverbose) {
+				printf("(%s:%c:%d:%d): PPR Rejected. "
+				       "Trying WDTR/SDTR\n",
+				       ahd_name(ahd), devinfo->channel,
+				       devinfo->target, devinfo->lun);
+			}
+			tinfo->goal.ppr_options = 0;
+			tinfo->curr.transport_version = 2;
+			tinfo->goal.transport_version = 2;
+		}
+		ahd->msgout_index = 0;
+		ahd->msgout_len = 0;
+		ahd_build_transfer_msg(ahd, devinfo);
+		ahd->msgout_index = 0;
+		response = 1;
+	} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
+
+		/* note 8bit xfers */
+		printf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
+		       "8bit transfers\n", ahd_name(ahd),
+		       devinfo->channel, devinfo->target, devinfo->lun);
+		ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+			      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+			      /*paused*/TRUE);
+		/*
+		 * No need to clear the sync rate.  If the target
+		 * did not accept the command, our syncrate is
+		 * unaffected.  If the target started the negotiation,
+		 * but rejected our response, we already cleared the
+		 * sync rate before sending our WDTR.
+		 */
+		if (tinfo->goal.offset != tinfo->curr.offset) {
+
+			/* Start the sync negotiation */
+			ahd->msgout_index = 0;
+			ahd->msgout_len = 0;
+			ahd_build_transfer_msg(ahd, devinfo);
+			ahd->msgout_index = 0;
+			response = 1;
+		}
+	} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
+		/* note asynch xfers and clear flag */
+		ahd_set_syncrate(ahd, devinfo, /*period*/0,
+				 /*offset*/0, /*ppr_options*/0,
+				 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
+				 /*paused*/TRUE);
+		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
+		       "Using asynchronous transfers\n",
+		       ahd_name(ahd), devinfo->channel,
+		       devinfo->target, devinfo->lun);
+	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
+		int tag_type;
+		int mask;
+
+		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
+
+		if (tag_type == MSG_SIMPLE_TASK) {
+			printf("(%s:%c:%d:%d): refuses tagged commands.  "
+			       "Performing non-tagged I/O\n", ahd_name(ahd),
+			       devinfo->channel, devinfo->target, devinfo->lun);
+			ahd_set_tags(ahd, devinfo, AHD_QUEUE_NONE);
+			mask = ~0x23;
+		} else {
+			printf("(%s:%c:%d:%d): refuses %s tagged commands.  "
+			       "Performing simple queue tagged I/O only\n",
+			       ahd_name(ahd), devinfo->channel, devinfo->target,
+			       devinfo->lun, tag_type == MSG_ORDERED_TASK
+			       ? "ordered" : "head of queue");
+			ahd_set_tags(ahd, devinfo, AHD_QUEUE_BASIC);
+			mask = ~0x03;
+		}
+
+		/*
+		 * Resend the identify for this CCB as the target
+		 * may believe that the selection is invalid otherwise.
+		 */
+		ahd_outb(ahd, SCB_CONTROL,
+			 ahd_inb_scbram(ahd, SCB_CONTROL) & mask);
+	 	scb->hscb->control &= mask;
+		ahd_set_transaction_tag(scb, /*enabled*/FALSE,
+					/*type*/MSG_SIMPLE_TASK);
+		ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG);
+		ahd_assert_atn(ahd);
+		ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
+			     SCB_GET_TAG(scb));
+
+		/*
+		 * Requeue all tagged commands for this target
+		 * currently in our possession so they can be
+		 * converted to untagged commands.
+		 */
+		ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
+				   SCB_GET_CHANNEL(ahd, scb),
+				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
+				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
+				   SEARCH_COMPLETE);
+	} else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) {
+		/*
+		 * Most likely the device believes that we had
+		 * previously negotiated packetized.
+		 */
+		ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
+			       |  MSG_FLAG_IU_REQ_CHANGED;
+
+		ahd_force_renegotiation(ahd, devinfo);
+		ahd->msgout_index = 0;
+		ahd->msgout_len = 0;
+		ahd_build_transfer_msg(ahd, devinfo);
+		ahd->msgout_index = 0;
+		response = 1;
+	} else {
+		/*
+		 * Otherwise, we ignore it.
+		 */
+		printf("%s:%c:%d: Message reject for %x -- ignored\n",
+		       ahd_name(ahd), devinfo->channel, devinfo->target,
+		       last_msg);
+	}
+	return (response);
+}
+
+/*
+ * Process an ignore wide residue message.
+ */
+static void
+ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+	u_int scb_index;
+	struct scb *scb;
+
+	scb_index = ahd_get_scbptr(ahd);
+	scb = ahd_lookup_scb(ahd, scb_index);
+	/*
+	 * XXX Actually check data direction in the sequencer?
+	 * Perhaps add datadir to some spare bits in the hscb?
+	 */
+	if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0
+	 || ahd_get_transfer_dir(scb) != CAM_DIR_IN) {
+		/*
+		 * Ignore the message if we haven't
+		 * seen an appropriate data phase yet.
+		 */
+	} else {
+		/*
+		 * If the residual occurred on the last
+		 * transfer and the transfer request was
+		 * expected to end on an odd count, do
+		 * nothing.  Otherwise, subtract a byte
+		 * and update the residual count accordingly.
+		 */
+		uint32_t sgptr;
+
+		sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR);
+		if ((sgptr & SG_LIST_NULL) != 0
+		 && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
+		     & SCB_XFERLEN_ODD) != 0) {
+			/*
+			 * If the residual occurred on the last
+			 * transfer and the transfer request was
+			 * expected to end on an odd count, do
+			 * nothing.
+			 */
+		} else {
+			uint32_t data_cnt;
+			uint64_t data_addr;
+			uint32_t sglen;
+
+			/* Pull in the rest of the sgptr */
+			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
+			data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT);
+			if ((sgptr & SG_LIST_NULL) != 0) {
+				/*
+				 * The residual data count is not updated
+				 * for the command run to completion case.
+				 * Explicitly zero the count.
+				 */
+				data_cnt &= ~AHD_SG_LEN_MASK;
+			}
+			data_addr = ahd_inq(ahd, SHADDR);
+			data_cnt += 1;
+			data_addr -= 1;
+			sgptr &= SG_PTR_MASK;
+			if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+				struct ahd_dma64_seg *sg;
+
+				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+
+				/*
+				 * The residual sg ptr points to the next S/G
+				 * to load so we must go back one.
+				 */
+				sg--;
+				sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
+				if (sg != scb->sg_list
+				 && sglen < (data_cnt & AHD_SG_LEN_MASK)) {
+
+					sg--;
+					sglen = ahd_le32toh(sg->len);
+					/*
+					 * Preserve High Address and SG_LIST
+					 * bits while setting the count to 1.
+					 */
+					data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));
+					data_addr = ahd_le64toh(sg->addr)
+						  + (sglen & AHD_SG_LEN_MASK)
+						  - 1;
+
+					/*
+					 * Increment sg so it points to the
+					 * "next" sg.
+					 */
+					sg++;
+					sgptr = ahd_sg_virt_to_bus(ahd, scb,
+								   sg);
+				}
+			} else {
+				struct ahd_dma_seg *sg;
+
+				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+
+				/*
+				 * The residual sg ptr points to the next S/G
+				 * to load so we must go back one.
+				 */
+				sg--;
+				sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
+				if (sg != scb->sg_list
+				 && sglen < (data_cnt & AHD_SG_LEN_MASK)) {
+
+					sg--;
+					sglen = ahd_le32toh(sg->len);
+					/*
+					 * Preserve High Address and SG_LIST
+					 * bits while setting the count to 1.
+					 */
+					data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));
+					data_addr = ahd_le32toh(sg->addr)
+						  + (sglen & AHD_SG_LEN_MASK)
+						  - 1;
+
+					/*
+					 * Increment sg so it points to the
+					 * "next" sg.
+					 */
+					sg++;
+					sgptr = ahd_sg_virt_to_bus(ahd, scb,
+								  sg);
+				}
+			}
+			/*
+			 * Toggle the "oddness" of the transfer length
+			 * to handle this mid-transfer ignore wide
+			 * residue.  This ensures that the oddness is
+			 * correct for subsequent data transfers.
+			 */
+			ahd_outb(ahd, SCB_TASK_ATTRIBUTE,
+			    ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
+			    ^ SCB_XFERLEN_ODD);
+
+			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
+			ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt);
+			/*
+			 * The FIFO's pointers will be updated if/when the
+			 * sequencer re-enters a data phase.
+			 */
+		}
+	}
+}
+
+
+/*
+ * Reinitialize the data pointers for the active transfer
+ * based on its current residual.
+ */
+static void
+ahd_reinitialize_dataptrs(struct ahd_softc *ahd)
+{
+	struct		 scb *scb;
+	ahd_mode_state	 saved_modes;
+	u_int		 scb_index;
+	u_int		 wait;
+	uint32_t	 sgptr;
+	uint32_t	 resid;
+	uint64_t	 dataptr;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK,
+			 AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK);
+			 
+	scb_index = ahd_get_scbptr(ahd);
+	scb = ahd_lookup_scb(ahd, scb_index);
+
+	/*
+	 * Release and reacquire the FIFO so we
+	 * have a clean slate.
+	 */
+	ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
+	wait = 1000;
+	while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE))
+		ahd_delay(100);
+	if (wait == 0) {
+		ahd_print_path(ahd, scb);
+		printf("ahd_reinitialize_dataptrs: Forcing FIFO free.\n");
+		ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
+	}
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	ahd_outb(ahd, DFFSTAT,
+		 ahd_inb(ahd, DFFSTAT)
+		| (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0));
+
+	/*
+	 * Determine initial values for data_addr and data_cnt
+	 * for resuming the data phase.
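+	 *
+	 * The restart point is derived from the current S/G element:
+	 * dataptr = sg->addr + (sg->len & AHD_SG_LEN_MASK) - resid.
+	 * For example, if the element maps 0x1000 bytes at bus address A
+	 * and 0x400 bytes remain, the DMA resumes at A + 0xc00 with a
+	 * count of 0x400.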
+	 */
+	sgptr = (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 3) << 24)
+	      | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 2) << 16)
+	      | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 1) << 8)
+	      |	ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR);
+	sgptr &= SG_PTR_MASK;
+
+	resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16)
+	      | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8)
+	      | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT);
+
+	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+		struct ahd_dma64_seg *sg;
+
+		sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+
+		/* The residual sg_ptr always points to the next sg */
+		sg--;
+
+		dataptr = ahd_le64toh(sg->addr)
+			+ (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK)
+			- resid;
+		ahd_outb(ahd, HADDR + 7, dataptr >> 56);
+		ahd_outb(ahd, HADDR + 6, dataptr >> 48);
+		ahd_outb(ahd, HADDR + 5, dataptr >> 40);
+		ahd_outb(ahd, HADDR + 4, dataptr >> 32);
+	} else {
+		struct	 ahd_dma_seg *sg;
+
+		sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
+
+		/* The residual sg_ptr always points to the next sg */
+		sg--;
+
+		dataptr = ahd_le32toh(sg->addr)
+			+ (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK)
+			- resid;
+		ahd_outb(ahd, HADDR + 4,
+			 (ahd_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24);
+	}
+	ahd_outb(ahd, HADDR + 3, dataptr >> 24);
+	ahd_outb(ahd, HADDR + 2, dataptr >> 16);
+	ahd_outb(ahd, HADDR + 1, dataptr >> 8);
+	ahd_outb(ahd, HADDR, dataptr);
+	ahd_outb(ahd, HCNT + 2, resid >> 16);
+	ahd_outb(ahd, HCNT + 1, resid >> 8);
+	ahd_outb(ahd, HCNT, resid);
+}
+
+/*
+ * Handle the effects of issuing a bus device reset message.
+ */
+static void
+ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+		    u_int lun, cam_status status, char *message,
+		    int verbose_level)
+{
+#ifdef AHD_TARGET_MODE
+	struct ahd_tmode_tstate* tstate;
+#endif
+	int found;
+
+	found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
+			       lun, SCB_LIST_NULL, devinfo->role,
+			       status);
+
+#ifdef AHD_TARGET_MODE
+	/*
+	 * Send an immediate notify ccb to all target mode peripheral
+	 * drivers affected by this action.
+	 */
+	tstate = ahd->enabled_targets[devinfo->our_scsiid];
+	if (tstate != NULL) {
+		u_int cur_lun;
+		u_int max_lun;
+
+		if (lun != CAM_LUN_WILDCARD) {
+			cur_lun = lun;
+			max_lun = lun;
+		} else {
+			cur_lun = 0;
+			max_lun = AHD_NUM_LUNS - 1;
+		}
+		for (;cur_lun <= max_lun; cur_lun++) {
+			struct ahd_tmode_lstate* lstate;
+
+			lstate = tstate->enabled_luns[cur_lun];
+			if (lstate == NULL)
+				continue;
+
+			ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid,
+					       MSG_BUS_DEV_RESET, /*arg*/0);
+			ahd_send_lstate_events(ahd, lstate);
+		}
+	}
+#endif
+
+	/*
+	 * Go back to async/narrow transfers and renegotiate.
+	 */
+	ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+		      AHD_TRANS_CUR, /*paused*/TRUE);
+	ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0,
+			 /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE);
+	
+	ahd_send_async(ahd, devinfo->channel, devinfo->target,
+		       lun, AC_SENT_BDR, NULL);
+
+	if (message != NULL
+	 && (verbose_level <= bootverbose))
+		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd),
+		       message, devinfo->channel, devinfo->target, found);
+}
+
+#ifdef AHD_TARGET_MODE
+static void
+ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+		       struct scb *scb)
+{
+
+	/*              
+	 * To facilitate adding multiple messages together,
+	 * each routine should increment the index and len
+	 * variables instead of setting them explicitly.
+	 */             
+	ahd->msgout_index = 0;
+	ahd->msgout_len = 0;
+
+	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
+		ahd_build_transfer_msg(ahd, devinfo);
+	else
+		panic("ahd_intr: AWAITING target message with no message");
+
+	ahd->msgout_index = 0;
+	ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
+}
+#endif
+/**************************** Initialization **********************************/
+static u_int
+ahd_sglist_size(struct ahd_softc *ahd)
+{
+	bus_size_t list_size;
+
+	list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG;
+	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
+		list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG;
+	return (list_size);
+}
+
+/*
+ * Calculate the optimum S/G List allocation size.  S/G elements used
+ * for a given transaction must be physically contiguous.  Assume the
+ * OS will allocate full pages to us, so it doesn't make sense to request
+ * less than a page.
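+ *
+ * A hypothetical example of the search below: with 4KB pages and a 3KB
+ * per-transaction list, a single-page allocation wastes 1KB of each
+ * page, but a 12KB (three page) allocation holds four lists with no
+ * waste, so the loop settles on 12KB.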
+ */
+static u_int
+ahd_sglist_allocsize(struct ahd_softc *ahd)
+{
+	bus_size_t sg_list_increment;
+	bus_size_t sg_list_size;
+	bus_size_t max_list_size;
+	bus_size_t best_list_size;
+
+	/* Start out with the minimum required for AHD_NSEG. */
+	sg_list_increment = ahd_sglist_size(ahd);
+	sg_list_size = sg_list_increment;
+
+	/* Get us as close as possible to a page in size. */
+	while ((sg_list_size + sg_list_increment) <= PAGE_SIZE)
+		sg_list_size += sg_list_increment;
+
+	/*
+	 * Try to reduce the amount of wastage by allocating
+	 * multiple pages.
+	 */
+	best_list_size = sg_list_size;
+	max_list_size = roundup(sg_list_increment, PAGE_SIZE);
+	if (max_list_size < 4 * PAGE_SIZE)
+		max_list_size = 4 * PAGE_SIZE;
+	if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment))
+		max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment);
+	while ((sg_list_size + sg_list_increment) <= max_list_size
+	   &&  (sg_list_size % PAGE_SIZE) != 0) {
+		bus_size_t new_mod;
+		bus_size_t best_mod;
+
+		sg_list_size += sg_list_increment;
+		new_mod = sg_list_size % PAGE_SIZE;
+		best_mod = best_list_size % PAGE_SIZE;
+		if (new_mod > best_mod || new_mod == 0) {
+			best_list_size = sg_list_size;
+		}
+	}
+	return (best_list_size);
+}
+
+/*
+ * Allocate a controller structure for a new device
+ * and perform initial initialization.
+ */
+struct ahd_softc *
+ahd_alloc(void *platform_arg, char *name)
+{
+	struct  ahd_softc *ahd;
+
+#ifndef	__FreeBSD__
+	ahd = malloc(sizeof(*ahd), M_DEVBUF, M_NOWAIT);
+	if (!ahd) {
+		printf("aic7xxx: cannot malloc softc!\n");
+		free(name, M_DEVBUF);
+		return NULL;
+	}
+#else
+	ahd = device_get_softc((device_t)platform_arg);
+#endif
+	memset(ahd, 0, sizeof(*ahd));
+	ahd->seep_config = malloc(sizeof(*ahd->seep_config),
+				  M_DEVBUF, M_NOWAIT);
+	if (ahd->seep_config == NULL) {
+#ifndef	__FreeBSD__
+		free(ahd, M_DEVBUF);
+#endif
+		free(name, M_DEVBUF);
+		return (NULL);
+	}
+	LIST_INIT(&ahd->pending_scbs);
+	/* We don't know our unit number until the OSM sets it */
+	ahd->name = name;
+	ahd->unit = -1;
+	ahd->description = NULL;
+	ahd->bus_description = NULL;
+	ahd->channel = 'A';
+	ahd->chip = AHD_NONE;
+	ahd->features = AHD_FENONE;
+	ahd->bugs = AHD_BUGNONE;
+	ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A
+		   | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A;
+	ahd_timer_init(&ahd->reset_timer);
+	ahd_timer_init(&ahd->stat_timer);
+	ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT;
+	ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT;
+	ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT;
+	ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT;
+	ahd->int_coalescing_stop_threshold =
+	    AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT;
+
+	if (ahd_platform_alloc(ahd, platform_arg) != 0) {
+		ahd_free(ahd);
+		ahd = NULL;
+	}
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_MEMORY) != 0) {
+		printf("%s: scb size = 0x%x, hscb size = 0x%x\n",
+		       ahd_name(ahd), (u_int)sizeof(struct scb),
+		       (u_int)sizeof(struct hardware_scb));
+	}
+#endif
+	return (ahd);
+}
+
+int
+ahd_softc_init(struct ahd_softc *ahd)
+{
+
+	ahd->unpause = 0;
+	ahd->pause = PAUSE; 
+	return (0);
+}
+
+void
+ahd_softc_insert(struct ahd_softc *ahd)
+{
+	struct ahd_softc *list_ahd;
+
+#if AHD_PCI_CONFIG > 0
+	/*
+	 * Second Function PCI devices need to inherit some
+	 * settings from function 0.
+	 */
+	if ((ahd->features & AHD_MULTI_FUNC) != 0) {
+		TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
+			ahd_dev_softc_t list_pci;
+			ahd_dev_softc_t pci;
+
+			list_pci = list_ahd->dev_softc;
+			pci = ahd->dev_softc;
+			if (ahd_get_pci_slot(list_pci) == ahd_get_pci_slot(pci)
+			 && ahd_get_pci_bus(list_pci) == ahd_get_pci_bus(pci)) {
+				struct ahd_softc *master;
+				struct ahd_softc *slave;
+
+				if (ahd_get_pci_function(list_pci) == 0) {
+					master = list_ahd;
+					slave = ahd;
+				} else {
+					master = ahd;
+					slave = list_ahd;
+				}
+				slave->flags &= ~AHD_BIOS_ENABLED; 
+				slave->flags |=
+				    master->flags & AHD_BIOS_ENABLED;
+				break;
+			}
+		}
+	}
+#endif
+
+	/*
+	 * Insertion sort into our list of softcs.
+	 */
+	list_ahd = TAILQ_FIRST(&ahd_tailq);
+	while (list_ahd != NULL
+	    && ahd_softc_comp(ahd, list_ahd) <= 0)
+		list_ahd = TAILQ_NEXT(list_ahd, links);
+	if (list_ahd != NULL)
+		TAILQ_INSERT_BEFORE(list_ahd, ahd, links);
+	else
+		TAILQ_INSERT_TAIL(&ahd_tailq, ahd, links);
+	ahd->init_level++;
+}
+
+/*
+ * Verify that the passed in softc pointer is for a
+ * controller that is still configured.
+ */
+struct ahd_softc *
+ahd_find_softc(struct ahd_softc *ahd)
+{
+	struct ahd_softc *list_ahd;
+
+	TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
+		if (list_ahd == ahd)
+			return (ahd);
+	}
+	return (NULL);
+}
+
+void
+ahd_set_unit(struct ahd_softc *ahd, int unit)
+{
+	ahd->unit = unit;
+}
+
+void
+ahd_set_name(struct ahd_softc *ahd, char *name)
+{
+	if (ahd->name != NULL)
+		free(ahd->name, M_DEVBUF);
+	ahd->name = name;
+}
+
+void
+ahd_free(struct ahd_softc *ahd)
+{
+	int i;
+
+	switch (ahd->init_level) {
+	default:
+	case 5:
+		ahd_shutdown(ahd);
+		/* FALLTHROUGH */
+	case 4:
+		ahd_dmamap_unload(ahd, ahd->shared_data_dmat,
+				  ahd->shared_data_dmamap);
+		/* FALLTHROUGH */
+	case 3:
+		ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo,
+				ahd->shared_data_dmamap);
+		ahd_dmamap_destroy(ahd, ahd->shared_data_dmat,
+				   ahd->shared_data_dmamap);
+		/* FALLTHROUGH */
+	case 2:
+		ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
+	case 1:
+#ifndef __linux__
+		ahd_dma_tag_destroy(ahd, ahd->buffer_dmat);
+#endif
+		break;
+	case 0:
+		break;
+	}
+
+#ifndef __linux__
+	ahd_dma_tag_destroy(ahd, ahd->parent_dmat);
+#endif
+	ahd_platform_free(ahd);
+	ahd_fini_scbdata(ahd);
+	for (i = 0; i < AHD_NUM_TARGETS; i++) {
+		struct ahd_tmode_tstate *tstate;
+
+		tstate = ahd->enabled_targets[i];
+		if (tstate != NULL) {
+#ifdef AHD_TARGET_MODE
+			int j;
+
+			for (j = 0; j < AHD_NUM_LUNS; j++) {
+				struct ahd_tmode_lstate *lstate;
+
+				lstate = tstate->enabled_luns[j];
+				if (lstate != NULL) {
+					xpt_free_path(lstate->path);
+					free(lstate, M_DEVBUF);
+				}
+			}
+#endif
+			free(tstate, M_DEVBUF);
+		}
+	}
+#ifdef AHD_TARGET_MODE
+	if (ahd->black_hole != NULL) {
+		xpt_free_path(ahd->black_hole->path);
+		free(ahd->black_hole, M_DEVBUF);
+	}
+#endif
+	if (ahd->name != NULL)
+		free(ahd->name, M_DEVBUF);
+	if (ahd->seep_config != NULL)
+		free(ahd->seep_config, M_DEVBUF);
+	if (ahd->saved_stack != NULL)
+		free(ahd->saved_stack, M_DEVBUF);
+#ifndef __FreeBSD__
+	free(ahd, M_DEVBUF);
+#endif
+	return;
+}
+
+void
+ahd_shutdown(void *arg)
+{
+	struct	ahd_softc *ahd;
+
+	ahd = (struct ahd_softc *)arg;
+
+	/*
+	 * Stop periodic timer callbacks.
+	 */
+	ahd_timer_stop(&ahd->reset_timer);
+	ahd_timer_stop(&ahd->stat_timer);
+
+	/* This will reset most registers to 0, but not all */
+	ahd_reset(ahd, /*reinit*/FALSE);
+}
+
+/*
+ * Reset the controller and record some information about it
+ * that is only available just after a reset.  If "reinit" is
+ * non-zero, this reset occurred after initial configuration
+ * and the caller requests that the chip be fully reinitialized
+ * to a runnable state.  Chip interrupts are *not* enabled after
+ * a reinitialization.  The caller must enable interrupts via
+ * ahd_intr_enable().
+ */
+int
+ahd_reset(struct ahd_softc *ahd, int reinit)
+{
+	u_int	 sxfrctl1;
+	int	 wait;
+	uint32_t cmd;
+	
+	/*
+	 * Preserve the value of the SXFRCTL1 register for all channels.
+	 * It contains settings that affect termination and we don't want
+	 * to disturb the integrity of the bus.
+	 */
+	ahd_pause(ahd);
+	ahd_update_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	sxfrctl1 = ahd_inb(ahd, SXFRCTL1);
+
+	cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
+	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
+		uint32_t mod_cmd;
+
+		/*
+		 * A4 Razor #632
+		 * During the assertion of CHIPRST, the chip
+		 * does not disable its parity logic prior to
+		 * the start of the reset.  This may cause a
+		 * parity error to be detected and thus a
+		 * spurious SERR or PERR assertion.  Disable
+		 * PERR and SERR responses during the CHIPRST.
+		 */
+		mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
+		ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
+				     mod_cmd, /*bytes*/2);
+	}
+	ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause);
+
+	/*
+	 * Ensure that the reset has finished.  We delay 1000us
+	 * prior to reading the register to make sure the chip
+	 * has sufficiently completed its reset to handle register
+	 * accesses.
+	 */
+	wait = 1000;
+	do {
+		ahd_delay(1000);
+	} while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK));
+
+	if (wait == 0) {
+		printf("%s: WARNING - Failed chip reset!  "
+		       "Trying to initialize anyway.\n", ahd_name(ahd));
+	}
+	ahd_outb(ahd, HCNTRL, ahd->pause);
+
+	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
+		/*
+		 * Clear any latched PCI error status and restore
+		 * previous SERR and PERR response enables.
+		 */
+		ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
+				     0xFF, /*bytes*/1);
+		ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
+				     cmd, /*bytes*/2);
+	}
+
+	/*
+	 * Mode should be SCSI after a chip reset, but let's
+	 * set it just to be safe.  We touch the MODE_PTR
+	 * register directly so as to bypass the lazy update
+	 * code in ahd_set_modes().
+	 */
+	ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	ahd_outb(ahd, MODE_PTR,
+		 ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI));
+
+	/*
+	 * Restore SXFRCTL1.
+	 *
+	 * We must always initialize STPWEN to 1 before we
+	 * restore the saved values.  STPWEN is initialized
+	 * to a tri-state condition which can only be cleared
+	 * by turning it on.
+	 */
+	ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN);
+	ahd_outb(ahd, SXFRCTL1, sxfrctl1);
+
+	/* Determine chip configuration */
+	ahd->features &= ~AHD_WIDE;
+	if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0)
+		ahd->features |= AHD_WIDE;
+
+	/*
+	 * If a recovery action has forced a chip reset,
+	 * re-initialize the chip to our liking.
+	 */
+	if (reinit != 0)
+		ahd_chip_init(ahd);
+
+	return (0);
+}
+
+/*
+ * Determine the number of SCBs available on the controller
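+ *
+ * Each candidate SCB is probed by writing its index into SCB_BASE and
+ * reading it back.  Re-checking SCB 0 afterwards catches the point at
+ * which SCB storage wraps and the write aliases onto SCB 0, marking
+ * the end of usable SCB RAM.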
+ */
+int
+ahd_probe_scbs(struct ahd_softc *ahd)
+{
+	int i;
+
+	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+	for (i = 0; i < AHD_SCB_MAX; i++) {
+		int j;
+
+		ahd_set_scbptr(ahd, i);
+		ahd_outw(ahd, SCB_BASE, i);
+		for (j = 2; j < 64; j++)
+			ahd_outb(ahd, SCB_BASE+j, 0);
+		/* Start out life as unallocated (needing an abort) */
+		ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE);
+		if (ahd_inw_scbram(ahd, SCB_BASE) != i)
+			break;
+		ahd_set_scbptr(ahd, 0);
+		if (ahd_inw_scbram(ahd, SCB_BASE) != 0)
+			break;
+	}
+	return (i);
+}
+
+static void
+ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 
+{
+	dma_addr_t *baddr;
+
+	baddr = (dma_addr_t *)arg;
+	*baddr = segs->ds_addr;
+}
+
+static void
+ahd_initialize_hscbs(struct ahd_softc *ahd)
+{
+	int i;
+
+	for (i = 0; i < ahd->scb_data.maxhscbs; i++) {
+		ahd_set_scbptr(ahd, i);
+
+		/* Clear the control byte. */
+		ahd_outb(ahd, SCB_CONTROL, 0);
+
+		/* Set the next pointer */
+		ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL);
+	}
+}
+
+static int
+ahd_init_scbdata(struct ahd_softc *ahd)
+{
+	struct	scb_data *scb_data;
+	int	i;
+
+	scb_data = &ahd->scb_data;
+	TAILQ_INIT(&scb_data->free_scbs);
+	for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++)
+		LIST_INIT(&scb_data->free_scb_lists[i]);
+	LIST_INIT(&scb_data->any_dev_free_scb_list);
+	SLIST_INIT(&scb_data->hscb_maps);
+	SLIST_INIT(&scb_data->sg_maps);
+	SLIST_INIT(&scb_data->sense_maps);
+
+	/* Determine the number of hardware SCBs and initialize them */
+	scb_data->maxhscbs = ahd_probe_scbs(ahd);
+	if (scb_data->maxhscbs == 0) {
+		printf("%s: No SCB space found\n", ahd_name(ahd));
+		return (ENXIO);
+	}
+
+	ahd_initialize_hscbs(ahd);
+
+	/*
+	 * Create our DMA tags.  These tags define the kinds of device
+	 * accessible memory allocations and memory mappings we will
+	 * need to perform during normal operation.
+	 *
+	 * Unless we need to further restrict the allocation, we rely
+	 * on the restrictions of the parent dmat, hence the common
+	 * use of MAXADDR and MAXSIZE.
+	 */
+
+	/* DMA tag for our hardware scb structures */
+	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       PAGE_SIZE, /*nsegments*/1,
+			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
+		goto error_exit;
+	}
+
+	scb_data->init_level++;
+
+	/* DMA tag for our S/G structures. */
+	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       ahd_sglist_allocsize(ahd), /*nsegments*/1,
+			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+			       /*flags*/0, &scb_data->sg_dmat) != 0) {
+		goto error_exit;
+	}
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_MEMORY) != 0)
+		printf("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd),
+		       ahd_sglist_allocsize(ahd));
+#endif
+
+	scb_data->init_level++;
+
+	/* DMA tag for our sense buffers.  We allocate in page sized chunks */
+	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       PAGE_SIZE, /*nsegments*/1,
+			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+			       /*flags*/0, &scb_data->sense_dmat) != 0) {
+		goto error_exit;
+	}
+
+	scb_data->init_level++;
+
+	/* Perform initial CCB allocation */
+	ahd_alloc_scbs(ahd);
+
+	if (scb_data->numscbs == 0) {
+		printf("%s: ahd_init_scbdata - "
+		       "Unable to allocate initial scbs\n",
+		       ahd_name(ahd));
+		goto error_exit;
+	}
+
+	/*
+	 * Note that we were successful
+	 */
+	return (0); 
+
+error_exit:
+
+	return (ENOMEM);
+}
+
+static struct scb *
+ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag)
+{
+	struct scb *scb;
+
+	/*
+	 * Look on the pending list.
+	 */
+	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
+		if (SCB_GET_TAG(scb) == tag)
+			return (scb);
+	}
+
+	/*
+	 * Then on all of the collision free lists.
+	 */
+	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
+		struct scb *list_scb;
+
+		list_scb = scb;
+		do {
+			if (SCB_GET_TAG(list_scb) == tag)
+				return (list_scb);
+			list_scb = LIST_NEXT(list_scb, collision_links);
+		} while (list_scb);
+	}
+
+	/*
+	 * And finally on the generic free list.
+	 */
+	LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
+		if (SCB_GET_TAG(scb) == tag)
+			return (scb);
+	}
+
+	return (NULL);
+}
+
+static void
+ahd_fini_scbdata(struct ahd_softc *ahd)
+{
+	struct scb_data *scb_data;
+
+	scb_data = &ahd->scb_data;
+	if (scb_data == NULL)
+		return;
+
+	switch (scb_data->init_level) {
+	default:
+	case 7:
+	{
+		struct map_node *sns_map;
+
+		while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) {
+			SLIST_REMOVE_HEAD(&scb_data->sense_maps, links);
+			ahd_dmamap_unload(ahd, scb_data->sense_dmat,
+					  sns_map->dmamap);
+			ahd_dmamem_free(ahd, scb_data->sense_dmat,
+					sns_map->vaddr, sns_map->dmamap);
+			free(sns_map, M_DEVBUF);
+		}
+		ahd_dma_tag_destroy(ahd, scb_data->sense_dmat);
+		/* FALLTHROUGH */
+	}
+	case 6:
+	{
+		struct map_node *sg_map;
+
+		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) {
+			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
+			ahd_dmamap_unload(ahd, scb_data->sg_dmat,
+					  sg_map->dmamap);
+			ahd_dmamem_free(ahd, scb_data->sg_dmat,
+					sg_map->vaddr, sg_map->dmamap);
+			free(sg_map, M_DEVBUF);
+		}
+		ahd_dma_tag_destroy(ahd, scb_data->sg_dmat);
+		/* FALLTHROUGH */
+	}
+	case 5:
+	{
+		struct map_node *hscb_map;
+
+		while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) {
+			SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links);
+			ahd_dmamap_unload(ahd, scb_data->hscb_dmat,
+					  hscb_map->dmamap);
+			ahd_dmamem_free(ahd, scb_data->hscb_dmat,
+					hscb_map->vaddr, hscb_map->dmamap);
+			free(hscb_map, M_DEVBUF);
+		}
+		ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat);
+		/* FALLTHROUGH */
+	}
+	case 4:
+	case 3:
+	case 2:
+	case 1:
+	case 0:
+		break;
+	}
+}
+
+/*
+ * DSP filter Bypass must be enabled until the first selection
+ * after a change in bus mode (Razor #491 and #493).
+ */
+static void
+ahd_setup_iocell_workaround(struct ahd_softc *ahd)
+{
+	ahd_mode_state saved_modes;
+
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+	ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL)
+	       | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS);
+	ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI));
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_MISC) != 0)
+		printf("%s: Setting up iocell workaround\n", ahd_name(ahd));
+#endif
+	ahd_restore_modes(ahd, saved_modes);
+	ahd->flags &= ~AHD_HAD_FIRST_SEL;
+}
+
+static void
+ahd_iocell_first_selection(struct ahd_softc *ahd)
+{
+	ahd_mode_state	saved_modes;
+	u_int		sblkctl;
+
+	if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0)
+		return;
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	sblkctl = ahd_inb(ahd, SBLKCTL);
+	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_MISC) != 0)
+		printf("%s: iocell first selection\n", ahd_name(ahd));
+#endif
+	if ((sblkctl & ENAB40) != 0) {
+		ahd_outb(ahd, DSPDATACTL,
+			 ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB);
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_MISC) != 0)
+			printf("%s: BYPASS now disabled\n", ahd_name(ahd));
+#endif
+	}
+	ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI));
+	ahd_outb(ahd, CLRINT, CLRSCSIINT);
+	ahd_restore_modes(ahd, saved_modes);
+	ahd->flags |= AHD_HAD_FIRST_SEL;
+}
+
+/*************************** SCB Management ***********************************/
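+/*
+ * In outline: free SCBs whose paired ("colliding") SCB is still active
+ * in a non-packetized, tagged context are kept on per-column collision
+ * lists; the free_scbs tailq holds the head of each non-empty collision
+ * list so the allocator can examine one candidate per column.  SCBs
+ * with no collision concerns sit on any_dev_free_scb_list.
+ */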
+static void
+ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx)
+{
+	struct	scb_list *free_list;
+	struct	scb_tailq *free_tailq;
+	struct	scb *first_scb;
+
+	scb->flags |= SCB_ON_COL_LIST;
+	AHD_SET_SCB_COL_IDX(scb, col_idx);
+	free_list = &ahd->scb_data.free_scb_lists[col_idx];
+	free_tailq = &ahd->scb_data.free_scbs;
+	first_scb = LIST_FIRST(free_list);
+	if (first_scb != NULL) {
+		LIST_INSERT_AFTER(first_scb, scb, collision_links);
+	} else {
+		LIST_INSERT_HEAD(free_list, scb, collision_links);
+		TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe);
+	}
+}
+
+static void
+ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb)
+{
+	struct	scb_list *free_list;
+	struct	scb_tailq *free_tailq;
+	struct	scb *first_scb;
+	u_int	col_idx;
+
+	scb->flags &= ~SCB_ON_COL_LIST;
+	col_idx = AHD_GET_SCB_COL_IDX(ahd, scb);
+	free_list = &ahd->scb_data.free_scb_lists[col_idx];
+	free_tailq = &ahd->scb_data.free_scbs;
+	first_scb = LIST_FIRST(free_list);
+	if (first_scb == scb) {
+		struct scb *next_scb;
+
+		/*
+		 * Maintain order in the collision free
+		 * lists for fairness if this device has
+		 * other colliding tags active.
+		 */
+		next_scb = LIST_NEXT(scb, collision_links);
+		if (next_scb != NULL) {
+			TAILQ_INSERT_AFTER(free_tailq, scb,
+					   next_scb, links.tqe);
+		}
+		TAILQ_REMOVE(free_tailq, scb, links.tqe);
+	}
+	LIST_REMOVE(scb, collision_links);
+}
+
+/*
+ * Get a free scb. If there are none, see if we can allocate a new SCB.
+ */
+struct scb *
+ahd_get_scb(struct ahd_softc *ahd, u_int col_idx)
+{
+	struct scb *scb;
+	int tries;
+
+	tries = 0;
+look_again:
+	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
+		if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) {
+			ahd_rem_col_list(ahd, scb);
+			goto found;
+		}
+	}
+	if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) {
+
+		if (tries++ != 0)
+			return (NULL);
+		ahd_alloc_scbs(ahd);
+		goto look_again;
+	}
+	LIST_REMOVE(scb, links.le);
+	if (col_idx != AHD_NEVER_COL_IDX
+	 && (scb->col_scb != NULL)
+	 && (scb->col_scb->flags & SCB_ACTIVE) == 0) {
+		LIST_REMOVE(scb->col_scb, links.le);
+		ahd_add_col_list(ahd, scb->col_scb, col_idx);
+	}
+found:
+	scb->flags |= SCB_ACTIVE;
+	return (scb);
+}
+
+/*
+ * Return an SCB resource to the free list.
+ */
+void
+ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
+{       
+
+	/* Clean up for the next user */
+	scb->flags = SCB_FLAG_NONE;
+	scb->hscb->control = 0;
+	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL;
+
+	if (scb->col_scb == NULL) {
+
+		/*
+		 * No collision possible.  Just free normally.
+		 */
+		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
+				 scb, links.le);
+	} else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) {
+
+		/*
+		 * The SCB we might have collided with is on
+		 * a free collision list.  Put both SCBs on
+		 * the generic list.
+		 */
+		ahd_rem_col_list(ahd, scb->col_scb);
+		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
+				 scb, links.le);
+		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
+				 scb->col_scb, links.le);
+	} else if ((scb->col_scb->flags
+		  & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE
+		&& (scb->col_scb->hscb->control & TAG_ENB) != 0) {
+
+		/*
+		 * The SCB we might collide with on the next allocation
+		 * is still active in a non-packetized, tagged, context.
+		 * Put us on the SCB collision list.
+		 */
+		ahd_add_col_list(ahd, scb,
+				 AHD_GET_SCB_COL_IDX(ahd, scb->col_scb));
+	} else {
+		/*
+		 * The SCB we might collide with on the next allocation
+		 * is either active in a packetized context, or free.
+		 * Since we can't collide, put this SCB on the generic
+		 * free list.
+		 */
+		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
+				 scb, links.le);
+	}
+
+	ahd_platform_scb_free(ahd, scb);
+}
+
+void
+ahd_alloc_scbs(struct ahd_softc *ahd)
+{
+	struct scb_data *scb_data;
+	struct scb	*next_scb;
+	struct hardware_scb *hscb;
+	struct map_node *hscb_map;
+	struct map_node *sg_map;
+	struct map_node *sense_map;
+	uint8_t		*segs;
+	uint8_t		*sense_data;
+	dma_addr_t	 hscb_busaddr;
+	dma_addr_t	 sg_busaddr;
+	dma_addr_t	 sense_busaddr;
+	int		 newcount;
+	int		 i;
+
+	scb_data = &ahd->scb_data;
+	if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC)
+		/* Can't allocate any more */
+		return;
+
+	if (scb_data->scbs_left != 0) {
+		int offset;
+
+		offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left;
+		hscb_map = SLIST_FIRST(&scb_data->hscb_maps);
+		hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset];
+		hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb));
+	} else {
+		hscb_map = malloc(sizeof(*hscb_map), M_DEVBUF, M_NOWAIT);
+
+		if (hscb_map == NULL)
+			return;
+
+		/* Allocate the next batch of hardware SCBs */
+		if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat,
+				     (void **)&hscb_map->vaddr,
+				     BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) {
+			free(hscb_map, M_DEVBUF);
+			return;
+		}
+
+		SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links);
+
+		ahd_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap,
+				hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
+				&hscb_map->physaddr, /*flags*/0);
+
+		hscb = (struct hardware_scb *)hscb_map->vaddr;
+		hscb_busaddr = hscb_map->physaddr;
+		scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb);
+	}
+
+	if (scb_data->sgs_left != 0) {
+		int offset;
+
+		offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd))
+		       - scb_data->sgs_left) * ahd_sglist_size(ahd);
+		sg_map = SLIST_FIRST(&scb_data->sg_maps);
+		segs = sg_map->vaddr + offset;
+		sg_busaddr = sg_map->physaddr + offset;
+	} else {
+		sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
+
+		if (sg_map == NULL)
+			return;
+
+		/* Allocate the next batch of S/G lists */
+		if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat,
+				     (void **)&sg_map->vaddr,
+				     BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) {
+			free(sg_map, M_DEVBUF);
+			return;
+		}
+
+		SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
+
+		ahd_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap,
+				sg_map->vaddr, ahd_sglist_allocsize(ahd),
+				ahd_dmamap_cb, &sg_map->physaddr, /*flags*/0);
+
+		segs = sg_map->vaddr;
+		sg_busaddr = sg_map->physaddr;
+		scb_data->sgs_left =
+		    ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd);
+#ifdef AHD_DEBUG
+		if (ahd_debug & AHD_SHOW_MEMORY)
+			printf("Mapped SG data\n");
+#endif
+	}
+
+	if (scb_data->sense_left != 0) {
+		int offset;
+
+		offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left);
+		sense_map = SLIST_FIRST(&scb_data->sense_maps);
+		sense_data = sense_map->vaddr + offset;
+		sense_busaddr = sense_map->physaddr + offset;
+	} else {
+		sense_map = malloc(sizeof(*sense_map), M_DEVBUF, M_NOWAIT);
+
+		if (sense_map == NULL)
+			return;
+
+		/* Allocate the next batch of sense buffers */
+		if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat,
+				     (void **)&sense_map->vaddr,
+				     BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) {
+			free(sense_map, M_DEVBUF);
+			return;
+		}
+
+		SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links);
+
+		ahd_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap,
+				sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
+				&sense_map->physaddr, /*flags*/0);
+
+		sense_data = sense_map->vaddr;
+		sense_busaddr = sense_map->physaddr;
+		scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE;
+#ifdef AHD_DEBUG
+		if (ahd_debug & AHD_SHOW_MEMORY)
+			printf("Mapped sense data\n");
+#endif
+	}
+
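+	/*
+	 * Create only as many new SCBs as the scarcest of the three
+	 * resources (hardware SCBs, S/G lists, sense buffers) allows,
+	 * capped at AHD_SCB_MAX_ALLOC total.
+	 */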
+	newcount = MIN(scb_data->sense_left, scb_data->scbs_left);
+	newcount = MIN(newcount, scb_data->sgs_left);
+	newcount = MIN(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs));
+	scb_data->sense_left -= newcount;
+	scb_data->scbs_left -= newcount;
+	scb_data->sgs_left -= newcount;
+	for (i = 0; i < newcount; i++) {
+		u_int col_tag;
+
+		struct scb_platform_data *pdata;
+#ifndef __linux__
+		int error;
+#endif
+		next_scb = (struct scb *)malloc(sizeof(*next_scb),
+						M_DEVBUF, M_NOWAIT);
+		if (next_scb == NULL)
+			break;
+
+		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
+							   M_DEVBUF, M_NOWAIT);
+		if (pdata == NULL) {
+			free(next_scb, M_DEVBUF);
+			break;
+		}
+		next_scb->platform_data = pdata;
+		next_scb->hscb_map = hscb_map;
+		next_scb->sg_map = sg_map;
+		next_scb->sense_map = sense_map;
+		next_scb->sg_list = segs;
+		next_scb->sense_data = sense_data;
+		next_scb->sense_busaddr = sense_busaddr;
+		memset(hscb, 0, sizeof(*hscb));
+		next_scb->hscb = hscb;
+		hscb->hscb_busaddr = ahd_htole32(hscb_busaddr);
+
+		/*
+		 * The sequencer always starts with the second entry.
+		 * The first entry is embedded in the scb.
+		 */
+		next_scb->sg_list_busaddr = sg_busaddr;
+		if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
+			next_scb->sg_list_busaddr
+			    += sizeof(struct ahd_dma64_seg);
+		else
+			next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
+		next_scb->ahd_softc = ahd;
+		next_scb->flags = SCB_FLAG_NONE;
+#ifndef __linux__
+		error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
+					  &next_scb->dmamap);
+		if (error != 0) {
+			free(next_scb, M_DEVBUF);
+			free(pdata, M_DEVBUF);
+			break;
+		}
+#endif
+		next_scb->hscb->tag = ahd_htole16(scb_data->numscbs);
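+		/*
+		 * Link this SCB with the SCB whose tag differs only in
+		 * bit 0x100 (its collision partner), if that SCB has
+		 * already been allocated.
+		 */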
+		col_tag = scb_data->numscbs ^ 0x100;
+		next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
+		if (next_scb->col_scb != NULL)
+			next_scb->col_scb->col_scb = next_scb;
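+		/* Release the newly initialized SCB into the free pool. */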
+		ahd_free_scb(ahd, next_scb);
+		hscb++;
+		hscb_busaddr += sizeof(*hscb);
+		segs += ahd_sglist_size(ahd);
+		sg_busaddr += ahd_sglist_size(ahd);
+		sense_data += AHD_SENSE_BUFSIZE;
+		sense_busaddr += AHD_SENSE_BUFSIZE;
+		scb_data->numscbs++;
+	}
+}
+
+void
+ahd_controller_info(struct ahd_softc *ahd, char *buf)
+{
+	const char *speed;
+	const char *type;
+	int len;
+
+	len = sprintf(buf, "%s: ", ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]);
+	buf += len;
+
+	speed = "Ultra320 ";
+	if ((ahd->features & AHD_WIDE) != 0) {
+		type = "Wide ";
+	} else {
+		type = "Single ";
+	}
+	len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ",
+		      speed, type, ahd->channel, ahd->our_id);
+	buf += len;
+
+	sprintf(buf, "%s, %d SCBs", ahd->bus_description,
+		ahd->scb_data.maxhscbs);
+}
+
+static const char *channel_strings[] = {
+	"Primary Low",
+	"Primary High",
+	"Secondary Low", 
+	"Secondary High"
+};
+
+static const char *termstat_strings[] = {
+	"Terminated Correctly",
+	"Over Terminated",
+	"Under Terminated",
+	"Not Configured"
+};
+
+/*
+ * Start the board, ready for normal operation
+ */
+int
+ahd_init(struct ahd_softc *ahd)
+{
+	uint8_t		*base_vaddr;
+	uint8_t		*next_vaddr;
+	dma_addr_t	 next_baddr;
+	size_t		 driver_data_size;
+	int		 i;
+	int		 error;
+	u_int		 warn_user;
+	uint8_t		 current_sensing;
+	uint8_t		 fstat;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+
+	ahd->stack_size = ahd_probe_stack_size(ahd);
+	ahd->saved_stack = malloc(ahd->stack_size * sizeof(uint16_t),
+				  M_DEVBUF, M_NOWAIT);
+	if (ahd->saved_stack == NULL)
+		return (ENOMEM);
+
+	/*
+	 * Verify that the compiler hasn't over-aggressively
+	 * padded important structures.
+	 */
+	if (sizeof(struct hardware_scb) != 64)
+		panic("Hardware SCB size is incorrect");
+
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0)
+		ahd->flags |= AHD_SEQUENCER_DEBUG;
+#endif
+
+	/*
+	 * Default to allowing initiator operations.
+	 */
+	ahd->flags |= AHD_INITIATORROLE;
+
+	/*
+	 * Only allow target mode features if this unit has them enabled.
+	 */
+	if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
+		ahd->features &= ~AHD_TARGETMODE;
+
+#ifndef __linux__
+	/* DMA tag for mapping buffers into device visible space. */
+	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING
+					? (dma_addr_t)0x7FFFFFFFFFULL
+					: BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE,
+			       /*nsegments*/AHD_NSEG,
+			       /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
+			       /*flags*/BUS_DMA_ALLOCNOW,
+			       &ahd->buffer_dmat) != 0) {
+		return (ENOMEM);
+	}
+#endif
+
+	ahd->init_level++;
+
+	/*
+	 * DMA tag for our command fifos and other data in system memory
+	 * the card's sequencer must be able to access.  For initiator
+	 * roles, we need to allocate space for the qoutfifo.  When providing
+	 * for the target mode role, we must additionally provide space for
+	 * the incoming target command fifo.
+	 */
+	driver_data_size = AHD_SCB_MAX * sizeof(uint16_t)
+			 + sizeof(struct hardware_scb);
+	if ((ahd->features & AHD_TARGETMODE) != 0)
+		driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd);
+	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0)
+		driver_data_size += PKT_OVERRUN_BUFSIZE;
+	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       driver_data_size,
+			       /*nsegments*/1,
+			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+			       /*flags*/0, &ahd->shared_data_dmat) != 0) {
+		return (ENOMEM);
+	}
+
+	ahd->init_level++;
+
+	/* Allocation of driver data */
+	if (ahd_dmamem_alloc(ahd, ahd->shared_data_dmat,
+			     (void **)&base_vaddr,
+			     BUS_DMA_NOWAIT, &ahd->shared_data_dmamap) != 0) {
+		return (ENOMEM);
+	}
+
+	ahd->init_level++;
+
+	/* And permanently map it in */
+	ahd_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_dmamap,
+			base_vaddr, driver_data_size, ahd_dmamap_cb,
+			&ahd->shared_data_busaddr, /*flags*/0);
+	ahd->qoutfifo = (uint16_t *)base_vaddr;
+	next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE];
+	next_baddr = ahd->shared_data_busaddr + AHD_QOUT_SIZE*sizeof(uint16_t);
+	if ((ahd->features & AHD_TARGETMODE) != 0) {
+		ahd->targetcmds = (struct target_cmd *)next_vaddr;
+		next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
+		next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
+	}
+
+	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
+		ahd->overrun_buf = next_vaddr;
+		next_vaddr += PKT_OVERRUN_BUFSIZE;
+		next_baddr += PKT_OVERRUN_BUFSIZE;
+	}
+
+	/*
+	 * We need one SCB to serve as the "next SCB".  Since the
+	 * tag identifier in this SCB will never be used, there is
+	 * no point in using a valid HSCB tag from an SCB pulled from
+	 * the standard free pool.  So, we allocate this "sentinel"
+	 * specially from the DMA safe memory chunk used for the QOUTFIFO.
+	 */
+	ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr;
+	ahd->next_queued_hscb->hscb_busaddr = ahd_htole32(next_baddr);
+
+	ahd->init_level++;
+
+	/* Allocate SCB data now that buffer_dmat is initialized */
+	if (ahd_init_scbdata(ahd) != 0)
+		return (ENOMEM);
+
+	if ((ahd->flags & AHD_INITIATORROLE) == 0)
+		ahd->flags &= ~AHD_RESET_BUS_A;
+
+	/*
+	 * Before committing these settings to the chip, give
+	 * the OSM one last chance to modify our configuration.
+	 */
+	ahd_platform_init(ahd);
+
+	/* Bring up the chip. */
+	ahd_chip_init(ahd);
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+
+	if ((ahd->flags & AHD_CURRENT_SENSING) == 0)
+		goto init_done;
+
+	/*
+	 * Verify termination based on current draw and
+	 * warn user if the bus is over/under terminated.
+	 */
+	error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL,
+				   CURSENSE_ENB);
+	if (error != 0) {
+		printf("%s: current sensing timeout 1\n", ahd_name(ahd));
+		goto init_done;
+	}
+	for (i = 20, fstat = FLX_FSTAT_BUSY;
+	     (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) {
+		error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat);
+		if (error != 0) {
+			printf("%s: current sensing timeout 2\n",
+			       ahd_name(ahd));
+			goto init_done;
+		}
+	}
+	if (i == 0) {
+		printf("%s: Timedout during current-sensing test\n",
+		       ahd_name(ahd));
+		goto init_done;
+	}
+
+	/* Latch Current Sensing status. */
+	error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing);
+	if (error != 0) {
+		printf("%s: current sensing timeout 3\n", ahd_name(ahd));
+		goto init_done;
+	}
+
+	/* Disable current sensing. */
+	ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0);
+
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) {
+		printf("%s: current_sensing == 0x%x\n",
+		       ahd_name(ahd), current_sensing);
+	}
+#endif
+	warn_user = 0;
+	for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) {
+		u_int term_stat;
+
+		term_stat = (current_sensing & FLX_CSTAT_MASK);
+		switch (term_stat) {
+		case FLX_CSTAT_OVER:
+		case FLX_CSTAT_UNDER:
+			warn_user++;
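+			/* FALLTHROUGH */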
+		case FLX_CSTAT_INVALID:
+		case FLX_CSTAT_OKAY:
+			if (warn_user == 0 && bootverbose == 0)
+				break;
+			printf("%s: %s Channel %s\n", ahd_name(ahd),
+			       channel_strings[i], termstat_strings[term_stat]);
+			break;
+		}
+	}
+	if (warn_user) {
+		printf("%s: WARNING. Termination is not configured correctly.\n"
+		       "%s: WARNING. SCSI bus operations may FAIL.\n",
+		       ahd_name(ahd), ahd_name(ahd));
+	}
+init_done:
+	ahd_restart(ahd);
+	ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
+			ahd_stat_timer, ahd);
+	return (0);
+}
+
+/*
+ * (Re)initialize chip state after a chip reset.
+ */
+static void
+ahd_chip_init(struct ahd_softc *ahd)
+{
+	uint32_t busaddr;
+	u_int	 sxfrctl1;
+	u_int	 scsiseq_template;
+	u_int	 wait;
+	u_int	 i;
+	u_int	 target;
+
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	/*
+	 * Take the LED out of diagnostic mode
+	 */
+	ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON));
+
+	/*
+	 * Return HS_MAILBOX to its default value.
+	 */
+	ahd->hs_mailbox = 0;
+	ahd_outb(ahd, HS_MAILBOX, 0);
+
+	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */
+	ahd_outb(ahd, IOWNID, ahd->our_id);
+	ahd_outb(ahd, TOWNID, ahd->our_id);
+	sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0;
+	sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0;
+	if ((ahd->bugs & AHD_LONG_SETIMO_BUG)
+	 && (ahd->seltime != STIMESEL_MIN)) {
+		/*
+		 * The selection timer duration is twice as long
+		 * as it should be.  Halve it by adding "1" to
+		 * the user specified setting.
+		 */
+		sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ;
+	} else {
+		sxfrctl1 |= ahd->seltime;
+	}
+		
+	ahd_outb(ahd, SXFRCTL0, DFON);
+	ahd_outb(ahd, SXFRCTL1, sxfrctl1|ENSTIMER|ACTNEGEN);
+	ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
+
+	/*
+	 * Now that termination is set, wait for up
+	 * to 500ms for our transceivers to settle.  If
+	 * the adapter does not have a cable attached,
+	 * the transceivers may never settle, so don't
+	 * complain if we fail here.
+	 */
+	for (wait = 10000;
+	     (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
+	     wait--)
+		ahd_delay(100);
+
+	/* Clear any false bus resets due to the transceivers settling */
+	ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
+	ahd_outb(ahd, CLRINT, CLRSCSIINT);
+
+	/* Initialize mode specific S/G state. */
+	for (i = 0; i < 2; i++) {
+		ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
+		ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
+		ahd_outb(ahd, SG_STATE, 0);
+		ahd_outb(ahd, CLRSEQINTSRC, 0xFF);
+		ahd_outb(ahd, SEQIMODE,
+			 ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT
+			|ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD);
+	}
+
+	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+	ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN);
+	ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75);
+	ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN);
+	ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR);
+	if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
+		ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE);
+	} else {
+		ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE);
+	}
+	ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS);
+	if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX)
+		/*
+		 * Do not issue a target abort when a split completion
+		 * error occurs.  Let our PCIX interrupt handler deal
+		 * with it instead. H2A4 Razor #625
+		 */
+		ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS);
+
+	if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0)
+		ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER);
+
+	/*
+	 * Tweak IOCELL settings.
+	 */
+	if ((ahd->flags & AHD_HP_BOARD) != 0) {
+		for (i = 0; i < NUMDSPS; i++) {
+			ahd_outb(ahd, DSPSELECT, i);
+			ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT);
+		}
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_MISC) != 0)
+			printf("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd),
+			       WRTBIASCTL_HP_DEFAULT);
+#endif
+	}
+	ahd_setup_iocell_workaround(ahd);
+
+	/*
+	 * Enable LQI Manager interrupts.
+	 */
+	ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT
+			      | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI
+			      | ENLQIOVERI_LQ|ENLQIOVERI_NLQ);
+	ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC);
+	/*
+	 * An interrupt from LQOBUSFREE is made redundant by the
+	 * BUSFREE interrupt.  We choose to have the sequencer catch
+	 * LQOPHCHGINPKT errors manually for the command phase at the
+	 * start of a packetized selection case.
+	ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE|ENLQOPHACHGINPKT);
+	 */
+	ahd_outb(ahd, LQOMODE1, 0);
+
+	/*
+	 * Setup sequencer interrupt handlers.
+	 */
+	ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr));
+	ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr));
+
+	/*
+	 * Setup SCB Offset registers.
+	 */
+	if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
+		ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb,
+			 pkt_long_lun));
+	} else {
+		ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun));
+	}
+	ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len));
+	ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute));
+	ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management));
+	ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb,
+				       shared_data.idata.cdb));
+	ahd_outb(ahd, QNEXTPTR,
+		 offsetof(struct hardware_scb, next_hscb_busaddr));
+	ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET);
+	ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control));
+	if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
+		ahd_outb(ahd, LUNLEN,
+			 sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1);
+	} else {
+		ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN);
+	}
+	ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1);
+	ahd_outb(ahd, MAXCMD, 0xFF);
+	ahd_outb(ahd, SCBAUTOPTR,
+		 AUSCBPTR_EN | offsetof(struct hardware_scb, tag));
+
+	/* We haven't been enabled for target mode yet. */
+	ahd_outb(ahd, MULTARGID, 0);
+	ahd_outb(ahd, MULTARGID + 1, 0);
+
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	/* Initialize the negotiation table. */
+	if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) {
+		/*
+		 * Clear the spare bytes in the neg table to avoid
+		 * spurious parity errors.
+		 */
+		for (target = 0; target < AHD_NUM_TARGETS; target++) {
+			ahd_outb(ahd, NEGOADDR, target);
+			ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0);
+			for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++)
+				ahd_outb(ahd, ANNEXDAT, 0);
+		}
+	}
+	for (target = 0; target < AHD_NUM_TARGETS; target++) {
+		struct	 ahd_devinfo devinfo;
+		struct	 ahd_initiator_tinfo *tinfo;
+		struct	 ahd_tmode_tstate *tstate;
+
+		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+					    target, &tstate);
+		ahd_compile_devinfo(&devinfo, ahd->our_id,
+				    target, CAM_LUN_WILDCARD,
+				    'A', ROLE_INITIATOR);
+		ahd_update_neg_table(ahd, &devinfo, &tinfo->curr);
+	}
+
+	ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR);
+	ahd_outb(ahd, CLRINT, CLRSCSIINT);
+
+#ifdef NEEDS_MORE_TESTING
+	/*
+	 * Always enable abort on incoming L_Qs if this feature is
+	 * supported.  We use this to catch invalid SCB references.
+	 */
+	if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0)
+		ahd_outb(ahd, LQCTL1, ABORTPENDING);
+	else
+#endif
+		ahd_outb(ahd, LQCTL1, 0);
+
+	/* All of our queues are empty */
+	ahd->qoutfifonext = 0;
+	ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID_LE;
+	ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID >> 8);
+	for (i = 0; i < AHD_QOUT_SIZE; i++)
+		ahd->qoutfifo[i] = 0;
+	ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD);
+
+	ahd->qinfifonext = 0;
+	for (i = 0; i < AHD_QIN_SIZE; i++)
+		ahd->qinfifo[i] = SCB_LIST_NULL;
+
+	if ((ahd->features & AHD_TARGETMODE) != 0) {
+		/* All target command blocks start out invalid. */
+		for (i = 0; i < AHD_TMODE_CMDS; i++)
+			ahd->targetcmds[i].cmd_valid = 0;
+		ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD);
+		ahd->tqinfifonext = 1;
+		ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1);
+		ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);
+	}
+
+	/* Initialize Scratch Ram. */
+	ahd_outb(ahd, SEQ_FLAGS, 0);
+	ahd_outb(ahd, SEQ_FLAGS2, 0);
+
+	/* We don't have any waiting selections */
+	ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL);
+	ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL);
+	for (i = 0; i < AHD_NUM_TARGETS; i++)
+		ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL);
+
+	/*
+	 * Nobody is waiting to be DMAed into the QOUTFIFO.
+	 */
+	ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
+	ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL);
+	ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
+
+	/*
+	 * The Freeze Count is 0.
+	 */
+	ahd_outw(ahd, QFREEZE_COUNT, 0);
+
+	/*
+	 * Tell the sequencer where it can find our arrays in memory.
+	 */
+	busaddr = ahd->shared_data_busaddr;
+	ahd_outb(ahd, SHARED_DATA_ADDR, busaddr & 0xFF);
+	ahd_outb(ahd, SHARED_DATA_ADDR + 1, (busaddr >> 8) & 0xFF);
+	ahd_outb(ahd, SHARED_DATA_ADDR + 2, (busaddr >> 16) & 0xFF);
+	ahd_outb(ahd, SHARED_DATA_ADDR + 3, (busaddr >> 24) & 0xFF);
+	ahd_outb(ahd, QOUTFIFO_NEXT_ADDR, busaddr & 0xFF);
+	ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 1, (busaddr >> 8) & 0xFF);
+	ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 2, (busaddr >> 16) & 0xFF);
+	ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 3, (busaddr >> 24) & 0xFF);
+
+	/*
+	 * Setup the allowed SCSI Sequences based on operational mode.
+	 * If we are a target, we'll enable select in operations once
+	 * we've had a lun enabled.
+	 */
+	scsiseq_template = ENAUTOATNP;
+	if ((ahd->flags & AHD_INITIATORROLE) != 0)
+		scsiseq_template |= ENRSELI;
+	ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template);
+
+	/* There are no busy SCBs yet. */
+	for (target = 0; target < AHD_NUM_TARGETS; target++) {
+		int lun;
+
+		for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++)
+			ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun));
+	}
+
+	/*
+	 * Initialize the group code to command length table.
+	 * Vendor Unique codes are set to 0 so we only capture
+	 * the first byte of the cdb.  These can be overridden
+	 * when target mode is enabled.
+	 */
+	ahd_outb(ahd, CMDSIZE_TABLE, 5);
+	ahd_outb(ahd, CMDSIZE_TABLE + 1, 9);
+	ahd_outb(ahd, CMDSIZE_TABLE + 2, 9);
+	ahd_outb(ahd, CMDSIZE_TABLE + 3, 0);
+	ahd_outb(ahd, CMDSIZE_TABLE + 4, 15);
+	ahd_outb(ahd, CMDSIZE_TABLE + 5, 11);
+	ahd_outb(ahd, CMDSIZE_TABLE + 6, 0);
+	ahd_outb(ahd, CMDSIZE_TABLE + 7, 0);
+		
+	/* Tell the sequencer of our initial queue positions */
+	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+	ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512);
+	ahd->qinfifonext = 0;
+	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
+	ahd_set_hescb_qoff(ahd, 0);
+	ahd_set_snscb_qoff(ahd, 0);
+	ahd_set_sescb_qoff(ahd, 0);
+	ahd_set_sdscb_qoff(ahd, 0);
+
+	/*
+	 * Tell the sequencer which SCB will be the next one it receives.
+	 */
+	busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr);
+	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF);
+	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF);
+	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF);
+	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF);
+
+	/*
+	 * Default to coalescing disabled.
+	 */
+	ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0);
+	ahd_outw(ahd, CMDS_PENDING, 0);
+	ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer,
+				     ahd->int_coalescing_maxcmds,
+				     ahd->int_coalescing_mincmds);
+	ahd_enable_coalescing(ahd, FALSE);
+
+	ahd_loadseq(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+}
+
+/*
+ * Setup default device and controller settings.
+ * This should only be called if our probe has
+ * determined that no configuration data is available.
+ */
+int
+ahd_default_config(struct ahd_softc *ahd)
+{
+	int	targ;
+
+	ahd->our_id = 7;
+
+	/*
+	 * Allocate a tstate to house information for our
+	 * initiator presence on the bus as well as the user
+	 * data for any target mode initiator.
+	 */
+	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
+		printf("%s: unable to allocate ahd_tmode_tstate.  "
+		       "Failing attach\n", ahd_name(ahd));
+		return (ENOMEM);
+	}
+
+	for (targ = 0; targ < AHD_NUM_TARGETS; targ++) {
+		struct	 ahd_devinfo devinfo;
+		struct	 ahd_initiator_tinfo *tinfo;
+		struct	 ahd_tmode_tstate *tstate;
+		uint16_t target_mask;
+
+		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+					    targ, &tstate);
+		/*
+		 * We support SPC2 and SPI4.
+		 */
+		tinfo->user.protocol_version = 4;
+		tinfo->user.transport_version = 4;
+
+		target_mask = 0x01 << targ;
+		ahd->user_discenable |= target_mask;
+		tstate->discenable |= target_mask;
+		ahd->user_tagenable |= target_mask;
+#ifdef AHD_FORCE_160
+		tinfo->user.period = AHD_SYNCRATE_DT;
+#else
+		tinfo->user.period = AHD_SYNCRATE_160;
+#endif
+		tinfo->user.offset = MAX_OFFSET;
+		tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM
+					| MSG_EXT_PPR_WR_FLOW
+					| MSG_EXT_PPR_HOLD_MCS
+					| MSG_EXT_PPR_IU_REQ
+					| MSG_EXT_PPR_QAS_REQ
+					| MSG_EXT_PPR_DT_REQ;
+		if ((ahd->features & AHD_RTI) != 0)
+			tinfo->user.ppr_options |= MSG_EXT_PPR_RTI;
+
+		tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
+
+		/*
+		 * Start out Async/Narrow/Untagged and with
+		 * conservative protocol support.
+		 */
+		tinfo->goal.protocol_version = 2;
+		tinfo->goal.transport_version = 2;
+		tinfo->curr.protocol_version = 2;
+		tinfo->curr.transport_version = 2;
+		ahd_compile_devinfo(&devinfo, ahd->our_id,
+				    targ, CAM_LUN_WILDCARD,
+				    'A', ROLE_INITIATOR);
+		tstate->tagenable &= ~target_mask;
+		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
+		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
+				 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
+				 /*paused*/TRUE);
+	}
+	return (0);
+}
+
+/*
+ * Parse device configuration information.
+ */
+int
+ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc)
+{
+	int targ;
+	int max_targ;
+
+	max_targ = sc->max_targets & CFMAXTARG;
+	ahd->our_id = sc->brtime_id & CFSCSIID;
+
+	/*
+	 * Allocate a tstate to house information for our
+	 * initiator presence on the bus as well as the user
+	 * data for any target mode initiator.
+	 */
+	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
+		printf("%s: unable to allocate ahd_tmode_tstate.  "
+		       "Failing attach\n", ahd_name(ahd));
+		return (ENOMEM);
+	}
+
+	for (targ = 0; targ < max_targ; targ++) {
+		struct	 ahd_devinfo devinfo;
+		struct	 ahd_initiator_tinfo *tinfo;
+		struct	 ahd_transinfo *user_tinfo;
+		struct	 ahd_tmode_tstate *tstate;
+		uint16_t target_mask;
+
+		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+					    targ, &tstate);
+		user_tinfo = &tinfo->user;
+
+		/*
+		 * We support SPC2 and SPI4.
+		 */
+		tinfo->user.protocol_version = 4;
+		tinfo->user.transport_version = 4;
+
+		target_mask = 0x01 << targ;
+		ahd->user_discenable &= ~target_mask;
+		tstate->discenable &= ~target_mask;
+		ahd->user_tagenable &= ~target_mask;
+		if (sc->device_flags[targ] & CFDISC) {
+			tstate->discenable |= target_mask;
+			ahd->user_discenable |= target_mask;
+			ahd->user_tagenable |= target_mask;
+		} else {
+			/*
+			 * Cannot be packetized without disconnection.
+			 */
+			sc->device_flags[targ] &= ~CFPACKETIZED;
+		}
+
+		user_tinfo->ppr_options = 0;
+		user_tinfo->period = (sc->device_flags[targ] & CFXFER);
+		if (user_tinfo->period < CFXFER_ASYNC) {
+			if (user_tinfo->period <= AHD_PERIOD_10MHz)
+				user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ;
+			user_tinfo->offset = MAX_OFFSET;
+		} else  {
+			user_tinfo->offset = 0;
+			user_tinfo->period = AHD_ASYNC_XFER_PERIOD;
+		}
+#ifdef AHD_FORCE_160
+		if (user_tinfo->period <= AHD_SYNCRATE_160)
+			user_tinfo->period = AHD_SYNCRATE_DT;
+#endif
+
+		if ((sc->device_flags[targ] & CFPACKETIZED) != 0) {
+			user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM
+						|  MSG_EXT_PPR_WR_FLOW
+						|  MSG_EXT_PPR_HOLD_MCS
+						|  MSG_EXT_PPR_IU_REQ;
+			if ((ahd->features & AHD_RTI) != 0)
+				user_tinfo->ppr_options |= MSG_EXT_PPR_RTI;
+		}
+
+		if ((sc->device_flags[targ] & CFQAS) != 0)
+			user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ;
+
+		if ((sc->device_flags[targ] & CFWIDEB) != 0)
+			user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT;
+		else
+			user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT;
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_MISC) != 0)
+			printf("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width,
+			       user_tinfo->period, user_tinfo->offset,
+			       user_tinfo->ppr_options);
+#endif
+		/*
+		 * Start out Async/Narrow/Untagged and with
+		 * conservative protocol support.
+		 */
+		tstate->tagenable &= ~target_mask;
+		tinfo->goal.protocol_version = 2;
+		tinfo->goal.transport_version = 2;
+		tinfo->curr.protocol_version = 2;
+		tinfo->curr.transport_version = 2;
+		ahd_compile_devinfo(&devinfo, ahd->our_id,
+				    targ, CAM_LUN_WILDCARD,
+				    'A', ROLE_INITIATOR);
+		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
+		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
+				 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
+				 /*paused*/TRUE);
+	}
+
+	ahd->flags &= ~AHD_SPCHK_ENB_A;
+	if (sc->bios_control & CFSPARITY)
+		ahd->flags |= AHD_SPCHK_ENB_A;
+
+	ahd->flags &= ~AHD_RESET_BUS_A;
+	if (sc->bios_control & CFRESETB)
+		ahd->flags |= AHD_RESET_BUS_A;
+
+	ahd->flags &= ~AHD_EXTENDED_TRANS_A;
+	if (sc->bios_control & CFEXTEND)
+		ahd->flags |= AHD_EXTENDED_TRANS_A;
+
+	ahd->flags &= ~AHD_BIOS_ENABLED;
+	if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED)
+		ahd->flags |= AHD_BIOS_ENABLED;
+
+	ahd->flags &= ~AHD_STPWLEVEL_A;
+	if ((sc->adapter_control & CFSTPWLEVEL) != 0)
+		ahd->flags |= AHD_STPWLEVEL_A;
+
+	return (0);
+}
+
+/*
+ * Parse vital product data (VPD) configuration information.
+ */
+int
+ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd)
+{
+	int error;
+
+	error = ahd_verify_vpd_cksum(vpd);
+	if (error == 0)
+		return (EINVAL);
+	if ((vpd->bios_flags & VPDBOOTHOST) != 0)
+		ahd->flags |= AHD_BOOT_CHANNEL;
+	return (0);
+}
+
+void
+ahd_intr_enable(struct ahd_softc *ahd, int enable)
+{
+	u_int hcntrl;
+
+	hcntrl = ahd_inb(ahd, HCNTRL);
+	hcntrl &= ~INTEN;
+	ahd->pause &= ~INTEN;
+	ahd->unpause &= ~INTEN;
+	if (enable) {
+		hcntrl |= INTEN;
+		ahd->pause |= INTEN;
+		ahd->unpause |= INTEN;
+	}
+	ahd_outb(ahd, HCNTRL, hcntrl);
+}
+
+void
+ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds,
+			     u_int mincmds)
+{
+	if (timer > AHD_TIMER_MAX_US)
+		timer = AHD_TIMER_MAX_US;
+	ahd->int_coalescing_timer = timer;
+
+	if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX)
+		maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX;
+	if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX)
+		mincmds = AHD_INT_COALESCING_MINCMDS_MAX;
+	ahd->int_coalescing_maxcmds = maxcmds;
+	ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK);
+	ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds);
+	ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds);
+}
+
+void
+ahd_enable_coalescing(struct ahd_softc *ahd, int enable)
+{
+
+	ahd->hs_mailbox &= ~ENINT_COALESCE;
+	if (enable)
+		ahd->hs_mailbox |= ENINT_COALESCE;
+	ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox);
+	ahd_flush_device_writes(ahd);
+	ahd_run_qoutfifo(ahd);
+}
+
+/*
+ * Ensure that the card is paused in a location
+ * outside of all critical sections and that all
+ * pending work is completed prior to returning.
+ * This routine should only be called from outside
+ * an interrupt context.
+ */
+void
+ahd_pause_and_flushwork(struct ahd_softc *ahd)
+{
+	u_int intstat;
+	u_int maxloops;
+	u_int qfreeze_cnt;
+
+	maxloops = 1000;
+	ahd->flags |= AHD_ALL_INTERRUPTS;
+	ahd_pause(ahd);
+	/*
+	 * Increment the QFreeze Count so that the sequencer
+	 * will not start new selections.  We do this only
+	 * until we are safely paused without further selections
+	 * pending.
+	 */
+	ahd_outw(ahd, QFREEZE_COUNT, ahd_inw(ahd, QFREEZE_COUNT) + 1);
+	ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN);
+	do {
+		struct scb *waiting_scb;
+
+		ahd_unpause(ahd);
+		ahd_intr(ahd);
+		ahd_pause(ahd);
+		ahd_clear_critical_section(ahd);
+		intstat = ahd_inb(ahd, INTSTAT);
+		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+		if ((ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0)
+			ahd_outb(ahd, SCSISEQ0,
+				 ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
+		/*
+		 * In the non-packetized case, the sequencer (for Rev A)
+		 * relies on ENSELO remaining set after SELDO.  The hardware
+		 * auto-clears ENSELO in the packetized case.
+		 */
+		waiting_scb = ahd_lookup_scb(ahd,
+					     ahd_inw(ahd, WAITING_TID_HEAD));
+		if (waiting_scb != NULL
+		 && (waiting_scb->flags & SCB_PACKETIZED) == 0
+		 && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0)
+			ahd_outb(ahd, SCSISEQ0,
+				 ahd_inb(ahd, SCSISEQ0) | ENSELO);
+	} while (--maxloops
+	      && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0)
+	      && ((intstat & INT_PEND) != 0
+	       || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
+	       || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0));
+
+	if (maxloops == 0) {
+		printf("Infinite interrupt loop, INTSTAT = %x",
+		      ahd_inb(ahd, INTSTAT));
+	}
+	qfreeze_cnt = ahd_inw(ahd, QFREEZE_COUNT);
+	if (qfreeze_cnt == 0) {
+		printf("%s: ahd_pause_and_flushwork with 0 qfreeze count!\n",
+		       ahd_name(ahd));
+	} else {
+		qfreeze_cnt--;
+	}
+	ahd_outw(ahd, QFREEZE_COUNT, qfreeze_cnt);
+	if (qfreeze_cnt == 0)
+		ahd_outb(ahd, SEQ_FLAGS2,
+			 ahd_inb(ahd, SEQ_FLAGS2) & ~SELECTOUT_QFROZEN);
+
+	ahd_flush_qoutfifo(ahd);
+
+	ahd_platform_flushwork(ahd);
+	ahd->flags &= ~AHD_ALL_INTERRUPTS;
+}
+
+int
+ahd_suspend(struct ahd_softc *ahd)
+{
+
+	ahd_pause_and_flushwork(ahd);
+
+	if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
+		ahd_unpause(ahd);
+		return (EBUSY);
+	}
+	ahd_shutdown(ahd);
+	return (0);
+}
+
+int
+ahd_resume(struct ahd_softc *ahd)
+{
+
+	ahd_reset(ahd, /*reinit*/TRUE);
+	ahd_intr_enable(ahd, TRUE); 
+	ahd_restart(ahd);
+	return (0);
+}
+
+/************************** Busy Target Table *********************************/
+/*
+ * Set SCBPTR to the SCB that contains the busy
+ * table entry for TCL.  Return the offset into
+ * the SCB that contains the entry for TCL.
+ * saved_scbid is dereferenced and set to the
+ * scbid that should be restored once manipulation
+ * of the TCL entry is complete.
+ */
+static __inline u_int
+ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl)
+{
+	/*
+	 * Index to the SCB that contains the busy entry.
+	 */
+	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+	*saved_scbid = ahd_get_scbptr(ahd);
+	ahd_set_scbptr(ahd, TCL_LUN(tcl)
+		     | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4));
+
+	/*
+	 * And now calculate the SCB offset to the entry.
+	 * Each entry is 2 bytes wide, hence the
+	 * multiplication by 2.
+	 */
+	return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1) + SCB_DISCONNECTED_LISTS);
+}
+
+/*
+ * Return the untagged transaction id for a given target/channel lun.
+ */
+u_int
+ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl)
+{
+	u_int scbid;
+	u_int scb_offset;
+	u_int saved_scbptr;
+		
+	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
+	scbid = ahd_inw_scbram(ahd, scb_offset);
+	ahd_set_scbptr(ahd, saved_scbptr);
+	return (scbid);
+}
+
+void
+ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid)
+{
+	u_int scb_offset;
+	u_int saved_scbptr;
+		
+	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
+	ahd_outw(ahd, scb_offset, scbid);
+	ahd_set_scbptr(ahd, saved_scbptr);
+}
+
+/************************** SCB and SCB queue management **********************/
+int
+ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target,
+	      char channel, int lun, u_int tag, role_t role)
+{
+	int targ = SCB_GET_TARGET(ahd, scb);
+	char chan = SCB_GET_CHANNEL(ahd, scb);
+	int slun = SCB_GET_LUN(scb);
+	int match;
+
+	match = ((chan == channel) || (channel == ALL_CHANNELS));
+	if (match != 0)
+		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
+	if (match != 0)
+		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
+	if (match != 0) {
+#ifdef AHD_TARGET_MODE
+		int group;
+
+		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
+		if (role == ROLE_INITIATOR) {
+			match = (group != XPT_FC_GROUP_TMODE)
+			      && ((tag == SCB_GET_TAG(scb))
+			       || (tag == SCB_LIST_NULL));
+		} else if (role == ROLE_TARGET) {
+			match = (group == XPT_FC_GROUP_TMODE)
+			      && ((tag == scb->io_ctx->csio.tag_id)
+			       || (tag == SCB_LIST_NULL));
+		}
+#else /* !AHD_TARGET_MODE */
+		match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL));
+#endif /* AHD_TARGET_MODE */
+	}
+
+	return match;
+}
+
+void
+ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
+{
+	int	target;
+	char	channel;
+	int	lun;
+
+	target = SCB_GET_TARGET(ahd, scb);
+	lun = SCB_GET_LUN(scb);
+	channel = SCB_GET_CHANNEL(ahd, scb);
+	
+	ahd_search_qinfifo(ahd, target, channel, lun,
+			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
+			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
+
+	ahd_platform_freeze_devq(ahd, scb);
+}
+
+void
+ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb)
+{
+	struct scb	*prev_scb;
+	ahd_mode_state	 saved_modes;
+
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+	prev_scb = NULL;
+	if (ahd_qinfifo_count(ahd) != 0) {
+		u_int prev_tag;
+		u_int prev_pos;
+
+		prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1);
+		prev_tag = ahd->qinfifo[prev_pos];
+		prev_scb = ahd_lookup_scb(ahd, prev_tag);
+	}
+	ahd_qinfifo_requeue(ahd, prev_scb, scb);
+	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
+	ahd_restore_modes(ahd, saved_modes);
+}
+
+static void
+ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb,
+		    struct scb *scb)
+{
+	if (prev_scb == NULL) {
+		uint32_t busaddr;
+
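+		/*
+		 * No predecessor in the queue; point the sequencer
+		 * directly at this SCB.
+		 */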
+		busaddr = ahd_le32toh(scb->hscb->hscb_busaddr);
+		ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF);
+		ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF);
+		ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF);
+		ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF);
+	} else {
+		prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
+		ahd_sync_scb(ahd, prev_scb, 
+			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+	}
+	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
+	ahd->qinfifonext++;
+	scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr;
+	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+}
+
+static int
+ahd_qinfifo_count(struct ahd_softc *ahd)
+{
+	u_int qinpos;
+	u_int wrap_qinpos;
+	u_int wrap_qinfifonext;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+	qinpos = ahd_get_snscb_qoff(ahd);
+	wrap_qinpos = AHD_QIN_WRAP(qinpos);
+	wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext);
+	if (wrap_qinfifonext >= wrap_qinpos)
+		return (wrap_qinfifonext - wrap_qinpos);
+	else
+		return (wrap_qinfifonext
+		      + NUM_ELEMENTS(ahd->qinfifo) - wrap_qinpos);
+}
+
+void
+ahd_reset_cmds_pending(struct ahd_softc *ahd)
+{
+	struct		scb *scb;
+	ahd_mode_state	saved_modes;
+	u_int		pending_cmds;
+
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+
+	/*
+	 * Don't count any commands as outstanding that the
+	 * sequencer has already marked for completion.
+	 */
+	ahd_flush_qoutfifo(ahd);
+
+	pending_cmds = 0;
+	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
+		pending_cmds++;
+	}
+	ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd));
+	ahd_restore_modes(ahd, saved_modes);
+	ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
+}
+
+int
+ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
+		   int lun, u_int tag, role_t role, uint32_t status,
+		   ahd_search_action action)
+{
+	struct scb	*scb;
+	struct scb	*prev_scb;
+	ahd_mode_state	 saved_modes;
+	u_int		 qinstart;
+	u_int		 qinpos;
+	u_int		 qintail;
+	u_int		 tid_next;
+	u_int		 tid_prev;
+	u_int		 scbid;
+	u_int		 savedscbptr;
+	uint32_t	 busaddr;
+	int		 found;
+	int		 targets;
+
+	/* Must be in CCHAN mode */
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+
+	/*
+	 * Halt any pending SCB DMA.  The sequencer will reinitiate
+	 * this dma if the qinfifo is not empty once we unpause.
+	 */
+	if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR))
+	 == (CCARREN|CCSCBEN|CCSCBDIR)) {
+		ahd_outb(ahd, CCSCBCTL,
+			 ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN));
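+		/* Wait for the SCB DMA engine to go idle. */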
+		while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0)
+			;
+	}
+	/* Determine sequencer's position in the qinfifo. */
+	qintail = AHD_QIN_WRAP(ahd->qinfifonext);
+	qinstart = ahd_get_snscb_qoff(ahd);
+	qinpos = AHD_QIN_WRAP(qinstart);
+	found = 0;
+	prev_scb = NULL;
+
+	if (action == SEARCH_PRINT) {
+		printf("qinstart = %d qinfifonext = %d\nQINFIFO:",
+		       qinstart, ahd->qinfifonext);
+	}
+
+	/*
+	 * Start with an empty queue.  Entries that are not chosen
+	 * for removal will be re-added to the queue as we go.
+	 */
+	ahd->qinfifonext = qinstart;
+	busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr);
+	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF);
+	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF);
+	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF);
+	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF);
+
+	while (qinpos != qintail) {
+		scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]);
+		if (scb == NULL) {
+			printf("qinpos = %d, SCB index = %d\n",
+				qinpos, ahd->qinfifo[qinpos]);
+			panic("Loop 1\n");
+		}
+
+		if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) {
+			/*
+			 * We found an scb that needs to be acted on.
+			 */
+			found++;
+			switch (action) {
+			case SEARCH_COMPLETE:
+			{
+				cam_status ostat;
+				cam_status cstat;
+
+				ostat = ahd_get_transaction_status(scb);
+				if (ostat == CAM_REQ_INPROG)
+					ahd_set_transaction_status(scb,
+								   status);
+				cstat = ahd_get_transaction_status(scb);
+				if (cstat != CAM_REQ_CMP)
+					ahd_freeze_scb(scb);
+				if ((scb->flags & SCB_ACTIVE) == 0)
+					printf("Inactive SCB in qinfifo\n");
+				ahd_done(ahd, scb);
+
+				/* FALLTHROUGH */
+			}
+			case SEARCH_REMOVE:
+				break;
+			case SEARCH_PRINT:
+				printf(" 0x%x", ahd->qinfifo[qinpos]);
+				/* FALLTHROUGH */
+			case SEARCH_COUNT:
+				ahd_qinfifo_requeue(ahd, prev_scb, scb);
+				prev_scb = scb;
+				break;
+			}
+		} else {
+			ahd_qinfifo_requeue(ahd, prev_scb, scb);
+			prev_scb = scb;
+		}
+		qinpos = AHD_QIN_WRAP(qinpos+1);
+	}
+
+	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
+
+	if (action == SEARCH_PRINT)
+		printf("\nWAITING_TID_QUEUES:\n");
+
+	/*
+	 * Search waiting for selection lists.  We traverse the
+	 * list of "their ids" waiting for selection and, if
+	 * appropriate, traverse the SCBs of each "their id"
+	 * looking for matches.
+	 */
+	savedscbptr = ahd_get_scbptr(ahd);
+	tid_next = ahd_inw(ahd, WAITING_TID_HEAD);
+	tid_prev = SCB_LIST_NULL;
+	targets = 0;
+	for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) {
+		u_int tid_head;
+
+		/*
+		 * We limit based on the number of SCBs since
+		 * MK_MESSAGE SCBs are not in the per-tid lists.
+		 */
+		targets++;
+		if (targets > AHD_SCB_MAX) {
+			panic("TID LIST LOOP");
+		}
+		if (scbid >= ahd->scb_data.numscbs) {
+			printf("%s: Waiting TID List inconsistency. "
+			       "SCB index == 0x%x, yet numscbs == 0x%x.",
+			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
+			ahd_dump_card_state(ahd);
+			panic("for safety");
+		}
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb == NULL) {
+			printf("%s: SCB = 0x%x Not Active!\n",
+			       ahd_name(ahd), scbid);
+			panic("Waiting TID List traversal\n");
+		}
+		ahd_set_scbptr(ahd, scbid);
+		tid_next = ahd_inw_scbram(ahd, SCB_NEXT2);
+		if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
+				  SCB_LIST_NULL, ROLE_UNKNOWN) == 0) {
+			tid_prev = scbid;
+			continue;
+		}
+
+		/*
+		 * We found a list of scbs that needs to be searched.
+		 */
+		if (action == SEARCH_PRINT)
+			printf("       %d ( ", SCB_GET_TARGET(ahd, scb));
+		tid_head = scbid;
+		found += ahd_search_scb_list(ahd, target, channel,
+					     lun, tag, role, status,
+					     action, &tid_head,
+					     SCB_GET_TARGET(ahd, scb));
+		if (tid_head != scbid)
+			ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next);
+		if (!SCBID_IS_NULL(tid_head))
+			tid_prev = tid_head;
+		if (action == SEARCH_PRINT)
+			printf(")\n");
+	}
+	ahd_set_scbptr(ahd, savedscbptr);
+	ahd_restore_modes(ahd, saved_modes);
+	return (found);
+}
+
+static int
+ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
+		    int lun, u_int tag, role_t role, uint32_t status,
+		    ahd_search_action action, u_int *list_head, u_int tid)
+{
+	struct	scb *scb;
+	u_int	scbid;
+	u_int	next;
+	u_int	prev;
+	int	found;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+	found = 0;
+	prev = SCB_LIST_NULL;
+	next = *list_head;
+	for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) {
+		if (scbid >= ahd->scb_data.numscbs) {
+			printf("%s:SCB List inconsistency. "
+			       "SCB == 0x%x, yet numscbs == 0x%x.",
+			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
+			ahd_dump_card_state(ahd);
+			panic("for safety");
+		}
+		scb = ahd_lookup_scb(ahd, scbid);
+		if (scb == NULL) {
+			printf("%s: SCB = %d Not Active!\n",
+			       ahd_name(ahd), scbid);
+			panic("Waiting List traversal\n");
+		}
+		ahd_set_scbptr(ahd, scbid);
+		next = ahd_inw_scbram(ahd, SCB_NEXT);
+		if (ahd_match_scb(ahd, scb, target, channel,
+				  lun, SCB_LIST_NULL, role) == 0) {
+			prev = scbid;
+			continue;
+		}
+		found++;
+		switch (action) {
+		case SEARCH_COMPLETE:
+		{
+			cam_status ostat;
+			cam_status cstat;
+
+			ostat = ahd_get_transaction_status(scb);
+			if (ostat == CAM_REQ_INPROG)
+				ahd_set_transaction_status(scb, status);
+			cstat = ahd_get_transaction_status(scb);
+			if (cstat != CAM_REQ_CMP)
+				ahd_freeze_scb(scb);
+			if ((scb->flags & SCB_ACTIVE) == 0)
+				printf("Inactive SCB in Waiting List\n");
+			ahd_done(ahd, scb);
+			/* FALLTHROUGH */
+		}
+		case SEARCH_REMOVE:
+			ahd_rem_wscb(ahd, scbid, prev, next, tid);
+			if (prev == SCB_LIST_NULL)
+				*list_head = next;
+			break;
+		case SEARCH_PRINT:
+			printf("0x%x ", scbid);
+		case SEARCH_COUNT:
+			prev = scbid;
+			break;
+		}
+		if (found > AHD_SCB_MAX)
+			panic("SCB LIST LOOP");
+	}
+	if (action == SEARCH_COMPLETE
+	 || action == SEARCH_REMOVE)
+		ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING) - found);
+	return (found);
+}
+
+static void
+ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev,
+		    u_int tid_cur, u_int tid_next)
+{
+	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+
+	if (SCBID_IS_NULL(tid_cur)) {
+
+		/* Bypass current TID list */
+		if (SCBID_IS_NULL(tid_prev)) {
+			ahd_outw(ahd, WAITING_TID_HEAD, tid_next);
+		} else {
+			ahd_set_scbptr(ahd, tid_prev);
+			ahd_outw(ahd, SCB_NEXT2, tid_next);
+		}
+		if (SCBID_IS_NULL(tid_next))
+			ahd_outw(ahd, WAITING_TID_TAIL, tid_prev);
+	} else {
+
+		/* Stitch through tid_cur */
+		if (SCBID_IS_NULL(tid_prev)) {
+			ahd_outw(ahd, WAITING_TID_HEAD, tid_cur);
+		} else {
+			ahd_set_scbptr(ahd, tid_prev);
+			ahd_outw(ahd, SCB_NEXT2, tid_cur);
+		}
+		ahd_set_scbptr(ahd, tid_cur);
+		ahd_outw(ahd, SCB_NEXT2, tid_next);
+
+		if (SCBID_IS_NULL(tid_next))
+			ahd_outw(ahd, WAITING_TID_TAIL, tid_cur);
+	}
+}
+
+/*
+ * Manipulate the waiting for selection list and return the
+ * scb that follows the one that we remove.
+ */
+static u_int
+ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
+	     u_int prev, u_int next, u_int tid)
+{
+	u_int tail_offset;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+	if (!SCBID_IS_NULL(prev)) {
+		ahd_set_scbptr(ahd, prev);
+		ahd_outw(ahd, SCB_NEXT, next);
+	}
+
+	/*
+	 * SCBs that had MK_MESSAGE set in them will not
+	 * be queued to the per-target lists, so don't
+	 * blindly clear the tail pointer.
+	 */
+	tail_offset = WAITING_SCB_TAILS + (2 * tid);
+	if (SCBID_IS_NULL(next)
+	 && ahd_inw(ahd, tail_offset) == scbid)
+		ahd_outw(ahd, tail_offset, prev);
+	ahd_add_scb_to_free_list(ahd, scbid);
+	return (next);
+}
+
+/*
+ * Add the SCB as selected by SCBPTR onto the on chip list of
+ * free hardware SCBs.  This list is empty/unused if we are not
+ * performing SCB paging.
+ */
+static void
+ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid)
+{
+/* XXX Need some other mechanism to designate "free". */
+	/*
+	 * Invalidate the tag so that our abort
+	 * routines don't think it's active.
+	ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL);
+	 */
+}
+
+/******************************** Error Handling ******************************/
+/*
+ * Abort all SCBs that match the given description (target/channel/lun/tag),
+ * setting their status to the passed in status if the status has not already
+ * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
+ * is paused before it is called.
+ */
+int
+ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel,
+	       int lun, u_int tag, role_t role, uint32_t status)
+{
+	struct		scb *scbp;
+	struct		scb *scbp_next;
+	u_int		i, j;
+	u_int		maxtarget;
+	u_int		minlun;
+	u_int		maxlun;
+	int		found;
+	ahd_mode_state	saved_modes;
+
+	/* restore this when we're done */
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+	found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL,
+				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
+
+	/*
+	 * Clean out the busy target table for any untagged commands.
+	 */
+	i = 0;
+	maxtarget = 16;
+	if (target != CAM_TARGET_WILDCARD) {
+		i = target;
+		if (channel == 'B')
+			i += 8;
+		maxtarget = i + 1;
+	}
+
+	if (lun == CAM_LUN_WILDCARD) {
+		minlun = 0;
+		maxlun = AHD_NUM_LUNS_NONPKT;
+	} else if (lun >= AHD_NUM_LUNS_NONPKT) {
+		minlun = maxlun = 0;
+	} else {
+		minlun = lun;
+		maxlun = lun + 1;
+	}
+
+	if (role != ROLE_TARGET) {
+		for (;i < maxtarget; i++) {
+			for (j = minlun;j < maxlun; j++) {
+				u_int scbid;
+				u_int tcl;
+
+				tcl = BUILD_TCL_RAW(i, 'A', j);
+				scbid = ahd_find_busy_tcl(ahd, tcl);
+				scbp = ahd_lookup_scb(ahd, scbid);
+				if (scbp == NULL
+				 || ahd_match_scb(ahd, scbp, target, channel,
+						  lun, tag, role) == 0)
+					continue;
+				ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j));
+			}
+		}
+	}
+
+	/*
+	 * Don't abort commands that have already completed,
+	 * but haven't quite made it up to the host yet.
+	 */
+	ahd_flush_qoutfifo(ahd);
+
+	/*
+	 * Go through the pending CCB list and look for
+	 * commands for this target that are still active.
+	 * These are other tagged commands that were
+	 * disconnected when the reset occurred.
+	 */
+	scbp_next = LIST_FIRST(&ahd->pending_scbs);
+	while (scbp_next != NULL) {
+		scbp = scbp_next;
+		scbp_next = LIST_NEXT(scbp, pending_links);
+		if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) {
+			cam_status ostat;
+
+			ostat = ahd_get_transaction_status(scbp);
+			if (ostat == CAM_REQ_INPROG)
+				ahd_set_transaction_status(scbp, status);
+			if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP)
+				ahd_freeze_scb(scbp);
+			if ((scbp->flags & SCB_ACTIVE) == 0)
+				printf("Inactive SCB on pending list\n");
+			ahd_done(ahd, scbp);
+			found++;
+		}
+	}
+	ahd_restore_modes(ahd, saved_modes);
+	ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status);
+	ahd->flags |= AHD_UPDATE_PEND_CMDS;
+	return found;
+}
+
+static void
+ahd_reset_current_bus(struct ahd_softc *ahd)
+{
+	uint8_t scsiseq;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST);
+	scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO);
+	ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO);
+	ahd_flush_device_writes(ahd);
+	ahd_delay(AHD_BUSRESET_DELAY);
+	/* Turn off the bus reset */
+	ahd_outb(ahd, SCSISEQ0, scsiseq);
+	ahd_flush_device_writes(ahd);
+	ahd_delay(AHD_BUSRESET_DELAY);
+	if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) {
+		/*
+		 * 2A Razor #474
+		 * Certain chip state is not cleared for
+		 * SCSI bus resets that we initiate, so
+		 * we must reset the chip.
+		 */
+		ahd_reset(ahd, /*reinit*/TRUE);
+		ahd_intr_enable(ahd, /*enable*/TRUE);
+		AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+	}
+
+	ahd_clear_intstat(ahd);
+}
+
+int
+ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
+{
+	struct	ahd_devinfo devinfo;
+	u_int	initiator;
+	u_int	target;
+	u_int	max_scsiid;
+	int	found;
+	u_int	fifo;
+	u_int	next_fifo;
+
+	ahd->pending_device = NULL;
+
+	ahd_compile_devinfo(&devinfo,
+			    CAM_TARGET_WILDCARD,
+			    CAM_TARGET_WILDCARD,
+			    CAM_LUN_WILDCARD,
+			    channel, ROLE_UNKNOWN);
+	ahd_pause(ahd);
+
+	/* Make sure the sequencer is in a safe location. */
+	ahd_clear_critical_section(ahd);
+
+#ifdef AHD_TARGET_MODE
+	if ((ahd->flags & AHD_TARGETROLE) != 0) {
+		ahd_run_tqinfifo(ahd, /*paused*/TRUE);
+	}
+#endif
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+	/*
+	 * Disable selections so no automatic hardware
+	 * functions will modify chip state.
+	 */
+	ahd_outb(ahd, SCSISEQ0, 0);
+	ahd_outb(ahd, SCSISEQ1, 0);
+
+	/*
+	 * Safely shut down our DMA engines.  Always start with
+	 * the FIFO that is not currently active (if any are
+	 * actively connected).
+	 */
+	next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
+	if (next_fifo > CURRFIFO_1)
+		/* If disconnected, arbitrarily start with FIFO1. */
+		next_fifo = fifo = 0;
+	do {
+		next_fifo ^= CURRFIFO_1;
+		ahd_set_modes(ahd, next_fifo, next_fifo);
+		ahd_outb(ahd, DFCNTRL,
+			 ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN));
+		while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0)
+			ahd_delay(10);
+		/*
+		 * Set CURRFIFO to the now inactive channel.
+		 */
+		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+		ahd_outb(ahd, DFFSTAT, next_fifo);
+	} while (next_fifo != fifo);
+
+	/*
+	 * Reset the bus if we are initiating this reset
+	 */
+	ahd_clear_msg_state(ahd);
+	ahd_outb(ahd, SIMODE1,
+		 ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST|ENBUSFREE));
+
+	if (initiate_reset)
+		ahd_reset_current_bus(ahd);
+
+	ahd_clear_intstat(ahd);
+
+	/*
+	 * Clean up all the state information for the
+	 * pending transactions on this bus.
+	 */
+	found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel,
+			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
+			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
+
+	/*
+	 * Cleanup anything left in the FIFOs.
+	 */
+	ahd_clear_fifo(ahd, 0);
+	ahd_clear_fifo(ahd, 1);
+
+	/*
+	 * Revert to async/narrow transfers until we renegotiate.
+	 */
+	max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
+	for (target = 0; target <= max_scsiid; target++) {
+
+		if (ahd->enabled_targets[target] == NULL)
+			continue;
+		for (initiator = 0; initiator <= max_scsiid; initiator++) {
+			struct ahd_devinfo devinfo;
+
+			ahd_compile_devinfo(&devinfo, target, initiator,
+					    CAM_LUN_WILDCARD,
+					    'A', ROLE_UNKNOWN);
+			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+				      AHD_TRANS_CUR, /*paused*/TRUE);
+			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
+					 /*offset*/0, /*ppr_options*/0,
+					 AHD_TRANS_CUR, /*paused*/TRUE);
+		}
+	}
+
+#ifdef AHD_TARGET_MODE
+	max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
+
+	/*
+	 * Send an immediate notify ccb to all target mode peripheral
+	 * drivers affected by this action.
+	 */
+	for (target = 0; target <= max_scsiid; target++) {
+		struct ahd_tmode_tstate* tstate;
+		u_int lun;
+
+		tstate = ahd->enabled_targets[target];
+		if (tstate == NULL)
+			continue;
+		for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
+			struct ahd_tmode_lstate* lstate;
+
+			lstate = tstate->enabled_luns[lun];
+			if (lstate == NULL)
+				continue;
+
+			ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD,
+					       EVENT_TYPE_BUS_RESET, /*arg*/0);
+			ahd_send_lstate_events(ahd, lstate);
+		}
+	}
+#endif
+	/* Notify the XPT that a bus reset occurred */
+	ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD,
+		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
+	ahd_restart(ahd);
+	/*
+	 * Freeze the SIMQ until our poller can determine that
+	 * the bus reset has really gone away.  We set the initial
+	 * timer to 0 to have the check performed as soon as possible
+	 * from the timer context.
+	 */
+	if ((ahd->flags & AHD_RESET_POLL_ACTIVE) == 0) {
+		ahd->flags |= AHD_RESET_POLL_ACTIVE;
+		ahd_freeze_simq(ahd);
+		ahd_timer_reset(&ahd->reset_timer, 0, ahd_reset_poll, ahd);
+	}
+	return (found);
+}
+
+
+#define AHD_RESET_POLL_US 1000
+static void
+ahd_reset_poll(void *arg)
+{
+	struct	ahd_softc *ahd;
+	u_int	scsiseq1;
+	u_long	l;
+	u_long	s;
+	
+	ahd_list_lock(&l);
+	ahd = ahd_find_softc((struct ahd_softc *)arg);
+	if (ahd == NULL) {
+		printf("ahd_reset_poll: Instance %p no longer exists\n", arg);
+		ahd_list_unlock(&l);
+		return;
+	}
+	ahd_lock(ahd, &s);
+	ahd_pause(ahd);
+	ahd_update_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
+	if ((ahd_inb(ahd, SSTAT1) & SCSIRSTI) != 0) {
+		ahd_timer_reset(&ahd->reset_timer, AHD_RESET_POLL_US,
+				ahd_reset_poll, ahd);
+		ahd_unpause(ahd);
+		ahd_unlock(ahd, &s);
+		ahd_list_unlock(&l);
+		return;
+	}
+
+	/* Reset is now low.  Complete chip reinitialization. */
+	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
+	scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
+	ahd_outb(ahd, SCSISEQ1, scsiseq1 & (ENSELI|ENRSELI|ENAUTOATNP));
+	ahd_unpause(ahd);
+	ahd->flags &= ~AHD_RESET_POLL_ACTIVE;
+	ahd_unlock(ahd, &s);
+	ahd_release_simq(ahd);
+	ahd_list_unlock(&l);
+}
+
+/**************************** Statistics Processing ***************************/
+static void
+ahd_stat_timer(void *arg)
+{
+	struct	ahd_softc *ahd;
+	u_long	l;
+	u_long	s;
+	int	enint_coal;
+	
+	ahd_list_lock(&l);
+	ahd = ahd_find_softc((struct ahd_softc *)arg);
+	if (ahd == NULL) {
+		printf("ahd_stat_timer: Instance %p no longer exists\n", arg);
+		ahd_list_unlock(&l);
+		return;
+	}
+	ahd_lock(ahd, &s);
+
+	enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
+	if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold)
+		enint_coal |= ENINT_COALESCE;
+	else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold)
+		enint_coal &= ~ENINT_COALESCE;
+
+	if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) {
+		ahd_enable_coalescing(ahd, enint_coal);
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0)
+			printf("%s: Interrupt coalescing "
+			       "now %sabled. Cmds %d\n",
+			       ahd_name(ahd),
+			       (enint_coal & ENINT_COALESCE) ? "en" : "dis",
+			       ahd->cmdcmplt_total);
+#endif
+	}
+
+	ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1);
+	ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket];
+	ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0;
+	ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
+			ahd_stat_timer, ahd);
+	ahd_unlock(ahd, &s);
+	ahd_list_unlock(&l);
+}
+
+/****************************** Status Processing *****************************/
+void
+ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb)
+{
+	if (scb->hscb->shared_data.istatus.scsi_status != 0) {
+		ahd_handle_scsi_status(ahd, scb);
+	} else {
+		ahd_calc_residual(ahd, scb);
+		ahd_done(ahd, scb);
+	}
+}
+
+void
+ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
+{
+	struct hardware_scb *hscb;
+	u_int  qfreeze_cnt;
+
+	/*
+	 * The sequencer freezes its select-out queue
+	 * anytime a SCSI status error occurs.  We must
+	 * handle the error and decrement the QFREEZE count
+	 * to allow the sequencer to continue.
+	 */
+	hscb = scb->hscb; 
+
+	/* Freeze the queue until the client sees the error. */
+	ahd_freeze_devq(ahd, scb);
+	ahd_freeze_scb(scb);
+	qfreeze_cnt = ahd_inw(ahd, QFREEZE_COUNT);
+	if (qfreeze_cnt == 0) {
+		printf("%s: Bad status with 0 qfreeze count!\n", ahd_name(ahd));
+	} else {
+		qfreeze_cnt--;
+		ahd_outw(ahd, QFREEZE_COUNT, qfreeze_cnt);
+	}
+	if (qfreeze_cnt == 0)
+		ahd_outb(ahd, SEQ_FLAGS2,
+			 ahd_inb(ahd, SEQ_FLAGS2) & ~SELECTOUT_QFROZEN);
+
+	/* Don't want to clobber the original sense code */
+	if ((scb->flags & SCB_SENSE) != 0) {
+		/*
+		 * Clear the SCB_SENSE Flag and perform
+		 * a normal command completion.
+		 */
+		scb->flags &= ~SCB_SENSE;
+		ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
+		ahd_done(ahd, scb);
+		return;
+	}
+	ahd_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
+	ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status);
+	switch (hscb->shared_data.istatus.scsi_status) {
+	case STATUS_PKT_SENSE:
+	{
+		struct scsi_status_iu_header *siu;
+
+		ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD);
+		siu = (struct scsi_status_iu_header *)scb->sense_data;
+		ahd_set_scsi_status(scb, siu->status);
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
+			ahd_print_path(ahd, scb);
+			printf("SCB 0x%x Received PKT Status of 0x%x\n",
+			       SCB_GET_TAG(scb), siu->status);
+			printf("\tflags = 0x%x, sense len = 0x%x, "
+			       "pktfail = 0x%x\n",
+			       siu->flags, scsi_4btoul(siu->sense_length),
+			       scsi_4btoul(siu->pkt_failures_length));
+		}
+#endif
+		if ((siu->flags & SIU_RSPVALID) != 0) {
+			ahd_print_path(ahd, scb);
+			if (scsi_4btoul(siu->pkt_failures_length) < 4) {
+				printf("Unable to parse pkt_failures\n");
+			} else {
+
+				switch (SIU_PKTFAIL_CODE(siu)) {
+				case SIU_PFC_NONE:
+					printf("No packet failure found\n");
+					break;
+				case SIU_PFC_CIU_FIELDS_INVALID:
+					printf("Invalid Command IU Field\n");
+					break;
+				case SIU_PFC_TMF_NOT_SUPPORTED:
+					printf("TMF not supported\n");
+					break;
+				case SIU_PFC_TMF_FAILED:
+					printf("TMF failed\n");
+					break;
+				case SIU_PFC_INVALID_TYPE_CODE:
+					printf("Invalid L_Q Type code\n");
+					break;
+				case SIU_PFC_ILLEGAL_REQUEST:
+					printf("Illegal request\n");
+				default:
+					break;
+				}
+			}
+			if (siu->status == SCSI_STATUS_OK)
+				ahd_set_transaction_status(scb,
+							   CAM_REQ_CMP_ERR);
+		}
+		if ((siu->flags & SIU_SNSVALID) != 0) {
+			scb->flags |= SCB_PKT_SENSE;
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_SENSE) != 0)
+				printf("Sense data available\n");
+#endif
+		}
+		ahd_done(ahd, scb);
+		break;
+	}
+	case SCSI_STATUS_CMD_TERMINATED:
+	case SCSI_STATUS_CHECK_COND:
+	{
+		struct ahd_devinfo devinfo;
+		struct ahd_dma_seg *sg;
+		struct scsi_sense *sc;
+		struct ahd_initiator_tinfo *targ_info;
+		struct ahd_tmode_tstate *tstate;
+		struct ahd_transinfo *tinfo;
+#ifdef AHD_DEBUG
+		if (ahd_debug & AHD_SHOW_SENSE) {
+			ahd_print_path(ahd, scb);
+			printf("SCB %d: requests Check Status\n",
+			       SCB_GET_TAG(scb));
+		}
+#endif
+
+		if (ahd_perform_autosense(scb) == 0)
+			break;
+
+		ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
+				    SCB_GET_TARGET(ahd, scb),
+				    SCB_GET_LUN(scb),
+				    SCB_GET_CHANNEL(ahd, scb),
+				    ROLE_INITIATOR);
+		targ_info = ahd_fetch_transinfo(ahd,
+						devinfo.channel,
+						devinfo.our_scsiid,
+						devinfo.target,
+						&tstate);
+		tinfo = &targ_info->curr;
+		sg = scb->sg_list;
+		sc = (struct scsi_sense *)hscb->shared_data.idata.cdb;
+		/*
+		 * Save off the residual if there is one.
+		 */
+		ahd_update_residual(ahd, scb);
+#ifdef AHD_DEBUG
+		if (ahd_debug & AHD_SHOW_SENSE) {
+			ahd_print_path(ahd, scb);
+			printf("Sending Sense\n");
+		}
+#endif
+		scb->sg_count = 0;
+		sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb),
+				  ahd_get_sense_bufsize(ahd, scb),
+				  /*last*/TRUE);
+		sc->opcode = REQUEST_SENSE;
+		sc->byte2 = 0;
+		if (tinfo->protocol_version <= SCSI_REV_2
+		 && SCB_GET_LUN(scb) < 8)
+			sc->byte2 = SCB_GET_LUN(scb) << 5;
+		sc->unused[0] = 0;
+		sc->unused[1] = 0;
+		sc->length = ahd_get_sense_bufsize(ahd, scb);
+		sc->control = 0;
+
+		/*
+		 * We can't allow the target to disconnect.
+		 * This will be an untagged transaction and
+		 * having the target disconnect will make this
+		 * transaction indistinguishable from outstanding
+		 * tagged transactions.
+		 */
+		hscb->control = 0;
+
+		/*
+		 * This request sense could be because the
+		 * device lost power or in some other
+		 * way has lost our transfer negotiations.
+		 * Renegotiate if appropriate.  Unit attention
+		 * errors will be reported before any data
+		 * phases occur.
+		 */
+		if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) {
+			ahd_update_neg_request(ahd, &devinfo,
+					       tstate, targ_info,
+					       AHD_NEG_IF_NON_ASYNC);
+		}
+		if (tstate->auto_negotiate & devinfo.target_mask) {
+			hscb->control |= MK_MESSAGE;
+			scb->flags &=
+			    ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET);
+			scb->flags |= SCB_AUTO_NEGOTIATE;
+		}
+		hscb->cdb_len = sizeof(*sc);
+		ahd_setup_data_scb(ahd, scb);
+		scb->flags |= SCB_SENSE;
+		ahd_queue_scb(ahd, scb);
+		/*
+		 * Ensure we have enough time to actually
+		 * retrieve the sense.
+		 */
+		ahd_scb_timer_reset(scb, 5 * 1000000);
+		break;
+	}
+	case SCSI_STATUS_OK:
+		printf("%s: Interrupted for status of 0???\n",
+		       ahd_name(ahd));
+		/* FALLTHROUGH */
+	default:
+		ahd_done(ahd, scb);
+		break;
+	}
+}
+
+/*
+ * Calculate the residual for a just completed SCB.
+ */
+void
+ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
+{
+	struct hardware_scb *hscb;
+	struct initiator_status *spkt;
+	uint32_t sgptr;
+	uint32_t resid_sgptr;
+	uint32_t resid;
+
+	/*
+	 * 5 cases.
+	 * 1) No residual.
+	 *    SG_STATUS_VALID clear in sgptr.
+	 * 2) Transferless command
+	 * 3) Never performed any transfers.
+	 *    sgptr has SG_FULL_RESID set.
+	 * 4) No residual but target did not
+	 *    save data pointers after the
+	 *    last transfer, so sgptr was
+	 *    never updated.
+	 * 5) We have a partial residual.
+	 *    Use residual_sgptr to determine
+	 *    where we are.
+	 */
+
+	hscb = scb->hscb;
+	sgptr = ahd_le32toh(hscb->sgptr);
+	if ((sgptr & SG_STATUS_VALID) == 0)
+		/* Case 1 */
+		return;
+	sgptr &= ~SG_STATUS_VALID;
+
+	if ((sgptr & SG_LIST_NULL) != 0)
+		/* Case 2 */
+		return;
+
+	/*
+	 * Residual fields are the same in both
+	 * target and initiator status packets,
+	 * so we can always use the initiator fields
+	 * regardless of the role for this SCB.
+	 */
+	spkt = &hscb->shared_data.istatus;
+	resid_sgptr = ahd_le32toh(spkt->residual_sgptr);
+	if ((sgptr & SG_FULL_RESID) != 0) {
+		/* Case 3 */
+		resid = ahd_get_transfer_length(scb);
+	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
+		/* Case 4 */
+		return;
+	} else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) {
+		ahd_print_path(ahd, scb);
+		printf("data overrun detected Tag == 0x%x.\n",
+		       SCB_GET_TAG(scb));
+		ahd_freeze_devq(ahd, scb);
+		ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
+		ahd_freeze_scb(scb);
+		return;
+	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
+		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
+		/* NOTREACHED */
+	} else {
+		struct ahd_dma_seg *sg;
+
+		/*
+		 * Remainder of the SG where the transfer
+		 * stopped.  
+		 */
+		resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK;
+		sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK);
+
+		/* The residual sg_ptr always points to the next sg */
+		sg--;
+
+		/*
+		 * Add up the contents of all residual
+		 * SG segments that are after the SG where
+		 * the transfer stopped.
+		 */
+		while ((ahd_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) {
+			sg++;
+			resid += ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
+		}
+	}
+	if ((scb->flags & SCB_SENSE) == 0)
+		ahd_set_residual(scb, resid);
+	else
+		ahd_set_sense_residual(scb, resid);
+
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_MISC) != 0) {
+		ahd_print_path(ahd, scb);
+		printf("Handled %sResidual of %d bytes\n",
+		       (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
+	}
+#endif
+}
+
+/******************************* Target Mode **********************************/
+#ifdef AHD_TARGET_MODE
+/*
+ * Add a target mode event to this lun's queue
+ */
+static void
+ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate,
+		       u_int initiator_id, u_int event_type, u_int event_arg)
+{
+	struct ahd_tmode_event *event;
+	int pending;
+
+	xpt_freeze_devq(lstate->path, /*count*/1);
+	if (lstate->event_w_idx >= lstate->event_r_idx)
+		pending = lstate->event_w_idx - lstate->event_r_idx;
+	else
+		pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1
+			- (lstate->event_r_idx - lstate->event_w_idx);
+
+	if (event_type == EVENT_TYPE_BUS_RESET
+	 || event_type == MSG_BUS_DEV_RESET) {
+		/*
+		 * Any earlier events are irrelevant, so reset our buffer.
+		 * This has the effect of allowing us to deal with reset
+		 * floods (an external device holding down the reset line)
+		 * without losing the event that is really interesting.
+		 */
+		lstate->event_r_idx = 0;
+		lstate->event_w_idx = 0;
+		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
+	}
+
+	if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) {
+		xpt_print_path(lstate->path);
+		printf("immediate event %x:%x lost\n",
+		       lstate->event_buffer[lstate->event_r_idx].event_type,
+		       lstate->event_buffer[lstate->event_r_idx].event_arg);
+		lstate->event_r_idx++;
+		if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
+			lstate->event_r_idx = 0;
+		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
+	}
+
+	event = &lstate->event_buffer[lstate->event_w_idx];
+	event->initiator_id = initiator_id;
+	event->event_type = event_type;
+	event->event_arg = event_arg;
+	lstate->event_w_idx++;
+	if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
+		lstate->event_w_idx = 0;
+}
+
+/*
+ * Send any target mode events queued up waiting
+ * for immediate notify resources.
+ */
+void
+ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate)
+{
+	struct ccb_hdr *ccbh;
+	struct ccb_immed_notify *inot;
+
+	while (lstate->event_r_idx != lstate->event_w_idx
+	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
+		struct ahd_tmode_event *event;
+
+		event = &lstate->event_buffer[lstate->event_r_idx];
+		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
+		inot = (struct ccb_immed_notify *)ccbh;
+		switch (event->event_type) {
+		case EVENT_TYPE_BUS_RESET:
+			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
+			break;
+		default:
+			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
+			inot->message_args[0] = event->event_type;
+			inot->message_args[1] = event->event_arg;
+			break;
+		}
+		inot->initiator_id = event->initiator_id;
+		inot->sense_len = 0;
+		xpt_done((union ccb *)inot);
+		lstate->event_r_idx++;
+		if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
+			lstate->event_r_idx = 0;
+	}
+}
+#endif
+
+/******************** Sequencer Program Patching/Download *********************/
+
+#ifdef AHD_DUMP_SEQ
+void
+ahd_dumpseq(struct ahd_softc* ahd)
+{
+	int i;
+	int max_prog;
+
+	max_prog = 2048;
+
+	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
+	ahd_outb(ahd, PRGMCNT, 0);
+	ahd_outb(ahd, PRGMCNT+1, 0);
+	for (i = 0; i < max_prog; i++) {
+		uint8_t ins_bytes[4];
+
+		ahd_insb(ahd, SEQRAM, ins_bytes, 4);
+		printf("0x%08x\n", ins_bytes[0] << 24
+				 | ins_bytes[1] << 16
+				 | ins_bytes[2] << 8
+				 | ins_bytes[3]);
+	}
+}
+#endif
+
+static void
+ahd_loadseq(struct ahd_softc *ahd)
+{
+	struct	cs cs_table[num_critical_sections];
+	u_int	begin_set[num_critical_sections];
+	u_int	end_set[num_critical_sections];
+	struct	patch *cur_patch;
+	u_int	cs_count;
+	u_int	cur_cs;
+	u_int	i;
+	int	downloaded;
+	u_int	skip_addr;
+	u_int	sg_prefetch_cnt;
+	u_int	sg_prefetch_cnt_limit;
+	u_int	sg_prefetch_align;
+	u_int	sg_size;
+	uint8_t	download_consts[DOWNLOAD_CONST_COUNT];
+
+	if (bootverbose)
+		printf("%s: Downloading Sequencer Program...",
+		       ahd_name(ahd));
+
+#if DOWNLOAD_CONST_COUNT != 7
+#error "Download Const Mismatch"
+#endif
+	/*
+	 * Start out with 0 critical sections
+	 * that apply to this firmware load.
+	 */
+	cs_count = 0;
+	cur_cs = 0;
+	memset(begin_set, 0, sizeof(begin_set));
+	memset(end_set, 0, sizeof(end_set));
+
+	/*
+	 * Setup downloadable constant table.
+	 * 
+	 * The computation for the S/G prefetch variables is
+	 * a bit complicated.  We would like to always fetch
+	 * in terms of cacheline-sized increments.  However,
+	 * if the cacheline is not an even multiple of the
+	 * SG element size or is larger than our SG RAM, using
+	 * just the cache size might leave us with only a portion
+	 * of an SG element at the tail of a prefetch.  If the
+	 * cacheline is larger than our S/G prefetch buffer less
+	 * the size of an SG element, we may round down to a cacheline
+	 * that doesn't contain any or all of the S/G of interest
+	 * within the bounds of our S/G ram.  Provide variables to
+	 * the sequencer that will allow it to handle these edge
+	 * cases.
+	 */
+	/* Start by aligning to the nearest cacheline. */
+	sg_prefetch_align = ahd->pci_cachesize;
+	if (sg_prefetch_align == 0)
+		sg_prefetch_align = 8;
+	/* Round down to the nearest power of 2. */
+	while (powerof2(sg_prefetch_align) == 0)
+		sg_prefetch_align--;
+	/*
+	 * If the cacheline boundary is greater than half our prefetch RAM
+	 * we risk not being able to fetch even a single complete S/G
+	 * segment if we align to that boundary.
+	 */
+	if (sg_prefetch_align > CCSGADDR_MAX/2)
+		sg_prefetch_align = CCSGADDR_MAX/2;
+	/* Start by fetching a single cacheline. */
+	sg_prefetch_cnt = sg_prefetch_align;
+	/*
+	 * Increment the prefetch count by cachelines until
+	 * at least one S/G element will fit.
+	 */
+	sg_size = sizeof(struct ahd_dma_seg);
+	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
+		sg_size = sizeof(struct ahd_dma64_seg);
+	while (sg_prefetch_cnt < sg_size)
+		sg_prefetch_cnt += sg_prefetch_align;
+	/*
+	 * If the cacheline is not an even multiple of
+	 * the S/G size, we may only get a partial S/G when
+	 * we align. Add a cacheline if this is the case.
+	 */
+	if ((sg_prefetch_align % sg_size) != 0
+	 && (sg_prefetch_cnt < CCSGADDR_MAX))
+		sg_prefetch_cnt += sg_prefetch_align;
+	/*
+	 * Lastly, compute a value that the sequencer can use
+	 * to determine if the remainder of the CCSGRAM buffer
+	 * has a full S/G element in it.
+	 */
+	sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1);
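+	/*
+	 * Note that download_consts[] holds 8-bit values, so this
+	 * negative count is stored as its two's complement.  For
+	 * example, assuming 8-byte S/G elements and a 64-byte
+	 * cacheline, sg_prefetch_cnt is 64 and the limit becomes
+	 * -(64 - 8 + 1) = -57, stored as 0xC7.
+	 */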
+	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
+	download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit;
+	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1);
+	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1);
+	download_consts[SG_SIZEOF] = sg_size;
+	download_consts[PKT_OVERRUN_BUFOFFSET] =
+		(ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256;
+	download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN;
+	cur_patch = patches;
+	downloaded = 0;
+	skip_addr = 0;
+	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
+	ahd_outb(ahd, PRGMCNT, 0);
+	ahd_outb(ahd, PRGMCNT+1, 0);
+
+	for (i = 0; i < sizeof(seqprog)/4; i++) {
+		if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) {
+			/*
+			 * Don't download this instruction as it
+			 * is in a patch that was removed.
+			 */
+			continue;
+		}
+		/*
+		 * Move through the CS table until we find a CS
+		 * that might apply to this instruction.
+		 */
+		for (; cur_cs < num_critical_sections; cur_cs++) {
+			if (critical_sections[cur_cs].end <= i) {
+				if (begin_set[cs_count] == TRUE
+				 && end_set[cs_count] == FALSE) {
+					cs_table[cs_count].end = downloaded;
+				 	end_set[cs_count] = TRUE;
+					cs_count++;
+				}
+				continue;
+			}
+			if (critical_sections[cur_cs].begin <= i
+			 && begin_set[cs_count] == FALSE) {
+				cs_table[cs_count].begin = downloaded;
+				begin_set[cs_count] = TRUE;
+			}
+			break;
+		}
+		ahd_download_instr(ahd, i, download_consts);
+		downloaded++;
+	}
+
+	ahd->num_critical_sections = cs_count;
+	if (cs_count != 0) {
+
+		cs_count *= sizeof(struct cs);
+		ahd->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
+		if (ahd->critical_sections == NULL)
+			panic("ahd_loadseq: Could not malloc");
+		memcpy(ahd->critical_sections, cs_table, cs_count);
+	}
+	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE);
+
+	if (bootverbose) {
+		printf(" %d instructions downloaded\n", downloaded);
+		printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
+		       ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags);
+	}
+}
+
+static int
+ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch,
+		u_int start_instr, u_int *skip_addr)
+{
+	struct	patch *cur_patch;
+	struct	patch *last_patch;
+	u_int	num_patches;
+
+	num_patches = sizeof(patches)/sizeof(struct patch);
+	last_patch = &patches[num_patches];
+	cur_patch = *start_patch;
+
+	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
+
+		if (cur_patch->patch_func(ahd) == 0) {
+
+			/* Start rejecting code */
+			*skip_addr = start_instr + cur_patch->skip_instr;
+			cur_patch += cur_patch->skip_patch;
+		} else {
+			/* Accepted this patch.  Advance to the next
+			 * one and wait for our instruction pointer to
+			 * hit this point.
+			 */
+			cur_patch++;
+		}
+	}
+
+	*start_patch = cur_patch;
+	if (start_instr < *skip_addr)
+		/* Still skipping */
+		return (0);
+
+	return (1);
+}
+
+static u_int
+ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address)
+{
+	struct patch *cur_patch;
+	int address_offset;
+	u_int skip_addr;
+	u_int i;
+
+	address_offset = 0;
+	cur_patch = patches;
+	skip_addr = 0;
+
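+	/*
+	 * 'address' refers to the unpatched source program.  Count the
+	 * instructions below it that were skipped by rejected patches;
+	 * subtracting that total yields the corresponding address in
+	 * the downloaded (patched) program.
+	 */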
+	for (i = 0; i < address;) {
+
+		ahd_check_patch(ahd, &cur_patch, i, &skip_addr);
+
+		if (skip_addr > i) {
+			int end_addr;
+
+			end_addr = MIN(address, skip_addr);
+			address_offset += end_addr - i;
+			i = skip_addr;
+		} else {
+			i++;
+		}
+	}
+	return (address - address_offset);
+}
+
+static void
+ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
+{
+	union	ins_formats instr;
+	struct	ins_format1 *fmt1_ins;
+	struct	ins_format3 *fmt3_ins;
+	u_int	opcode;
+
+	/*
+	 * The firmware is always compiled into a little endian format.
+	 */
+	instr.integer = ahd_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);
+
+	fmt1_ins = &instr.format1;
+	fmt3_ins = NULL;
+
+	/* Pull the opcode */
+	opcode = instr.format1.opcode;
+	switch (opcode) {
+	case AIC_OP_JMP:
+	case AIC_OP_JC:
+	case AIC_OP_JNC:
+	case AIC_OP_CALL:
+	case AIC_OP_JNE:
+	case AIC_OP_JNZ:
+	case AIC_OP_JE:
+	case AIC_OP_JZ:
+	{
+		fmt3_ins = &instr.format3;
+		fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
+		/* FALLTHROUGH */
+	}
+	case AIC_OP_OR:
+	case AIC_OP_AND:
+	case AIC_OP_XOR:
+	case AIC_OP_ADD:
+	case AIC_OP_ADC:
+	case AIC_OP_BMOV:
+		if (fmt1_ins->parity != 0) {
+			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
+		}
+		fmt1_ins->parity = 0;
+		/* FALLTHROUGH */
+	case AIC_OP_ROL:
+	{
+		int i, count;
+
+		/* Calculate odd parity for the instruction */
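+		/*
+		 * Only bits 0-30 are counted here; the remaining bit
+		 * is the parity bit itself, set below so the full
+		 * 32-bit instruction word carries odd parity.
+		 */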
+		for (i = 0, count = 0; i < 31; i++) {
+			uint32_t mask;
+
+			mask = 0x01 << i;
+			if ((instr.integer & mask) != 0)
+				count++;
+		}
+		if ((count & 0x01) == 0)
+			instr.format1.parity = 1;
+
+		/* The sequencer is a little endian cpu */
+		instr.integer = ahd_htole32(instr.integer);
+		ahd_outsb(ahd, SEQRAM, instr.bytes, 4);
+		break;
+	}
+	default:
+		panic("Unknown opcode encountered in seq program");
+		break;
+	}
+}
+
+static int
+ahd_probe_stack_size(struct ahd_softc *ahd)
+{
+	int last_probe;
+
+	last_probe = 0;
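+	/*
+	 * Probe the sequencer stack depth empirically: push an
+	 * increasing count of known 16-bit patterns onto the STACK
+	 * register and read them back.  The depth reported is the
+	 * largest count that reads back intact.
+	 */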
+	while (1) {
+		int i;
+
+		/*
+		 * We avoid using 0 as a pattern to avoid
+		 * confusion if the stack implementation
+		 * "back-fills" with zeros when "popping"
+		 * entries.
+		 */
+		for (i = 1; i <= last_probe+1; i++) {
+		       ahd_outb(ahd, STACK, i & 0xFF);
+		       ahd_outb(ahd, STACK, (i >> 8) & 0xFF);
+		}
+
+		/* Verify */
+		for (i = last_probe+1; i > 0; i--) {
+			u_int stack_entry;
+
+			stack_entry = ahd_inb(ahd, STACK)
+				    |(ahd_inb(ahd, STACK) << 8);
+			if (stack_entry != i)
+				goto sized;
+		}
+		last_probe++;
+	}
+sized:
+	return (last_probe);
+}
+
+void
+ahd_dump_all_cards_state(void)
+{
+	struct ahd_softc *list_ahd;
+
+	TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
+		ahd_dump_card_state(list_ahd);
+	}
+}
+
+int
+ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries,
+		   const char *name, u_int address, u_int value,
+		   u_int *cur_column, u_int wrap_point)
+{
+	int	printed;
+	u_int	printed_mask;
+
+	if (cur_column != NULL && *cur_column >= wrap_point) {
+		printf("\n");
+		*cur_column = 0;
+	}
+	printed = printf("%s[0x%x]", name, value);
+	if (table == NULL) {
+		printed += printf(" ");
+		*cur_column += printed;
+		return (printed);
+	}
+	printed_mask = 0;
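+	/*
+	 * Emit the symbolic name of every table entry whose bits match
+	 * the register value, OR-ing each entry's mask into
+	 * printed_mask, until all bits are accounted for or no further
+	 * entry applies.
+	 */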
+	while (printed_mask != 0xFF) {
+		int entry;
+
+		for (entry = 0; entry < num_entries; entry++) {
+			if (((value & table[entry].mask)
+			  != table[entry].value)
+			 || ((printed_mask & table[entry].mask)
+			  == table[entry].mask))
+				continue;
+
+			printed += printf("%s%s",
+					  printed_mask == 0 ? ":(" : "|",
+					  table[entry].name);
+			printed_mask |= table[entry].mask;
+			
+			break;
+		}
+		if (entry >= num_entries)
+			break;
+	}
+	if (printed_mask != 0)
+		printed += printf(") ");
+	else
+		printed += printf(" ");
+	if (cur_column != NULL)
+		*cur_column += printed;
+	return (printed);
+}
+
+void
+ahd_dump_card_state(struct ahd_softc *ahd)
+{
+	struct scb	*scb;
+	ahd_mode_state	 saved_modes;
+	u_int		 dffstat;
+	int		 paused;
+	u_int		 scb_index;
+	u_int		 saved_scb_index;
+	u_int		 cur_col;
+	int		 i;
+
+	if (ahd_is_paused(ahd)) {
+		paused = 1;
+	} else {
+		paused = 0;
+		ahd_pause(ahd);
+	}
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
+	       "%s: Dumping Card State at program address 0x%x Mode 0x%x\n",
+	       ahd_name(ahd), 
+	       ahd_inb(ahd, CURADDR) | (ahd_inb(ahd, CURADDR+1) << 8),
+	       ahd_build_mode_state(ahd, ahd->saved_src_mode,
+				    ahd->saved_dst_mode));
+	if (paused)
+		printf("Card was paused\n");
+
+	if (ahd_check_cmdcmpltqueues(ahd))
+		printf("Completions are pending\n");
+
+	/*
+	 * Mode independent registers.
+	 */
+	cur_col = 0;
+	ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50);
+	ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50);
+	ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50);
+	ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50);
+	ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50);
+	ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50);
+	ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50);
+	ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50);
+	ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50);
+	ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50);
+	ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50);
+	ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50);
+	ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50);
+	ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50);
+	ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50);
+	ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50);
+	ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50);
+	ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50);
+	ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50);
+	ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50);
+	ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50);
+	ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50);
+	ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50);
+	ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50);
+	ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50);
+	ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50);
+	ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50);
+	printf("\n");
+	printf("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x "
+	       "CURRSCB 0x%x NEXTSCB 0x%x\n",
+	       ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING),
+	       ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB),
+	       ahd_inw(ahd, NEXTSCB));
+	cur_col = 0;
+	/* QINFIFO */
+	ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
+			   CAM_LUN_WILDCARD, SCB_LIST_NULL,
+			   ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT);
+	saved_scb_index = ahd_get_scbptr(ahd);
+	printf("Pending list:");
+	i = 0;
+	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
+		if (i++ > AHD_SCB_MAX)
+			break;
+		cur_col = printf("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb),
+				 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT));
+		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
+		ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL),
+				      &cur_col, 60);
+		ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID),
+				     &cur_col, 60);
+	}
+	printf("\nTotal %d\n", i);
+
+	printf("Kernel Free SCB list: ");
+	i = 0;
+	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
+		struct scb *list_scb;
+
+		list_scb = scb;
+		do {
+			printf("%d ", SCB_GET_TAG(list_scb));
+			list_scb = LIST_NEXT(list_scb, collision_links);
+		} while (list_scb && i++ < AHD_SCB_MAX);
+	}
+
+	LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
+		if (i++ > AHD_SCB_MAX)
+			break;
+		printf("%d ", SCB_GET_TAG(scb));
+	}
+	printf("\n");
+
+	printf("Sequencer Complete DMA-inprog list: ");
+	scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD);
+	i = 0;
+	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
+		ahd_set_scbptr(ahd, scb_index);
+		printf("%d ", scb_index);
+		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
+	}
+	printf("\n");
+
+	printf("Sequencer Complete list: ");
+	scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD);
+	i = 0;
+	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
+		ahd_set_scbptr(ahd, scb_index);
+		printf("%d ", scb_index);
+		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
+	}
+	printf("\n");
+
+	
+	printf("Sequencer DMA-Up and Complete list: ");
+	scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
+	i = 0;
+	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
+		ahd_set_scbptr(ahd, scb_index);
+		printf("%d ", scb_index);
+		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
+	}
+	printf("\n");
+	ahd_set_scbptr(ahd, saved_scb_index);
+	dffstat = ahd_inb(ahd, DFFSTAT);
+	for (i = 0; i < 2; i++) {
+#ifdef AHD_DEBUG
+		struct scb *fifo_scb;
+#endif
+		u_int	    fifo_scbptr;
+
+		ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
+		fifo_scbptr = ahd_get_scbptr(ahd);
+		printf("\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n",
+		       ahd_name(ahd), i,
+		       (dffstat & (FIFO0FREE << i)) ? "Free" : "Active",
+		       ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr);
+		cur_col = 0;
+		ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50);
+		ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50);
+		ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50);
+		ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50);
+		ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW),
+					  &cur_col, 50);
+		ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50);
+		ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50);
+		ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50);
+		ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50);
+		if (cur_col > 50) {
+			printf("\n");
+			cur_col = 0;
+		}
+		cur_col += printf("SHADDR = 0x%x%x, SHCNT = 0x%x ",
+				  ahd_inl(ahd, SHADDR+4),
+				  ahd_inl(ahd, SHADDR),
+				  (ahd_inb(ahd, SHCNT)
+				| (ahd_inb(ahd, SHCNT + 1) << 8)
+				| (ahd_inb(ahd, SHCNT + 2) << 16)));
+		if (cur_col > 50) {
+			printf("\n");
+			cur_col = 0;
+		}
+		cur_col += printf("HADDR = 0x%x%x, HCNT = 0x%x ",
+				  ahd_inl(ahd, HADDR+4),
+				  ahd_inl(ahd, HADDR),
+				  (ahd_inb(ahd, HCNT)
+				| (ahd_inb(ahd, HCNT + 1) << 8)
+				| (ahd_inb(ahd, HCNT + 2) << 16)));
+		ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50);
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_SG) != 0) {
+			fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr);
+			if (fifo_scb != NULL)
+				ahd_dump_sglist(fifo_scb);
+		}
+#endif
+	}
+	printf("\nLQIN: ");
+	for (i = 0; i < 20; i++)
+		printf("0x%x ", ahd_inb(ahd, LQIN + i));
+	printf("\n");
+	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+	printf("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n",
+	       ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE),
+	       ahd_inb(ahd, OPTIONMODE));
+	printf("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n",
+	       ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT),
+	       ahd_inb(ahd, MAXCMDCNT));
+	ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50);
+	printf("\n");
+	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
+	cur_col = 0;
+	ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50);
+	printf("\n");
+	ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
+	printf("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n",
+	       ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX),
+	       ahd_inw(ahd, DINDEX));
+	printf("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n",
+	       ahd_name(ahd), ahd_get_scbptr(ahd),
+	       ahd_inw_scbram(ahd, SCB_NEXT),
+	       ahd_inw_scbram(ahd, SCB_NEXT2));
+	printf("CDB %x %x %x %x %x %x\n",
+	       ahd_inb_scbram(ahd, SCB_CDB_STORE),
+	       ahd_inb_scbram(ahd, SCB_CDB_STORE+1),
+	       ahd_inb_scbram(ahd, SCB_CDB_STORE+2),
+	       ahd_inb_scbram(ahd, SCB_CDB_STORE+3),
+	       ahd_inb_scbram(ahd, SCB_CDB_STORE+4),
+	       ahd_inb_scbram(ahd, SCB_CDB_STORE+5));
+	printf("STACK:");
+	for (i = 0; i < ahd->stack_size; i++) {
+		ahd->saved_stack[i] =
+		    ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8);
+		printf(" 0x%x", ahd->saved_stack[i]);
+	}
+	for (i = ahd->stack_size-1; i >= 0; i--) {
+		ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF);
+		ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF);
+	}
+	printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
+	ahd_platform_dump_card_state(ahd);
+	ahd_restore_modes(ahd, saved_modes);
+	if (paused == 0)
+		ahd_unpause(ahd);
+}
+
+void
+ahd_dump_scbs(struct ahd_softc *ahd)
+{
+	ahd_mode_state saved_modes;
+	u_int	       saved_scb_index;
+	int	       i;
+
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	saved_scb_index = ahd_get_scbptr(ahd);
+	for (i = 0; i < AHD_SCB_MAX; i++) {
+		ahd_set_scbptr(ahd, i);
+		printf("%3d", i);
+		printf("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n",
+		       ahd_inb_scbram(ahd, SCB_CONTROL),
+		       ahd_inb_scbram(ahd, SCB_SCSIID),
+		       ahd_inw_scbram(ahd, SCB_NEXT),
+		       ahd_inw_scbram(ahd, SCB_NEXT2),
+		       ahd_inl_scbram(ahd, SCB_SGPTR),
+		       ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR));
+	}
+	printf("\n");
+	ahd_set_scbptr(ahd, saved_scb_index);
+	ahd_restore_modes(ahd, saved_modes);
+}
+
+/**************************** Flexport Logic **********************************/
+/*
+ * Read count 16bit words from 16bit word address start_addr from the
+ * SEEPROM attached to the controller, into buf, using the controller's
+ * SEEPROM reading state machine.  Optionally treat the data as a byte
+ * stream in terms of byte order.
+ */
+int
+ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf,
+		 u_int start_addr, u_int count, int bytestream)
+{
+	u_int cur_addr;
+	u_int end_addr;
+	int   error;
+
+	/*
+	 * If we never make it through the loop even once,
+	 * we were passed invalid arguments.
+	 */
+	error = EINVAL;
+	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+	end_addr = start_addr + count;
+	for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
+
+		ahd_outb(ahd, SEEADR, cur_addr);
+		ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART);
+		
+		error = ahd_wait_seeprom(ahd);
+		if (error)
+			break;
+		if (bytestream != 0) {
+			uint8_t *bytestream_ptr;
+
+			bytestream_ptr = (uint8_t *)buf;
+			*bytestream_ptr++ = ahd_inb(ahd, SEEDAT);
+			*bytestream_ptr = ahd_inb(ahd, SEEDAT+1);
+		} else {
+			/*
+			 * ahd_inw() already handles machine byte order.
+			 */
+			*buf = ahd_inw(ahd, SEEDAT);
+		}
+		buf++;
+	}
+	return (error);
+}
+
+/*
+ * Write count 16bit words from buf, into the SEEPROM attached to the
+ * controller starting at 16bit word address start_addr, using the
+ * controller's SEEPROM writing state machine.
+ */
+int
+ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf,
+		  u_int start_addr, u_int count)
+{
+	u_int cur_addr;
+	u_int end_addr;
+	int   error;
+	int   retval;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+	error = ENOENT;
+
+	/* Place the chip into write-enable mode */
+	ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR);
+	ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART);
+	error = ahd_wait_seeprom(ahd);
+	if (error)
+		return (error);
+
+	/*
+	 * Write the data.  If we don't get through the loop at
+	 * least once, the arguments were invalid.
+	 */
+	retval = EINVAL;
+	end_addr = start_addr + count;
+	for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
+		ahd_outw(ahd, SEEDAT, *buf++);
+		ahd_outb(ahd, SEEADR, cur_addr);
+		ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART);
+		
+		retval = ahd_wait_seeprom(ahd);
+		if (retval)
+			break;
+	}
+
+	/*
+	 * Disable writes.
+	 */
+	ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR);
+	ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART);
+	error = ahd_wait_seeprom(ahd);
+	if (error)
+		return (error);
+	return (retval);
+}
+
+/*
+ * Wait ~100us for the serial eeprom to satisfy our request.
+ */
+int
+ahd_wait_seeprom(struct ahd_softc *ahd)
+{
+	int cnt;
+
+	cnt = 20;
+	while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt)
+		ahd_delay(5);
+
+	if (cnt == 0)
+		return (ETIMEDOUT);
+	return (0);
+}
+
+/*
+ * Validate the two checksums in the per_channel
+ * vital product data struct.
+ */
+int
+ahd_verify_vpd_cksum(struct vpd_config *vpd)
+{
+	int i;
+	int maxaddr;
+	uint32_t checksum;
+	uint8_t *vpdarray;
+
+	vpdarray = (uint8_t *)vpd;
+	maxaddr = offsetof(struct vpd_config, vpd_checksum);
+	checksum = 0;
+	for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++)
+		checksum = checksum + vpdarray[i];
+	if (checksum == 0
+	 || (-checksum & 0xFF) != vpd->vpd_checksum)
+		return (0);
+
+	checksum = 0;
+	maxaddr = offsetof(struct vpd_config, checksum);
+	for (i = offsetof(struct vpd_config, default_target_flags);
+	     i < maxaddr; i++)
+		checksum = checksum + vpdarray[i];
+	if (checksum == 0
+	 || (-checksum & 0xFF) != vpd->checksum)
+		return (0);
+	return (1);
+}
+
+int
+ahd_verify_cksum(struct seeprom_config *sc)
+{
+	int i;
+	int maxaddr;
+	uint32_t checksum;
+	uint16_t *scarray;
+
+	maxaddr = (sizeof(*sc)/2) - 1;
+	checksum = 0;
+	scarray = (uint16_t *)sc;
+
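+	/*
+	 * Sum every 16-bit word in the structure except the final
+	 * word, which is assumed to hold the checksum itself.
+	 */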
+	for (i = 0; i < maxaddr; i++)
+		checksum = checksum + scarray[i];
+	if (checksum == 0
+	 || (checksum & 0xFFFF) != sc->checksum) {
+		return (0);
+	} else {
+		return (1);
+	}
+}
+
+int
+ahd_acquire_seeprom(struct ahd_softc *ahd)
+{
+	/*
+	 * We should be able to determine the SEEPROM type
+	 * from the flexport logic, but unfortunately not
+	 * all implementations have this logic and there is
+	 * no programmatic method for determining if the logic
+	 * is present.
+	 */
+	return (1);
+#if 0
+	uint8_t	seetype;
+	int	error;
+
+	error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype);
+	if (error != 0
+         || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
+		return (0);
+	return (1);
+#endif
+}
+
+void
+ahd_release_seeprom(struct ahd_softc *ahd)
+{
+	/* Currently a no-op */
+}
+
+int
+ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value)
+{
+	int error;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+	if (addr > 7)
+		panic("ahd_write_flexport: address out of range");
+	ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
+	error = ahd_wait_flexport(ahd);
+	if (error != 0)
+		return (error);
+	ahd_outb(ahd, BRDDAT, value);
+	ahd_flush_device_writes(ahd);
+	ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3));
+	ahd_flush_device_writes(ahd);
+	ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
+	ahd_flush_device_writes(ahd);
+	ahd_outb(ahd, BRDCTL, 0);
+	ahd_flush_device_writes(ahd);
+	return (0);
+}
+
+int
+ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value)
+{
+	int	error;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+	if (addr > 7)
+		panic("ahd_read_flexport: address out of range");
+	ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3));
+	error = ahd_wait_flexport(ahd);
+	if (error != 0)
+		return (error);
+	*value = ahd_inb(ahd, BRDDAT);
+	ahd_outb(ahd, BRDCTL, 0);
+	ahd_flush_device_writes(ahd);
+	return (0);
+}
+
+/*
+ * Wait at most 2 seconds for flexport arbitration to succeed.
+ */
+int
+ahd_wait_flexport(struct ahd_softc *ahd)
+{
+	int cnt;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
+	cnt = 1000000 * 2 / 5;
+	while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt)
+		ahd_delay(5);
+
+	if (cnt == 0)
+		return (ETIMEDOUT);
+	return (0);
+}
+
+/************************* Target Mode ****************************************/
+#ifdef AHD_TARGET_MODE
+cam_status
+ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb,
+		    struct ahd_tmode_tstate **tstate,
+		    struct ahd_tmode_lstate **lstate,
+		    int notfound_failure)
+{
+
+	if ((ahd->features & AHD_TARGETMODE) == 0)
+		return (CAM_REQ_INVALID);
+
+	/*
+	 * Handle the 'black hole' device that sucks up
+	 * requests to unattached luns on enabled targets.
+	 */
+	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
+	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
+		*tstate = NULL;
+		*lstate = ahd->black_hole;
+	} else {
+		u_int max_id;
+
+		max_id = (ahd->features & AHD_WIDE) ? 15 : 7;
+		if (ccb->ccb_h.target_id > max_id)
+			return (CAM_TID_INVALID);
+
+		if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS)
+			return (CAM_LUN_INVALID);
+
+		*tstate = ahd->enabled_targets[ccb->ccb_h.target_id];
+		*lstate = NULL;
+		if (*tstate != NULL)
+			*lstate =
+			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
+	}
+
+	if (notfound_failure != 0 && *lstate == NULL)
+		return (CAM_PATH_INVALID);
+
+	return (CAM_REQ_CMP);
+}
+
+void
+ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
+{
+#if NOT_YET
+	struct	   ahd_tmode_tstate *tstate;
+	struct	   ahd_tmode_lstate *lstate;
+	struct	   ccb_en_lun *cel;
+	cam_status status;
+	u_int	   target;
+	u_int	   lun;
+	u_int	   target_mask;
+	u_long	   s;
+	char	   channel;
+
+	status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate,
+				     /*notfound_failure*/FALSE);
+
+	if (status != CAM_REQ_CMP) {
+		ccb->ccb_h.status = status;
+		return;
+	}
+
+	if ((ahd->features & AHD_MULTIROLE) != 0) {
+		u_int	   our_id;
+
+		our_id = ahd->our_id;
+		if (ccb->ccb_h.target_id != our_id) {
+			if ((ahd->features & AHD_MULTI_TID) != 0
+		   	 && (ahd->flags & AHD_INITIATORROLE) != 0) {
+				/*
+				 * Only allow additional targets if
+				 * the initiator role is disabled.
+				 * The hardware cannot handle a re-select-in
+				 * on the initiator id during a re-select-out
+				 * on a different target id.
+				 */
+				status = CAM_TID_INVALID;
+			} else if ((ahd->flags & AHD_INITIATORROLE) != 0
+				|| ahd->enabled_luns > 0) {
+				/*
+				 * Only allow our target id to change
+				 * if the initiator role is not configured
+				 * and there are no enabled luns which
+				 * are attached to the currently registered
+				 * scsi id.
+				 */
+				status = CAM_TID_INVALID;
+			}
+		}
+	}
+
+	if (status != CAM_REQ_CMP) {
+		ccb->ccb_h.status = status;
+		return;
+	}
+
+	/*
+	 * We now have an id that is valid.
+	 * If we aren't in target mode, switch modes.
+	 */
+	if ((ahd->flags & AHD_TARGETROLE) == 0
+	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
+		u_long	s;
+
+		printf("Configuring Target Mode\n");
+		ahd_lock(ahd, &s);
+		if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
+			ccb->ccb_h.status = CAM_BUSY;
+			ahd_unlock(ahd, &s);
+			return;
+		}
+		ahd->flags |= AHD_TARGETROLE;
+		if ((ahd->features & AHD_MULTIROLE) == 0)
+			ahd->flags &= ~AHD_INITIATORROLE;
+		ahd_pause(ahd);
+		ahd_loadseq(ahd);
+		ahd_restart(ahd);
+		ahd_unlock(ahd, &s);
+	}
+	cel = &ccb->cel;
+	target = ccb->ccb_h.target_id;
+	lun = ccb->ccb_h.target_lun;
+	channel = SIM_CHANNEL(ahd, sim);
+	target_mask = 0x01 << target;
+	if (channel == 'B')
+		target_mask <<= 8;
+
+	if (cel->enable != 0) {
+		u_int scsiseq1;
+
+		/* Are we already enabled?? */
+		if (lstate != NULL) {
+			xpt_print_path(ccb->ccb_h.path);
+			printf("Lun already enabled\n");
+			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
+			return;
+		}
+
+		if (cel->grp6_len != 0
+		 || cel->grp7_len != 0) {
+			/*
+			 * Don't (yet?) support vendor
+			 * specific commands.
+			 */
+			ccb->ccb_h.status = CAM_REQ_INVALID;
+			printf("Non-zero Group Codes\n");
+			return;
+		}
+
+		/*
+		 * Seems to be okay.
+		 * Setup our data structures.
+		 */
+		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
+			tstate = ahd_alloc_tstate(ahd, target, channel);
+			if (tstate == NULL) {
+				xpt_print_path(ccb->ccb_h.path);
+				printf("Couldn't allocate tstate\n");
+				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+				return;
+			}
+		}
+		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
+		if (lstate == NULL) {
+			xpt_print_path(ccb->ccb_h.path);
+			printf("Couldn't allocate lstate\n");
+			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+			return;
+		}
+		memset(lstate, 0, sizeof(*lstate));
+		status = xpt_create_path(&lstate->path, /*periph*/NULL,
+					 xpt_path_path_id(ccb->ccb_h.path),
+					 xpt_path_target_id(ccb->ccb_h.path),
+					 xpt_path_lun_id(ccb->ccb_h.path));
+		if (status != CAM_REQ_CMP) {
+			free(lstate, M_DEVBUF);
+			xpt_print_path(ccb->ccb_h.path);
+			printf("Couldn't allocate path\n");
+			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+			return;
+		}
+		SLIST_INIT(&lstate->accept_tios);
+		SLIST_INIT(&lstate->immed_notifies);
+		ahd_lock(ahd, &s);
+		ahd_pause(ahd);
+		if (target != CAM_TARGET_WILDCARD) {
+			tstate->enabled_luns[lun] = lstate;
+			ahd->enabled_luns++;
+
+			if ((ahd->features & AHD_MULTI_TID) != 0) {
+				u_int targid_mask;
+
+				targid_mask = ahd_inb(ahd, TARGID)
+					    | (ahd_inb(ahd, TARGID + 1) << 8);
+
+				targid_mask |= target_mask;
+				ahd_outb(ahd, TARGID, targid_mask);
+				ahd_outb(ahd, TARGID+1, (targid_mask >> 8));
+				
+				ahd_update_scsiid(ahd, targid_mask);
+			} else {
+				u_int our_id;
+				char  channel;
+
+				channel = SIM_CHANNEL(ahd, sim);
+				our_id = SIM_SCSI_ID(ahd, sim);
+
+				/*
+				 * This can only happen if selections
+				 * are not enabled
+				 */
+				if (target != our_id) {
+					u_int sblkctl;
+					char  cur_channel;
+					int   swap;
+
+					sblkctl = ahd_inb(ahd, SBLKCTL);
+					cur_channel = (sblkctl & SELBUSB)
+						    ? 'B' : 'A';
+					if ((ahd->features & AHD_TWIN) == 0)
+						cur_channel = 'A';
+					swap = cur_channel != channel;
+					ahd->our_id = target;
+
+					if (swap)
+						ahd_outb(ahd, SBLKCTL,
+							 sblkctl ^ SELBUSB);
+
+					ahd_outb(ahd, SCSIID, target);
+
+					if (swap)
+						ahd_outb(ahd, SBLKCTL, sblkctl);
+				}
+			}
+		} else
+			ahd->black_hole = lstate;
+		/* Allow select-in operations */
+		if (ahd->black_hole != NULL && ahd->enabled_luns > 0) {
+			scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
+			scsiseq1 |= ENSELI;
+			ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
+			scsiseq1 = ahd_inb(ahd, SCSISEQ1);
+			scsiseq1 |= ENSELI;
+			ahd_outb(ahd, SCSISEQ1, scsiseq1);
+		}
+		ahd_unpause(ahd);
+		ahd_unlock(ahd, &s);
+		ccb->ccb_h.status = CAM_REQ_CMP;
+		xpt_print_path(ccb->ccb_h.path);
+		printf("Lun now enabled for target mode\n");
+	} else {
+		struct scb *scb;
+		int i, empty;
+
+		if (lstate == NULL) {
+			ccb->ccb_h.status = CAM_LUN_INVALID;
+			return;
+		}
+
+		ahd_lock(ahd, &s);
+		
+		ccb->ccb_h.status = CAM_REQ_CMP;
+		LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
+			struct ccb_hdr *ccbh;
+
+			ccbh = &scb->io_ctx->ccb_h;
+			if (ccbh->func_code == XPT_CONT_TARGET_IO
+			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
+				printf("CTIO pending\n");
+				ccb->ccb_h.status = CAM_REQ_INVALID;
+				ahd_unlock(ahd, &s);
+				return;
+			}
+		}
+
+		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
+			printf("ATIOs pending\n");
+			ccb->ccb_h.status = CAM_REQ_INVALID;
+		}
+
+		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
+			printf("INOTs pending\n");
+			ccb->ccb_h.status = CAM_REQ_INVALID;
+		}
+
+		if (ccb->ccb_h.status != CAM_REQ_CMP) {
+			ahd_unlock(ahd, &s);
+			return;
+		}
+
+		xpt_print_path(ccb->ccb_h.path);
+		printf("Target mode disabled\n");
+		xpt_free_path(lstate->path);
+		free(lstate, M_DEVBUF);
+
+		ahd_pause(ahd);
+		/* Can we clean up the target too? */
+		if (target != CAM_TARGET_WILDCARD) {
+			tstate->enabled_luns[lun] = NULL;
+			ahd->enabled_luns--;
+			for (empty = 1, i = 0; i < 8; i++)
+				if (tstate->enabled_luns[i] != NULL) {
+					empty = 0;
+					break;
+				}
+
+			if (empty) {
+				ahd_free_tstate(ahd, target, channel,
+						/*force*/FALSE);
+				if (ahd->features & AHD_MULTI_TID) {
+					u_int targid_mask;
+
+					targid_mask = ahd_inb(ahd, TARGID)
+						    | (ahd_inb(ahd, TARGID + 1)
+						       << 8);
+
+					targid_mask &= ~target_mask;
+					ahd_outb(ahd, TARGID, targid_mask);
+					ahd_outb(ahd, TARGID+1,
+					 	 (targid_mask >> 8));
+					ahd_update_scsiid(ahd, targid_mask);
+				}
+			}
+		} else {
+
+			ahd->black_hole = NULL;
+
+			/*
+			 * We can't allow selections without
+			 * our black hole device.
+			 */
+			empty = TRUE;
+		}
+		if (ahd->enabled_luns == 0) {
+			/* Disallow select-in */
+			u_int scsiseq1;
+
+			scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
+			scsiseq1 &= ~ENSELI;
+			ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
+			scsiseq1 = ahd_inb(ahd, SCSISEQ1);
+			scsiseq1 &= ~ENSELI;
+			ahd_outb(ahd, SCSISEQ1, scsiseq1);
+
+			if ((ahd->features & AHD_MULTIROLE) == 0) {
+				printf("Configuring Initiator Mode\n");
+				ahd->flags &= ~AHD_TARGETROLE;
+				ahd->flags |= AHD_INITIATORROLE;
+				ahd_pause(ahd);
+				ahd_loadseq(ahd);
+				ahd_restart(ahd);
+				/*
+				 * Unpaused.  The extra unpause
+				 * that follows is harmless.
+				 */
+			}
+		}
+		ahd_unpause(ahd);
+		ahd_unlock(ahd, &s);
+	}
+#endif
+}
+
+static void
+ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
+{
+#if NOT_YET
+	u_int scsiid_mask;
+	u_int scsiid;
+
+	if ((ahd->features & AHD_MULTI_TID) == 0)
+		panic("ahd_update_scsiid called on non-multitid unit\n");
+
+	/*
+	 * Since we will rely on the TARGID mask
+	 * for selection enables, ensure that OID
+	 * in SCSIID is not set to some other ID
+	 * that we don't want to allow selections on.
+	 */
+	if ((ahd->features & AHD_ULTRA2) != 0)
+		scsiid = ahd_inb(ahd, SCSIID_ULTRA2);
+	else
+		scsiid = ahd_inb(ahd, SCSIID);
+	scsiid_mask = 0x1 << (scsiid & OID);
+	if ((targid_mask & scsiid_mask) == 0) {
+		u_int our_id;
+
+		/* ffs counts from 1 */
+		our_id = ffs(targid_mask);
+		if (our_id == 0)
+			our_id = ahd->our_id;
+		else
+			our_id--;
+		scsiid &= TID;
+		scsiid |= our_id;
+	}
+	if ((ahd->features & AHD_ULTRA2) != 0)
+		ahd_outb(ahd, SCSIID_ULTRA2, scsiid);
+	else
+		ahd_outb(ahd, SCSIID, scsiid);
+#endif
+}
+
+void
+ahd_run_tqinfifo(struct ahd_softc *ahd, int paused)
+{
+	struct target_cmd *cmd;
+
+	ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD);
+	while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) {
+
+		/*
+		 * Only advance through the queue if we
+		 * have the resources to process the command.
+		 */
+		if (ahd_handle_target_cmd(ahd, cmd) != 0)
+			break;
+
+		cmd->cmd_valid = 0;
+		ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+				ahd->shared_data_dmamap,
+				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
+				sizeof(struct target_cmd),
+				BUS_DMASYNC_PREREAD);
+		ahd->tqinfifonext++;
+
+		/*
+		 * Lazily update our position in the target mode incoming
+		 * command queue as seen by the sequencer.
+		 */
+		if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
+			u_int hs_mailbox;
+
+			hs_mailbox = ahd_inb(ahd, HS_MAILBOX);
+			hs_mailbox &= ~HOST_TQINPOS;
+			hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS;
+			ahd_outb(ahd, HS_MAILBOX, hs_mailbox);
+		}
+	}
+}
+
+static int
+ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd)
+{
+	struct	  ahd_tmode_tstate *tstate;
+	struct	  ahd_tmode_lstate *lstate;
+	struct	  ccb_accept_tio *atio;
+	uint8_t *byte;
+	int	  initiator;
+	int	  target;
+	int	  lun;
+
+	initiator = SCSIID_TARGET(ahd, cmd->scsiid);
+	target = SCSIID_OUR_ID(cmd->scsiid);
+	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);
+
+	byte = cmd->bytes;
+	tstate = ahd->enabled_targets[target];
+	lstate = NULL;
+	if (tstate != NULL)
+		lstate = tstate->enabled_luns[lun];
+
+	/*
+	 * Commands for disabled luns go to the black hole driver.
+	 */
+	if (lstate == NULL)
+		lstate = ahd->black_hole;
+
+	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
+	if (atio == NULL) {
+		ahd->flags |= AHD_TQINFIFO_BLOCKED;
+		/*
+		 * Wait for more ATIOs from the peripheral driver for this lun.
+		 */
+		return (1);
+	} else
+		ahd->flags &= ~AHD_TQINFIFO_BLOCKED;
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_TQIN) != 0)
+		printf("Incoming command from %d for %d:%d%s\n",
+		       initiator, target, lun,
+		       lstate == ahd->black_hole ? "(Black Holed)" : "");
+#endif
+	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
+
+	if (lstate == ahd->black_hole) {
+		/* Fill in the wildcards */
+		atio->ccb_h.target_id = target;
+		atio->ccb_h.target_lun = lun;
+	}
+
+	/*
+	 * Package it up and send it off to
+	 * whoever has this lun enabled.
+	 */
+	atio->sense_len = 0;
+	atio->init_id = initiator;
+	if (byte[0] != 0xFF) {
+		/* Tag was included */
+		atio->tag_action = *byte++;
+		atio->tag_id = *byte++;
+		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
+	} else {
+		atio->ccb_h.flags = 0;
+	}
+	byte++;
+
+	/* Okay.  Now determine the cdb size based on the command code */
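+	/*
+	 * This follows the SCSI group-code convention: group 0 is a
+	 * 6-byte CDB, groups 1 and 2 are 10 bytes, group 4 is 16, and
+	 * group 5 is 12.  Group 3 and the vendor-unique groups are not
+	 * decoded; only the opcode byte is copied for those.
+	 */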
+	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
+	case 0:
+		atio->cdb_len = 6;
+		break;
+	case 1:
+	case 2:
+		atio->cdb_len = 10;
+		break;
+	case 4:
+		atio->cdb_len = 16;
+		break;
+	case 5:
+		atio->cdb_len = 12;
+		break;
+	case 3:
+	default:
+		/* Only copy the opcode. */
+		atio->cdb_len = 1;
+		printf("Reserved or VU command code type encountered\n");
+		break;
+	}
+	
+	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
+
+	atio->ccb_h.status |= CAM_CDB_RECVD;
+
+	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
+		/*
+		 * We weren't allowed to disconnect.
+		 * We're hanging on the bus until a
+		 * continue target I/O comes in response
+		 * to this accept tio.
+		 */
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_TQIN) != 0)
+			printf("Received Immediate Command %d:%d:%d - %p\n",
+			       initiator, target, lun, ahd->pending_device);
+#endif
+		ahd->pending_device = lstate;
+		ahd_freeze_ccb((union ccb *)atio);
+		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
+	}
+	xpt_done((union ccb*)atio);
+	return (0);
+}
+
+#endif
diff --git a/drivers/scsi/aic7xxx/aic79xx_inline.h b/drivers/scsi/aic7xxx/aic79xx_inline.h
new file mode 100644
index 0000000..d80bc51
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_inline.h
@@ -0,0 +1,965 @@
+/*
+ * Inline routines shareable across OS platforms.
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2003 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx_inline.h#51 $
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _AIC79XX_INLINE_H_
+#define _AIC79XX_INLINE_H_
+
+/******************************** Debugging ***********************************/
+static __inline char *ahd_name(struct ahd_softc *ahd);
+
+static __inline char *
+ahd_name(struct ahd_softc *ahd)
+{
+	return (ahd->name);
+}
+
+/************************ Sequencer Execution Control *************************/
+static __inline void ahd_known_modes(struct ahd_softc *ahd,
+				     ahd_mode src, ahd_mode dst);
+static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd,
+						    ahd_mode src,
+						    ahd_mode dst);
+static __inline void ahd_extract_mode_state(struct ahd_softc *ahd,
+					    ahd_mode_state state,
+					    ahd_mode *src, ahd_mode *dst);
+static __inline void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
+				   ahd_mode dst);
+static __inline void ahd_update_modes(struct ahd_softc *ahd);
+static __inline void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
+				      ahd_mode dstmode, const char *file,
+				      int line);
+static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
+static __inline void ahd_restore_modes(struct ahd_softc *ahd,
+				       ahd_mode_state state);
+static __inline int  ahd_is_paused(struct ahd_softc *ahd);
+static __inline void ahd_pause(struct ahd_softc *ahd);
+static __inline void ahd_unpause(struct ahd_softc *ahd);
+
+static __inline void
+ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
+{
+	ahd->src_mode = src;
+	ahd->dst_mode = dst;
+	ahd->saved_src_mode = src;
+	ahd->saved_dst_mode = dst;
+}
+
+static __inline ahd_mode_state
+ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
+{
+	return ((src << SRC_MODE_SHIFT) | (dst << DST_MODE_SHIFT));
+}
+
+static __inline void
+ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state,
+		       ahd_mode *src, ahd_mode *dst)
+{
+	*src = (state & SRC_MODE) >> SRC_MODE_SHIFT;
+	*dst = (state & DST_MODE) >> DST_MODE_SHIFT;
+}
+
+static __inline void
+ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
+{
+	if (ahd->src_mode == src && ahd->dst_mode == dst)
+		return;
+#ifdef AHD_DEBUG
+	if (ahd->src_mode == AHD_MODE_UNKNOWN
+	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
+		panic("Setting mode prior to saving it.\n");
+	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
+		printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
+		       ahd_build_mode_state(ahd, src, dst));
+#endif
+	ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
+	ahd->src_mode = src;
+	ahd->dst_mode = dst;
+}
+
+static __inline void
+ahd_update_modes(struct ahd_softc *ahd)
+{
+	ahd_mode_state mode_ptr;
+	ahd_mode src;
+	ahd_mode dst;
+
+	mode_ptr = ahd_inb(ahd, MODE_PTR);
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
+		printf("Reading mode 0x%x\n", mode_ptr);
+#endif
+	ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
+	ahd_known_modes(ahd, src, dst);
+}
+
+static __inline void
+ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
+		 ahd_mode dstmode, const char *file, int line)
+{
+#ifdef AHD_DEBUG
+	if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
+	 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
+		panic("%s:%s:%d: Mode assertion failed.\n",
+		       ahd_name(ahd), file, line);
+	}
+#endif
+}
+
+static __inline ahd_mode_state
+ahd_save_modes(struct ahd_softc *ahd)
+{
+	if (ahd->src_mode == AHD_MODE_UNKNOWN
+	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
+		ahd_update_modes(ahd);
+
+	return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
+}
+
+static __inline void
+ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
+{
+	ahd_mode src;
+	ahd_mode dst;
+
+	ahd_extract_mode_state(ahd, state, &src, &dst);
+	ahd_set_modes(ahd, src, dst);
+}
+
+#define AHD_ASSERT_MODES(ahd, source, dest) \
+	ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
+
+/*
+ * Determine whether the sequencer has halted code execution.
+ * Returns non-zero status if the sequencer is stopped.
+ */
+static __inline int
+ahd_is_paused(struct ahd_softc *ahd)
+{
+	return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
+}
+
+/*
+ * Request that the sequencer stop and wait, indefinitely, for it
+ * to stop.  The sequencer will only acknowledge that it is paused
+ * once it has reached an instruction boundary and PAUSEDIS is
+ * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
+ * for critical sections.
+ */
+static __inline void
+ahd_pause(struct ahd_softc *ahd)
+{
+	ahd_outb(ahd, HCNTRL, ahd->pause);
+
+	/*
+	 * Since the sequencer can disable pausing in a critical section, we
+	 * must loop until it actually stops.
+	 */
+	while (ahd_is_paused(ahd) == 0)
+		;
+}
+
+/*
+ * Allow the sequencer to continue program execution.
+ * We check here to ensure that no additional interrupt
+ * sources that would cause the sequencer to halt have been
+ * asserted.  If, for example, a SCSI bus reset is detected
+ * while we are fielding a different, pausing, interrupt type,
+ * we don't want to release the sequencer before going back
+ * into our interrupt handler and dealing with this new
+ * condition.
+ */
+static __inline void
+ahd_unpause(struct ahd_softc *ahd)
+{
+	/*
+	 * Automatically restore our modes to those saved
+	 * prior to the first change of the mode.
+	 */
+	if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
+	 && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
+		if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
+			ahd_reset_cmds_pending(ahd);
+		ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
+	}
+
+	if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
+		ahd_outb(ahd, HCNTRL, ahd->unpause);
+
+	ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
+}
+
+/*********************** Scatter Gather List Handling *************************/
+static __inline void	*ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
+				      void *sgptr, dma_addr_t addr,
+				      bus_size_t len, int last);
+static __inline void	 ahd_setup_scb_common(struct ahd_softc *ahd,
+					      struct scb *scb);
+static __inline void	 ahd_setup_data_scb(struct ahd_softc *ahd,
+					    struct scb *scb);
+static __inline void	 ahd_setup_noxfer_scb(struct ahd_softc *ahd,
+					      struct scb *scb);
+
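+/*
+ * Add one segment to an SCB's S/G list.  With 64-bit addressing the
+ * full address is stored.  Otherwise the low 32 address bits go in
+ * 'addr' and, when 39-bit addressing is enabled, address bits 32-38
+ * are folded into the upper bits of the 'len' word.
+ */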
+static __inline void *
+ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
+	     void *sgptr, dma_addr_t addr, bus_size_t len, int last)
+{
+	scb->sg_count++;
+	if (sizeof(dma_addr_t) > 4
+	 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+		struct ahd_dma64_seg *sg;
+
+		sg = (struct ahd_dma64_seg *)sgptr;
+		sg->addr = ahd_htole64(addr);
+		sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
+		return (sg + 1);
+	} else {
+		struct ahd_dma_seg *sg;
+
+		sg = (struct ahd_dma_seg *)sgptr;
+		sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
+		sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
+				    | (last ? AHD_DMA_LAST_SEG : 0));
+		return (sg + 1);
+	}
+}
+
+static __inline void
+ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
+{
+	/* XXX Handle target mode SCBs. */
+	scb->crc_retry_count = 0;
+	if ((scb->flags & SCB_PACKETIZED) != 0) {
+		/* XXX what about ACA??  It is type 4, but TAG_TYPE == 0x3. */
+		scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
+	} else {
+		if (ahd_get_transfer_length(scb) & 0x01)
+			scb->hscb->task_attribute = SCB_XFERLEN_ODD;
+		else
+			scb->hscb->task_attribute = 0;
+	}
+
+	if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
+	 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
+		scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
+		    ahd_htole32(scb->sense_busaddr);
+}
+
+static __inline void
+ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+	/*
+	 * Copy the first SG into the "current" data pointer area.
+	 */
+	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+		struct ahd_dma64_seg *sg;
+
+		sg = (struct ahd_dma64_seg *)scb->sg_list;
+		scb->hscb->dataptr = sg->addr;
+		scb->hscb->datacnt = sg->len;
+	} else {
+		struct ahd_dma_seg *sg;
+		uint32_t *dataptr_words;
+
+		sg = (struct ahd_dma_seg *)scb->sg_list;
+		dataptr_words = (uint32_t*)&scb->hscb->dataptr;
+		dataptr_words[0] = sg->addr;
+		dataptr_words[1] = 0;
+		if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
+			uint64_t high_addr;
+
+			high_addr = ahd_le32toh(sg->len) & 0x7F000000;
+			scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
+		}
+		scb->hscb->datacnt = sg->len;
+	}
+	/*
+	 * Note where to find the SG entries in bus space.
+	 * We also set the full residual flag which the 
+	 * sequencer will clear as soon as a data transfer
+	 * occurs.
+	 */
+	scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
+}
+
+static __inline void
+ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+	scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
+	scb->hscb->dataptr = 0;
+	scb->hscb->datacnt = 0;
+}
+
+/************************** Memory mapping routines ***************************/
+static __inline size_t	ahd_sg_size(struct ahd_softc *ahd);
+static __inline void *
+			ahd_sg_bus_to_virt(struct ahd_softc *ahd,
+					   struct scb *scb,
+					   uint32_t sg_busaddr);
+static __inline uint32_t
+			ahd_sg_virt_to_bus(struct ahd_softc *ahd,
+					   struct scb *scb,
+					   void *sg);
+static __inline void	ahd_sync_scb(struct ahd_softc *ahd,
+				     struct scb *scb, int op);
+static __inline void	ahd_sync_sglist(struct ahd_softc *ahd,
+					struct scb *scb, int op);
+static __inline void	ahd_sync_sense(struct ahd_softc *ahd,
+				       struct scb *scb, int op);
+static __inline uint32_t
+			ahd_targetcmd_offset(struct ahd_softc *ahd,
+					     u_int index);
+
+static __inline size_t
+ahd_sg_size(struct ahd_softc *ahd)
+{
+	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
+		return (sizeof(struct ahd_dma64_seg));
+	return (sizeof(struct ahd_dma_seg));
+}
+
+static __inline void *
+ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
+{
+	dma_addr_t sg_offset;
+
+	/* sg_list_phys points to entry 1, not 0 */
+	sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
+	return ((uint8_t *)scb->sg_list + sg_offset);
+}
+
+static __inline uint32_t
+ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
+{
+	dma_addr_t sg_offset;
+
+	/* sg_list_phys points to entry 1, not 0 */
+	sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
+		  - ahd_sg_size(ahd);
+
+	return (scb->sg_list_busaddr + sg_offset);
+}
+
+static __inline void
+ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+	ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
+			scb->hscb_map->dmamap,
+			/*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
+			/*len*/sizeof(*scb->hscb), op);
+}
+
+static __inline void
+ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+	if (scb->sg_count == 0)
+		return;
+
+	ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
+			scb->sg_map->dmamap,
+			/*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
+			/*len*/ahd_sg_size(ahd) * scb->sg_count, op);
+}
+
+static __inline void
+ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+	ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
+			scb->sense_map->dmamap,
+			/*offset*/scb->sense_busaddr,
+			/*len*/AHD_SENSE_BUFSIZE, op);
+}
+
+static __inline uint32_t
+ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
+{
+	return (((uint8_t *)&ahd->targetcmds[index])
+	       - (uint8_t *)ahd->qoutfifo);
+}
+
+/********************** Miscellaneous Support Functions ***********************/
+static __inline void	ahd_complete_scb(struct ahd_softc *ahd,
+					 struct scb *scb);
+static __inline void	ahd_update_residual(struct ahd_softc *ahd,
+					    struct scb *scb);
+static __inline struct ahd_initiator_tinfo *
+			ahd_fetch_transinfo(struct ahd_softc *ahd,
+					    char channel, u_int our_id,
+					    u_int remote_id,
+					    struct ahd_tmode_tstate **tstate);
+static __inline uint16_t
+			ahd_inw(struct ahd_softc *ahd, u_int port);
+static __inline void	ahd_outw(struct ahd_softc *ahd, u_int port,
+				 u_int value);
+static __inline uint32_t
+			ahd_inl(struct ahd_softc *ahd, u_int port);
+static __inline void	ahd_outl(struct ahd_softc *ahd, u_int port,
+				 uint32_t value);
+static __inline uint64_t
+			ahd_inq(struct ahd_softc *ahd, u_int port);
+static __inline void	ahd_outq(struct ahd_softc *ahd, u_int port,
+				 uint64_t value);
+static __inline u_int	ahd_get_scbptr(struct ahd_softc *ahd);
+static __inline void	ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
+static __inline u_int	ahd_get_hnscb_qoff(struct ahd_softc *ahd);
+static __inline void	ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value);
+static __inline u_int	ahd_get_hescb_qoff(struct ahd_softc *ahd);
+static __inline void	ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value);
+static __inline u_int	ahd_get_snscb_qoff(struct ahd_softc *ahd);
+static __inline void	ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value);
+static __inline u_int	ahd_get_sescb_qoff(struct ahd_softc *ahd);
+static __inline void	ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value);
+static __inline u_int	ahd_get_sdscb_qoff(struct ahd_softc *ahd);
+static __inline void	ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value);
+static __inline u_int	ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
+static __inline u_int	ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
+static __inline uint32_t
+			ahd_inl_scbram(struct ahd_softc *ahd, u_int offset);
+static __inline uint64_t
+			ahd_inq_scbram(struct ahd_softc *ahd, u_int offset);
+static __inline void	ahd_swap_with_next_hscb(struct ahd_softc *ahd,
+						struct scb *scb);
+static __inline void	ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
+static __inline uint8_t *
+			ahd_get_sense_buf(struct ahd_softc *ahd,
+					  struct scb *scb);
+static __inline uint32_t
+			ahd_get_sense_bufaddr(struct ahd_softc *ahd,
+					      struct scb *scb);
+
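+/*
+ * Complete an SCB.  If the sequencer posted valid status information
+ * for this transaction, process it first; otherwise hand the command
+ * directly to ahd_done().
+ */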
+static __inline void
+ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+	uint32_t sgptr;
+
+	sgptr = ahd_le32toh(scb->hscb->sgptr);
+	if ((sgptr & SG_STATUS_VALID) != 0)
+		ahd_handle_scb_status(ahd, scb);
+	else
+		ahd_done(ahd, scb);
+}
+
+/*
+ * Determine whether the sequencer reported a residual
+ * for this SCB/transaction.
+ */
+static __inline void
+ahd_update_residual(struct ahd_softc *ahd, struct scb *scb)
+{
+	uint32_t sgptr;
+
+	sgptr = ahd_le32toh(scb->hscb->sgptr);
+	if ((sgptr & SG_STATUS_VALID) != 0)
+		ahd_calc_residual(ahd, scb);
+}
+
+/*
+ * Return pointers to the transfer negotiation information
+ * for the specified our_id/remote_id pair.
+ */
+static __inline struct ahd_initiator_tinfo *
+ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
+		    u_int remote_id, struct ahd_tmode_tstate **tstate)
+{
+	/*
+	 * Transfer data structures are stored from the perspective
+	 * of the target role.  Since the parameters for a connection
+	 * in the initiator role to a given target are the same as
+	 * when the roles are reversed, we pretend we are the target.
+	 */
+	if (channel == 'B')
+		our_id += 8;
+	*tstate = ahd->enabled_targets[our_id];
+	return (&(*tstate)->transinfo[remote_id]);
+}
+
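+/*
+ * Copy the SCSI ID and LUN fields that make up an SCB's collision
+ * index from one hardware SCB to another.
+ */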
+#define AHD_COPY_COL_IDX(dst, src)				\
+do {								\
+	dst->hscb->scsiid = src->hscb->scsiid;			\
+	dst->hscb->lun = src->hscb->lun;			\
+} while (0)
+
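+/*
+ * Multi-byte register accessors built from byte-wide I/O.  The chip's
+ * multi-byte registers are little-endian, so the least significant
+ * byte lives at the base port address.
+ */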
+static __inline uint16_t
+ahd_inw(struct ahd_softc *ahd, u_int port)
+{
+	return ((ahd_inb(ahd, port+1) << 8) | ahd_inb(ahd, port));
+}
+
+static __inline void
+ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
+{
+	ahd_outb(ahd, port, value & 0xFF);
+	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
+}
+
+static __inline uint32_t
+ahd_inl(struct ahd_softc *ahd, u_int port)
+{
+	return ((ahd_inb(ahd, port))
+	      | (ahd_inb(ahd, port+1) << 8)
+	      | (ahd_inb(ahd, port+2) << 16)
+	      | (ahd_inb(ahd, port+3) << 24));
+}
+
+static __inline void
+ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
+{
+	ahd_outb(ahd, port, (value) & 0xFF);
+	ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
+	ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
+	ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
+}
+
+static __inline uint64_t
+ahd_inq(struct ahd_softc *ahd, u_int port)
+{
+	return ((ahd_inb(ahd, port))
+	      | (ahd_inb(ahd, port+1) << 8)
+	      | (ahd_inb(ahd, port+2) << 16)
+	      | (ahd_inb(ahd, port+3) << 24)
+	      | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
+	      | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
+	      | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
+	      | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
+}
+
+static __inline void
+ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
+{
+	ahd_outb(ahd, port, value & 0xFF);
+	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
+	ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
+	ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
+	ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
+	ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
+	ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
+	ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
+}
+
+static __inline u_int
+ahd_get_scbptr(struct ahd_softc *ahd)
+{
+	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+	return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
+}
+
+static __inline void
+ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
+{
+	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+	ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
+	ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
+}
+
+static __inline u_int
+ahd_get_hnscb_qoff(struct ahd_softc *ahd)
+{
+	return (ahd_inw_atomic(ahd, HNSCB_QOFF));
+}
+
+static __inline void
+ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+	ahd_outw_atomic(ahd, HNSCB_QOFF, value);
+}
+
+static __inline u_int
+ahd_get_hescb_qoff(struct ahd_softc *ahd)
+{
+	return (ahd_inb(ahd, HESCB_QOFF));
+}
+
+static __inline void
+ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
+{
+	ahd_outb(ahd, HESCB_QOFF, value);
+}
+
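+/*
+ * Fetch the sequencer's new SCB queue offset.  The value read is
+ * immediately written back so that the register contents are left
+ * unchanged by the read.
+ */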
+static __inline u_int
+ahd_get_snscb_qoff(struct ahd_softc *ahd)
+{
+	u_int oldvalue;
+
+	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+	oldvalue = ahd_inw(ahd, SNSCB_QOFF);
+	ahd_outw(ahd, SNSCB_QOFF, oldvalue);
+	return (oldvalue);
+}
+
+static __inline void
+ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+	ahd_outw(ahd, SNSCB_QOFF, value);
+}
+
+static __inline u_int
+ahd_get_sescb_qoff(struct ahd_softc *ahd)
+{
+	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+	return (ahd_inb(ahd, SESCB_QOFF));
+}
+
+static __inline void
+ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
+{
+	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+	ahd_outb(ahd, SESCB_QOFF, value);
+}
+
+static __inline u_int
+ahd_get_sdscb_qoff(struct ahd_softc *ahd)
+{
+	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+	return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
+}
+
+static __inline void
+ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+	ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
+	ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
+}
+
+static __inline u_int
+ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
+{
+	u_int value;
+
+	/*
+	 * Workaround PCI-X Rev A. hardware bug.
+	 * After a host read of SCB memory, the chip
+	 * may become confused into thinking prefetch
+	 * was required.  This starts the discard timer
+	 * running and can cause an unexpected discard
+	 * timer interrupt.  The workaround is to read
+	 * a normal register prior to the exhaustion of
+	 * the discard timer.  The mode pointer register
+	 * has no side effects and so serves well for
+	 * this purpose.
+	 *
+	 * Razor #528
+	 */
+	value = ahd_inb(ahd, offset);
+	if ((ahd->flags & AHD_PCIX_SCBRAM_RD_BUG) != 0)
+		ahd_inb(ahd, MODE_PTR);
+	return (value);
+}
+
+static __inline u_int
+ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
+{
+	return (ahd_inb_scbram(ahd, offset)
+	      | (ahd_inb_scbram(ahd, offset+1) << 8));
+}
+
+static __inline uint32_t
+ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
+{
+	return (ahd_inw_scbram(ahd, offset)
+	      | (ahd_inw_scbram(ahd, offset+2) << 16));
+}
+
+static __inline uint64_t
+ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
+{
+	return (ahd_inl_scbram(ahd, offset)
+	      | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
+}
+
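+/*
+ * Translate an SCB tag into its software SCB.  The hardware SCB is
+ * synced for the CPU so that we see the controller's latest copy
+ * before using it.
+ */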
+static __inline struct scb *
+ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
+{
+	struct scb* scb;
+
+	if (tag >= AHD_SCB_MAX)
+		return (NULL);
+	scb = ahd->scb_data.scbindex[tag];
+	if (scb != NULL)
+		ahd_sync_scb(ahd, scb,
+			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+	return (scb);
+}
+
+static __inline void
+ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
+{
+	struct hardware_scb *q_hscb;
+	uint32_t saved_hscb_busaddr;
+
+	/*
+	 * Our queuing method is a bit tricky.  The card
+	 * knows in advance which HSCB (by address) to download,
+	 * and we can't disappoint it.  To achieve this, the next
+	 * HSCB to download is saved off in ahd->next_queued_hscb.
+	 * When we are called to queue "an arbitrary scb",
+	 * we copy the contents of the incoming HSCB to the one
+	 * the sequencer knows about, swap HSCB pointers and
+	 * finally assign the SCB to the tag indexed location
+	 * in the scb_array.  This makes sure that we can still
+	 * locate the correct SCB by SCB_TAG.
+	 */
+	q_hscb = ahd->next_queued_hscb;
+	saved_hscb_busaddr = q_hscb->hscb_busaddr;
+	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
+	q_hscb->hscb_busaddr = saved_hscb_busaddr;
+	q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
+
+	/* Now swap HSCB pointers. */
+	ahd->next_queued_hscb = scb->hscb;
+	scb->hscb = q_hscb;
+
+	/* Now define the mapping from tag to SCB in the scbindex */
+	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
+}
+
+/*
+ * Tell the sequencer about a new transaction to execute.
+ */
+static __inline void
+ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+	ahd_swap_with_next_hscb(ahd, scb);
+
+	if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
+		panic("Attempt to queue invalid SCB tag %x\n",
+		      SCB_GET_TAG(scb));
+
+	/*
+	 * Keep a history of SCBs we've downloaded in the qinfifo.
+	 */
+	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
+	ahd->qinfifonext++;
+
+	if (scb->sg_count != 0)
+		ahd_setup_data_scb(ahd, scb);
+	else
+		ahd_setup_noxfer_scb(ahd, scb);
+	ahd_setup_scb_common(ahd, scb);
+
+	/*
+	 * Make sure our data is consistent from the
+	 * perspective of the adapter.
+	 */
+	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
+		uint64_t host_dataptr;
+
+		host_dataptr = ahd_le64toh(scb->hscb->dataptr);
+		printf("%s: Queueing SCB 0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
+		       ahd_name(ahd),
+		       SCB_GET_TAG(scb), ahd_le32toh(scb->hscb->hscb_busaddr),
+		       (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
+		       (u_int)(host_dataptr & 0xFFFFFFFF),
+		       ahd_le32toh(scb->hscb->datacnt));
+	}
+#endif
+	/* Tell the adapter about the newly queued SCB */
+	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
+}
+
+static __inline uint8_t *
+ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
+{
+	return (scb->sense_data);
+}
+
+static __inline uint32_t
+ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
+{
+	return (scb->sense_busaddr);
+}
+
+/************************** Interrupt Processing ******************************/
+static __inline void	ahd_sync_qoutfifo(struct ahd_softc *ahd, int op);
+static __inline void	ahd_sync_tqinfifo(struct ahd_softc *ahd, int op);
+static __inline u_int	ahd_check_cmdcmpltqueues(struct ahd_softc *ahd);
+static __inline int	ahd_intr(struct ahd_softc *ahd);
+
+static __inline void
+ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
+{
+	ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_dmamap,
+			/*offset*/0, /*len*/AHC_SCB_MAX * sizeof(uint16_t), op);
+}
+
+static __inline void
+ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
+{
+#ifdef AHD_TARGET_MODE
+	if ((ahd->flags & AHD_TARGETROLE) != 0) {
+		ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+				ahd->shared_data_dmamap,
+				ahd_targetcmd_offset(ahd, 0),
+				sizeof(struct target_cmd) * AHD_TMODE_CMDS,
+				op);
+	}
+#endif
+}
+
+/*
+ * See if the firmware has posted any completed commands
+ * into our in-core command complete fifos.
+ */
+#define AHD_RUN_QOUTFIFO 0x1
+#define AHD_RUN_TQINFIFO 0x2
+static __inline u_int
+ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
+{
+	u_int retval;
+
+	retval = 0;
+	ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_dmamap,
+			/*offset*/ahd->qoutfifonext, /*len*/2,
+			BUS_DMASYNC_POSTREAD);
+	if ((ahd->qoutfifo[ahd->qoutfifonext]
+	     & QOUTFIFO_ENTRY_VALID_LE) == ahd->qoutfifonext_valid_tag)
+		retval |= AHD_RUN_QOUTFIFO;
+#ifdef AHD_TARGET_MODE
+	if ((ahd->flags & AHD_TARGETROLE) != 0
+	 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
+		ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+				ahd->shared_data_dmamap,
+				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
+				/*len*/sizeof(struct target_cmd),
+				BUS_DMASYNC_POSTREAD);
+		if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
+			retval |= AHD_RUN_TQINFIFO;
+	}
+#endif
+	return (retval);
+}
+
+/*
+ * Catch an interrupt from the adapter
+ */
+static __inline int
+ahd_intr(struct ahd_softc *ahd)
+{
+	u_int	intstat;
+
+	if ((ahd->pause & INTEN) == 0) {
+		/*
+		 * Our interrupt is not enabled on the chip
+		 * and may be disabled for re-entrancy reasons,
+		 * so just return.  This is likely just a shared
+		 * interrupt.
+		 */
+		return (0);
+	}
+
+	/*
+	 * Instead of directly reading the interrupt status register,
+	 * infer the cause of the interrupt by checking our in-core
+	 * completion queues.  This avoids a costly PCI bus read in
+	 * most cases.
+	 */
+	if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
+	 && (ahd_check_cmdcmpltqueues(ahd) != 0))
+		intstat = CMDCMPLT;
+	else
+		intstat = ahd_inb(ahd, INTSTAT);
+
+	if ((intstat & INT_PEND) == 0)
+		return (0);
+
+	if (intstat & CMDCMPLT) {
+		ahd_outb(ahd, CLRINT, CLRCMDINT);
+
+		/*
+		 * Ensure that the chip sees that we've cleared
+		 * this interrupt before we walk the output fifo.
+		 * Otherwise, we may, due to posted bus writes,
+		 * clear the interrupt after we finish the scan,
+		 * and after the sequencer has added new entries
+		 * and asserted the interrupt again.
+		 */
+		if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
+			if (ahd_is_paused(ahd)) {
+				/*
+				 * Potentially lost SEQINT.
+				 * If SEQINTCODE is non-zero,
+				 * simulate the SEQINT.
+				 */
+				if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
+					intstat |= SEQINT;
+			}
+		} else {
+			ahd_flush_device_writes(ahd);
+		}
+		ahd_run_qoutfifo(ahd);
+		ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
+		ahd->cmdcmplt_total++;
+#ifdef AHD_TARGET_MODE
+		if ((ahd->flags & AHD_TARGETROLE) != 0)
+			ahd_run_tqinfifo(ahd, /*paused*/FALSE);
+#endif
+	}
+
+	/*
+	 * Handle statuses that may invalidate our cached
+	 * copy of INTSTAT separately.
+	 */
+	if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
+		/* Hot eject.  Do nothing */
+	} else if (intstat & HWERRINT) {
+		ahd_handle_hwerrint(ahd);
+	} else if ((intstat & (PCIINT|SPLTINT)) != 0) {
+		ahd->bus_intr(ahd);
+	} else {
+
+		if ((intstat & SEQINT) != 0)
+			ahd_handle_seqint(ahd, intstat);
+
+		if ((intstat & SCSIINT) != 0)
+			ahd_handle_scsiint(ahd, intstat);
+	}
+	return (1);
+}
+
+#endif  /* _AIC79XX_INLINE_H_ */
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
new file mode 100644
index 0000000..fb2877c
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -0,0 +1,5017 @@
+/*
+ * Adaptec AIC79xx device driver for Linux.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.c#171 $
+ *
+ * --------------------------------------------------------------------------
+ * Copyright (c) 1994-2000 Justin T. Gibbs.
+ * Copyright (c) 1997-1999 Doug Ledford
+ * Copyright (c) 2000-2003 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include "aic79xx_osm.h"
+#include "aic79xx_inline.h"
+#include <scsi/scsicam.h>
+
+/*
+ * Include aiclib.c as part of our
+ * "module dependencies are hard" work around.
+ */
+#include "aiclib.c"
+
+#include <linux/init.h>		/* __setup */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+#include "sd.h"			/* For geometry detection */
+#endif
+
+#include <linux/mm.h>		/* For fetching system memory size */
+#include <linux/delay.h>	/* For ssleep/msleep */
+
+/*
+ * Lock protecting manipulation of the ahd softc list.
+ */
+spinlock_t ahd_list_spinlock;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+/* For dynamic sglist size calculation. */
+u_int ahd_linux_nseg;
+#endif
+
+/*
+ * Bucket size for counting good commands in between bad ones.
+ */
+#define AHD_LINUX_ERR_THRESH	1000
+
+/*
+ * Set this to the delay in seconds after SCSI bus reset.
+ * Note, we honor this only for the initial bus reset.
+ * The scsi error recovery code performs its own bus settle
+ * delay handling for error recovery actions.
+ */
+#ifdef CONFIG_AIC79XX_RESET_DELAY_MS
+#define AIC79XX_RESET_DELAY CONFIG_AIC79XX_RESET_DELAY_MS
+#else
+#define AIC79XX_RESET_DELAY 5000
+#endif
+
+/*
+ * To change the default number of tagged transactions allowed per-device,
+ * add a line to the lilo.conf file like:
+ * append="aic79xx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
+ * which will result in the first four devices on the first two
+ * controllers being set to a tagged queue depth of 32.
+ *
+ * The tag_commands is an array of 16 to allow for wide and twin adapters.
+ * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
+ * for channel 1.
+ */
+typedef struct {
+	uint16_t tag_commands[16];	/* Allow for wide/twin adapters. */
+} adapter_tag_info_t;
+
+/*
+ * Modify this as you see fit for your system.
+ *
+ * 0			tagged queuing disabled
+ * 1 <= n <= 253	n == max tags ever dispatched.
+ *
+ * The driver will throttle the number of commands dispatched to a
+ * device if it returns queue full.  For devices with a fixed maximum
+ * queue depth, the driver will eventually determine this depth and
+ * lock it in (a console message is printed to indicate that a lock
+ * has occurred).  On some devices, queue full is returned for a temporary
+ * resource shortage.  These devices will return queue full at varying
+ * depths.  The driver will throttle back when the queue fulls occur and
+ * attempt to slowly increase the depth over time as the device recovers
+ * from the resource shortage.
+ *
+ * In this example, the first line will disable tagged queueing for all
+ * the devices on the first probed aic79xx adapter.
+ *
+ * The second line enables tagged queueing with 4 commands/LUN for IDs
+ * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
+ * driver to attempt to use up to 64 tags for ID 1.
+ *
+ * The third line is the same as the first line.
+ *
+ * The fourth line disables tagged queueing for devices 0 and 3.  It
+ * enables tagged queueing for the other IDs, with 16 commands/LUN
+ * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
+ * IDs 2, 5-7, and 9-15.
+ */
+
+/*
+ * NOTE: The structure below is for reference only; the actual structure
+ *       to modify in order to change things is just below this comment block.
+adapter_tag_info_t aic79xx_tag_info[] =
+{
+	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+	{{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}},
+	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+	{{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
+};
+*/
+
+#ifdef CONFIG_AIC79XX_CMDS_PER_DEVICE
+#define AIC79XX_CMDS_PER_DEVICE CONFIG_AIC79XX_CMDS_PER_DEVICE
+#else
+#define AIC79XX_CMDS_PER_DEVICE AHD_MAX_QUEUE
+#endif
+
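+/*
+ * Build a single adapter_tag_info_t entry in which all 16 possible
+ * targets use the configured commands-per-device value.
+ */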
+#define AIC79XX_CONFIGED_TAG_COMMANDS {					\
+	AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,		\
+	AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,		\
+	AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,		\
+	AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,		\
+	AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,		\
+	AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,		\
+	AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,		\
+	AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE		\
+}
+
+/*
+ * By default, use the number of commands specified by
+ * the user's kernel configuration.
+ */
+static adapter_tag_info_t aic79xx_tag_info[] =
+{
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS},
+	{AIC79XX_CONFIGED_TAG_COMMANDS}
+};
+
+/*
+ * By default, read streaming is disabled.  In theory,
+ * read streaming should enhance performance, but early
+ * U320 drive firmware actually performs slower with
+ * read streaming enabled.
+ */
+#ifdef CONFIG_AIC79XX_ENABLE_RD_STRM
+#define AIC79XX_CONFIGED_RD_STRM 0xFFFF
+#else
+#define AIC79XX_CONFIGED_RD_STRM 0
+#endif
+
+static uint16_t aic79xx_rd_strm_info[] =
+{
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM,
+	AIC79XX_CONFIGED_RD_STRM
+};
+
+/*
+ * DV option:
+ *
+ * positive value = DV Enabled
+ * zero		  = DV Disabled
+ * negative value = DV Default for adapter type/seeprom
+ */
+#ifdef CONFIG_AIC79XX_DV_SETTING
+#define AIC79XX_CONFIGED_DV CONFIG_AIC79XX_DV_SETTING
+#else
+#define AIC79XX_CONFIGED_DV -1
+#endif
+
+static int8_t aic79xx_dv_settings[] =
+{
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV,
+	AIC79XX_CONFIGED_DV
+};
+
+/*
+ * The I/O cell on the chip is very configurable with respect to its analog
+ * characteristics.  Set the defaults here; they can be overridden with
+ * the proper insmod parameters.
+ */
+struct ahd_linux_iocell_opts
+{
+	uint8_t	precomp;
+	uint8_t	slewrate;
+	uint8_t amplitude;
+};
+#define AIC79XX_DEFAULT_PRECOMP		0xFF
+#define AIC79XX_DEFAULT_SLEWRATE	0xFF
+#define AIC79XX_DEFAULT_AMPLITUDE	0xFF
+#define AIC79XX_DEFAULT_IOOPTS			\
+{						\
+	AIC79XX_DEFAULT_PRECOMP,		\
+	AIC79XX_DEFAULT_SLEWRATE,		\
+	AIC79XX_DEFAULT_AMPLITUDE		\
+}
+#define AIC79XX_PRECOMP_INDEX	0
+#define AIC79XX_SLEWRATE_INDEX	1
+#define AIC79XX_AMPLITUDE_INDEX	2
+static struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
+{
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS,
+	AIC79XX_DEFAULT_IOOPTS
+};
+
+/*
+ * There should be a specific return value for this in scsi.h, but
+ * it seems that most drivers ignore it.
+ */
+#define DID_UNDERFLOW   DID_ERROR
+
+void
+ahd_print_path(struct ahd_softc *ahd, struct scb *scb)
+{
+	printk("(scsi%d:%c:%d:%d): ",
+	       ahd->platform_data->host->host_no,
+	       scb != NULL ? SCB_GET_CHANNEL(ahd, scb) : 'X',
+	       scb != NULL ? SCB_GET_TARGET(ahd, scb) : -1,
+	       scb != NULL ? SCB_GET_LUN(scb) : -1);
+}
+
+/*
+ * XXX - these options apply unilaterally to _all_ adapter
+ *       cards in the system.  This should be fixed.  Exceptions to this
+ *       rule are noted in the comments.
+ */
+
+/*
+ * Skip the scsi bus reset.  Non-zero makes us skip the reset at startup.  This
+ * has no effect on any later resets that might occur due to things like
+ * SCSI bus timeouts.
+ */
+static uint32_t aic79xx_no_reset;
+
+/*
+ * Certain PCI motherboards will scan PCI devices from highest to lowest,
+ * others scan from lowest to highest, and they tend to do all kinds of
+ * strange things when they come into contact with PCI bridge chips.  The
+ * net result of all this is that the PCI card that is actually used to boot
+ * the machine is very hard to detect.  Most motherboards go from lowest
+ * PCI slot number to highest, and the first SCSI controller found is the
+ * one you boot from.  The only exceptions to this are when a controller
+ * has its BIOS disabled.  So, we by default sort all of our SCSI controllers
+ * from lowest PCI slot number to highest PCI slot number.  We also force
+ * all controllers with their BIOS disabled to the end of the list.  This
+ * works on *almost* all computers.  Where it doesn't work, we have this
+ * option.  Setting this option to non-0 will reverse the order of the sort
+ * to highest first, then lowest, but will still leave cards with their BIOS
+ * disabled at the very end.  That should fix everyone up unless there are
+ * really strange circumstances.
+ */
+static uint32_t aic79xx_reverse_scan;
+
+/*
+ * Should we force EXTENDED translation on a controller.
+ *     0 == Use whatever is in the SEEPROM or default to off
+ *     1 == Use whatever is in the SEEPROM or default to on
+ */
+static uint32_t aic79xx_extended;
+
+/*
+ * PCI bus parity checking of the Adaptec controllers.  This is somewhat
+ * dubious at best.  To my knowledge, this option has never actually
+ * solved a PCI parity problem, but on certain machines with broken PCI
+ * chipset configurations, it can generate tons of false error messages.
+ * It's included in the driver for completeness.
+ *   0	   = Shut off PCI parity check
+ *   non-0 = Enable PCI parity check
+ *
+ * NOTE: you can't actually pass -1 on the lilo prompt.  So, to set this
+ * variable to -1 you would actually want to simply pass the variable
+ * name without a number.  That will invert the 0 which will result in
+ * -1.
+ */
+static uint32_t aic79xx_pci_parity = ~0;
+
+/*
+ * There are lots of broken chipsets in the world.  Some of them will
+ * violate the PCI spec when we issue byte sized memory writes to our
+ * controller.  I/O mapped register access, if allowed by the given
+ * platform, will work in almost all cases.
+ */
+uint32_t aic79xx_allow_memio = ~0;
+
+/*
+ * aic79xx_detect() has been run, so register all device arrivals
+ * immediately with the system rather than deferring to the sorted
+ * attachment performed by aic79xx_detect().
+ */
+int aic79xx_detect_complete;
+
+/*
+ * So that we can set how long each device is given as a selection timeout.
+ * The table of values goes like this:
+ *   0 - 256ms
+ *   1 - 128ms
+ *   2 - 64ms
+ *   3 - 32ms
+ * We default to 256ms because some older devices need a longer time
+ * to respond to initial selection.
+ */
+static uint32_t aic79xx_seltime;
+
+/*
+ * Certain devices do not perform any aging on commands.  Should the
+ * device be saturated by commands in one portion of the disk, it is
+ * possible for transactions on far away sectors to never be serviced.
+ * To handle these devices, we can periodically send an ordered tag to
+ * force all outstanding transactions to be serviced prior to a new
+ * transaction.
+ */
+uint32_t aic79xx_periodic_otag;
+
+/*
+ * Module information and settable options.
+ */
+static char *aic79xx = NULL;
+
+MODULE_AUTHOR("Maintainer: Justin T. Gibbs <gibbs@scsiguy.com>");
+MODULE_DESCRIPTION("Adaptec Aic790X U320 SCSI Host Bus Adapter driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(AIC79XX_DRIVER_VERSION);
+module_param(aic79xx, charp, 0);
+MODULE_PARM_DESC(aic79xx,
+"period delimited, options string.\n"
+"	verbose			Enable verbose/diagnostic logging\n"
+"	allow_memio		Allow device registers to be memory mapped\n"
+"	debug			Bitmask of debug values to enable\n"
+"	no_reset		Supress initial bus resets\n"
+"	extended		Enable extended geometry on all controllers\n"
+"	periodic_otag		Send an ordered tagged transaction\n"
+"				periodically to prevent tag starvation.\n"
+"				This may be required by some older disk\n"
+"				or drives/RAID arrays.\n"
+"	reverse_scan		Sort PCI devices highest Bus/Slot to lowest\n"
+"	tag_info:<tag_str>	Set per-target tag depth\n"
+"	global_tag_depth:<int>	Global tag depth for all targets on all buses\n"
+"	rd_strm:<rd_strm_masks> Set per-target read streaming setting.\n"
+"	dv:<dv_settings>	Set per-controller Domain Validation Setting.\n"
+"	slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"
+"	precomp:<pcomp_list>	Set the signal precompensation (0-7).\n"
+"	amplitude:<int>		Set the signal amplitude (0-7).\n"
+"	seltime:<int>		Selection Timeout:\n"
+"				(0/256ms,1/128ms,2/64ms,3/32ms)\n"
+"\n"
+"	Sample /etc/modprobe.conf line:\n"
+"		Enable verbose logging\n"
+"		Set tag depth on Controller 2/Target 2 to 10 tags\n"
+"		Shorten the selection timeout to 128ms\n"
+"\n"
+"	options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
+"\n"
+"	Sample /etc/modprobe.conf line:\n"
+"		Change Read Streaming for Controller's 2 and 3\n"
+"\n"
+"	options aic79xx 'aic79xx=rd_strm:{..0xFFF0.0xC0F0}'");
+
+static void ahd_linux_handle_scsi_status(struct ahd_softc *,
+					 struct ahd_linux_device *,
+					 struct scb *);
+static void ahd_linux_queue_cmd_complete(struct ahd_softc *ahd,
+					 Scsi_Cmnd *cmd);
+static void ahd_linux_filter_inquiry(struct ahd_softc *ahd,
+				     struct ahd_devinfo *devinfo);
+static void ahd_linux_dev_timed_unfreeze(u_long arg);
+static void ahd_linux_sem_timeout(u_long arg);
+static void ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd);
+static void ahd_linux_size_nseg(void);
+static void ahd_linux_thread_run_complete_queue(struct ahd_softc *ahd);
+static void ahd_linux_start_dv(struct ahd_softc *ahd);
+static void ahd_linux_dv_timeout(struct scsi_cmnd *cmd);
+static int  ahd_linux_dv_thread(void *data);
+static void ahd_linux_kill_dv_thread(struct ahd_softc *ahd);
+static void ahd_linux_dv_target(struct ahd_softc *ahd, u_int target);
+static void ahd_linux_dv_transition(struct ahd_softc *ahd,
+				    struct scsi_cmnd *cmd,
+				    struct ahd_devinfo *devinfo,
+				    struct ahd_linux_target *targ);
+static void ahd_linux_dv_fill_cmd(struct ahd_softc *ahd,
+				  struct scsi_cmnd *cmd,
+				  struct ahd_devinfo *devinfo);
+static void ahd_linux_dv_inq(struct ahd_softc *ahd,
+			     struct scsi_cmnd *cmd,
+			     struct ahd_devinfo *devinfo,
+			     struct ahd_linux_target *targ,
+			     u_int request_length);
+static void ahd_linux_dv_tur(struct ahd_softc *ahd,
+			     struct scsi_cmnd *cmd,
+			     struct ahd_devinfo *devinfo);
+static void ahd_linux_dv_rebd(struct ahd_softc *ahd,
+			      struct scsi_cmnd *cmd,
+			      struct ahd_devinfo *devinfo,
+			      struct ahd_linux_target *targ);
+static void ahd_linux_dv_web(struct ahd_softc *ahd,
+			     struct scsi_cmnd *cmd,
+			     struct ahd_devinfo *devinfo,
+			     struct ahd_linux_target *targ);
+static void ahd_linux_dv_reb(struct ahd_softc *ahd,
+			     struct scsi_cmnd *cmd,
+			     struct ahd_devinfo *devinfo,
+			     struct ahd_linux_target *targ);
+static void ahd_linux_dv_su(struct ahd_softc *ahd,
+			    struct scsi_cmnd *cmd,
+			    struct ahd_devinfo *devinfo,
+			    struct ahd_linux_target *targ);
+static int ahd_linux_fallback(struct ahd_softc *ahd,
+			      struct ahd_devinfo *devinfo);
+static __inline int ahd_linux_dv_fallback(struct ahd_softc *ahd,
+					  struct ahd_devinfo *devinfo);
+static void ahd_linux_dv_complete(Scsi_Cmnd *cmd);
+static void ahd_linux_generate_dv_pattern(struct ahd_linux_target *targ);
+static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd,
+				     struct ahd_devinfo *devinfo);
+static u_int ahd_linux_user_dv_setting(struct ahd_softc *ahd);
+static void ahd_linux_setup_user_rd_strm_settings(struct ahd_softc *ahd);
+static void ahd_linux_device_queue_depth(struct ahd_softc *ahd,
+					 struct ahd_linux_device *dev);
+static struct ahd_linux_target*	ahd_linux_alloc_target(struct ahd_softc*,
+						       u_int, u_int);
+static void			ahd_linux_free_target(struct ahd_softc*,
+						      struct ahd_linux_target*);
+static struct ahd_linux_device*	ahd_linux_alloc_device(struct ahd_softc*,
+						       struct ahd_linux_target*,
+						       u_int);
+static void			ahd_linux_free_device(struct ahd_softc*,
+						      struct ahd_linux_device*);
+static void ahd_linux_run_device_queue(struct ahd_softc*,
+				       struct ahd_linux_device*);
+static void ahd_linux_setup_tag_info_global(char *p);
+static aic_option_callback_t ahd_linux_setup_tag_info;
+static aic_option_callback_t ahd_linux_setup_rd_strm_info;
+static aic_option_callback_t ahd_linux_setup_dv;
+static aic_option_callback_t ahd_linux_setup_iocell_info;
+static int ahd_linux_next_unit(void);
+static void ahd_runq_tasklet(unsigned long data);
+static int aic79xx_setup(char *c);
+
+/****************************** Inlines ***************************************/
+static __inline void ahd_schedule_completeq(struct ahd_softc *ahd);
+static __inline void ahd_schedule_runq(struct ahd_softc *ahd);
+static __inline void ahd_setup_runq_tasklet(struct ahd_softc *ahd);
+static __inline void ahd_teardown_runq_tasklet(struct ahd_softc *ahd);
+static __inline struct ahd_linux_device*
+		     ahd_linux_get_device(struct ahd_softc *ahd, u_int channel,
+					  u_int target, u_int lun, int alloc);
+static struct ahd_cmd *ahd_linux_run_complete_queue(struct ahd_softc *ahd);
+static __inline void ahd_linux_check_device_queue(struct ahd_softc *ahd,
+						  struct ahd_linux_device *dev);
+static __inline struct ahd_linux_device *
+		     ahd_linux_next_device_to_run(struct ahd_softc *ahd);
+static __inline void ahd_linux_run_device_queues(struct ahd_softc *ahd);
+static __inline void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
+
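+/*
+ * Arm the completion queue timer, if it is not already pending, so
+ * that commands deferred to our private completion queue are
+ * eventually returned to the SCSI midlayer.
+ */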
+static __inline void
+ahd_schedule_completeq(struct ahd_softc *ahd)
+{
+	if ((ahd->platform_data->flags & AHD_RUN_CMPLT_Q_TIMER) == 0) {
+		ahd->platform_data->flags |= AHD_RUN_CMPLT_Q_TIMER;
+		ahd->platform_data->completeq_timer.expires = jiffies;
+		add_timer(&ahd->platform_data->completeq_timer);
+	}
+}
+
+/*
+ * Must be called with our lock held.
+ */
+static __inline void
+ahd_schedule_runq(struct ahd_softc *ahd)
+{
+	tasklet_schedule(&ahd->platform_data->runq_tasklet);
+}
+
+static __inline
+void ahd_setup_runq_tasklet(struct ahd_softc *ahd)
+{
+	tasklet_init(&ahd->platform_data->runq_tasklet, ahd_runq_tasklet,
+		     (unsigned long)ahd);
+}
+
+static __inline void
+ahd_teardown_runq_tasklet(struct ahd_softc *ahd)
+{
+	tasklet_kill(&ahd->platform_data->runq_tasklet);
+}
+
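+/*
+ * Look up, and optionally allocate, the per-device state for a
+ * channel/target/lun triple.  Targets on the second channel occupy
+ * slots 8-15 of the target array.
+ */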
+static __inline struct ahd_linux_device*
+ahd_linux_get_device(struct ahd_softc *ahd, u_int channel, u_int target,
+		     u_int lun, int alloc)
+{
+	struct ahd_linux_target *targ;
+	struct ahd_linux_device *dev;
+	u_int target_offset;
+
+	target_offset = target;
+	if (channel != 0)
+		target_offset += 8;
+	targ = ahd->platform_data->targets[target_offset];
+	if (targ == NULL) {
+		if (alloc != 0) {
+			targ = ahd_linux_alloc_target(ahd, channel, target);
+			if (targ == NULL)
+				return (NULL);
+		} else
+			return (NULL);
+	}
+	dev = targ->devices[lun];
+	if (dev == NULL && alloc != 0)
+		dev = ahd_linux_alloc_device(ahd, targ, lun);
+	return (dev);
+}
+
+#define AHD_LINUX_MAX_RETURNED_ERRORS 4
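+/*
+ * Drain our private completion queue, handing finished commands back
+ * to the SCSI midlayer.  Only a limited number of commands that
+ * completed with errors are returned per invocation; the remainder
+ * are deferred to the completion queue timer.
+ */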
+static struct ahd_cmd *
+ahd_linux_run_complete_queue(struct ahd_softc *ahd)
+{	
+	struct	ahd_cmd *acmd;
+	u_long	done_flags;
+	int	with_errors;
+
+	with_errors = 0;
+	ahd_done_lock(ahd, &done_flags);
+	while ((acmd = TAILQ_FIRST(&ahd->platform_data->completeq)) != NULL) {
+		Scsi_Cmnd *cmd;
+
+		if (with_errors > AHD_LINUX_MAX_RETURNED_ERRORS) {
+			/*
+			 * Linux uses stack recursion to requeue
+			 * commands that need to be retried.  Avoid
+			 * blowing out the stack by "spoon feeding"
+			 * commands that completed with error back
+			 * to the operating system in case they are going
+			 * to be retried. "ick"
+			 */
+			ahd_schedule_completeq(ahd);
+			break;
+		}
+		TAILQ_REMOVE(&ahd->platform_data->completeq,
+			     acmd, acmd_links.tqe);
+		cmd = &acmd_scsi_cmd(acmd);
+		cmd->host_scribble = NULL;
+		if (ahd_cmd_get_transaction_status(cmd) != DID_OK
+		 || (cmd->result & 0xFF) != SCSI_STATUS_OK)
+			with_errors++;
+
+		cmd->scsi_done(cmd);
+	}
+	ahd_done_unlock(ahd, &done_flags);
+	return (acmd);
+}
+
+static __inline void
+ahd_linux_check_device_queue(struct ahd_softc *ahd,
+			     struct ahd_linux_device *dev)
+{
+	if ((dev->flags & AHD_DEV_FREEZE_TIL_EMPTY) != 0
+	 && dev->active == 0) {
+		dev->flags &= ~AHD_DEV_FREEZE_TIL_EMPTY;
+		dev->qfrozen--;
+	}
+
+	if (TAILQ_FIRST(&dev->busyq) == NULL
+	 || dev->openings == 0 || dev->qfrozen != 0)
+		return;
+
+	ahd_linux_run_device_queue(ahd, dev);
+}
+
+static __inline struct ahd_linux_device *
+ahd_linux_next_device_to_run(struct ahd_softc *ahd)
+{
+	
+	if ((ahd->flags & AHD_RESOURCE_SHORTAGE) != 0
+	 || (ahd->platform_data->qfrozen != 0
+	  && AHD_DV_SIMQ_FROZEN(ahd) == 0))
+		return (NULL);
+	return (TAILQ_FIRST(&ahd->platform_data->device_runq));
+}
+
+static __inline void
+ahd_linux_run_device_queues(struct ahd_softc *ahd)
+{
+	struct ahd_linux_device *dev;
+
+	while ((dev = ahd_linux_next_device_to_run(ahd)) != NULL) {
+		TAILQ_REMOVE(&ahd->platform_data->device_runq, dev, links);
+		dev->flags &= ~AHD_DEV_ON_RUN_LIST;
+		ahd_linux_check_device_queue(ahd, dev);
+	}
+}
+
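+/*
+ * Tear down the DMA mappings (scatter/gather or single buffer) that
+ * were established for this command's data when it was queued.
+ */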
+static __inline void
+ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+	Scsi_Cmnd *cmd;
+	int direction;
+
+	cmd = scb->io_ctx;
+	direction = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+	ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
+	if (cmd->use_sg != 0) {
+		struct scatterlist *sg;
+
+		sg = (struct scatterlist *)cmd->request_buffer;
+		pci_unmap_sg(ahd->dev_softc, sg, cmd->use_sg, direction);
+	} else if (cmd->request_bufflen != 0) {
+		pci_unmap_single(ahd->dev_softc,
+				 scb->platform_data->buf_busaddr,
+				 cmd->request_bufflen, direction);
+	}
+}
+
+/******************************** Macros **************************************/
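+/*
+ * Build the SCSIID value for a command: the target ID is placed in
+ * the TID field and our initiator ID occupies the low bits.
+ */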
+#define BUILD_SCSIID(ahd, cmd)						\
+	((((cmd)->device->id << TID_SHIFT) & TID) | (ahd)->our_id)
+
+/************************  Host template entry points *************************/
+static int	   ahd_linux_detect(Scsi_Host_Template *);
+static const char *ahd_linux_info(struct Scsi_Host *);
+static int	   ahd_linux_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+static int	   ahd_linux_slave_alloc(Scsi_Device *);
+static int	   ahd_linux_slave_configure(Scsi_Device *);
+static void	   ahd_linux_slave_destroy(Scsi_Device *);
+#if defined(__i386__)
+static int	   ahd_linux_biosparam(struct scsi_device*,
+				       struct block_device*, sector_t, int[]);
+#endif
+#else
+static int	   ahd_linux_release(struct Scsi_Host *);
+static void	   ahd_linux_select_queue_depth(struct Scsi_Host *host,
+						Scsi_Device *scsi_devs);
+#if defined(__i386__)
+static int	   ahd_linux_biosparam(Disk *, kdev_t, int[]);
+#endif
+#endif
+static int	   ahd_linux_bus_reset(Scsi_Cmnd *);
+static int	   ahd_linux_dev_reset(Scsi_Cmnd *);
+static int	   ahd_linux_abort(Scsi_Cmnd *);
+
+/*
+ * Calculate a safe value for AHD_NSEG (as expressed through ahd_linux_nseg).
+ *
+ * In pre-2.5.X...
+ * The midlayer allocates an S/G array dynamically when a command is issued
+ * using SCSI malloc.  This array, which is in an OS dependent format that
+ * must later be copied to our private S/G list, is sized to house just the
+ * number of segments needed for the current transfer.  Since the code that
+ * sizes the SCSI malloc pool does not take into consideration fragmentation
+ * of the pool, executing transactions numbering just a fraction of our
+ * concurrent transaction limit with SG list lengths approaching AHD_NSEG will
+ * quickly deplete the SCSI malloc pool of usable space.  Unfortunately, the
+ * mid-layer does not properly handle these scsi malloc failures for the S/G
+ * array and the result can be a lockup of the I/O subsystem.  We try to size
+ * our S/G list so that it satisfies our driver's allocation requirements in
+ * addition to avoiding fragmentation of the SCSI malloc pool.
+ */
+static void
+ahd_linux_size_nseg(void)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+	u_int cur_size;
+	u_int best_size;
+
+	/*
+	 * The SCSI allocator rounds to the nearest 512 bytes
+	 * and cannot allocate across a page boundary.  Our algorithm
+	 * is to start at 1K of scsi malloc space per-command and
+	 * loop through all factors of the PAGE_SIZE and pick the best.
+	 */
+	best_size = 0;
+	for (cur_size = 1024; cur_size <= PAGE_SIZE; cur_size *= 2) {
+		u_int nseg;
+
+		nseg = cur_size / sizeof(struct scatterlist);
+		if (nseg < AHD_LINUX_MIN_NSEG)
+			continue;
+
+		if (best_size == 0) {
+			best_size = cur_size;
+			ahd_linux_nseg = nseg;
+		} else {
+			u_int best_rem;
+			u_int cur_rem;
+
+			/*
+			 * Compare the traits of the current "best_size"
+			 * with the current size to determine if the
+			 * current size is a better size.
+			 */
+			best_rem = best_size % sizeof(struct scatterlist);
+			cur_rem = cur_size % sizeof(struct scatterlist);
+			if (cur_rem < best_rem) {
+				best_size = cur_size;
+				ahd_linux_nseg = nseg;
+			}
+		}
+	}
+#endif
+}
+
+/*
+ * Try to detect an Adaptec 79XX controller.
+ */
+static int
+ahd_linux_detect(Scsi_Host_Template *template)
+{
+	struct	ahd_softc *ahd;
+	int     found;
+	int	error = 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+	/*
+	 * It is a bug that the upper layer takes
+	 * this lock just prior to calling us.
+	 */
+	spin_unlock_irq(&io_request_lock);
+#endif
+
+	/*
+	 * Sanity checking of Linux SCSI data structures so
+	 * that some of our hacks^H^H^H^H^Hassumptions aren't
+	 * violated.
+	 */
+	if (offsetof(struct ahd_cmd_internal, end)
+	  > offsetof(struct scsi_cmnd, host_scribble)) {
+		printf("ahd_linux_detect: SCSI data structures changed.\n");
+		printf("ahd_linux_detect: Unable to attach\n");
+		return (0);
+	}
+	/*
+	 * Determine an appropriate size for our Scatter Gather lists.
+	 */
+	ahd_linux_size_nseg();
+#ifdef MODULE
+	/*
+	 * If we've been passed any parameters, process them now.
+	 */
+	if (aic79xx)
+		aic79xx_setup(aic79xx);
+#endif
+
+	template->proc_name = "aic79xx";
+
+	/*
+	 * Initialize our softc list lock prior to
+	 * probing for any adapters.
+	 */
+	ahd_list_lockinit();
+
+#ifdef CONFIG_PCI
+	error = ahd_linux_pci_init();
+	if (error)
+		return error;
+#endif
+
+	/*
+	 * Register with the SCSI layer all
+	 * controllers we've found.
+	 */
+	found = 0;
+	TAILQ_FOREACH(ahd, &ahd_tailq, links) {
+
+		if (ahd_linux_register_host(ahd, template) == 0)
+			found++;
+	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+	spin_lock_irq(&io_request_lock);
+#endif
+	aic79xx_detect_complete++;
+	return 0;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+/*
+ * Free the passed in Scsi_Host memory structures prior to unloading the
+ * module.
+ */
+static int
+ahd_linux_release(struct Scsi_Host * host)
+{
+	struct ahd_softc *ahd;
+	u_long l;
+
+	ahd_list_lock(&l);
+	if (host != NULL) {
+
+		/*
+		 * We should be able to just perform
+		 * the free directly, but check our
+		 * list for extra sanity.
+		 */
+		ahd = ahd_find_softc(*(struct ahd_softc **)host->hostdata);
+		if (ahd != NULL) {
+			u_long s;
+
+			ahd_lock(ahd, &s);
+			ahd_intr_enable(ahd, FALSE);
+			ahd_unlock(ahd, &s);
+			ahd_free(ahd);
+		}
+	}
+	ahd_list_unlock(&l);
+	return (0);
+}
+#endif
+
+/*
+ * Return a string describing the driver.
+ */
+static const char *
+ahd_linux_info(struct Scsi_Host *host)
+{
+	static char buffer[512];
+	char	ahd_info[256];
+	char   *bp;
+	struct ahd_softc *ahd;
+
+	bp = &buffer[0];
+	ahd = *(struct ahd_softc **)host->hostdata;
+	memset(bp, 0, sizeof(buffer));
+	strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev ");
+	strcat(bp, AIC79XX_DRIVER_VERSION);
+	strcat(bp, "\n");
+	strcat(bp, "        <");
+	strcat(bp, ahd->description);
+	strcat(bp, ">\n");
+	strcat(bp, "        ");
+	ahd_controller_info(ahd, ahd_info);
+	strcat(bp, ahd_info);
+	strcat(bp, "\n");
+
+	return (bp);
+}
+
+/*
+ * Queue an SCB to the controller.
+ */
+static int
+ahd_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
+{
+	struct	 ahd_softc *ahd;
+	struct	 ahd_linux_device *dev;
+	u_long	 flags;
+
+	ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
+
+	/*
+	 * Save the callback on completion function.
+	 */
+	cmd->scsi_done = scsi_done;
+
+	ahd_midlayer_entrypoint_lock(ahd, &flags);
+
+	/*
+	 * Close the race of a command that was in the process of
+	 * being queued to us just as our simq was frozen.  Let
+	 * DV commands through so long as we are only frozen to
+	 * perform DV.
+	 */
+	if (ahd->platform_data->qfrozen != 0
+	 && AHD_DV_CMD(cmd) == 0) {
+
+		ahd_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ);
+		ahd_linux_queue_cmd_complete(ahd, cmd);
+		ahd_schedule_completeq(ahd);
+		ahd_midlayer_entrypoint_unlock(ahd, &flags);
+		return (0);
+	}
+	dev = ahd_linux_get_device(ahd, cmd->device->channel,
+				   cmd->device->id, cmd->device->lun,
+				   /*alloc*/TRUE);
+	if (dev == NULL) {
+		ahd_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL);
+		ahd_linux_queue_cmd_complete(ahd, cmd);
+		ahd_schedule_completeq(ahd);
+		ahd_midlayer_entrypoint_unlock(ahd, &flags);
+		printf("%s: aic79xx_linux_queue - Unable to allocate device!\n",
+		       ahd_name(ahd));
+		return (0);
+	}
+	if (cmd->cmd_len > MAX_CDB_LEN) {
+		ahd_midlayer_entrypoint_unlock(ahd, &flags);
+		return (-EINVAL);
+	}
+	cmd->result = CAM_REQ_INPROG << 16;
+	TAILQ_INSERT_TAIL(&dev->busyq, (struct ahd_cmd *)cmd, acmd_links.tqe);
+	if ((dev->flags & AHD_DEV_ON_RUN_LIST) == 0) {
+		TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq, dev, links);
+		dev->flags |= AHD_DEV_ON_RUN_LIST;
+		ahd_linux_run_device_queues(ahd);
+	}
+	ahd_midlayer_entrypoint_unlock(ahd, &flags);
+	return (0);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+static int
+ahd_linux_slave_alloc(Scsi_Device *device)
+{
+	struct	ahd_softc *ahd;
+
+	ahd = *((struct ahd_softc **)device->host->hostdata);
+	if (bootverbose)
+		printf("%s: Slave Alloc %d\n", ahd_name(ahd), device->id);
+	return (0);
+}
+
+static int
+ahd_linux_slave_configure(Scsi_Device *device)
+{
+	struct	ahd_softc *ahd;
+	struct	ahd_linux_device *dev;
+	u_long	flags;
+
+	ahd = *((struct ahd_softc **)device->host->hostdata);
+	if (bootverbose)
+		printf("%s: Slave Configure %d\n", ahd_name(ahd), device->id);
+	ahd_midlayer_entrypoint_lock(ahd, &flags);
+	/*
+	 * Since Linux has attached to the device, configure
+	 * it so we don't free and allocate the device
+	 * structure on every command.
+	 */
+	dev = ahd_linux_get_device(ahd, device->channel,
+				   device->id, device->lun,
+				   /*alloc*/TRUE);
+	if (dev != NULL) {
+		dev->flags &= ~AHD_DEV_UNCONFIGURED;
+		dev->flags |= AHD_DEV_SLAVE_CONFIGURED;
+		dev->scsi_device = device;
+		ahd_linux_device_queue_depth(ahd, dev);
+	}
+	ahd_midlayer_entrypoint_unlock(ahd, &flags);
+	return (0);
+}
+
+static void
+ahd_linux_slave_destroy(Scsi_Device *device)
+{
+	struct	ahd_softc *ahd;
+	struct	ahd_linux_device *dev;
+	u_long	flags;
+
+	ahd = *((struct ahd_softc **)device->host->hostdata);
+	if (bootverbose)
+		printf("%s: Slave Destroy %d\n", ahd_name(ahd), device->id);
+	ahd_midlayer_entrypoint_lock(ahd, &flags);
+	dev = ahd_linux_get_device(ahd, device->channel,
+				   device->id, device->lun,
+					   /*alloc*/FALSE);
+
+	/*
+	 * Filter out "silly" deletions of real devices by only
+	 * deleting devices that have had slave_configure()
+	 * called on them.  All other devices that have not
+	 * been configured will automatically be deleted by
+	 * the refcounting process.
+	 */
+	if (dev != NULL
+	 && (dev->flags & AHD_DEV_SLAVE_CONFIGURED) != 0) {
+		dev->flags |= AHD_DEV_UNCONFIGURED;
+		if (TAILQ_EMPTY(&dev->busyq)
+		 && dev->active == 0
+		 && (dev->flags & AHD_DEV_TIMER_ACTIVE) == 0)
+			ahd_linux_free_device(ahd, dev);
+	}
+	ahd_midlayer_entrypoint_unlock(ahd, &flags);
+}
+#else
+/*
+ * Sets the queue depth for each SCSI device hanging
+ * off the input host adapter.
+ */
+static void
+ahd_linux_select_queue_depth(struct Scsi_Host * host,
+			     Scsi_Device * scsi_devs)
+{
+	Scsi_Device *device;
+	Scsi_Device *ldev;
+	struct	ahd_softc *ahd;
+	u_long	flags;
+
+	ahd = *((struct ahd_softc **)host->hostdata);
+	ahd_lock(ahd, &flags);
+	for (device = scsi_devs; device != NULL; device = device->next) {
+
+		/*
+		 * Watch out for duplicate devices.  This works around
+		 * some quirks in how the SCSI scanning code does its
+		 * device management.
+		 */
+		for (ldev = scsi_devs; ldev != device; ldev = ldev->next) {
+			if (ldev->host == device->host
+			 && ldev->channel == device->channel
+			 && ldev->id == device->id
+			 && ldev->lun == device->lun)
+				break;
+		}
+		/* Skip duplicate. */
+		if (ldev != device)
+			continue;
+
+		if (device->host == host) {
+			struct	 ahd_linux_device *dev;
+
+			/*
+			 * Since Linux has attached to the device, configure
+			 * it so we don't free and allocate the device
+			 * structure on every command.
+			 */
+			dev = ahd_linux_get_device(ahd, device->channel,
+						   device->id, device->lun,
+						   /*alloc*/TRUE);
+			if (dev != NULL) {
+				dev->flags &= ~AHD_DEV_UNCONFIGURED;
+				dev->scsi_device = device;
+				ahd_linux_device_queue_depth(ahd, dev);
+				device->queue_depth = dev->openings
+						    + dev->active;
+				if ((dev->flags & (AHD_DEV_Q_BASIC
+						| AHD_DEV_Q_TAGGED)) == 0) {
+					/*
+					 * We allow the OS to queue 2 untagged
+					 * transactions to us at any time even
+					 * though we can only execute them
+					 * serially on the controller/device.
+					 * This should remove some latency.
+					 */
+					device->queue_depth = 2;
+				}
+			}
+		}
+	}
+	ahd_unlock(ahd, &flags);
+}
+#endif
+
+#if defined(__i386__)
+/*
+ * Return the disk geometry for the given SCSI device.
+ */
+static int
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+		    sector_t capacity, int geom[])
+{
+	uint8_t *bh;
+#else
+ahd_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
+{
+	struct	scsi_device *sdev = disk->device;
+	u_long	capacity = disk->capacity;
+	struct	buffer_head *bh;
+#endif
+	int	 heads;
+	int	 sectors;
+	int	 cylinders;
+	int	 ret;
+	int	 extended;
+	struct	 ahd_softc *ahd;
+
+	ahd = *((struct ahd_softc **)sdev->host->hostdata);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	bh = scsi_bios_ptable(bdev);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,17)
+	bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, block_size(dev));
+#else
+	bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, 1024);
+#endif
+
+	if (bh) {
+		ret = scsi_partsize(bh, capacity,
+				    &geom[2], &geom[0], &geom[1]);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+		kfree(bh);
+#else
+		brelse(bh);
+#endif
+		if (ret != -1)
+			return (ret);
+	}
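+	/*
+	 * No geometry could be derived from the partition table.
+	 * Fall back to a 64 head, 32 sector translation and switch
+	 * to the extended 255/63 translation when the resulting
+	 * cylinder count exceeds the traditional 1024 cylinder limit.
+	 */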
+	heads = 64;
+	sectors = 32;
+	cylinders = aic_sector_div(capacity, heads, sectors);
+
+	if (aic79xx_extended != 0)
+		extended = 1;
+	else
+		extended = (ahd->flags & AHD_EXTENDED_TRANS_A) != 0;
+	if (extended && cylinders >= 1024) {
+		heads = 255;
+		sectors = 63;
+		cylinders = aic_sector_div(capacity, heads, sectors);
+	}
+	geom[0] = heads;
+	geom[1] = sectors;
+	geom[2] = cylinders;
+	return (0);
+}
+#endif
+
+/*
+ * Abort the current SCSI command(s).
+ */
+static int
+ahd_linux_abort(Scsi_Cmnd *cmd)
+{
+	struct ahd_softc *ahd;
+	struct ahd_cmd *acmd;
+	struct ahd_cmd *list_acmd;
+	struct ahd_linux_device *dev;
+	struct scb *pending_scb;
+	u_long s;
+	u_int  saved_scbptr;
+	u_int  active_scbptr;
+	u_int  last_phase;
+	u_int  cdb_byte;
+	int    retval;
+	int    was_paused;
+	int    paused;
+	int    wait;
+	int    disconnected;
+	ahd_mode_state saved_modes;
+
+	pending_scb = NULL;
+	paused = FALSE;
+	wait = FALSE;
+	ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
+	acmd = (struct ahd_cmd *)cmd;
+
+	printf("%s:%d:%d:%d: Attempting to abort cmd %p:",
+	       ahd_name(ahd), cmd->device->channel, cmd->device->id,
+	       cmd->device->lun, cmd);
+	for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
+		printf(" 0x%x", cmd->cmnd[cdb_byte]);
+	printf("\n");
+
+	/*
+	 * In all versions of Linux, we have to work around
+	 * a major flaw in how the mid-layer is locked down
+	 * if we are to sleep successfully in our error handler
+	 * while allowing our interrupt handler to run.  Since
+	 * the midlayer acquires either the io_request_lock or
+	 * our lock prior to calling us, we must use the
+	 * spin_unlock_irq() method for unlocking our lock.
+	 * This will force interrupts to be enabled on the
+	 * current CPU.  Since the EH thread should not have
+	 * been running with CPU interrupts disabled other than
+	 * by acquiring either the io_request_lock or our own
+	 * lock, this *should* be safe.
+	 */
+	ahd_midlayer_entrypoint_lock(ahd, &s);
+
+	/*
+	 * First determine if we currently own this command.
+	 * Start by searching the device queue.  If not found
+	 * there, check the pending_scb list.  If not found
+	 * at all, and the system wanted us to just abort the
+	 * command, return success.
+	 */
+	dev = ahd_linux_get_device(ahd, cmd->device->channel,
+				   cmd->device->id, cmd->device->lun,
+				   /*alloc*/FALSE);
+
+	if (dev == NULL) {
+		/*
+		 * No target device for this command exists,
+		 * so we must not still own the command.
+		 */
+		printf("%s:%d:%d:%d: Is not an active device\n",
+		       ahd_name(ahd), cmd->device->channel, cmd->device->id,
+		       cmd->device->lun);
+		retval = SUCCESS;
+		goto no_cmd;
+	}
+
+	TAILQ_FOREACH(list_acmd, &dev->busyq, acmd_links.tqe) {
+		if (list_acmd == acmd)
+			break;
+	}
+
+	if (list_acmd != NULL) {
+		printf("%s:%d:%d:%d: Command found on device queue\n",
+		       ahd_name(ahd), cmd->device->channel, cmd->device->id,
+		       cmd->device->lun);
+		TAILQ_REMOVE(&dev->busyq, list_acmd, acmd_links.tqe);
+		cmd->result = DID_ABORT << 16;
+		ahd_linux_queue_cmd_complete(ahd, cmd);
+		retval = SUCCESS;
+		goto done;
+	}
+
+	/*
+	 * See if we can find a matching cmd in the pending list.
+	 */
+	LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
+		if (pending_scb->io_ctx == cmd)
+			break;
+	}
+
+	if (pending_scb == NULL) {
+		printf("%s:%d:%d:%d: Command not found\n",
+		       ahd_name(ahd), cmd->device->channel, cmd->device->id,
+		       cmd->device->lun);
+		goto no_cmd;
+	}
+
+	if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
+		/*
+		 * We can't queue two recovery actions using the same SCB
+		 */
+		retval = FAILED;
+		goto  done;
+	}
+
+	/*
+	 * Ensure that the card doesn't do anything
+	 * behind our back.  Also make sure that we
+	 * didn't "just" miss an interrupt that would
+	 * affect this cmd.
+	 */
+	was_paused = ahd_is_paused(ahd);
+	ahd_pause_and_flushwork(ahd);
+	paused = TRUE;
+
+	if ((pending_scb->flags & SCB_ACTIVE) == 0) {
+		printf("%s:%d:%d:%d: Command already completed\n",
+		       ahd_name(ahd), cmd->device->channel, cmd->device->id,
+		       cmd->device->lun);
+		goto no_cmd;
+	}
+
+	printf("%s: At time of recovery, card was %spaused\n",
+	       ahd_name(ahd), was_paused ? "" : "not ");
+	ahd_dump_card_state(ahd);
+
+	disconnected = TRUE;
+	if (ahd_search_qinfifo(ahd, cmd->device->id, cmd->device->channel + 'A',
+			       cmd->device->lun, SCB_GET_TAG(pending_scb),
+			       ROLE_INITIATOR, CAM_REQ_ABORTED,
+			       SEARCH_COMPLETE) > 0) {
+		printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
+		       ahd_name(ahd), cmd->device->channel, cmd->device->id,
+				cmd->device->lun);
+		retval = SUCCESS;
+		goto done;
+	}
+
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	last_phase = ahd_inb(ahd, LASTPHASE);
+	saved_scbptr = ahd_get_scbptr(ahd);
+	active_scbptr = saved_scbptr;
+	if (disconnected && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
+		struct scb *bus_scb;
+
+		bus_scb = ahd_lookup_scb(ahd, active_scbptr);
+		if (bus_scb == pending_scb)
+			disconnected = FALSE;
+	}
+
+	/*
+	 * At this point, pending_scb is the scb associated with the
+	 * passed in command.  That command is currently active on the
+	 * bus or is in the disconnected state.
+	 */
+	if (last_phase != P_BUSFREE
+	 && SCB_GET_TAG(pending_scb) == active_scbptr) {
+
+		/*
+		 * We're active on the bus, so assert ATN
+		 * and hope that the target responds.
+		 */
+		pending_scb = ahd_lookup_scb(ahd, active_scbptr);
+		pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
+		ahd_outb(ahd, MSG_OUT, HOST_MSG);
+		ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
+		printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
+		       ahd_name(ahd), cmd->device->channel,
+		       cmd->device->id, cmd->device->lun);
+		wait = TRUE;
+	} else if (disconnected) {
+
+		/*
+		 * Actually re-queue this SCB in an attempt
+		 * to select the device before it reconnects.
+		 */
+		pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
+		ahd_set_scbptr(ahd, SCB_GET_TAG(pending_scb));
+		pending_scb->hscb->cdb_len = 0;
+		pending_scb->hscb->task_attribute = 0;
+		pending_scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK;
+
+		if ((pending_scb->flags & SCB_PACKETIZED) != 0) {
+			/*
+			 * Mark the SCB as having an outstanding
+			 * task management function.  Should the command
+			 * complete normally before the task management
+			 * function can be sent, the host will be notified
+			 * to abort our requeued SCB.
+			 */
+			ahd_outb(ahd, SCB_TASK_MANAGEMENT,
+				 pending_scb->hscb->task_management);
+		} else {
+			/*
+			 * If non-packetized, set the MK_MESSAGE control
+			 * bit indicating that we desire to send a message.
+			 * We also set the disconnected flag since there is
+			 * no guarantee that our SCB control byte matches
+			 * the version on the card.  We don't want the
+			 * sequencer to abort the command thinking an
+			 * unsolicited reselection occurred.
+			 */
+			pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
+
+			/*
+			 * The sequencer will never re-reference the
+			 * in-core SCB.  To make sure we are notified
+			 * during reselection, set the MK_MESSAGE flag in
+			 * the card's copy of the SCB.
+			 */
+			ahd_outb(ahd, SCB_CONTROL,
+				 ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
+		}
+
+		/*
+		 * Clear out any entries in the QINFIFO first
+		 * so we are the next SCB for this target
+		 * to run.
+		 */
+		ahd_search_qinfifo(ahd, cmd->device->id,
+				   cmd->device->channel + 'A', cmd->device->lun,
+				   SCB_LIST_NULL, ROLE_INITIATOR,
+				   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
+		ahd_qinfifo_requeue_tail(ahd, pending_scb);
+		ahd_set_scbptr(ahd, saved_scbptr);
+		ahd_print_path(ahd, pending_scb);
+		printf("Device is disconnected, re-queuing SCB\n");
+		wait = TRUE;
+	} else {
+		printf("%s:%d:%d:%d: Unable to deliver message\n",
+		       ahd_name(ahd), cmd->device->channel,
+		       cmd->device->id, cmd->device->lun);
+		retval = FAILED;
+		goto done;
+	}
+
+no_cmd:
+	/*
+	 * Our assumption is that if we don't have the command, no
+	 * recovery action was required, so we return success.  Again,
+	 * the semantics of the mid-layer recovery engine are not
+	 * well defined, so this may change in time.
+	 */
+	retval = SUCCESS;
+done:
+	if (paused)
+		ahd_unpause(ahd);
+	if (wait) {
+		struct timer_list timer;
+		int ret;
+
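+		/*
+		 * Sleep on eh_sem with a five second watchdog timer.
+		 * If the timer has already fired by the time we are
+		 * awakened, the recovery action never completed and
+		 * is reported as failed.
+		 */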
+		pending_scb->platform_data->flags |= AHD_SCB_UP_EH_SEM;
+		spin_unlock_irq(&ahd->platform_data->spin_lock);
+		init_timer(&timer);
+		timer.data = (u_long)pending_scb;
+		timer.expires = jiffies + (5 * HZ);
+		timer.function = ahd_linux_sem_timeout;
+		add_timer(&timer);
+		printf("Recovery code sleeping\n");
+		down(&ahd->platform_data->eh_sem);
+		printf("Recovery code awake\n");
+        	ret = del_timer_sync(&timer);
+		if (ret == 0) {
+			printf("Timer Expired\n");
+			retval = FAILED;
+		}
+		spin_lock_irq(&ahd->platform_data->spin_lock);
+	}
+	ahd_schedule_runq(ahd);
+	ahd_linux_run_complete_queue(ahd);
+	ahd_midlayer_entrypoint_unlock(ahd, &s);
+	return (retval);
+}
+
+
+static void
+ahd_linux_dev_reset_complete(Scsi_Cmnd *cmd)
+{
+	free(cmd, M_DEVBUF);
+}
+
+/*
+ * Attempt to send a target reset message to the device that timed out.
+ */
+static int
+ahd_linux_dev_reset(Scsi_Cmnd *cmd)
+{
+	struct	ahd_softc *ahd;
+	struct	scsi_cmnd *recovery_cmd;
+	struct	ahd_linux_device *dev;
+	struct	ahd_initiator_tinfo *tinfo;
+	struct	ahd_tmode_tstate *tstate;
+	struct	scb *scb;
+	struct	hardware_scb *hscb;
+	u_long	s;
+	struct	timer_list timer;
+	int	retval;
+
+	ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
+	recovery_cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
+	if (!recovery_cmd)
+		return (FAILED);
+	memset(recovery_cmd, 0, sizeof(struct scsi_cmnd));
+	recovery_cmd->device = cmd->device;
+	recovery_cmd->scsi_done = ahd_linux_dev_reset_complete;
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
+		printf("%s:%d:%d:%d: Device reset called for cmd %p\n",
+		       ahd_name(ahd), cmd->device->channel, cmd->device->id,
+		       cmd->device->lun, cmd);
+#endif
+	ahd_midlayer_entrypoint_lock(ahd, &s);
+
+	dev = ahd_linux_get_device(ahd, cmd->device->channel, cmd->device->id,
+				   cmd->device->lun, /*alloc*/FALSE);
+	if (dev == NULL) {
+		ahd_midlayer_entrypoint_unlock(ahd, &s);
+		kfree(recovery_cmd);
+		return (FAILED);
+	}
+	if ((scb = ahd_get_scb(ahd, AHD_NEVER_COL_IDX)) == NULL) {
+		ahd_midlayer_entrypoint_unlock(ahd, &s);
+		kfree(recovery_cmd);
+		return (FAILED);
+	}
+	tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+				    cmd->device->id, &tstate);
+	recovery_cmd->result = CAM_REQ_INPROG << 16;
+	recovery_cmd->host_scribble = (char *)scb;
+	scb->io_ctx = recovery_cmd;
+	scb->platform_data->dev = dev;
+	scb->sg_count = 0;
+	ahd_set_residual(scb, 0);
+	ahd_set_sense_residual(scb, 0);
+	hscb = scb->hscb;
+	hscb->control = 0;
+	hscb->scsiid = BUILD_SCSIID(ahd, cmd);
+	hscb->lun = cmd->device->lun;
+	hscb->cdb_len = 0;
+	hscb->task_management = SIU_TASKMGMT_LUN_RESET;
+	scb->flags |= SCB_DEVICE_RESET|SCB_RECOVERY_SCB|SCB_ACTIVE;
+	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
+		scb->flags |= SCB_PACKETIZED;
+	} else {
+		hscb->control |= MK_MESSAGE;
+	}
+	dev->openings--;
+	dev->active++;
+	dev->commands_issued++;
+	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
+	ahd_queue_scb(ahd, scb);
+
+	scb->platform_data->flags |= AHD_SCB_UP_EH_SEM;
+	spin_unlock_irq(&ahd->platform_data->spin_lock);
+	init_timer(&timer);
+	timer.data = (u_long)scb;
+	timer.expires = jiffies + (5 * HZ);
+	timer.function = ahd_linux_sem_timeout;
+	add_timer(&timer);
+	printf("Recovery code sleeping\n");
+	down(&ahd->platform_data->eh_sem);
+	printf("Recovery code awake\n");
+	retval = SUCCESS;
+	if (del_timer_sync(&timer) == 0) {
+		printf("Timer Expired\n");
+		retval = FAILED;
+	}
+	spin_lock_irq(&ahd->platform_data->spin_lock);
+	ahd_schedule_runq(ahd);
+	ahd_linux_run_complete_queue(ahd);
+	ahd_midlayer_entrypoint_unlock(ahd, &s);
+	printf("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval);
+	return (retval);
+}
+
+/*
+ * Reset the SCSI bus.
+ */
+static int
+ahd_linux_bus_reset(Scsi_Cmnd *cmd)
+{
+	struct ahd_softc *ahd;
+	u_long s;
+	int    found;
+
+	ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
+#ifdef AHD_DEBUG
+	if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
+		printf("%s: Bus reset called for cmd %p\n",
+		       ahd_name(ahd), cmd);
+#endif
+	ahd_midlayer_entrypoint_lock(ahd, &s);
+	found = ahd_reset_channel(ahd, cmd->device->channel + 'A',
+				  /*initiate reset*/TRUE);
+	ahd_linux_run_complete_queue(ahd);
+	ahd_midlayer_entrypoint_unlock(ahd, &s);
+
+	if (bootverbose)
+		printf("%s: SCSI bus reset delivered. "
+		       "%d SCBs aborted.\n", ahd_name(ahd), found);
+
+	return (SUCCESS);
+}
+
+Scsi_Host_Template aic79xx_driver_template = {
+	.module			= THIS_MODULE,
+	.name			= "aic79xx",
+	.proc_info		= ahd_linux_proc_info,
+	.info			= ahd_linux_info,
+	.queuecommand		= ahd_linux_queue,
+	.eh_abort_handler	= ahd_linux_abort,
+	.eh_device_reset_handler = ahd_linux_dev_reset,
+	.eh_bus_reset_handler	= ahd_linux_bus_reset,
+#if defined(__i386__)
+	.bios_param		= ahd_linux_biosparam,
+#endif
+	.can_queue		= AHD_MAX_QUEUE,
+	.this_id		= -1,
+	.cmd_per_lun		= 2,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.slave_alloc		= ahd_linux_slave_alloc,
+	.slave_configure	= ahd_linux_slave_configure,
+	.slave_destroy		= ahd_linux_slave_destroy,
+};
+
+/**************************** Tasklet Handler *********************************/
+
+/*
+ * In 2.4.X and above, this routine is called from a tasklet,
+ * so we must re-acquire our lock prior to executing this code.
+ * In all prior kernels, ahd_schedule_runq() calls this routine
+ * directly and ahd_schedule_runq() is called with our lock held.
+ */
+static void
+ahd_runq_tasklet(unsigned long data)
+{
+	struct ahd_softc* ahd;
+	struct ahd_linux_device *dev;
+	u_long flags;
+
+	ahd = (struct ahd_softc *)data;
+	ahd_lock(ahd, &flags);
+	while ((dev = ahd_linux_next_device_to_run(ahd)) != NULL) {
+	
+		TAILQ_REMOVE(&ahd->platform_data->device_runq, dev, links);
+		dev->flags &= ~AHD_DEV_ON_RUN_LIST;
+		ahd_linux_check_device_queue(ahd, dev);
+		/* Yield to our interrupt handler */
+		ahd_unlock(ahd, &flags);
+		ahd_lock(ahd, &flags);
+	}
+	ahd_unlock(ahd, &flags);
+}
+
+/******************************** Bus DMA *************************************/
+int
+ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent,
+		   bus_size_t alignment, bus_size_t boundary,
+		   dma_addr_t lowaddr, dma_addr_t highaddr,
+		   bus_dma_filter_t *filter, void *filterarg,
+		   bus_size_t maxsize, int nsegments,
+		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
+{
+	bus_dma_tag_t dmat;
+
+	dmat = malloc(sizeof(*dmat), M_DEVBUF, M_NOWAIT);
+	if (dmat == NULL)
+		return (ENOMEM);
+
+	/*
+	 * Linux is very simplistic about DMA memory.  For now don't
+	 * maintain all specification information.  Once Linux supplies
+	 * better facilities for doing these operations, or the
+	 * needs of this particular driver change, we might need to do
+	 * more here.
+	 */
+	dmat->alignment = alignment;
+	dmat->boundary = boundary;
+	dmat->maxsize = maxsize;
+	*ret_tag = dmat;
+	return (0);
+}
+
+void
+ahd_dma_tag_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat)
+{
+	free(dmat, M_DEVBUF);
+}
+
+int
+ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr,
+		 int flags, bus_dmamap_t *mapp)
+{
+	bus_dmamap_t map;
+
+	map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
+	if (map == NULL)
+		return (ENOMEM);
+	/*
+	 * Although we can dma data above 4GB, our
+	 * "consistent" memory is below 4GB for
+	 * space efficiency reasons (only need a 4byte
+	 * address).  For this reason, we have to reset
+	 * our dma mask when doing allocations.
+	 */
+	if (ahd->dev_softc != NULL)
+		if (pci_set_dma_mask(ahd->dev_softc, 0xFFFFFFFF)) {
+			printk(KERN_WARNING "aic79xx: No suitable DMA available.\n");
+			kfree(map);
+			return (ENODEV);
+		}
+	*vaddr = pci_alloc_consistent(ahd->dev_softc,
+				      dmat->maxsize, &map->bus_addr);
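+	/*
+	 * Restore the full DMA mask now that the consistent
+	 * allocation has been attempted.
+	 */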
+	if (ahd->dev_softc != NULL)
+		if (pci_set_dma_mask(ahd->dev_softc,
+				     ahd->platform_data->hw_dma_mask)) {
+			printk(KERN_WARNING "aic79xx: No suitable DMA available.\n");
+			kfree(map);
+			return (ENODEV);
+		}
+	if (*vaddr == NULL) {
+		kfree(map);
+		return (ENOMEM);
+	}
+	*mapp = map;
+	return(0);
+}
+
+void
+ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat,
+		void* vaddr, bus_dmamap_t map)
+{
+	pci_free_consistent(ahd->dev_softc, dmat->maxsize,
+			    vaddr, map->bus_addr);
+}
+
+int
+ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map,
+		void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
+		void *cb_arg, int flags)
+{
+	/*
+	 * Assume for now that this will only be used during
+	 * initialization and not for per-transaction buffer mapping.
+	 */
+	bus_dma_segment_t stack_sg;
+
+	stack_sg.ds_addr = map->bus_addr;
+	stack_sg.ds_len = dmat->maxsize;
+	cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
+	return (0);
+}
+
+void
+ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+	/*
+	 * The map may be NULL in our < 2.3.X implementation.
+	 */
+	if (map != NULL)
+		free(map, M_DEVBUF);
+}
+
+int
+ahd_dmamap_unload(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+	/* Nothing to do */
+	return (0);
+}
+
+/********************* Platform Dependent Functions ***************************/
+/*
+ * Compare "left hand" softc with "right hand" softc, returning:
+ * < 0 - lahd has a lower priority than rahd
+ *   0 - Softcs are equal
+ * > 0 - lahd has a higher priority than rahd
+ */
+int
+ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
+{
+	int	value;
+
+	/*
+	 * Under Linux, cards are ordered as follows:
+	 *	1) PCI devices that are marked as the boot controller.
+	 *	2) PCI devices with BIOS enabled sorted by bus/slot/func.
+	 *	3) All remaining PCI devices sorted by bus/slot/func.
+	 */
+#if 0
+	value = (lahd->flags & AHD_BOOT_CHANNEL)
+	      - (rahd->flags & AHD_BOOT_CHANNEL);
+	if (value != 0)
+		/* Controllers set for boot have a *higher* priority */
+		return (value);
+#endif
+
+	value = (lahd->flags & AHD_BIOS_ENABLED)
+	      - (rahd->flags & AHD_BIOS_ENABLED);
+	if (value != 0)
+		/* Controllers with BIOS enabled have a *higher* priority */
+		return (value);
+
+	/* Still equal.  Sort by bus/slot/func. */
+	if (aic79xx_reverse_scan != 0)
+		value = ahd_get_pci_bus(lahd->dev_softc)
+		      - ahd_get_pci_bus(rahd->dev_softc);
+	else
+		value = ahd_get_pci_bus(rahd->dev_softc)
+		      - ahd_get_pci_bus(lahd->dev_softc);
+	if (value != 0)
+		return (value);
+	if (aic79xx_reverse_scan != 0)
+		value = ahd_get_pci_slot(lahd->dev_softc)
+		      - ahd_get_pci_slot(rahd->dev_softc);
+	else
+		value = ahd_get_pci_slot(rahd->dev_softc)
+		      - ahd_get_pci_slot(lahd->dev_softc);
+	if (value != 0)
+		return (value);
+
+	value = rahd->channel - lahd->channel;
+	return (value);
+}
+
+static void
+ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
+{
+
+	if ((instance >= 0) && (targ >= 0)
+	 && (instance < NUM_ELEMENTS(aic79xx_tag_info))
+	 && (targ < AHD_NUM_TARGETS)) {
+		aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF;
+		if (bootverbose)
+			printf("tag_info[%d:%d] = %d\n", instance, targ, value);
+	}
+}
+
+static void
+ahd_linux_setup_rd_strm_info(u_long arg, int instance, int targ, int32_t value)
+{
+	if ((instance >= 0)
+	 && (instance < NUM_ELEMENTS(aic79xx_rd_strm_info))) {
+		aic79xx_rd_strm_info[instance] = value & 0xFFFF;
+		if (bootverbose)
+			printf("rd_strm[%d] = 0x%x\n", instance, value);
+	}
+}
+
+static void
+ahd_linux_setup_dv(u_long arg, int instance, int targ, int32_t value)
+{
+	if ((instance >= 0)
+	 && (instance < NUM_ELEMENTS(aic79xx_dv_settings))) {
+		aic79xx_dv_settings[instance] = value;
+		if (bootverbose)
+			printf("dv[%d] = %d\n", instance, value);
+	}
+}
+
+static void
+ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value)
+{
+
+	if ((instance >= 0)
+	 && (instance < NUM_ELEMENTS(aic79xx_iocell_info))) {
+		uint8_t *iocell_info;
+
+		iocell_info = (uint8_t*)&aic79xx_iocell_info[instance];
+		iocell_info[index] = value & 0xFFFF;
+		if (bootverbose)
+			printf("iocell[%d:%ld] = %d\n", instance, index, value);
+	}
+}
+
+static void
+ahd_linux_setup_tag_info_global(char *p)
+{
+	int tags, i, j;
+
+	tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
+	printf("Setting Global Tags= %d\n", tags);
+
+	for (i = 0; i < NUM_ELEMENTS(aic79xx_tag_info); i++) {
+		for (j = 0; j < AHD_NUM_TARGETS; j++) {
+			aic79xx_tag_info[i].tag_commands[j] = tags;
+		}
+	}
+}
+
+/*
+ * Handle Linux boot parameters. This routine allows for assigning a value
+ * to a parameter with a ':' between the parameter and the value.
+ * ie. aic79xx=stpwlev:1,extended
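+ * Multi-value options such as tag_info are grouped with braces and are
+ * handed to aic_parse_brace_option(), e.g. aic79xx=tag_info:{{16,16,16,16}}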
+ */
+static int
+aic79xx_setup(char *s)
+{
+	int	i, n;
+	char   *p;
+	char   *end;
+
+	static struct {
+		const char *name;
+		uint32_t *flag;
+	} options[] = {
+		{ "extended", &aic79xx_extended },
+		{ "no_reset", &aic79xx_no_reset },
+		{ "verbose", &aic79xx_verbose },
+		{ "allow_memio", &aic79xx_allow_memio},
+#ifdef AHD_DEBUG
+		{ "debug", &ahd_debug },
+#endif
+		{ "reverse_scan", &aic79xx_reverse_scan },
+		{ "periodic_otag", &aic79xx_periodic_otag },
+		{ "pci_parity", &aic79xx_pci_parity },
+		{ "seltime", &aic79xx_seltime },
+		{ "tag_info", NULL },
+		{ "global_tag_depth", NULL},
+		{ "rd_strm", NULL },
+		{ "dv", NULL },
+		{ "slewrate", NULL },
+		{ "precomp", NULL },
+		{ "amplitude", NULL },
+	};
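+	/*
+	 * Entries above with a NULL flag pointer are multi-valued
+	 * options that are handled explicitly in the loop below.
+	 */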
+
+	end = strchr(s, '\0');
+
+	/*
+	 * XXX ia64 gcc isn't smart enough to know that NUM_ELEMENTS
+	 * will never be 0 in this case.
+	 */      
+	n = 0;  
+
+	while ((p = strsep(&s, ",.")) != NULL) {
+		if (*p == '\0')
+			continue;
+		for (i = 0; i < NUM_ELEMENTS(options); i++) {
+
+			n = strlen(options[i].name);
+			if (strncmp(options[i].name, p, n) == 0)
+				break;
+		}
+		if (i == NUM_ELEMENTS(options))
+			continue;
+
+		if (strncmp(p, "global_tag_depth", n) == 0) {
+			ahd_linux_setup_tag_info_global(p + n);
+		} else if (strncmp(p, "tag_info", n) == 0) {
+			s = aic_parse_brace_option("tag_info", p + n, end,
+			    2, ahd_linux_setup_tag_info, 0);
+		} else if (strncmp(p, "rd_strm", n) == 0) {
+			s = aic_parse_brace_option("rd_strm", p + n, end,
+			    1, ahd_linux_setup_rd_strm_info, 0);
+		} else if (strncmp(p, "dv", n) == 0) {
+			s = aic_parse_brace_option("dv", p + n, end, 1,
+			    ahd_linux_setup_dv, 0);
+		} else if (strncmp(p, "slewrate", n) == 0) {
+			s = aic_parse_brace_option("slewrate",
+			    p + n, end, 1, ahd_linux_setup_iocell_info,
+			    AIC79XX_SLEWRATE_INDEX);
+		} else if (strncmp(p, "precomp", n) == 0) {
+			s = aic_parse_brace_option("precomp",
+			    p + n, end, 1, ahd_linux_setup_iocell_info,
+			    AIC79XX_PRECOMP_INDEX);
+		} else if (strncmp(p, "amplitude", n) == 0) {
+			s = aic_parse_brace_option("amplitude",
+			    p + n, end, 1, ahd_linux_setup_iocell_info,
+			    AIC79XX_AMPLITUDE_INDEX);
+		} else if (p[n] == ':') {
+			*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
+		} else if (!strncmp(p, "verbose", n)) {
+			*(options[i].flag) = 1;
+		} else {
+			*(options[i].flag) ^= 0xFFFFFFFF;
+		}
+	}
+	return 1;
+}
+
+__setup("aic79xx=", aic79xx_setup);
+
+uint32_t aic79xx_verbose;
+
+int
+ahd_linux_register_host(struct ahd_softc *ahd, Scsi_Host_Template *template)
+{
+	char	buf[80];
+	struct	Scsi_Host *host;
+	char	*new_name;
+	u_long	s;
+	u_long	target;
+
+	template->name = ahd->description;
+	host = scsi_host_alloc(template, sizeof(struct ahd_softc *));
+	if (host == NULL)
+		return (ENOMEM);
+
+	*((struct ahd_softc **)host->hostdata) = ahd;
+	ahd_lock(ahd, &s);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	scsi_assign_lock(host, &ahd->platform_data->spin_lock);
+#elif AHD_SCSI_HAS_HOST_LOCK != 0
+	host->lock = &ahd->platform_data->spin_lock;
+#endif
+	ahd->platform_data->host = host;
+	host->can_queue = AHD_MAX_QUEUE;
+	host->cmd_per_lun = 2;
+	host->sg_tablesize = AHD_NSEG;
+	host->this_id = ahd->our_id;
+	host->irq = ahd->platform_data->irq;
+	host->max_id = (ahd->features & AHD_WIDE) ? 16 : 8;
+	host->max_lun = AHD_NUM_LUNS;
+	host->max_channel = 0;
+	ahd_set_unit(ahd, ahd_linux_next_unit());
+	sprintf(buf, "scsi%d", host->host_no);
+	new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
+	if (new_name != NULL) {
+		strcpy(new_name, buf);
+		ahd_set_name(ahd, new_name);
+	}
+	host->unique_id = ahd->unit;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+	scsi_set_pci_device(host, ahd->dev_softc);
+#endif
+	ahd_linux_setup_user_rd_strm_settings(ahd);
+	ahd_linux_initialize_scsi_bus(ahd);
+	ahd_unlock(ahd, &s);
+	ahd->platform_data->dv_pid = kernel_thread(ahd_linux_dv_thread, ahd, 0);
+	ahd_lock(ahd, &s);
+	if (ahd->platform_data->dv_pid < 0) {
+		printf("%s: Failed to create DV thread, error= %d\n",
+		       ahd_name(ahd), ahd->platform_data->dv_pid);
+		ahd_unlock(ahd, &s);
+		return (-ahd->platform_data->dv_pid);
+	}
+	/*
+	 * Initially allocate *all* of our linux target objects
+	 * so that the DV thread will scan them all in parallel
+	 * just after driver initialization.  Any device that
+	 * does not exist will have its target object destroyed
+	 * by the selection timeout handler.  In the case of a
+	 * device that appears after the initial DV scan, async
+	 * negotiation will occur for the first command, and DV
+	 * will commence should that first command be successful.
+	 */
+	for (target = 0; target < host->max_id; target++) {
+
+		/*
+		 * Skip our own ID.  Some Compaq/HP storage devices
+		 * have enclosure management devices that respond to
+		 * single bit selection (i.e. selecting ourselves).
+		 * It is expected that either an external application
+		 * or a modified kernel will be used to probe this
+		 * ID if it is appropriate.  To accommodate these
+		 * installations, ahd_linux_alloc_target() will allocate
+		 * for our ID if asked to do so.
+		 */
+		if (target == ahd->our_id) 
+			continue;
+
+		ahd_linux_alloc_target(ahd, 0, target);
+	}
+	ahd_intr_enable(ahd, TRUE);
+	ahd_linux_start_dv(ahd);
+	ahd_unlock(ahd, &s);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	scsi_add_host(host, &ahd->dev_softc->dev); /* XXX handle failure */
+	scsi_scan_host(host);
+#endif
+	return (0);
+}
+
+uint64_t
+ahd_linux_get_memsize(void)
+{
+	struct sysinfo si;
+
+	si_meminfo(&si);
+	return ((uint64_t)si.totalram << PAGE_SHIFT);
+}
+
+/*
+ * Find the smallest available unit number to use
+ * for a new device.  We don't just use a static
+ * count to handle the "repeated hot-(un)plug"
+ * scenario.
+ */
+static int
+ahd_linux_next_unit(void)
+{
+	struct ahd_softc *ahd;
+	int unit;
+
+	unit = 0;
+retry:
+	TAILQ_FOREACH(ahd, &ahd_tailq, links) {
+		if (ahd->unit == unit) {
+			unit++;
+			goto retry;
+		}
+	}
+	return (unit);
+}
+
+/*
+ * Place the SCSI bus into a known state by either resetting it,
+ * or forcing transfer negotiations on the next command to any
+ * target.
+ */
+static void
+ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd)
+{
+	u_int target_id;
+	u_int numtarg;
+
+	target_id = 0;
+	numtarg = 0;
+
+	if (aic79xx_no_reset != 0)
+		ahd->flags &= ~AHD_RESET_BUS_A;
+
+	if ((ahd->flags & AHD_RESET_BUS_A) != 0)
+		ahd_reset_channel(ahd, 'A', /*initiate_reset*/TRUE);
+	else
+		numtarg = (ahd->features & AHD_WIDE) ? 16 : 8;
+
+	/*
+	 * Force negotiation to async for all targets that
+	 * will not see an initial bus reset.
+	 */
+	for (; target_id < numtarg; target_id++) {
+		struct ahd_devinfo devinfo;
+		struct ahd_initiator_tinfo *tinfo;
+		struct ahd_tmode_tstate *tstate;
+
+		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+					    target_id, &tstate);
+		ahd_compile_devinfo(&devinfo, ahd->our_id, target_id,
+				    CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR);
+		ahd_update_neg_request(ahd, &devinfo, tstate,
+				       tinfo, AHD_NEG_ALWAYS);
+	}
+	/* Give the bus some time to recover */
+	if ((ahd->flags & AHD_RESET_BUS_A) != 0) {
+		ahd_freeze_simq(ahd);
+		init_timer(&ahd->platform_data->reset_timer);
+		ahd->platform_data->reset_timer.data = (u_long)ahd;
+		ahd->platform_data->reset_timer.expires =
+		    jiffies + (AIC79XX_RESET_DELAY * HZ)/1000;
+		ahd->platform_data->reset_timer.function =
+		    (ahd_linux_callback_t *)ahd_release_simq;
+		add_timer(&ahd->platform_data->reset_timer);
+	}
+}
+
+int
+ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
+{
+	ahd->platform_data =
+	    malloc(sizeof(struct ahd_platform_data), M_DEVBUF, M_NOWAIT);
+	if (ahd->platform_data == NULL)
+		return (ENOMEM);
+	memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
+	TAILQ_INIT(&ahd->platform_data->completeq);
+	TAILQ_INIT(&ahd->platform_data->device_runq);
+	ahd->platform_data->irq = AHD_LINUX_NOIRQ;
+	ahd->platform_data->hw_dma_mask = 0xFFFFFFFF;
+	ahd_lockinit(ahd);
+	ahd_done_lockinit(ahd);
+	init_timer(&ahd->platform_data->completeq_timer);
+	ahd->platform_data->completeq_timer.data = (u_long)ahd;
+	ahd->platform_data->completeq_timer.function =
+	    (ahd_linux_callback_t *)ahd_linux_thread_run_complete_queue;
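+	/*
+	 * These semaphores start locked; they are up()'d elsewhere in
+	 * the driver to wake the error recovery and DV code sleeping
+	 * on them.
+	 */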
+	init_MUTEX_LOCKED(&ahd->platform_data->eh_sem);
+	init_MUTEX_LOCKED(&ahd->platform_data->dv_sem);
+	init_MUTEX_LOCKED(&ahd->platform_data->dv_cmd_sem);
+	ahd_setup_runq_tasklet(ahd);
+	ahd->seltime = (aic79xx_seltime & 0x3) << 4;
+	return (0);
+}
+
+void
+ahd_platform_free(struct ahd_softc *ahd)
+{
+	struct ahd_linux_target *targ;
+	struct ahd_linux_device *dev;
+	int i, j;
+
+	if (ahd->platform_data != NULL) {
+		del_timer_sync(&ahd->platform_data->completeq_timer);
+		ahd_linux_kill_dv_thread(ahd);
+		ahd_teardown_runq_tasklet(ahd);
+		if (ahd->platform_data->host != NULL) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+			scsi_remove_host(ahd->platform_data->host);
+#endif
+			scsi_host_put(ahd->platform_data->host);
+		}
+
+		/* destroy all of the device and target objects */
+		for (i = 0; i < AHD_NUM_TARGETS; i++) {
+			targ = ahd->platform_data->targets[i];
+			if (targ != NULL) {
+				/* Keep target around through the loop. */
+				targ->refcount++;
+				for (j = 0; j < AHD_NUM_LUNS; j++) {
+
+					if (targ->devices[j] == NULL)
+						continue;
+					dev = targ->devices[j];
+					ahd_linux_free_device(ahd, dev);
+				}
+				/*
+				 * Forcibly free the target now that
+				 * all devices are gone.
+				 */
+				ahd_linux_free_target(ahd, targ);
+			}
+		}
+
+		if (ahd->platform_data->irq != AHD_LINUX_NOIRQ)
+			free_irq(ahd->platform_data->irq, ahd);
+		if (ahd->tags[0] == BUS_SPACE_PIO
+		 && ahd->bshs[0].ioport != 0)
+			release_region(ahd->bshs[0].ioport, 256);
+		if (ahd->tags[1] == BUS_SPACE_PIO
+		 && ahd->bshs[1].ioport != 0)
+			release_region(ahd->bshs[1].ioport, 256);
+		if (ahd->tags[0] == BUS_SPACE_MEMIO
+		 && ahd->bshs[0].maddr != NULL) {
+			iounmap(ahd->bshs[0].maddr);
+			release_mem_region(ahd->platform_data->mem_busaddr,
+					   0x1000);
+		}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+    		/*
+		 * In 2.4 we detach from the scsi midlayer before the PCI
+		 * layer invokes our remove callback.  No per-instance
+		 * detach is provided, so we must reach inside the PCI
+		 * subsystem's internals and detach our driver manually.
+		 */
+		if (ahd->dev_softc != NULL)
+			ahd->dev_softc->driver = NULL;
+#endif
+		free(ahd->platform_data, M_DEVBUF);
+	}
+}
+
+void
+ahd_platform_init(struct ahd_softc *ahd)
+{
+	/*
+	 * Lookup and commit any modified IO Cell options.
+	 */
+	if (ahd->unit < NUM_ELEMENTS(aic79xx_iocell_info)) {
+		struct ahd_linux_iocell_opts *iocell_opts;
+
+		iocell_opts = &aic79xx_iocell_info[ahd->unit];
+		if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP)
+			AHD_SET_PRECOMP(ahd, iocell_opts->precomp);
+		if (iocell_opts->slewrate != AIC79XX_DEFAULT_SLEWRATE)
+			AHD_SET_SLEWRATE(ahd, iocell_opts->slewrate);
+		if (iocell_opts->amplitude != AIC79XX_DEFAULT_AMPLITUDE)
+			AHD_SET_AMPLITUDE(ahd, iocell_opts->amplitude);
+	}
+
+}
+
+void
+ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
+{
+	ahd_platform_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
+				SCB_GET_CHANNEL(ahd, scb),
+				SCB_GET_LUN(scb), SCB_LIST_NULL,
+				ROLE_UNKNOWN, CAM_REQUEUE_REQ);
+}
+
+void
+ahd_platform_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
+		      ahd_queue_alg alg)
+{
+	struct ahd_linux_device *dev;
+	int was_queuing;
+	int now_queuing;
+
+	dev = ahd_linux_get_device(ahd, devinfo->channel - 'A',
+				   devinfo->target,
+				   devinfo->lun, /*alloc*/FALSE);
+	if (dev == NULL)
+		return;
+	was_queuing = dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED);
+	switch (alg) {
+	default:
+	case AHD_QUEUE_NONE:
+		now_queuing = 0;
+		break; 
+	case AHD_QUEUE_BASIC:
+		now_queuing = AHD_DEV_Q_BASIC;
+		break;
+	case AHD_QUEUE_TAGGED:
+		now_queuing = AHD_DEV_Q_TAGGED;
+		break;
+	}
+	if ((dev->flags & AHD_DEV_FREEZE_TIL_EMPTY) == 0
+	 && (was_queuing != now_queuing)
+	 && (dev->active != 0)) {
+		dev->flags |= AHD_DEV_FREEZE_TIL_EMPTY;
+		dev->qfrozen++;
+	}
+
+	dev->flags &= ~(AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED|AHD_DEV_PERIODIC_OTAG);
+	if (now_queuing) {
+		u_int usertags;
+
+		usertags = ahd_linux_user_tagdepth(ahd, devinfo);
+		if (!was_queuing) {
+			/*
+			 * Start out aggressively and allow our
+			 * dynamic queue depth algorithm to take
+			 * care of the rest.
+			 */
+			dev->maxtags = usertags;
+			dev->openings = dev->maxtags - dev->active;
+		}
+		if (dev->maxtags == 0) {
+			/*
+			 * Queueing is disabled by the user.
+			 */
+			dev->openings = 1;
+		} else if (alg == AHD_QUEUE_TAGGED) {
+			dev->flags |= AHD_DEV_Q_TAGGED;
+			if (aic79xx_periodic_otag != 0)
+				dev->flags |= AHD_DEV_PERIODIC_OTAG;
+		} else
+			dev->flags |= AHD_DEV_Q_BASIC;
+	} else {
+		/* We can only have one opening. */
+		dev->maxtags = 0;
+		dev->openings =  1 - dev->active;
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	if (dev->scsi_device != NULL) {
+		switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) {
+		case AHD_DEV_Q_BASIC:
+			scsi_adjust_queue_depth(dev->scsi_device,
+						MSG_SIMPLE_TASK,
+						dev->openings + dev->active);
+			break;
+		case AHD_DEV_Q_TAGGED:
+			scsi_adjust_queue_depth(dev->scsi_device,
+						MSG_ORDERED_TASK,
+						dev->openings + dev->active);
+			break;
+		default:
+			/*
+			 * We allow the OS to queue 2 untagged transactions to
+			 * us at any time even though we can only execute them
+			 * serially on the controller/device.  This should
+			 * remove some latency.
+			 */
+			scsi_adjust_queue_depth(dev->scsi_device,
+						/*NON-TAGGED*/0,
+						/*queue depth*/2);
+			break;
+		}
+	}
+#endif
+}
+
+int
+ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, char channel,
+			int lun, u_int tag, role_t role, uint32_t status)
+{
+	int targ;
+	int maxtarg;
+	int maxlun;
+	int clun;
+	int count;
+
+	if (tag != SCB_LIST_NULL)
+		return (0);
+
+	targ = 0;
+	if (target != CAM_TARGET_WILDCARD) {
+		targ = target;
+		maxtarg = targ + 1;
+	} else {
+		maxtarg = (ahd->features & AHD_WIDE) ? 16 : 8;
+	}
+	clun = 0;
+	if (lun != CAM_LUN_WILDCARD) {
+		clun = lun;
+		maxlun = clun + 1;
+	} else {
+		maxlun = AHD_NUM_LUNS;
+	}
+
+	count = 0;
+	for (; targ < maxtarg; targ++) {
+
+		/* Restart the lun index for each target when wildcarding. */
+		for (clun = (lun == CAM_LUN_WILDCARD) ? 0 : lun;
+		     clun < maxlun; clun++) {
+			struct ahd_linux_device *dev;
+			struct ahd_busyq *busyq;
+			struct ahd_cmd *acmd;
+
+			dev = ahd_linux_get_device(ahd, /*chan*/0, targ,
+						   clun, /*alloc*/FALSE);
+			if (dev == NULL)
+				continue;
+
+			busyq = &dev->busyq;
+			while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
+				Scsi_Cmnd *cmd;
+
+				cmd = &acmd_scsi_cmd(acmd);
+				TAILQ_REMOVE(busyq, acmd,
+					     acmd_links.tqe);
+				count++;
+				cmd->result = status << 16;
+				ahd_linux_queue_cmd_complete(ahd, cmd);
+			}
+		}
+	}
+
+	return (count);
+}
+
+static void
+ahd_linux_thread_run_complete_queue(struct ahd_softc *ahd)
+{
+	u_long flags;
+
+	ahd_lock(ahd, &flags);
+	del_timer(&ahd->platform_data->completeq_timer);
+	ahd->platform_data->flags &= ~AHD_RUN_CMPLT_Q_TIMER;
+	ahd_linux_run_complete_queue(ahd);
+	ahd_unlock(ahd, &flags);
+}
+
+static void
+ahd_linux_start_dv(struct ahd_softc *ahd)
+{
+
+	/*
+	 * Freeze the simq and signal ahd_linux_queue to not let any
+	 * more commands through
+	 */
+	if ((ahd->platform_data->flags & AHD_DV_ACTIVE) == 0) {
+#ifdef AHD_DEBUG
+		if (ahd_debug & AHD_SHOW_DV)
+			printf("%s: Starting DV\n", ahd_name(ahd));
+#endif
+
+		ahd->platform_data->flags |= AHD_DV_ACTIVE;
+		ahd_freeze_simq(ahd);
+
+		/* Wake up the DV kthread */
+		up(&ahd->platform_data->dv_sem);
+	}
+}
+
+static int
+ahd_linux_dv_thread(void *data)
+{
+	struct	ahd_softc *ahd;
+	int	target;
+	u_long	s;
+
+	ahd = (struct ahd_softc *)data;
+
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV)
+		printf("In DV Thread\n");
+#endif
+
+	/*
+	 * Complete thread creation.
+	 */
+	lock_kernel();
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,60)
+	/*
+	 * Don't care about any signals.
+	 */
+	siginitsetinv(&current->blocked, 0);
+
+	daemonize();
+	sprintf(current->comm, "ahd_dv_%d", ahd->unit);
+#else
+	daemonize("ahd_dv_%d", ahd->unit);
+	current->flags |= PF_FREEZE;
+#endif
+	unlock_kernel();
+
+	while (1) {
+		/*
+		 * Use down_interruptible() rather than down() to
+		 * avoid inclusion in the load average.
+		 */
+		down_interruptible(&ahd->platform_data->dv_sem);
+
+		/* Check to see if we've been signaled to exit */
+		ahd_lock(ahd, &s);
+		if ((ahd->platform_data->flags & AHD_DV_SHUTDOWN) != 0) {
+			ahd_unlock(ahd, &s);
+			break;
+		}
+		ahd_unlock(ahd, &s);
+
+#ifdef AHD_DEBUG
+		if (ahd_debug & AHD_SHOW_DV)
+			printf("%s: Beginning Domain Validation\n",
+			       ahd_name(ahd));
+#endif
+
+		/*
+		 * Wait for any pending commands to drain before proceeding.
+		 */
+		ahd_lock(ahd, &s);
+		while (LIST_FIRST(&ahd->pending_scbs) != NULL) {
+			ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_EMPTY;
+			ahd_unlock(ahd, &s);
+			down_interruptible(&ahd->platform_data->dv_sem);
+			ahd_lock(ahd, &s);
+		}
+
+		/*
+		 * Wait for the SIMQ to be released so that DV is the
+		 * only reason the queue is frozen.
+		 */
+		while (AHD_DV_SIMQ_FROZEN(ahd) == 0) {
+			ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_RELEASE;
+			ahd_unlock(ahd, &s);
+			down_interruptible(&ahd->platform_data->dv_sem);
+			ahd_lock(ahd, &s);
+		}
+		ahd_unlock(ahd, &s);
+
+		for (target = 0; target < AHD_NUM_TARGETS; target++)
+			ahd_linux_dv_target(ahd, target);
+
+		ahd_lock(ahd, &s);
+		ahd->platform_data->flags &= ~AHD_DV_ACTIVE;
+		ahd_unlock(ahd, &s);
+
+		/*
+		 * Release the SIMQ so that normal commands are
+		 * allowed to continue on the bus.
+		 */
+		ahd_release_simq(ahd);
+	}
+	up(&ahd->platform_data->eh_sem);
+	return (0);
+}
+
+static void
+ahd_linux_kill_dv_thread(struct ahd_softc *ahd)
+{
+	u_long s;
+
+	ahd_lock(ahd, &s);
+	if (ahd->platform_data->dv_pid != 0) {
+		ahd->platform_data->flags |= AHD_DV_SHUTDOWN;
+		ahd_unlock(ahd, &s);
+		up(&ahd->platform_data->dv_sem);
+
+		/*
+		 * Use the eh_sem as an indicator that the
+		 * dv thread is exiting.  Note that the dv
+		 * thread must still return after performing
+		 * the up on our semaphore before it has
+		 * completely exited this module.  Unfortunately,
+		 * there seems to be no easy way to wait for the
+		 * exit of a thread for which you are not the
+		 * parent (dv threads are parented by init).
+		 * Cross your fingers...
+		 */
+		down(&ahd->platform_data->eh_sem);
+
+		/*
+		 * Mark the dv thread as already dead.  This
+		 * avoids attempting to kill it a second time.
+		 * This is necessary because we must kill the
+		 * DV thread before calling ahd_free() in the
+		 * module shutdown case to avoid bogus locking
+		 * in the SCSI mid-layer, but ahd_free() is
+		 * called without killing the DV thread in the
+		 * instance detach case, so ahd_platform_free()
+		 * calls us again to verify that the DV thread
+		 * is dead.
+		 */
+		ahd->platform_data->dv_pid = 0;
+	} else {
+		ahd_unlock(ahd, &s);
+	}
+}
+
+#define AHD_LINUX_DV_INQ_SHORT_LEN	36
+#define AHD_LINUX_DV_INQ_LEN		256
+#define AHD_LINUX_DV_TIMEOUT		(HZ / 4)
+
+#define AHD_SET_DV_STATE(ahd, targ, newstate) \
+	ahd_set_dv_state(ahd, targ, newstate, __LINE__)
+
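+/*
+ * All DV state changes are funneled through ahd_set_dv_state() so the
+ * source line of each transition can be reported when debugging is
+ * enabled and so repeated transitions to the same state bump the
+ * retry counter.
+ */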
+static __inline void
+ahd_set_dv_state(struct ahd_softc *ahd, struct ahd_linux_target *targ,
+		 ahd_dv_state newstate, u_int line)
+{
+	ahd_dv_state oldstate;
+
+	oldstate = targ->dv_state;
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV)
+		printf("%s:%d: Going from state %d to state %d\n",
+		       ahd_name(ahd), line, oldstate, newstate);
+#endif
+
+	if (oldstate == newstate)
+		targ->dv_state_retry++;
+	else
+		targ->dv_state_retry = 0;
+	targ->dv_state = newstate;
+}
+
+static void
+ahd_linux_dv_target(struct ahd_softc *ahd, u_int target_offset)
+{
+	struct	 ahd_devinfo devinfo;
+	struct	 ahd_linux_target *targ;
+	struct	 scsi_cmnd *cmd;
+	struct	 scsi_device *scsi_dev;
+	struct	 scsi_sense_data *sense;
+	uint8_t *buffer;
+	u_long	 s;
+	u_int	 timeout;
+	int	 echo_size;
+
+	sense = NULL;
+	buffer = NULL;
+	echo_size = 0;
+	ahd_lock(ahd, &s);
+	targ = ahd->platform_data->targets[target_offset];
+	if (targ == NULL || (targ->flags & AHD_DV_REQUIRED) == 0) {
+		ahd_unlock(ahd, &s);
+		return;
+	}
+	ahd_compile_devinfo(&devinfo, ahd->our_id, targ->target, /*lun*/0,
+			    targ->channel + 'A', ROLE_INITIATOR);
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV) {
+		ahd_print_devinfo(ahd, &devinfo);
+		printf("Performing DV\n");
+	}
+#endif
+
+	ahd_unlock(ahd, &s);
+
+	cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
+	scsi_dev = malloc(sizeof(struct scsi_device), M_DEVBUF, M_WAITOK);
+	scsi_dev->host = ahd->platform_data->host;
+	scsi_dev->id = devinfo.target;
+	scsi_dev->lun = devinfo.lun;
+	scsi_dev->channel = devinfo.channel - 'A';
+	ahd->platform_data->dv_scsi_dev = scsi_dev;
+
+	AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_INQ_SHORT_ASYNC);
+
+	while (targ->dv_state != AHD_DV_STATE_EXIT) {
+		timeout = AHD_LINUX_DV_TIMEOUT;
+		switch (targ->dv_state) {
+		case AHD_DV_STATE_INQ_SHORT_ASYNC:
+		case AHD_DV_STATE_INQ_ASYNC:
+		case AHD_DV_STATE_INQ_ASYNC_VERIFY:
+			/*
+			 * Set things to async narrow to reduce the
+			 * chance that the INQ will fail.
+			 */
+			ahd_lock(ahd, &s);
+			ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
+					 AHD_TRANS_GOAL, /*paused*/FALSE);
+			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+				      AHD_TRANS_GOAL, /*paused*/FALSE);
+			ahd_unlock(ahd, &s);
+			timeout = 10 * HZ;
+			targ->flags &= ~AHD_INQ_VALID;
+			/* FALLTHROUGH */
+		case AHD_DV_STATE_INQ_VERIFY:
+		{
+			u_int inq_len;
+
+			if (targ->dv_state == AHD_DV_STATE_INQ_SHORT_ASYNC)
+				inq_len = AHD_LINUX_DV_INQ_SHORT_LEN;
+			else
+				inq_len = targ->inq_data->additional_length + 5;
+			ahd_linux_dv_inq(ahd, cmd, &devinfo, targ, inq_len);
+			break;
+		}
+		case AHD_DV_STATE_TUR:
+		case AHD_DV_STATE_BUSY:
+			timeout = 5 * HZ;
+			ahd_linux_dv_tur(ahd, cmd, &devinfo);
+			break;
+		case AHD_DV_STATE_REBD:
+			ahd_linux_dv_rebd(ahd, cmd, &devinfo, targ);
+			break;
+		case AHD_DV_STATE_WEB:
+			ahd_linux_dv_web(ahd, cmd, &devinfo, targ);
+			break;
+
+		case AHD_DV_STATE_REB:
+			ahd_linux_dv_reb(ahd, cmd, &devinfo, targ);
+			break;
+
+		case AHD_DV_STATE_SU:
+			ahd_linux_dv_su(ahd, cmd, &devinfo, targ);
+			timeout = 50 * HZ;
+			break;
+
+		default:
+			ahd_print_devinfo(ahd, &devinfo);
+			printf("Unknown DV state %d\n", targ->dv_state);
+			goto out;
+		}
+
+		/* Queue the command and wait for it to complete */
+		/* Abuse eh_timeout in the scsi_cmnd struct for our purposes */
+		init_timer(&cmd->eh_timeout);
+#ifdef AHD_DEBUG
+		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
+			/*
+			 * All of the printfs during negotiation
+			 * really slow down the negotiation.
+			 * Add a bit of time just to be safe.
+			 */
+			timeout += HZ;
+#endif
+		scsi_add_timer(cmd, timeout, ahd_linux_dv_timeout);
+		/*
+		 * In 2.5.X, it is assumed that all calls from the
+		 * "midlayer" (which we are emulating) will have the
+		 * ahd host lock held.  For other kernels, the
+		 * io_request_lock must be held.
+		 */
+#if AHD_SCSI_HAS_HOST_LOCK != 0
+		ahd_lock(ahd, &s);
+#else
+		spin_lock_irqsave(&io_request_lock, s);
+#endif
+		ahd_linux_queue(cmd, ahd_linux_dv_complete);
+#if AHD_SCSI_HAS_HOST_LOCK != 0
+		ahd_unlock(ahd, &s);
+#else
+		spin_unlock_irqrestore(&io_request_lock, s);
+#endif
+		down_interruptible(&ahd->platform_data->dv_cmd_sem);
+		/*
+		 * Wait for the SIMQ to be released so that DV is the
+		 * only reason the queue is frozen.
+		 */
+		ahd_lock(ahd, &s);
+		while (AHD_DV_SIMQ_FROZEN(ahd) == 0) {
+			ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_RELEASE;
+			ahd_unlock(ahd, &s);
+			down_interruptible(&ahd->platform_data->dv_sem);
+			ahd_lock(ahd, &s);
+		}
+		ahd_unlock(ahd, &s);
+
+		ahd_linux_dv_transition(ahd, cmd, &devinfo, targ);
+	}
+
+out:
+	if ((targ->flags & AHD_INQ_VALID) != 0
+	 && ahd_linux_get_device(ahd, devinfo.channel - 'A',
+				 devinfo.target, devinfo.lun,
+				 /*alloc*/FALSE) == NULL) {
+		/*
+		 * The DV state machine failed to configure this device.  
+		 * This is normal if DV is disabled.  Since we have inquiry
+		 * data, filter it and use the "optimistic" negotiation
+		 * parameters found in the inquiry string.
+		 */
+		ahd_linux_filter_inquiry(ahd, &devinfo);
+		if ((targ->flags & (AHD_BASIC_DV|AHD_ENHANCED_DV)) != 0) {
+			ahd_print_devinfo(ahd, &devinfo);
+			printf("DV failed to configure device.  "
+			       "Please file a bug report against "
+			       "this driver.\n");
+		}
+	}
+
+	if (cmd != NULL)
+		free(cmd, M_DEVBUF);
+
+	if (ahd->platform_data->dv_scsi_dev != NULL) {
+		free(ahd->platform_data->dv_scsi_dev, M_DEVBUF);
+		ahd->platform_data->dv_scsi_dev = NULL;
+	}
+
+	ahd_lock(ahd, &s);
+	if (targ->dv_buffer != NULL) {
+		free(targ->dv_buffer, M_DEVBUF);
+		targ->dv_buffer = NULL;
+	}
+	if (targ->dv_buffer1 != NULL) {
+		free(targ->dv_buffer1, M_DEVBUF);
+		targ->dv_buffer1 = NULL;
+	}
+	targ->flags &= ~AHD_DV_REQUIRED;
+	if (targ->refcount == 0)
+		ahd_linux_free_target(ahd, targ);
+	ahd_unlock(ahd, &s);
+}
+
+static __inline int
+ahd_linux_dv_fallback(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+	u_long s;
+	int retval;
+
+	ahd_lock(ahd, &s);
+	retval = ahd_linux_fallback(ahd, devinfo);
+	ahd_unlock(ahd, &s);
+
+	return (retval);
+}
+
+static void
+ahd_linux_dv_transition(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
+			struct ahd_devinfo *devinfo,
+			struct ahd_linux_target *targ)
+{
+	u_int32_t status;
+
+	status = aic_error_action(cmd, targ->inq_data,
+				  ahd_cmd_get_transaction_status(cmd),
+				  ahd_cmd_get_scsi_status(cmd));
+
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV) {
+		ahd_print_devinfo(ahd, devinfo);
+		printf("Entering ahd_linux_dv_transition, state= %d, "
+		       "status= 0x%x, cmd->result= 0x%x\n", targ->dv_state,
+		       status, cmd->result);
+	}
+#endif
+
+	switch (targ->dv_state) {
+	case AHD_DV_STATE_INQ_SHORT_ASYNC:
+	case AHD_DV_STATE_INQ_ASYNC:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+		{
+			AHD_SET_DV_STATE(ahd, targ, targ->dv_state+1);
+			break;
+		}
+		case SS_INQ_REFRESH:
+			AHD_SET_DV_STATE(ahd, targ,
+					 AHD_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_TUR:
+		case SS_RETRY:
+			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
+			if (ahd_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ)
+				targ->dv_state_retry--;
+			if ((status & SS_ERRMASK) == EBUSY)
+				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
+			if (targ->dv_state_retry < 10)
+				break;
+			/* FALLTHROUGH */
+		default:
+			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+#ifdef AHD_DEBUG
+			if (ahd_debug & AHD_SHOW_DV) {
+				ahd_print_devinfo(ahd, devinfo);
+				printf("Failed DV inquiry, skipping\n");
+			}
+#endif
+			break;
+		}
+		break;
+	case AHD_DV_STATE_INQ_ASYNC_VERIFY:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+		{
+			u_int xportflags;
+			u_int spi3data;
+
+			if (memcmp(targ->inq_data, targ->dv_buffer,
+				   AHD_LINUX_DV_INQ_LEN) != 0) {
+				/*
+				 * Inquiry data must have changed.
+				 * Try from the top again.
+				 */
+				AHD_SET_DV_STATE(ahd, targ,
+						 AHD_DV_STATE_INQ_SHORT_ASYNC);
+				break;
+			}
+
+			AHD_SET_DV_STATE(ahd, targ, targ->dv_state+1);
+			targ->flags |= AHD_INQ_VALID;
+			if (ahd_linux_user_dv_setting(ahd) == 0)
+				break;
+
+			xportflags = targ->inq_data->flags;
+			if ((xportflags & (SID_Sync|SID_WBus16)) == 0)
+				break;
+
+			spi3data = targ->inq_data->spi3data;
+			switch (spi3data & SID_SPI_CLOCK_DT_ST) {
+			default:
+			case SID_SPI_CLOCK_ST:
+				/* Assume only basic DV is supported. */
+				targ->flags |= AHD_BASIC_DV;
+				break;
+			case SID_SPI_CLOCK_DT:
+			case SID_SPI_CLOCK_DT_ST:
+				targ->flags |= AHD_ENHANCED_DV;
+				break;
+			}
+			break;
+		}
+		case SS_INQ_REFRESH:
+			AHD_SET_DV_STATE(ahd, targ,
+					 AHD_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_TUR:
+		case SS_RETRY:
+			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
+			if (ahd_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ)
+				targ->dv_state_retry--;
+
+			if ((status & SS_ERRMASK) == EBUSY)
+				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
+			if (targ->dv_state_retry < 10)
+				break;
+			/* FALLTHROUGH */
+		default:
+			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+#ifdef AHD_DEBUG
+			if (ahd_debug & AHD_SHOW_DV) {
+				ahd_print_devinfo(ahd, devinfo);
+				printf("Failed DV inquiry, skipping\n");
+			}
+#endif
+			break;
+		}
+		break;
+	case AHD_DV_STATE_INQ_VERIFY:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+		{
+
+			if (memcmp(targ->inq_data, targ->dv_buffer,
+				   AHD_LINUX_DV_INQ_LEN) == 0) {
+				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+				break;
+			}
+
+#ifdef AHD_DEBUG
+			if (ahd_debug & AHD_SHOW_DV) {
+				int i;
+
+				ahd_print_devinfo(ahd, devinfo);
+				printf("Inquiry buffer mismatch:");
+				for (i = 0; i < AHD_LINUX_DV_INQ_LEN; i++) {
+					if ((i & 0xF) == 0)
+						printf("\n        ");
+					printf("0x%x:0x0%x ",
+					       ((uint8_t *)targ->inq_data)[i], 
+					       targ->dv_buffer[i]);
+				}
+				printf("\n");
+			}
+#endif
+
+			if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
+				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+				break;
+			}
+			/*
+			 * Do not count "falling back"
+			 * against our retries.
+			 */
+			targ->dv_state_retry = 0;
+			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
+			break;
+		}
+		case SS_INQ_REFRESH:
+			AHD_SET_DV_STATE(ahd, targ,
+					 AHD_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_TUR:
+		case SS_RETRY:
+			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
+			if (ahd_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ) {
+				targ->dv_state_retry--;
+			} else if ((status & SSQ_FALLBACK) != 0) {
+				if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
+					AHD_SET_DV_STATE(ahd, targ,
+							 AHD_DV_STATE_EXIT);
+					break;
+				}
+				/*
+				 * Do not count "falling back"
+				 * against our retries.
+				 */
+				targ->dv_state_retry = 0;
+			} else if ((status & SS_ERRMASK) == EBUSY)
+				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
+			if (targ->dv_state_retry < 10)
+				break;
+			/* FALLTHROUGH */
+		default:
+			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+#ifdef AHD_DEBUG
+			if (ahd_debug & AHD_SHOW_DV) {
+				ahd_print_devinfo(ahd, devinfo);
+				printf("Failed DV inquiry, skipping\n");
+			}
+#endif
+			break;
+		}
+		break;
+
+	case AHD_DV_STATE_TUR:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+			if ((targ->flags & AHD_BASIC_DV) != 0) {
+				ahd_linux_filter_inquiry(ahd, devinfo);
+				AHD_SET_DV_STATE(ahd, targ,
+						 AHD_DV_STATE_INQ_VERIFY);
+			} else if ((targ->flags & AHD_ENHANCED_DV) != 0) {
+				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_REBD);
+			} else {
+				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+			}
+			break;
+		case SS_RETRY:
+		case SS_TUR:
+			if ((status & SS_ERRMASK) == EBUSY) {
+				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
+				break;
+			}
+			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
+			if (ahd_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ) {
+				targ->dv_state_retry--;
+			} else if ((status & SSQ_FALLBACK) != 0) {
+				if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
+					AHD_SET_DV_STATE(ahd, targ,
+							 AHD_DV_STATE_EXIT);
+					break;
+				}
+				/*
+				 * Do not count "falling back"
+				 * against our retries.
+				 */
+				targ->dv_state_retry = 0;
+			}
+			if (targ->dv_state_retry >= 10) {
+#ifdef AHD_DEBUG
+				if (ahd_debug & AHD_SHOW_DV) {
+					ahd_print_devinfo(ahd, devinfo);
+					printf("DV TUR retries exhausted\n");
+				}
+#endif
+				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+				break;
+			}
+			if (status & SSQ_DELAY)
+				ssleep(1);
+
+			break;
+		case SS_START:
+			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_SU);
+			break;
+		case SS_INQ_REFRESH:
+			AHD_SET_DV_STATE(ahd, targ,
+					 AHD_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		default:
+			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+			break;
+		}
+		break;
+
+	case AHD_DV_STATE_REBD:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+		{
+			uint32_t echo_size;
+
+			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_WEB);
+			echo_size = scsi_3btoul(&targ->dv_buffer[1]);
+			echo_size &= 0x1FFF;
+#ifdef AHD_DEBUG
+			if (ahd_debug & AHD_SHOW_DV) {
+				ahd_print_devinfo(ahd, devinfo);
+				printf("Echo buffer size= %d\n", echo_size);
+			}
+#endif
+			if (echo_size == 0) {
+				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+				break;
+			}
+
+			/* Generate the buffer pattern */
+			targ->dv_echo_size = echo_size;
+			ahd_linux_generate_dv_pattern(targ);
+			/*
+			 * Setup initial negotiation values.
+			 */
+			ahd_linux_filter_inquiry(ahd, devinfo);
+			break;
+		}
+		case SS_INQ_REFRESH:
+			AHD_SET_DV_STATE(ahd, targ,
+					 AHD_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_RETRY:
+			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
+			if (ahd_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ)
+				targ->dv_state_retry--;
+			if (targ->dv_state_retry <= 10)
+				break;
+#ifdef AHD_DEBUG
+			if (ahd_debug & AHD_SHOW_DV) {
+				ahd_print_devinfo(ahd, devinfo);
+				printf("DV REBD retries exhausted\n");
+			}
+#endif
+			/* FALLTHROUGH */
+		case SS_FATAL:
+		default:
+			/*
+			 * Setup initial negotiation values
+			 * and try level 1 DV.
+			 */
+			ahd_linux_filter_inquiry(ahd, devinfo);
+			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_INQ_VERIFY);
+			targ->dv_echo_size = 0;
+			break;
+		}
+		break;
+
+	case AHD_DV_STATE_WEB:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_REB);
+			break;
+		case SS_INQ_REFRESH:
+			AHD_SET_DV_STATE(ahd, targ,
+					 AHD_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_RETRY:
+			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
+			if (ahd_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ) {
+				targ->dv_state_retry--;
+			} else if ((status & SSQ_FALLBACK) != 0) {
+				if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
+					AHD_SET_DV_STATE(ahd, targ,
+							 AHD_DV_STATE_EXIT);
+					break;
+				}
+				/*
+				 * Do not count "falling back"
+				 * against our retries.
+				 */
+				targ->dv_state_retry = 0;
+			}
+			if (targ->dv_state_retry <= 10)
+				break;
+#ifdef AHD_DEBUG
+			if (ahd_debug & AHD_SHOW_DV) {
+				ahd_print_devinfo(ahd, devinfo);
+				printf("DV WEB retries exhausted\n");
+			}
+#endif
+			/* FALLTHROUGH */
+		default:
+			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+			break;
+		}
+		break;
+
+	case AHD_DV_STATE_REB:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+			if (memcmp(targ->dv_buffer, targ->dv_buffer1,
+				   targ->dv_echo_size) != 0) {
+				if (ahd_linux_dv_fallback(ahd, devinfo) != 0)
+					AHD_SET_DV_STATE(ahd, targ,
+							 AHD_DV_STATE_EXIT);
+				else
+					AHD_SET_DV_STATE(ahd, targ,
+							 AHD_DV_STATE_WEB);
+				break;
+			}
+			
+			if (targ->dv_buffer != NULL) {
+				free(targ->dv_buffer, M_DEVBUF);
+				targ->dv_buffer = NULL;
+			}
+			if (targ->dv_buffer1 != NULL) {
+				free(targ->dv_buffer1, M_DEVBUF);
+				targ->dv_buffer1 = NULL;
+			}
+			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+			break;
+		case SS_INQ_REFRESH:
+			AHD_SET_DV_STATE(ahd, targ,
+					 AHD_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_RETRY:
+			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
+			if (ahd_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ) {
+				targ->dv_state_retry--;
+			} else if ((status & SSQ_FALLBACK) != 0) {
+				if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
+					AHD_SET_DV_STATE(ahd, targ,
+							 AHD_DV_STATE_EXIT);
+					break;
+				}
+				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_WEB);
+			}
+			if (targ->dv_state_retry <= 10) {
+				if ((status & (SSQ_DELAY_RANDOM|SSQ_DELAY))!= 0)
+					msleep(ahd->our_id*1000/10);
+				break;
+			}
+#ifdef AHD_DEBUG
+			if (ahd_debug & AHD_SHOW_DV) {
+				ahd_print_devinfo(ahd, devinfo);
+				printf("DV REB retries exhausted\n");
+			}
+#endif
+			/* FALLTHROUGH */
+		default:
+			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+			break;
+		}
+		break;
+
+	case AHD_DV_STATE_SU:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+		case SS_INQ_REFRESH:
+			AHD_SET_DV_STATE(ahd, targ,
+					 AHD_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		default:
+			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+			break;
+		}
+		break;
+
+	case AHD_DV_STATE_BUSY:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+		case SS_INQ_REFRESH:
+			AHD_SET_DV_STATE(ahd, targ,
+					 AHD_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_TUR:
+		case SS_RETRY:
+			AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
+			if (ahd_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ) {
+				targ->dv_state_retry--;
+			} else if (targ->dv_state_retry < 60) {
+				if ((status & SSQ_DELAY) != 0)
+					ssleep(1);
+			} else {
+#ifdef AHD_DEBUG
+				if (ahd_debug & AHD_SHOW_DV) {
+					ahd_print_devinfo(ahd, devinfo);
+					printf("DV BUSY retries exhausted\n");
+				}
+#endif
+				AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+			}
+			break;
+		default:
+			AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+			break;
+		}
+		break;
+
+	default:
+		printf("%s: Invalid DV completion state %d\n", ahd_name(ahd),
+		       targ->dv_state);
+		AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
+		break;
+	}
+}
+
+static void
+ahd_linux_dv_fill_cmd(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
+		      struct ahd_devinfo *devinfo)
+{
+	memset(cmd, 0, sizeof(struct scsi_cmnd));
+	cmd->device = ahd->platform_data->dv_scsi_dev;
+	cmd->scsi_done = ahd_linux_dv_complete;
+}
+
+/*
+ * Synthesize an inquiry command.  On the return trip, it'll be
+ * sniffed and the device transfer settings set for us.
+ */
+static void
+ahd_linux_dv_inq(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
+		 struct ahd_devinfo *devinfo, struct ahd_linux_target *targ,
+		 u_int request_length)
+{
+
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV) {
+		ahd_print_devinfo(ahd, devinfo);
+		printf("Sending INQ\n");
+	}
+#endif
+	if (targ->inq_data == NULL)
+		targ->inq_data = malloc(AHD_LINUX_DV_INQ_LEN,
+					M_DEVBUF, M_WAITOK);
+	if (targ->dv_state > AHD_DV_STATE_INQ_ASYNC) {
+		if (targ->dv_buffer != NULL)
+			free(targ->dv_buffer, M_DEVBUF);
+		targ->dv_buffer = malloc(AHD_LINUX_DV_INQ_LEN,
+					 M_DEVBUF, M_WAITOK);
+	}
+
+	ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
+	cmd->sc_data_direction = SCSI_DATA_READ;
+	cmd->cmd_len = 6;
+	cmd->cmnd[0] = INQUIRY;
+	cmd->cmnd[4] = request_length;
+	cmd->request_bufflen = request_length;
+	if (targ->dv_state > AHD_DV_STATE_INQ_ASYNC)
+		cmd->request_buffer = targ->dv_buffer;
+	else
+		cmd->request_buffer = targ->inq_data;
+	memset(cmd->request_buffer, 0, AHD_LINUX_DV_INQ_LEN);
+}
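+
+/*
+ * For illustration, with request_length == AHD_LINUX_DV_INQ_LEN the
+ * 6-byte CDB built above (on top of the zeroed command from
+ * ahd_linux_dv_fill_cmd) is:
+ *
+ *	{ INQUIRY, 0x00, 0x00, 0x00, AHD_LINUX_DV_INQ_LEN, 0x00 }
+ *
+ * and the target's standard inquiry data lands in either inq_data or
+ * dv_buffer depending on the current DV state.
+ */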
+
+static void
+ahd_linux_dv_tur(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
+		 struct ahd_devinfo *devinfo)
+{
+
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV) {
+		ahd_print_devinfo(ahd, devinfo);
+		printf("Sending TUR\n");
+	}
+#endif
+	/* Do a TUR to clear out any non-fatal transitional state */
+	ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
+	cmd->sc_data_direction = SCSI_DATA_NONE;
+	cmd->cmd_len = 6;
+	cmd->cmnd[0] = TEST_UNIT_READY;
+}
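+
+/*
+ * For illustration: ahd_linux_dv_fill_cmd() zeroes the whole command
+ * and TEST_UNIT_READY is opcode 0x00, so the CDB sent here is simply
+ * six zero bytes with no data phase.
+ */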
+
+#define AHD_REBD_LEN 4
+
+static void
+ahd_linux_dv_rebd(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
+		 struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
+{
+
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV) {
+		ahd_print_devinfo(ahd, devinfo);
+		printf("Sending REBD\n");
+	}
+#endif
+	if (targ->dv_buffer != NULL)
+		free(targ->dv_buffer, M_DEVBUF);
+	targ->dv_buffer = malloc(AHD_REBD_LEN, M_DEVBUF, M_WAITOK);
+	ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
+	cmd->sc_data_direction = SCSI_DATA_READ;
+	cmd->cmd_len = 10;
+	cmd->cmnd[0] = READ_BUFFER;
+	cmd->cmnd[1] = 0x0b;
+	scsi_ulto3b(AHD_REBD_LEN, &cmd->cmnd[6]);
+	cmd->request_bufflen = AHD_REBD_LEN;
+	cmd->underflow = cmd->request_bufflen;
+	cmd->request_buffer = targ->dv_buffer;
+}
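+
+/*
+ * For reference, 0x0b in cmnd[1] selects the READ BUFFER "echo buffer
+ * descriptor" mode.  The 4-byte descriptor returned here carries the
+ * echo buffer capacity in its trailing bytes, which is why the
+ * AHD_DV_STATE_REBD completion handling above extracts the size with
+ * scsi_3btoul(&targ->dv_buffer[1]) and masks it with 0x1FFF.
+ */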
+
+static void
+ahd_linux_dv_web(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
+		 struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
+{
+
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV) {
+		ahd_print_devinfo(ahd, devinfo);
+		printf("Sending WEB\n");
+	}
+#endif
+	ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
+	cmd->sc_data_direction = SCSI_DATA_WRITE;
+	cmd->cmd_len = 10;
+	cmd->cmnd[0] = WRITE_BUFFER;
+	cmd->cmnd[1] = 0x0a;
+	scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
+	cmd->request_bufflen = targ->dv_echo_size;
+	cmd->underflow = cmd->request_bufflen;
+	cmd->request_buffer = targ->dv_buffer;
+}
+
+static void
+ahd_linux_dv_reb(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
+		 struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
+{
+
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV) {
+		ahd_print_devinfo(ahd, devinfo);
+		printf("Sending REB\n");
+	}
+#endif
+	ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
+	cmd->sc_data_direction = SCSI_DATA_READ;
+	cmd->cmd_len = 10;
+	cmd->cmnd[0] = READ_BUFFER;
+	cmd->cmnd[1] = 0x0a;
+	scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
+	cmd->request_bufflen = targ->dv_echo_size;
+	cmd->underflow = cmd->request_bufflen;
+	cmd->request_buffer = targ->dv_buffer1;
+}
+
+static void
+ahd_linux_dv_su(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
+		struct ahd_devinfo *devinfo,
+		struct ahd_linux_target *targ)
+{
+	u_int le;
+
+	le = SID_IS_REMOVABLE(targ->inq_data) ? SSS_LOEJ : 0;
+
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV) {
+		ahd_print_devinfo(ahd, devinfo);
+		printf("Sending SU\n");
+	}
+#endif
+	ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
+	cmd->sc_data_direction = SCSI_DATA_NONE;
+	cmd->cmd_len = 6;
+	cmd->cmnd[0] = START_STOP_UNIT;
+	cmd->cmnd[4] = le | SSS_START;
+}
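+
+/*
+ * For illustration, the resulting START STOP UNIT CDB has only byte 4
+ * populated: SSS_START, plus SSS_LOEJ when the inquiry data marks the
+ * device as removable, asking the unit to spin up (and load media)
+ * before DV continues.
+ */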
+
+static int
+ahd_linux_fallback(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+	struct	ahd_linux_target *targ;
+	struct	ahd_initiator_tinfo *tinfo;
+	struct	ahd_transinfo *goal;
+	struct	ahd_tmode_tstate *tstate;
+	u_int	width;
+	u_int	period;
+	u_int	offset;
+	u_int	ppr_options;
+	u_int	cur_speed;
+	u_int	wide_speed;
+	u_int	narrow_speed;
+	u_int	fallback_speed;
+
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV) {
+		ahd_print_devinfo(ahd, devinfo);
+		printf("Trying to fallback\n");
+	}
+#endif
+	targ = ahd->platform_data->targets[devinfo->target_offset];
+	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
+				    devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+	goal = &tinfo->goal;
+	width = goal->width;
+	period = goal->period;
+	offset = goal->offset;
+	ppr_options = goal->ppr_options;
+	if (offset == 0)
+		period = AHD_ASYNC_XFER_PERIOD;
+	if (targ->dv_next_narrow_period == 0)
+		targ->dv_next_narrow_period = MAX(period, AHD_SYNCRATE_ULTRA2);
+	if (targ->dv_next_wide_period == 0)
+		targ->dv_next_wide_period = period;
+	if (targ->dv_max_width == 0)
+		targ->dv_max_width = width;
+	if (targ->dv_max_ppr_options == 0)
+		targ->dv_max_ppr_options = ppr_options;
+	if (targ->dv_last_ppr_options == 0)
+		targ->dv_last_ppr_options = ppr_options;
+
+	cur_speed = aic_calc_speed(width, period, offset, AHD_SYNCRATE_MIN);
+	wide_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_16_BIT,
+					  targ->dv_next_wide_period,
+					  MAX_OFFSET, AHD_SYNCRATE_MIN);
+	narrow_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_8_BIT,
+					    targ->dv_next_narrow_period,
+					    MAX_OFFSET, AHD_SYNCRATE_MIN);
+	fallback_speed = aic_calc_speed(width, period+1, offset,
+					      AHD_SYNCRATE_MIN);
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV) {
+		printf("cur_speed= %d, wide_speed= %d, narrow_speed= %d, "
+		       "fallback_speed= %d\n", cur_speed, wide_speed,
+		       narrow_speed, fallback_speed);
+	}
+#endif
+
+	if (cur_speed > 160000) {
+		/*
+		 * Paced/DT/IU_REQ only transfer speeds.  All we
+		 * can do is fallback in terms of syncrate.
+		 */
+		period++;
+	} else if (cur_speed > 80000) {
+		if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
+			/*
+			 * Try without IU_REQ as it may be confusing
+			 * an expander.
+			 */
+			ppr_options &= ~MSG_EXT_PPR_IU_REQ;
+		} else {
+			/*
+			 * Paced/DT only transfer speeds.  All we
+			 * can do is fallback in terms of syncrate.
+			 */
+			period++;
+			ppr_options = targ->dv_max_ppr_options;
+		}
+	} else if (cur_speed > 3300) {
+
+		/*
+		 * In this range we have the following
+		 * options, ordered from highest to
+		 * lowest desirability:
+		 *
+		 * o Wide/DT
+		 * o Wide/non-DT
+		 * o Narrow at a potentially higher sync rate.
+		 *
+		 * All modes are tested with and without IU_REQ
+		 * set since using IUs may confuse an expander.
+		 */
+		if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
+
+			ppr_options &= ~MSG_EXT_PPR_IU_REQ;
+		} else if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
+			/*
+			 * Try going non-DT.
+			 */
+			ppr_options = targ->dv_max_ppr_options;
+			ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+		} else if (targ->dv_last_ppr_options != 0) {
+			/*
+			 * Try without QAS or any other PPR options.
+			 * We may need a non-PPR message to work with
+			 * an expander.  We look at the "last PPR options"
+			 * so we will perform this fallback even if the
+			 * target responded to our PPR negotiation with
+			 * no option bits set.
+			 */
+			ppr_options = 0;
+		} else if (width == MSG_EXT_WDTR_BUS_16_BIT) {
+			/*
+			 * If the next narrow speed is greater than
+			 * the next wide speed, fallback to narrow.
+			 * Otherwise fallback to the next DT/Wide setting.
+			 * The narrow async speed will always be smaller
+			 * than the wide async speed, so handle this case
+			 * specifically.
+			 */
+			ppr_options = targ->dv_max_ppr_options;
+			if (narrow_speed > fallback_speed
+			 || period >= AHD_ASYNC_XFER_PERIOD) {
+				targ->dv_next_wide_period = period+1;
+				width = MSG_EXT_WDTR_BUS_8_BIT;
+				period = targ->dv_next_narrow_period;
+			} else {
+				period++;
+			}
+		} else if ((ahd->features & AHD_WIDE) != 0
+			&& targ->dv_max_width != 0
+			&& wide_speed >= fallback_speed
+			&& (targ->dv_next_wide_period <= AHD_ASYNC_XFER_PERIOD
+			 || period >= AHD_ASYNC_XFER_PERIOD)) {
+
+			/*
+			 * We are narrow.  Try falling back
+			 * to the next wide speed with 
+			 * all supported ppr options set.
+			 */
+			targ->dv_next_narrow_period = period+1;
+			width = MSG_EXT_WDTR_BUS_16_BIT;
+			period = targ->dv_next_wide_period;
+			ppr_options = targ->dv_max_ppr_options;
+		} else {
+			/* Only narrow fallback is allowed. */
+			period++;
+			ppr_options = targ->dv_max_ppr_options;
+		}
+	} else {
+		return (-1);
+	}
+	offset = MAX_OFFSET;
+	ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_PACED);
+	ahd_set_width(ahd, devinfo, width, AHD_TRANS_GOAL, FALSE);
+	if (period == 0) {
+		period = 0;
+		offset = 0;
+		ppr_options = 0;
+		if (width == MSG_EXT_WDTR_BUS_8_BIT)
+			targ->dv_next_narrow_period = AHD_ASYNC_XFER_PERIOD;
+		else
+			targ->dv_next_wide_period = AHD_ASYNC_XFER_PERIOD;
+	}
+	ahd_set_syncrate(ahd, devinfo, period, offset,
+			 ppr_options, AHD_TRANS_GOAL, FALSE);
+	targ->dv_last_ppr_options = ppr_options;
+	return (0);
+}
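+
+/*
+ * Roughly, the fallback ladder above works out to (speeds as computed
+ * by aic_calc_speed()):
+ *
+ *  > 160000: paced/DT/IU-only rates; just step the sync period.
+ *  > 80000:  drop IU_REQ first, then step the period with the full
+ *	      PPR option set restored.
+ *  > 3300:   drop IU_REQ, then DT, then all PPR options, then trade
+ *	      wide for narrow (or narrow for wide) based on which gives
+ *	      the better calculated speed, and finally step the period.
+ *  <= 3300:  give up and return -1 so the caller stops retrying.
+ */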
+
+static void
+ahd_linux_dv_timeout(struct scsi_cmnd *cmd)
+{
+	struct	ahd_softc *ahd;
+	struct	scb *scb;
+	u_long	flags;
+
+	ahd = *((struct ahd_softc **)cmd->device->host->hostdata);
+	ahd_lock(ahd, &flags);
+
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV) {
+		printf("%s: Timeout while doing DV command %x.\n",
+		       ahd_name(ahd), cmd->cmnd[0]);
+		ahd_dump_card_state(ahd);
+	}
+#endif
+	
+	/*
+	 * Guard against "done race".  No action is
+	 * required if we just completed.
+	 */
+	if ((scb = (struct scb *)cmd->host_scribble) == NULL) {
+		ahd_unlock(ahd, &flags);
+		return;
+	}
+
+	/*
+	 * Command has not completed.  Mark this
+	 * SCB as having failing status prior to
+	 * resetting the bus, so we get the correct
+	 * error code.
+	 */
+	if ((scb->flags & SCB_SENSE) != 0)
+		ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
+	else
+		ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
+	ahd_reset_channel(ahd, cmd->device->channel + 'A', /*initiate*/TRUE);
+
+	/*
+	 * Add a minimal bus settle delay for devices that are slow to
+	 * respond after bus resets.
+	 */
+	ahd_freeze_simq(ahd);
+	init_timer(&ahd->platform_data->reset_timer);
+	ahd->platform_data->reset_timer.data = (u_long)ahd;
+	ahd->platform_data->reset_timer.expires = jiffies + HZ / 2;
+	ahd->platform_data->reset_timer.function =
+	    (ahd_linux_callback_t *)ahd_release_simq;
+	add_timer(&ahd->platform_data->reset_timer);
+	if (ahd_linux_next_device_to_run(ahd) != NULL)
+		ahd_schedule_runq(ahd);
+	ahd_linux_run_complete_queue(ahd);
+	ahd_unlock(ahd, &flags);
+}
+
+static void
+ahd_linux_dv_complete(struct scsi_cmnd *cmd)
+{
+	struct ahd_softc *ahd;
+
+	ahd = *((struct ahd_softc **)cmd->device->host->hostdata);
+
+	/* Delete the DV timer before it goes off! */
+	scsi_delete_timer(cmd);
+
+#ifdef AHD_DEBUG
+	if (ahd_debug & AHD_SHOW_DV)
+		printf("%s:%c:%d: Command completed, status= 0x%x\n",
+		       ahd_name(ahd), cmd->device->channel, cmd->device->id,
+		       cmd->result);
+#endif
+
+	/* Wake up the state machine */
+	up(&ahd->platform_data->dv_cmd_sem);
+}
+
+static void
+ahd_linux_generate_dv_pattern(struct ahd_linux_target *targ)
+{
+	uint16_t b;
+	u_int	 i;
+	u_int	 j;
+
+	if (targ->dv_buffer != NULL)
+		free(targ->dv_buffer, M_DEVBUF);
+	targ->dv_buffer = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
+	if (targ->dv_buffer1 != NULL)
+		free(targ->dv_buffer1, M_DEVBUF);
+	targ->dv_buffer1 = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
+
+	i = 0;
+
+	b = 0x0001;
+	for (j = 0 ; i < targ->dv_echo_size; j++) {
+		if (j < 32) {
+			/*
+			 * 32 bytes of sequential numbers.
+			 */
+			targ->dv_buffer[i++] = j & 0xff;
+		} else if (j < 48) {
+			/*
+			 * 16 bytes of repeating 0x0000, 0xffff.
+			 */
+			targ->dv_buffer[i++] = (j & 0x02) ? 0xff : 0x00;
+		} else if (j < 64) {
+			/*
+			 * 16 bytes of repeating 0x5555, 0xaaaa.
+			 */
+			targ->dv_buffer[i++] = (j & 0x02) ? 0xaa : 0x55;
+		} else {
+			/*
+			 * Remaining buffer is filled with a repeating
+			 * pattern of:
+			 *
+			 *	 0xffff
+			 *	~0x0001 << shifted once in each loop.
+			 */
+			if (j & 0x02) {
+				if (j & 0x01) {
+					targ->dv_buffer[i++] = ~(b >> 8) & 0xff;
+					b <<= 1;
+					if (b == 0x0000)
+						b = 0x0001;
+				} else {
+					targ->dv_buffer[i++] = (~b & 0xff);
+				}
+			} else {
+				targ->dv_buffer[i++] = 0xff;
+			}
+		}
+	}
+}
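+
+/*
+ * Illustratively, the generated pattern begins:
+ *
+ *	bytes  0-31: 0x00 0x01 0x02 ... 0x1f  (sequential)
+ *	bytes 32-47: 0x00 0x00 0xff 0xff ...  (repeating 0000/ffff)
+ *	bytes 48-63: 0x55 0x55 0xaa 0xaa ...  (repeating 5555/aaaa)
+ *	bytes 64- :  0xff 0xff 0xfe 0xff, 0xff 0xff 0xfd 0xff, ...
+ *		     (0xffff followed by ~b as the walking bit shifts)
+ *
+ * exercising both long same-value runs and single-bit transitions on
+ * the data bus.
+ */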
+
+static u_int
+ahd_linux_user_tagdepth(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+	static int warned_user;
+	u_int tags;
+
+	tags = 0;
+	if ((ahd->user_discenable & devinfo->target_mask) != 0) {
+		if (ahd->unit >= NUM_ELEMENTS(aic79xx_tag_info)) {
+
+			if (warned_user == 0) {
+				printf(KERN_WARNING
+"aic79xx: WARNING: Insufficient tag_info instances\n"
+"aic79xx: for installed controllers.  Using defaults\n"
+"aic79xx: Please update the aic79xx_tag_info array in\n"
+"aic79xx: the aic79xx_osm.c source file.\n");
+				warned_user++;
+			}
+			tags = AHD_MAX_QUEUE;
+		} else {
+			adapter_tag_info_t *tag_info;
+
+			tag_info = &aic79xx_tag_info[ahd->unit];
+			tags = tag_info->tag_commands[devinfo->target_offset];
+			if (tags > AHD_MAX_QUEUE)
+				tags = AHD_MAX_QUEUE;
+		}
+	}
+	return (tags);
+}
+
+static u_int
+ahd_linux_user_dv_setting(struct ahd_softc *ahd)
+{
+	static int warned_user;
+	int dv;
+
+	if (ahd->unit >= NUM_ELEMENTS(aic79xx_dv_settings)) {
+
+		if (warned_user == 0) {
+			printf(KERN_WARNING
+"aic79xx: WARNING: Insufficient dv settings instances\n"
+"aic79xx: for installed controllers. Using defaults\n"
+"aic79xx: Please update the aic79xx_dv_settings array in\n"
+"aic79xx: the aic79xx_osm.c source file.\n");
+			warned_user++;
+		}
+		dv = -1;
+	} else {
+
+		dv = aic79xx_dv_settings[ahd->unit];
+	}
+
+	if (dv < 0) {
+		/*
+		 * Apply the default.
+		 */
+		dv = 1;
+		if (ahd->seep_config != 0)
+			dv = (ahd->seep_config->bios_control & CFENABLEDV);
+	}
+	return (dv);
+}
+
+static void
+ahd_linux_setup_user_rd_strm_settings(struct ahd_softc *ahd)
+{
+	static	int warned_user;
+	u_int	rd_strm_mask;
+	u_int	target_id;
+
+	/*
+	 * If we have specific read streaming info for this controller,
+	 * apply it.  Otherwise use the defaults.
+	 */
+	 if (ahd->unit >= NUM_ELEMENTS(aic79xx_rd_strm_info)) {
+
+		if (warned_user == 0) {
+
+			printf(KERN_WARNING
+"aic79xx: WARNING: Insufficient rd_strm instances\n"
+"aic79xx: for installed controllers. Using defaults\n"
+"aic79xx: Please update the aic79xx_rd_strm_info array\n"
+"aic79xx: in the aic79xx_osm.c source file.\n");
+			warned_user++;
+		}
+		rd_strm_mask = AIC79XX_CONFIGED_RD_STRM;
+	} else {
+
+		rd_strm_mask = aic79xx_rd_strm_info[ahd->unit];
+	}
+	for (target_id = 0; target_id < 16; target_id++) {
+		struct ahd_devinfo devinfo;
+		struct ahd_initiator_tinfo *tinfo;
+		struct ahd_tmode_tstate *tstate;
+
+		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+					    target_id, &tstate);
+		ahd_compile_devinfo(&devinfo, ahd->our_id, target_id,
+				    CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR);
+		tinfo->user.ppr_options &= ~MSG_EXT_PPR_RD_STRM;
+		if ((rd_strm_mask & devinfo.target_mask) != 0)
+			tinfo->user.ppr_options |= MSG_EXT_PPR_RD_STRM;
+	}
+}
+
+/*
+ * Determines the queue depth for a given device.
+ */
+static void
+ahd_linux_device_queue_depth(struct ahd_softc *ahd,
+			     struct ahd_linux_device *dev)
+{
+	struct	ahd_devinfo devinfo;
+	u_int	tags;
+
+	ahd_compile_devinfo(&devinfo,
+			    ahd->our_id,
+			    dev->target->target, dev->lun,
+			    dev->target->channel == 0 ? 'A' : 'B',
+			    ROLE_INITIATOR);
+	tags = ahd_linux_user_tagdepth(ahd, &devinfo);
+	if (tags != 0
+	 && dev->scsi_device != NULL
+	 && dev->scsi_device->tagged_supported != 0) {
+
+		ahd_set_tags(ahd, &devinfo, AHD_QUEUE_TAGGED);
+		ahd_print_devinfo(ahd, &devinfo);
+		printf("Tagged Queuing enabled.  Depth %d\n", tags);
+	} else {
+		ahd_set_tags(ahd, &devinfo, AHD_QUEUE_NONE);
+	}
+}
+
+static void
+ahd_linux_run_device_queue(struct ahd_softc *ahd, struct ahd_linux_device *dev)
+{
+	struct	 ahd_cmd *acmd;
+	struct	 scsi_cmnd *cmd;
+	struct	 scb *scb;
+	struct	 hardware_scb *hscb;
+	struct	 ahd_initiator_tinfo *tinfo;
+	struct	 ahd_tmode_tstate *tstate;
+	u_int	 col_idx;
+	uint16_t mask;
+
+	if ((dev->flags & AHD_DEV_ON_RUN_LIST) != 0)
+		panic("running device on run list");
+
+	while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL
+	    && dev->openings > 0 && dev->qfrozen == 0) {
+
+		/*
+		 * Schedule us to run later.  The only reason we are not
+		 * running is because the whole controller Q is frozen.
+		 */
+		if (ahd->platform_data->qfrozen != 0
+		 && AHD_DV_SIMQ_FROZEN(ahd) == 0) {
+
+			TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq,
+					  dev, links);
+			dev->flags |= AHD_DEV_ON_RUN_LIST;
+			return;
+		}
+
+		cmd = &acmd_scsi_cmd(acmd);
+
+		/*
+		 * Get an scb to use.
+		 */
+		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
+					    cmd->device->id, &tstate);
+		if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0
+		 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
+			col_idx = AHD_NEVER_COL_IDX;
+		} else {
+			col_idx = AHD_BUILD_COL_IDX(cmd->device->id,
+						    cmd->device->lun);
+		}
+		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
+			TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq,
+					 dev, links);
+			dev->flags |= AHD_DEV_ON_RUN_LIST;
+			ahd->flags |= AHD_RESOURCE_SHORTAGE;
+			return;
+		}
+		TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
+		scb->io_ctx = cmd;
+		scb->platform_data->dev = dev;
+		hscb = scb->hscb;
+		cmd->host_scribble = (char *)scb;
+
+		/*
+		 * Fill out basics of the HSCB.
+		 */
+		hscb->control = 0;
+		hscb->scsiid = BUILD_SCSIID(ahd, cmd);
+		hscb->lun = cmd->device->lun;
+		scb->hscb->task_management = 0;
+		mask = SCB_GET_TARGET_MASK(ahd, scb);
+
+		if ((ahd->user_discenable & mask) != 0)
+			hscb->control |= DISCENB;
+
+	 	if (AHD_DV_CMD(cmd) != 0)
+			scb->flags |= SCB_SILENT;
+
+		if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
+			scb->flags |= SCB_PACKETIZED;
+
+		if ((tstate->auto_negotiate & mask) != 0) {
+			scb->flags |= SCB_AUTO_NEGOTIATE;
+			scb->hscb->control |= MK_MESSAGE;
+		}
+
+		if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+			int	msg_bytes;
+			uint8_t tag_msgs[2];
+
+			msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
+			if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
+				hscb->control |= tag_msgs[0];
+				if (tag_msgs[0] == MSG_ORDERED_TASK)
+					dev->commands_since_idle_or_otag = 0;
+			} else
+#endif
+			if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
+			 && (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
+				hscb->control |= MSG_ORDERED_TASK;
+				dev->commands_since_idle_or_otag = 0;
+			} else {
+				hscb->control |= MSG_SIMPLE_TASK;
+			}
+		}
+
+		hscb->cdb_len = cmd->cmd_len;
+		memcpy(hscb->shared_data.idata.cdb, cmd->cmnd, hscb->cdb_len);
+
+		scb->sg_count = 0;
+		ahd_set_residual(scb, 0);
+		ahd_set_sense_residual(scb, 0);
+		if (cmd->use_sg != 0) {
+			void	*sg;
+			struct	 scatterlist *cur_seg;
+			u_int	 nseg;
+			int	 dir;
+
+			cur_seg = (struct scatterlist *)cmd->request_buffer;
+			dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+			nseg = pci_map_sg(ahd->dev_softc, cur_seg,
+					  cmd->use_sg, dir);
+			scb->platform_data->xfer_len = 0;
+			for (sg = scb->sg_list; nseg > 0; nseg--, cur_seg++) {
+				dma_addr_t addr;
+				bus_size_t len;
+
+				addr = sg_dma_address(cur_seg);
+				len = sg_dma_len(cur_seg);
+				scb->platform_data->xfer_len += len;
+				sg = ahd_sg_setup(ahd, scb, sg, addr, len,
+						  /*last*/nseg == 1);
+			}
+		} else if (cmd->request_bufflen != 0) {
+			void *sg;
+			dma_addr_t addr;
+			int dir;
+
+			sg = scb->sg_list;
+			dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+			addr = pci_map_single(ahd->dev_softc,
+					      cmd->request_buffer,
+					      cmd->request_bufflen, dir);
+			scb->platform_data->xfer_len = cmd->request_bufflen;
+			scb->platform_data->buf_busaddr = addr;
+			sg = ahd_sg_setup(ahd, scb, sg, addr,
+					  cmd->request_bufflen, /*last*/TRUE);
+		}
+
+		LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
+		dev->openings--;
+		dev->active++;
+		dev->commands_issued++;
+
+		/* Update the error counting bucket and dump if needed */
+		if (dev->target->cmds_since_error) {
+			dev->target->cmds_since_error++;
+			if (dev->target->cmds_since_error >
+			    AHD_LINUX_ERR_THRESH)
+				dev->target->cmds_since_error = 0;
+		}
+
+		if ((dev->flags & AHD_DEV_PERIODIC_OTAG) != 0)
+			dev->commands_since_idle_or_otag++;
+		scb->flags |= SCB_ACTIVE;
+		ahd_queue_scb(ahd, scb);
+	}
+}
+
+/*
+ * SCSI controller interrupt handler.
+ */
+irqreturn_t
+ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
+{
+	struct	ahd_softc *ahd;
+	u_long	flags;
+	int	ours;
+
+	ahd = (struct ahd_softc *) dev_id;
+	ahd_lock(ahd, &flags); 
+	ours = ahd_intr(ahd);
+	if (ahd_linux_next_device_to_run(ahd) != NULL)
+		ahd_schedule_runq(ahd);
+	ahd_linux_run_complete_queue(ahd);
+	ahd_unlock(ahd, &flags);
+	return IRQ_RETVAL(ours);
+}
+
+void
+ahd_platform_flushwork(struct ahd_softc *ahd)
+{
+
+	while (ahd_linux_run_complete_queue(ahd) != NULL)
+		;
+}
+
+static struct ahd_linux_target*
+ahd_linux_alloc_target(struct ahd_softc *ahd, u_int channel, u_int target)
+{
+	struct ahd_linux_target *targ;
+
+	targ = malloc(sizeof(*targ), M_DEVBUF, M_NOWAIT);
+	if (targ == NULL)
+		return (NULL);
+	memset(targ, 0, sizeof(*targ));
+	targ->channel = channel;
+	targ->target = target;
+	targ->ahd = ahd;
+	targ->flags = AHD_DV_REQUIRED;
+	ahd->platform_data->targets[target] = targ;
+	return (targ);
+}
+
+static void
+ahd_linux_free_target(struct ahd_softc *ahd, struct ahd_linux_target *targ)
+{
+	struct ahd_devinfo devinfo;
+	struct ahd_initiator_tinfo *tinfo;
+	struct ahd_tmode_tstate *tstate;
+	u_int our_id;
+	u_int target_offset;
+	char channel;
+
+	/*
+	 * Force a negotiation to async/narrow on any
+	 * future command to this device unless a bus
+	 * reset occurs between now and that command.
+	 */
+	channel = 'A' + targ->channel;
+	our_id = ahd->our_id;
+	target_offset = targ->target;
+	tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
+				    targ->target, &tstate);
+	ahd_compile_devinfo(&devinfo, our_id, targ->target, CAM_LUN_WILDCARD,
+			    channel, ROLE_INITIATOR);
+	ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
+			 AHD_TRANS_GOAL, /*paused*/FALSE);
+	ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+		      AHD_TRANS_GOAL, /*paused*/FALSE);
+	ahd_update_neg_request(ahd, &devinfo, tstate, tinfo, AHD_NEG_ALWAYS);
+ 	ahd->platform_data->targets[target_offset] = NULL;
+	if (targ->inq_data != NULL)
+		free(targ->inq_data, M_DEVBUF);
+	if (targ->dv_buffer != NULL)
+		free(targ->dv_buffer, M_DEVBUF);
+	if (targ->dv_buffer1 != NULL)
+		free(targ->dv_buffer1, M_DEVBUF);
+	free(targ, M_DEVBUF);
+}
+
+static struct ahd_linux_device*
+ahd_linux_alloc_device(struct ahd_softc *ahd,
+		 struct ahd_linux_target *targ, u_int lun)
+{
+	struct ahd_linux_device *dev;
+
+	dev = malloc(sizeof(*dev), M_DEVBUF, M_NOWAIT);
+	if (dev == NULL)
+		return (NULL);
+	memset(dev, 0, sizeof(*dev));
+	init_timer(&dev->timer);
+	TAILQ_INIT(&dev->busyq);
+	dev->flags = AHD_DEV_UNCONFIGURED;
+	dev->lun = lun;
+	dev->target = targ;
+
+	/*
+	 * We start out life using untagged
+	 * transactions of which we allow one.
+	 */
+	dev->openings = 1;
+
+	/*
+	 * Set maxtags to 0.  This will be changed if we
+	 * later determine that we are dealing with
+	 * a tagged queuing capable device.
+	 */
+	dev->maxtags = 0;
+	
+	targ->refcount++;
+	targ->devices[lun] = dev;
+	return (dev);
+}
+
+static void
+ahd_linux_free_device(struct ahd_softc *ahd, struct ahd_linux_device *dev)
+{
+	struct ahd_linux_target *targ;
+
+	del_timer(&dev->timer);
+	targ = dev->target;
+	targ->devices[dev->lun] = NULL;
+	free(dev, M_DEVBUF);
+	targ->refcount--;
+	if (targ->refcount == 0
+	 && (targ->flags & AHD_DV_REQUIRED) == 0)
+		ahd_linux_free_target(ahd, targ);
+}
+
+void
+ahd_send_async(struct ahd_softc *ahd, char channel,
+	       u_int target, u_int lun, ac_code code, void *arg)
+{
+	switch (code) {
+	case AC_TRANSFER_NEG:
+	{
+		char	buf[80];
+		struct	ahd_linux_target *targ;
+		struct	info_str info;
+		struct	ahd_initiator_tinfo *tinfo;
+		struct	ahd_tmode_tstate *tstate;
+
+		info.buffer = buf;
+		info.length = sizeof(buf);
+		info.offset = 0;
+		info.pos = 0;
+		tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
+					    target, &tstate);
+
+		/*
+		 * Don't bother reporting results while
+		 * negotiations are still pending.
+		 */
+		if (tinfo->curr.period != tinfo->goal.period
+		 || tinfo->curr.width != tinfo->goal.width
+		 || tinfo->curr.offset != tinfo->goal.offset
+		 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
+			if (bootverbose == 0)
+				break;
+
+		/*
+		 * Don't bother reporting results that
+		 * are identical to those last reported.
+		 */
+		targ = ahd->platform_data->targets[target];
+		if (targ == NULL)
+			break;
+		if (tinfo->curr.period == targ->last_tinfo.period
+		 && tinfo->curr.width == targ->last_tinfo.width
+		 && tinfo->curr.offset == targ->last_tinfo.offset
+		 && tinfo->curr.ppr_options == targ->last_tinfo.ppr_options)
+			if (bootverbose == 0)
+				break;
+
+		targ->last_tinfo.period = tinfo->curr.period;
+		targ->last_tinfo.width = tinfo->curr.width;
+		targ->last_tinfo.offset = tinfo->curr.offset;
+		targ->last_tinfo.ppr_options = tinfo->curr.ppr_options;
+
+		printf("(%s:%c:", ahd_name(ahd), channel);
+		if (target == CAM_TARGET_WILDCARD)
+			printf("*): ");
+		else
+			printf("%d): ", target);
+		ahd_format_transinfo(&info, &tinfo->curr);
+		if (info.pos < info.length)
+			*info.buffer = '\0';
+		else
+			buf[info.length - 1] = '\0';
+		printf("%s", buf);
+		break;
+	}
+        case AC_SENT_BDR:
+	{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+		WARN_ON(lun != CAM_LUN_WILDCARD);
+		scsi_report_device_reset(ahd->platform_data->host,
+					 channel - 'A', target);
+#else
+		Scsi_Device *scsi_dev;
+
+		/*
+		 * Find the SCSI device associated with this
+		 * request and indicate that a UA is expected.
+		 */
+		for (scsi_dev = ahd->platform_data->host->host_queue;
+		     scsi_dev != NULL; scsi_dev = scsi_dev->next) {
+			if (channel - 'A' == scsi_dev->channel
+			 && target == scsi_dev->id
+			 && (lun == CAM_LUN_WILDCARD
+			  || lun == scsi_dev->lun)) {
+				scsi_dev->was_reset = 1;
+				scsi_dev->expecting_cc_ua = 1;
+			}
+		}
+#endif
+		break;
+	}
+        case AC_BUS_RESET:
+		if (ahd->platform_data->host != NULL) {
+			scsi_report_bus_reset(ahd->platform_data->host,
+					      channel - 'A');
+		}
+                break;
+        default:
+                panic("ahd_send_async: Unexpected async event");
+        }
+}
+
+/*
+ * Calls the higher level scsi done function and frees the scb.
+ */
+void
+ahd_done(struct ahd_softc *ahd, struct scb *scb)
+{
+	Scsi_Cmnd *cmd;
+	struct	  ahd_linux_device *dev;
+
+	if ((scb->flags & SCB_ACTIVE) == 0) {
+		printf("SCB %d done'd twice\n", SCB_GET_TAG(scb));
+		ahd_dump_card_state(ahd);
+		panic("Stopping for safety");
+	}
+	LIST_REMOVE(scb, pending_links);
+	cmd = scb->io_ctx;
+	dev = scb->platform_data->dev;
+	dev->active--;
+	dev->openings++;
+	if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
+		cmd->result &= ~(CAM_DEV_QFRZN << 16);
+		dev->qfrozen--;
+	}
+	ahd_linux_unmap_scb(ahd, scb);
+
+	/*
+	 * Guard against stale sense data.
+	 * The Linux mid-layer assumes that sense
+	 * was retrieved anytime the first byte of
+	 * the sense buffer looks "sane".
+	 */
+	cmd->sense_buffer[0] = 0;
+	if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) {
+		uint32_t amount_xferred;
+
+		amount_xferred =
+		    ahd_get_transfer_length(scb) - ahd_get_residual(scb);
+		if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_MISC) != 0) {
+				ahd_print_path(ahd, scb);
+				printf("Set CAM_UNCOR_PARITY\n");
+			}
+#endif
+			ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
+#ifdef AHD_REPORT_UNDERFLOWS
+		/*
+		 * This code is disabled by default as some
+		 * clients of the SCSI system do not properly
+		 * initialize the underflow parameter.  This
+		 * results in spurious termination of commands
+		 * that complete as expected (e.g. underflow is
+		 * allowed as command can return variable amounts
+		 * of data).
+		 */
+		} else if (amount_xferred < scb->io_ctx->underflow) {
+			u_int i;
+
+			ahd_print_path(ahd, scb);
+			printf("CDB:");
+			for (i = 0; i < scb->io_ctx->cmd_len; i++)
+				printf(" 0x%x", scb->io_ctx->cmnd[i]);
+			printf("\n");
+			ahd_print_path(ahd, scb);
+			printf("Saw underflow (%ld of %ld bytes). "
+			       "Treated as error\n",
+				ahd_get_residual(scb),
+				ahd_get_transfer_length(scb));
+			ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
+#endif
+		} else {
+			ahd_set_transaction_status(scb, CAM_REQ_CMP);
+		}
+	} else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
+		ahd_linux_handle_scsi_status(ahd, dev, scb);
+	} else if (ahd_get_transaction_status(scb) == CAM_SEL_TIMEOUT) {
+		dev->flags |= AHD_DEV_UNCONFIGURED;
+		if (AHD_DV_CMD(cmd) == FALSE)
+			dev->target->flags &= ~AHD_DV_REQUIRED;
+	}
+	/*
+	 * Start DV for devices that require it assuming the first command
+	 * sent does not result in a selection timeout.
+	 */
+	if (ahd_get_transaction_status(scb) != CAM_SEL_TIMEOUT
+	 && (dev->target->flags & AHD_DV_REQUIRED) != 0)
+		ahd_linux_start_dv(ahd);
+
+	if (dev->openings == 1
+	 && ahd_get_transaction_status(scb) == CAM_REQ_CMP
+	 && ahd_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
+		dev->tag_success_count++;
+	/*
+	 * Some devices deal with temporary internal resource
+	 * shortages by returning queue full.  When the queue
+	 * full occurs, we throttle back.  Slowly try to get
+	 * back to our previous queue depth.
+	 */
+	if ((dev->openings + dev->active) < dev->maxtags
+	 && dev->tag_success_count > AHD_TAG_SUCCESS_INTERVAL) {
+		dev->tag_success_count = 0;
+		dev->openings++;
+	}
+
+	if (dev->active == 0)
+		dev->commands_since_idle_or_otag = 0;
+
+	if (TAILQ_EMPTY(&dev->busyq)) {
+		if ((dev->flags & AHD_DEV_UNCONFIGURED) != 0
+		 && dev->active == 0
+		 && (dev->flags & AHD_DEV_TIMER_ACTIVE) == 0)
+			ahd_linux_free_device(ahd, dev);
+	} else if ((dev->flags & AHD_DEV_ON_RUN_LIST) == 0) {
+		TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq, dev, links);
+		dev->flags |= AHD_DEV_ON_RUN_LIST;
+	}
+
+	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
+		printf("Recovery SCB completes\n");
+		if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
+		 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
+			ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
+		if ((scb->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) {
+			scb->platform_data->flags &= ~AHD_SCB_UP_EH_SEM;
+			up(&ahd->platform_data->eh_sem);
+		}
+	}
+
+	ahd_free_scb(ahd, scb);
+	ahd_linux_queue_cmd_complete(ahd, cmd);
+
+	if ((ahd->platform_data->flags & AHD_DV_WAIT_SIMQ_EMPTY) != 0
+	 && LIST_FIRST(&ahd->pending_scbs) == NULL) {
+		ahd->platform_data->flags &= ~AHD_DV_WAIT_SIMQ_EMPTY;
+		up(&ahd->platform_data->dv_sem);
+	}
+}
+
+static void
+ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
+			     struct ahd_linux_device *dev, struct scb *scb)
+{
+	struct	ahd_devinfo devinfo;
+
+	ahd_compile_devinfo(&devinfo,
+			    ahd->our_id,
+			    dev->target->target, dev->lun,
+			    dev->target->channel == 0 ? 'A' : 'B',
+			    ROLE_INITIATOR);
+	
+	/*
+	 * We don't currently trust the mid-layer to
+	 * properly deal with queue full or busy.  So,
+	 * when one occurs, we tell the mid-layer to
+	 * unconditionally requeue the command to us
+	 * so that we can retry it ourselves.  We also
+	 * implement our own throttling mechanism so
+	 * we don't clobber the device with too many
+	 * commands.
+	 */
+	switch (ahd_get_scsi_status(scb)) {
+	default:
+		break;
+	case SCSI_STATUS_CHECK_COND:
+	case SCSI_STATUS_CMD_TERMINATED:
+	{
+		Scsi_Cmnd *cmd;
+
+		/*
+		 * Copy sense information to the OS's cmd
+		 * structure if it is available.
+		 */
+		cmd = scb->io_ctx;
+		if ((scb->flags & (SCB_SENSE|SCB_PKT_SENSE)) != 0) {
+			struct scsi_status_iu_header *siu;
+			u_int sense_size;
+			u_int sense_offset;
+
+			if (scb->flags & SCB_SENSE) {
+				sense_size = MIN(sizeof(struct scsi_sense_data)
+					       - ahd_get_sense_residual(scb),
+						 sizeof(cmd->sense_buffer));
+				sense_offset = 0;
+			} else {
+				/*
+				 * Copy only the sense data into the provided
+				 * buffer.
+				 */
+				siu = (struct scsi_status_iu_header *)
+				    scb->sense_data;
+				sense_size = MIN(scsi_4btoul(siu->sense_length),
+						sizeof(cmd->sense_buffer));
+				sense_offset = SIU_SENSE_OFFSET(siu);
+			}
+
+			memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+			memcpy(cmd->sense_buffer,
+			       ahd_get_sense_buf(ahd, scb)
+			       + sense_offset, sense_size);
+			cmd->result |= (DRIVER_SENSE << 24);
+
+#ifdef AHD_DEBUG
+			if (ahd_debug & AHD_SHOW_SENSE) {
+				int i;
+
+				printf("Copied %d bytes of sense data at %d:",
+				       sense_size, sense_offset);
+				for (i = 0; i < sense_size; i++) {
+					if ((i & 0xF) == 0)
+						printf("\n");
+					printf("0x%x ", cmd->sense_buffer[i]);
+				}
+				printf("\n");
+			}
+#endif
+		}
+		break;
+	}
+	case SCSI_STATUS_QUEUE_FULL:
+	{
+		/*
+		 * By the time the core driver has returned this
+		 * command, all other commands that were queued
+		 * to us but not the device have been returned.
+		 * This ensures that dev->active is equal to
+		 * the number of commands actually queued to
+		 * the device.
+		 */
+		dev->tag_success_count = 0;
+		if (dev->active != 0) {
+			/*
+			 * Drop our opening count to the number
+			 * of commands currently outstanding.
+			 */
+			dev->openings = 0;
+#ifdef AHD_DEBUG
+			if ((ahd_debug & AHD_SHOW_QFULL) != 0) {
+				ahd_print_path(ahd, scb);
+				printf("Dropping tag count to %d\n",
+				       dev->active);
+			}
+#endif
+			if (dev->active == dev->tags_on_last_queuefull) {
+
+				dev->last_queuefull_same_count++;
+				/*
+				 * If we repeatedly see a queue full
+				 * at the same queue depth, this
+				 * device has a fixed number of tag
+				 * slots.  Lock in this tag depth
+				 * so we stop seeing queue fulls from
+				 * this device.
+				 */
+				if (dev->last_queuefull_same_count
+				 == AHD_LOCK_TAGS_COUNT) {
+					dev->maxtags = dev->active;
+					ahd_print_path(ahd, scb);
+					printf("Locking max tag count at %d\n",
+					       dev->active);
+				}
+			} else {
+				dev->tags_on_last_queuefull = dev->active;
+				dev->last_queuefull_same_count = 0;
+			}
+			ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
+			ahd_set_scsi_status(scb, SCSI_STATUS_OK);
+			ahd_platform_set_tags(ahd, &devinfo,
+				     (dev->flags & AHD_DEV_Q_BASIC)
+				   ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
+			break;
+		}
+		/*
+		 * Drop down to a single opening, and treat this
+		 * as if the target returned BUSY SCSI status.
+		 */
+		dev->openings = 1;
+		ahd_platform_set_tags(ahd, &devinfo,
+			     (dev->flags & AHD_DEV_Q_BASIC)
+			   ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
+		ahd_set_scsi_status(scb, SCSI_STATUS_BUSY);
+		/* FALLTHROUGH */
+	}
+	case SCSI_STATUS_BUSY:
+		/*
+		 * Set a short timer to defer sending commands for
+		 * a bit since Linux will not delay in this case.
+		 */
+		if ((dev->flags & AHD_DEV_TIMER_ACTIVE) != 0) {
+			printf("%s:%c:%d: Device Timer still active during "
+			       "busy processing\n", ahd_name(ahd),
+				dev->target->channel, dev->target->target);
+			break;
+		}
+		dev->flags |= AHD_DEV_TIMER_ACTIVE;
+		dev->qfrozen++;
+		init_timer(&dev->timer);
+		dev->timer.data = (u_long)dev;
+		dev->timer.expires = jiffies + (HZ/2);
+		dev->timer.function = ahd_linux_dev_timed_unfreeze;
+		add_timer(&dev->timer);
+		break;
+	}
+}
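+
+/*
+ * As a hypothetical example of the QUEUE FULL handling above: if a
+ * device reports QUEUE FULL while 12 commands are still active,
+ * openings drops to 0 so the effective depth becomes 12.  If the
+ * device then reports QUEUE FULL at that same depth
+ * AHD_LOCK_TAGS_COUNT times in a row, maxtags is locked at 12 and the
+ * driver stops probing for a deeper queue.
+ */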
+
+static void
+ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, Scsi_Cmnd *cmd)
+{
+	/*
+	 * Typically, the complete queue has very few entries
+	 * queued to it before the queue is emptied by
+	 * ahd_linux_run_complete_queue, so sorting the entries
+	 * by generation number should be inexpensive.
+	 * We perform the sort so that commands that complete
+	 * with an error are returned in the order originally
+	 * queued to the controller so that any subsequent retries
+	 * are performed in order.  The underlying ahd routines do
+	 * not guarantee the order that aborted commands will be
+	 * returned to us.
+	 */
+	struct ahd_completeq *completeq;
+	struct ahd_cmd *list_cmd;
+	struct ahd_cmd *acmd;
+
+	/*
+	 * Map CAM error codes into Linux Error codes.  We
+	 * avoid the conversion so that the DV code has the
+	 * full error information available when making
+	 * state change decisions.
+	 */
+	if (AHD_DV_CMD(cmd) == FALSE) {
+		uint32_t status;
+		u_int new_status;
+
+		status = ahd_cmd_get_transaction_status(cmd);
+		if (status != CAM_REQ_CMP) {
+			struct ahd_linux_device *dev;
+			struct ahd_devinfo devinfo;
+			cam_status cam_status;
+			uint32_t action;
+			u_int scsi_status;
+
+			dev = ahd_linux_get_device(ahd, cmd->device->channel,
+						   cmd->device->id,
+						   cmd->device->lun,
+						   /*alloc*/FALSE);
+
+			if (dev == NULL)
+				goto no_fallback;
+
+			ahd_compile_devinfo(&devinfo,
+					    ahd->our_id,
+					    dev->target->target, dev->lun,
+					    dev->target->channel == 0 ? 'A':'B',
+					    ROLE_INITIATOR);
+
+			scsi_status = ahd_cmd_get_scsi_status(cmd);
+			cam_status = ahd_cmd_get_transaction_status(cmd);
+			action = aic_error_action(cmd, dev->target->inq_data,
+						  cam_status, scsi_status);
+			if ((action & SSQ_FALLBACK) != 0) {
+
+				/* Update stats */
+				dev->target->errors_detected++;
+				if (dev->target->cmds_since_error == 0)
+					dev->target->cmds_since_error++;
+				else {
+					dev->target->cmds_since_error = 0;
+					ahd_linux_fallback(ahd, &devinfo);
+				}
+			}
+		}
+no_fallback:
+		switch (status) {
+		case CAM_REQ_INPROG:
+		case CAM_REQ_CMP:
+		case CAM_SCSI_STATUS_ERROR:
+			new_status = DID_OK;
+			break;
+		case CAM_REQ_ABORTED:
+			new_status = DID_ABORT;
+			break;
+		case CAM_BUSY:
+			new_status = DID_BUS_BUSY;
+			break;
+		case CAM_REQ_INVALID:
+		case CAM_PATH_INVALID:
+			new_status = DID_BAD_TARGET;
+			break;
+		case CAM_SEL_TIMEOUT:
+			new_status = DID_NO_CONNECT;
+			break;
+		case CAM_SCSI_BUS_RESET:
+		case CAM_BDR_SENT:
+			new_status = DID_RESET;
+			break;
+		case CAM_UNCOR_PARITY:
+			new_status = DID_PARITY;
+			break;
+		case CAM_CMD_TIMEOUT:
+			new_status = DID_TIME_OUT;
+			break;
+		case CAM_UA_ABORT:
+		case CAM_REQ_CMP_ERR:
+		case CAM_AUTOSENSE_FAIL:
+		case CAM_NO_HBA:
+		case CAM_DATA_RUN_ERR:
+		case CAM_UNEXP_BUSFREE:
+		case CAM_SEQUENCE_FAIL:
+		case CAM_CCB_LEN_ERR:
+		case CAM_PROVIDE_FAIL:
+		case CAM_REQ_TERMIO:
+		case CAM_UNREC_HBA_ERROR:
+		case CAM_REQ_TOO_BIG:
+			new_status = DID_ERROR;
+			break;
+		case CAM_REQUEUE_REQ:
+			/*
+			 * If we want the request requeued, make sure there
+			 * are sufficient retries.  In the old scsi error code,
+			 * we used to be able to specify a result code that
+			 * bypassed the retry count.  Now we must use this
+			 * hack.  We also "fake" a check condition with
+			 * a sense code of ABORTED COMMAND.  This seems to
+			 * evoke a retry even if this command is being sent
+			 * via the eh thread.  Ick!  Ick!  Ick!
+			 */
+			if (cmd->retries > 0)
+				cmd->retries--;
+			new_status = DID_OK;
+			ahd_cmd_set_scsi_status(cmd, SCSI_STATUS_CHECK_COND);
+			cmd->result |= (DRIVER_SENSE << 24);
+			memset(cmd->sense_buffer, 0,
+			       sizeof(cmd->sense_buffer));
+			cmd->sense_buffer[0] = SSD_ERRCODE_VALID
+					     | SSD_CURRENT_ERROR;
+			cmd->sense_buffer[2] = SSD_KEY_ABORTED_COMMAND;
+			break;
+		default:
+			/* We should never get here */
+			new_status = DID_ERROR;
+			break;
+		}
+
+		ahd_cmd_set_transaction_status(cmd, new_status);
+	}
+
+	completeq = &ahd->platform_data->completeq;
+	list_cmd = TAILQ_FIRST(completeq);
+	acmd = (struct ahd_cmd *)cmd;
+	while (list_cmd != NULL
+	    && acmd_scsi_cmd(list_cmd).serial_number
+	     < acmd_scsi_cmd(acmd).serial_number)
+		list_cmd = TAILQ_NEXT(list_cmd, acmd_links.tqe);
+	if (list_cmd != NULL)
+		TAILQ_INSERT_BEFORE(list_cmd, acmd, acmd_links.tqe);
+	else
+		TAILQ_INSERT_TAIL(completeq, acmd, acmd_links.tqe);
+}
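+
+/*
+ * For example, if commands with serial numbers 7 and 9 already sit on
+ * the complete queue and a failing command with serial number 8 is
+ * queued here, the loop above inserts it between them so that any
+ * retries issued by the mid-layer replay in the original submission
+ * order.
+ */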
+
+static void
+ahd_linux_filter_inquiry(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
+{
+	struct	scsi_inquiry_data *sid;
+	struct	ahd_initiator_tinfo *tinfo;
+	struct	ahd_transinfo *user;
+	struct	ahd_transinfo *goal;
+	struct	ahd_transinfo *curr;
+	struct	ahd_tmode_tstate *tstate;
+	struct	ahd_linux_device *dev;
+	u_int	width;
+	u_int	period;
+	u_int	offset;
+	u_int	ppr_options;
+	u_int	trans_version;
+	u_int	prot_version;
+
+	/*
+	 * Determine if this lun actually exists.  If so,
+	 * hold on to its corresponding device structure.
+	 * If not, make sure we release the device and
+	 * don't bother processing the rest of this inquiry
+	 * command.
+	 */
+	dev = ahd_linux_get_device(ahd, devinfo->channel - 'A',
+				   devinfo->target, devinfo->lun,
+				   /*alloc*/TRUE);
+
+	sid = (struct scsi_inquiry_data *)dev->target->inq_data;
+	if (SID_QUAL(sid) == SID_QUAL_LU_CONNECTED) {
+
+		dev->flags &= ~AHD_DEV_UNCONFIGURED;
+	} else {
+		dev->flags |= AHD_DEV_UNCONFIGURED;
+		return;
+	}
+
+	/*
+	 * Update our notion of this device's transfer
+	 * negotiation capabilities.
+	 */
+	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
+				    devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+	user = &tinfo->user;
+	goal = &tinfo->goal;
+	curr = &tinfo->curr;
+	width = user->width;
+	period = user->period;
+	offset = user->offset;
+	ppr_options = user->ppr_options;
+	trans_version = user->transport_version;
+	prot_version = MIN(user->protocol_version, SID_ANSI_REV(sid));
+
+	/*
+	 * Only attempt SPI3/4 once we've verified that
+	 * the device claims to support SPI3/4 features.
+	 */
+	if (prot_version < SCSI_REV_2)
+		trans_version = SID_ANSI_REV(sid);
+	else
+		trans_version = SCSI_REV_2;
+
+	if ((sid->flags & SID_WBus16) == 0)
+		width = MSG_EXT_WDTR_BUS_8_BIT;
+	if ((sid->flags & SID_Sync) == 0) {
+		period = 0;
+		offset = 0;
+		ppr_options = 0;
+	}
+	if ((sid->spi3data & SID_SPI_QAS) == 0)
+		ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
+	if ((sid->spi3data & SID_SPI_CLOCK_DT) == 0)
+		ppr_options &= MSG_EXT_PPR_QAS_REQ;
+	if ((sid->spi3data & SID_SPI_IUS) == 0)
+		ppr_options &= (MSG_EXT_PPR_DT_REQ
+			      | MSG_EXT_PPR_QAS_REQ);
+
+	if (prot_version > SCSI_REV_2
+	 && ppr_options != 0)
+		trans_version = user->transport_version;
+
+	ahd_validate_width(ahd, /*tinfo limit*/NULL, &width, ROLE_UNKNOWN);
+	ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
+	ahd_validate_offset(ahd, /*tinfo limit*/NULL, period,
+			    &offset, width, ROLE_UNKNOWN);
+	if (offset == 0 || period == 0) {
+		period = 0;
+		offset = 0;
+		ppr_options = 0;
+	}
+	/* Apply our filtered user settings. */
+	curr->transport_version = trans_version;
+	curr->protocol_version = prot_version;
+	ahd_set_width(ahd, devinfo, width, AHD_TRANS_GOAL, /*paused*/FALSE);
+	ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options,
+			 AHD_TRANS_GOAL, /*paused*/FALSE);
+}
+
+void
+ahd_freeze_simq(struct ahd_softc *ahd)
+{
+	ahd->platform_data->qfrozen++;
+	if (ahd->platform_data->qfrozen == 1) {
+		scsi_block_requests(ahd->platform_data->host);
+		ahd_platform_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
+					CAM_LUN_WILDCARD, SCB_LIST_NULL,
+					ROLE_INITIATOR, CAM_REQUEUE_REQ);
+	}
+}
+
+void
+ahd_release_simq(struct ahd_softc *ahd)
+{
+	u_long s;
+	int    unblock_reqs;
+
+	unblock_reqs = 0;
+	ahd_lock(ahd, &s);
+	if (ahd->platform_data->qfrozen > 0)
+		ahd->platform_data->qfrozen--;
+	if (ahd->platform_data->qfrozen == 0) {
+		unblock_reqs = 1;
+	}
+	if (AHD_DV_SIMQ_FROZEN(ahd)
+	 && ((ahd->platform_data->flags & AHD_DV_WAIT_SIMQ_RELEASE) != 0)) {
+		ahd->platform_data->flags &= ~AHD_DV_WAIT_SIMQ_RELEASE;
+		up(&ahd->platform_data->dv_sem);
+	}
+	ahd_schedule_runq(ahd);
+	ahd_unlock(ahd, &s);
+	/*
+	 * There is still a race here.  The mid-layer
+	 * should keep its own freeze count and use
+	 * a bottom half handler to run the queues
+	 * so we can unblock with our own lock held.
+	 */
+	if (unblock_reqs)
+		scsi_unblock_requests(ahd->platform_data->host);
+}
+
+static void
+ahd_linux_sem_timeout(u_long arg)
+{
+	struct	scb *scb;
+	struct	ahd_softc *ahd;
+	u_long	s;
+
+	scb = (struct scb *)arg;
+	ahd = scb->ahd_softc;
+	ahd_lock(ahd, &s);
+	if ((scb->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) {
+		scb->platform_data->flags &= ~AHD_SCB_UP_EH_SEM;
+		up(&ahd->platform_data->eh_sem);
+	}
+	ahd_unlock(ahd, &s);
+}
+
+static void
+ahd_linux_dev_timed_unfreeze(u_long arg)
+{
+	struct ahd_linux_device *dev;
+	struct ahd_softc *ahd;
+	u_long s;
+
+	dev = (struct ahd_linux_device *)arg;
+	ahd = dev->target->ahd;
+	ahd_lock(ahd, &s);
+	dev->flags &= ~AHD_DEV_TIMER_ACTIVE;
+	if (dev->qfrozen > 0)
+		dev->qfrozen--;
+	if (dev->qfrozen == 0
+	 && (dev->flags & AHD_DEV_ON_RUN_LIST) == 0)
+		ahd_linux_run_device_queue(ahd, dev);
+	if ((dev->flags & AHD_DEV_UNCONFIGURED) != 0
+	 && dev->active == 0)
+		ahd_linux_free_device(ahd, dev);
+	ahd_unlock(ahd, &s);
+}
+
+void
+ahd_platform_dump_card_state(struct ahd_softc *ahd)
+{
+	struct ahd_linux_device *dev;
+	int target;
+	int maxtarget;
+	int lun;
+	int i;
+
+	maxtarget = (ahd->features & AHD_WIDE) ? 15 : 7;
+	for (target = 0; target <=maxtarget; target++) {
+
+		for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
+			struct ahd_cmd *acmd;
+
+			dev = ahd_linux_get_device(ahd, 0, target,
+						   lun, /*alloc*/FALSE);
+			if (dev == NULL)
+				continue;
+
+			printf("DevQ(%d:%d:%d): ", 0, target, lun);
+			i = 0;
+			TAILQ_FOREACH(acmd, &dev->busyq, acmd_links.tqe) {
+				if (i++ > AHD_SCB_MAX)
+					break;
+			}
+			printf("%d waiting\n", i);
+		}
+	}
+}
+
+static int __init
+ahd_linux_init(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	return ahd_linux_detect(&aic79xx_driver_template);
+#else
+	scsi_register_module(MODULE_SCSI_HA, &aic79xx_driver_template);
+	if (aic79xx_driver_template.present == 0) {
+		scsi_unregister_module(MODULE_SCSI_HA,
+				       &aic79xx_driver_template);
+		return (-ENODEV);
+	}
+
+	return (0);
+#endif
+}
+
+static void __exit
+ahd_linux_exit(void)
+{
+	struct ahd_softc *ahd;
+
+	/*
+	 * Shutdown DV threads before going into the SCSI mid-layer.
+	 * This avoids situations where the mid-layer locks the entire
+	 * kernel so that waiting for our DV threads to exit leads
+	 * to deadlock.
+	 */
+	TAILQ_FOREACH(ahd, &ahd_tailq, links) {
+
+		ahd_linux_kill_dv_thread(ahd);
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+	/*
+	 * In 2.4 we have to unregister from the PCI core _after_
+	 * unregistering from the scsi midlayer to avoid dangling
+	 * references.
+	 */
+	scsi_unregister_module(MODULE_SCSI_HA, &aic79xx_driver_template);
+#endif
+	ahd_linux_pci_exit();
+}
+
+module_init(ahd_linux_init);
+module_exit(ahd_linux_exit);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
new file mode 100644
index 0000000..605f92b
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -0,0 +1,1147 @@
+/*
+ * Adaptec AIC79xx device driver for Linux.
+ *
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.h#137 $
+ *
+ */
+#ifndef _AIC79XX_LINUX_H_
+#define _AIC79XX_LINUX_H_
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/smp_lock.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+
+#include <linux/interrupt.h> /* For tasklet support. */
+#include <linux/config.h>
+#include <linux/slab.h>
+
+/* Core SCSI definitions */
+#define AIC_LIB_PREFIX ahd
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+
+/* Name space conflict with BSD queue macros */
+#ifdef LIST_HEAD
+#undef LIST_HEAD
+#endif
+
+#include "cam.h"
+#include "queue.h"
+#include "scsi_message.h"
+#include "scsi_iu.h"
+#include "aiclib.h"
+
+/*********************************** Debugging ********************************/
+#ifdef CONFIG_AIC79XX_DEBUG_ENABLE
+#ifdef CONFIG_AIC79XX_DEBUG_MASK
+#define AHD_DEBUG 1
+#define AHD_DEBUG_OPTS CONFIG_AIC79XX_DEBUG_MASK
+#else
+/*
+ * Compile in debugging code, but do not enable any printfs.
+ */
+#define AHD_DEBUG 1
+#define AHD_DEBUG_OPTS 0
+#endif
+#else
+/* No debugging code. */
+#endif /* CONFIG_AIC79XX_DEBUG_ENABLE */
+
+/********************************** Misc Macros *******************************/
+#define	roundup(x, y)   ((((x)+((y)-1))/(y))*(y))
+#define	powerof2(x)	((((x)-1)&(x))==0)
+
+/************************* Forward Declarations *******************************/
+struct ahd_softc;
+typedef struct pci_dev *ahd_dev_softc_t;
+typedef Scsi_Cmnd      *ahd_io_ctx_t;
+
+/******************************* Byte Order ***********************************/
+#define ahd_htobe16(x)	cpu_to_be16(x)
+#define ahd_htobe32(x)	cpu_to_be32(x)
+#define ahd_htobe64(x)	cpu_to_be64(x)
+#define ahd_htole16(x)	cpu_to_le16(x)
+#define ahd_htole32(x)	cpu_to_le32(x)
+#define ahd_htole64(x)	cpu_to_le64(x)
+
+#define ahd_be16toh(x)	be16_to_cpu(x)
+#define ahd_be32toh(x)	be32_to_cpu(x)
+#define ahd_be64toh(x)	be64_to_cpu(x)
+#define ahd_le16toh(x)	le16_to_cpu(x)
+#define ahd_le32toh(x)	le32_to_cpu(x)
+#define ahd_le64toh(x)	le64_to_cpu(x)
+
+#ifndef LITTLE_ENDIAN
+#define LITTLE_ENDIAN 1234
+#endif
+
+#ifndef BIG_ENDIAN
+#define BIG_ENDIAN 4321
+#endif
+
+#ifndef BYTE_ORDER
+#if defined(__BIG_ENDIAN)
+#define BYTE_ORDER BIG_ENDIAN
+#endif
+#if defined(__LITTLE_ENDIAN)
+#define BYTE_ORDER LITTLE_ENDIAN
+#endif
+#endif /* BYTE_ORDER */
+
+/************************* Configuration Data *********************************/
+extern uint32_t aic79xx_allow_memio;
+extern int aic79xx_detect_complete;
+extern Scsi_Host_Template aic79xx_driver_template;
+
+/***************************** Bus Space/DMA **********************************/
+
+typedef uint32_t bus_size_t;
+
+typedef enum {
+	BUS_SPACE_MEMIO,
+	BUS_SPACE_PIO
+} bus_space_tag_t;
+
+typedef union {
+	u_long		  ioport;
+	volatile uint8_t __iomem *maddr;
+} bus_space_handle_t;
+
+typedef struct bus_dma_segment
+{
+	dma_addr_t	ds_addr;
+	bus_size_t	ds_len;
+} bus_dma_segment_t;
+
+struct ahd_linux_dma_tag
+{
+	bus_size_t	alignment;
+	bus_size_t	boundary;
+	bus_size_t	maxsize;
+};
+typedef struct ahd_linux_dma_tag* bus_dma_tag_t;
+
+struct ahd_linux_dmamap
+{
+	dma_addr_t	bus_addr;
+};
+typedef struct ahd_linux_dmamap* bus_dmamap_t;
+
+typedef int bus_dma_filter_t(void*, dma_addr_t);
+typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
+
+#define BUS_DMA_WAITOK		0x0
+#define BUS_DMA_NOWAIT		0x1
+#define BUS_DMA_ALLOCNOW	0x2
+#define BUS_DMA_LOAD_SEGS	0x4	/*
+					 * Argument is an S/G list not
+					 * a single buffer.
+					 */
+
+#define BUS_SPACE_MAXADDR	0xFFFFFFFF
+#define BUS_SPACE_MAXADDR_32BIT	0xFFFFFFFF
+#define BUS_SPACE_MAXSIZE_32BIT	0xFFFFFFFF
+
+int	ahd_dma_tag_create(struct ahd_softc *, bus_dma_tag_t /*parent*/,
+			   bus_size_t /*alignment*/, bus_size_t /*boundary*/,
+			   dma_addr_t /*lowaddr*/, dma_addr_t /*highaddr*/,
+			   bus_dma_filter_t*/*filter*/, void */*filterarg*/,
+			   bus_size_t /*maxsize*/, int /*nsegments*/,
+			   bus_size_t /*maxsegsz*/, int /*flags*/,
+			   bus_dma_tag_t */*dma_tagp*/);
+
+void	ahd_dma_tag_destroy(struct ahd_softc *, bus_dma_tag_t /*tag*/);
+
+int	ahd_dmamem_alloc(struct ahd_softc *, bus_dma_tag_t /*dmat*/,
+			 void** /*vaddr*/, int /*flags*/,
+			 bus_dmamap_t* /*mapp*/);
+
+void	ahd_dmamem_free(struct ahd_softc *, bus_dma_tag_t /*dmat*/,
+			void* /*vaddr*/, bus_dmamap_t /*map*/);
+
+void	ahd_dmamap_destroy(struct ahd_softc *, bus_dma_tag_t /*tag*/,
+			   bus_dmamap_t /*map*/);
+
+int	ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t /*dmat*/,
+			bus_dmamap_t /*map*/, void * /*buf*/,
+			bus_size_t /*buflen*/, bus_dmamap_callback_t *,
+			void */*callback_arg*/, int /*flags*/);
+
+int	ahd_dmamap_unload(struct ahd_softc *, bus_dma_tag_t, bus_dmamap_t);
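+
+/*
+ * Rough usage sketch (an assumption based on the BSD busdma interface these
+ * wrappers emulate, not a verbatim excerpt from the driver): the core first
+ * creates a tag describing its alignment and size limits, allocates DMA-safe
+ * memory against that tag, and then loads buffers to obtain bus addresses
+ * through the callback:
+ *
+ *	ahd_dma_tag_create(ahd, parent_tag, alignment, boundary,
+ *			   lowaddr, highaddr, NULL, NULL,
+ *			   maxsize, nsegments, maxsegsz, flags, &dmat);
+ *	ahd_dmamem_alloc(ahd, dmat, &vaddr, BUS_DMA_NOWAIT, &map);
+ *	ahd_dmamap_load(ahd, dmat, map, vaddr, size, callback, cb_arg, 0);
+ */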
+
+/*
+ * Operations performed by ahd_dmamap_sync().
+ */
+#define BUS_DMASYNC_PREREAD	0x01	/* pre-read synchronization */
+#define BUS_DMASYNC_POSTREAD	0x02	/* post-read synchronization */
+#define BUS_DMASYNC_PREWRITE	0x04	/* pre-write synchronization */
+#define BUS_DMASYNC_POSTWRITE	0x08	/* post-write synchronization */
+
+/*
+ * XXX
+ * ahd_dmamap_sync is only used on buffers allocated with
+ * the pci_alloc_consistent() API.  Although I'm not sure how
+ * this works on architectures with a write buffer, Linux does
+ * not have an API to sync "coherent" memory.  Perhaps we need
+ * to do an mb()?
+ */
+#define ahd_dmamap_sync(ahd, dma_tag, dmamap, offset, len, op)
+
+/************************** Timer DataStructures ******************************/
+typedef struct timer_list ahd_timer_t;
+
+/********************************** Includes **********************************/
+#ifdef CONFIG_AIC79XX_REG_PRETTY_PRINT
+#define AIC_DEBUG_REGISTERS 1
+#else
+#define AIC_DEBUG_REGISTERS 0
+#endif
+#include "aic79xx.h"
+
+/***************************** Timer Facilities *******************************/
+#define ahd_timer_init init_timer
+#define ahd_timer_stop del_timer_sync
+typedef void ahd_linux_callback_t (u_long);  
+static __inline void ahd_timer_reset(ahd_timer_t *timer, u_int usec,
+				     ahd_callback_t *func, void *arg);
+static __inline void ahd_scb_timer_reset(struct scb *scb, u_int usec);
+
+static __inline void
+ahd_timer_reset(ahd_timer_t *timer, u_int usec, ahd_callback_t *func, void *arg)
+{
+	struct ahd_softc *ahd;
+
+	ahd = (struct ahd_softc *)arg;
+	del_timer(timer);
+	timer->data = (u_long)arg;
+	timer->expires = jiffies + (usec * HZ)/1000000;
+	timer->function = (ahd_linux_callback_t*)func;
+	add_timer(timer);
+}
+
+static __inline void
+ahd_scb_timer_reset(struct scb *scb, u_int usec)
+{
+	mod_timer(&scb->io_ctx->eh_timeout, jiffies + (usec * HZ)/1000000);
+}
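+
+/*
+ * Note: both helpers above compute the expiration as
+ * jiffies + (usec * HZ)/1000000.  As an illustration, on a HZ=1000 kernel a
+ * 2,000,000us (2 second) timeout expires 2000 jiffies from now, while
+ * sub-millisecond values round down to zero and fire on the next timer tick.
+ */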
+
+/***************************** SMP support ************************************/
+#include <linux/spinlock.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) || defined(SCSI_HAS_HOST_LOCK))
+#define AHD_SCSI_HAS_HOST_LOCK 1
+#else
+#define AHD_SCSI_HAS_HOST_LOCK 0
+#endif
+
+#define AIC79XX_DRIVER_VERSION "1.3.11"
+
+/**************************** Front End Queues ********************************/
+/*
+ * Data structure used to cast the Linux struct scsi_cmnd to something
+ * that allows us to use the queue macros.  The Linux structure has
+ * plenty of space to hold the links fields as required by the queue
+ * macros, but the queue macros require them to have the correct type.
+ */
+struct ahd_cmd_internal {
+	/* Area owned by the Linux scsi layer. */
+	uint8_t	private[offsetof(struct scsi_cmnd, SCp.Status)];
+	union {
+		STAILQ_ENTRY(ahd_cmd)	ste;
+		LIST_ENTRY(ahd_cmd)	le;
+		TAILQ_ENTRY(ahd_cmd)	tqe;
+	} links;
+	uint32_t			end;
+};
+
+struct ahd_cmd {
+	union {
+		struct ahd_cmd_internal	icmd;
+		struct scsi_cmnd	scsi_cmd;
+	} un;
+};
+
+#define acmd_icmd(cmd) ((cmd)->un.icmd)
+#define acmd_scsi_cmd(cmd) ((cmd)->un.scsi_cmd)
+#define acmd_links un.icmd.links
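+
+/*
+ * Illustrative sketch (not a verbatim excerpt from the OSM code): an incoming
+ * struct scsi_cmnd can be cast to struct ahd_cmd so the BSD queue macros can
+ * link it onto one of the front end queues, for example:
+ *
+ *	struct ahd_cmd *acmd = (struct ahd_cmd *)cmd;
+ *
+ *	TAILQ_INSERT_TAIL(&ahd->platform_data->completeq,
+ *			  acmd, acmd_links.tqe);
+ */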
+
+/*************************** Device Data Structures ***************************/
+/*
+ * A per probed device structure used to deal with some error recovery
+ * scenarios that the Linux mid-layer code just doesn't know how to
+ * handle.  The structure allocated for a device only becomes persistent
+ * after a successfully completed inquiry command to the target when
+ * that inquiry data indicates a lun is present.
+ */
+TAILQ_HEAD(ahd_busyq, ahd_cmd);
+typedef enum {
+	AHD_DEV_UNCONFIGURED	 = 0x01,
+	AHD_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
+	AHD_DEV_TIMER_ACTIVE	 = 0x04, /* Our timer is active */
+	AHD_DEV_ON_RUN_LIST	 = 0x08, /* Queued to be run later */
+	AHD_DEV_Q_BASIC		 = 0x10, /* Allow basic device queuing */
+	AHD_DEV_Q_TAGGED	 = 0x20, /* Allow full SCSI2 command queueing */
+	AHD_DEV_PERIODIC_OTAG	 = 0x40, /* Send OTAG to prevent starvation */
+	AHD_DEV_SLAVE_CONFIGURED = 0x80	 /* slave_configure() has been called */
+} ahd_linux_dev_flags;
+
+struct ahd_linux_target;
+struct ahd_linux_device {
+	TAILQ_ENTRY(ahd_linux_device) links;
+	struct			ahd_busyq busyq;
+
+	/*
+	 * The number of transactions currently
+	 * queued to the device.
+	 */
+	int			active;
+
+	/*
+	 * The currently allowed number of 
+	 * transactions that can be queued to
+	 * the device.  Must be signed for
+	 * conversion from tagged to untagged
+	 * mode where the device may have more
+	 * than one outstanding active transaction.
+	 */
+	int			openings;
+
+	/*
+	 * A positive count indicates that this
+	 * device's queue is halted.
+	 */
+	u_int			qfrozen;
+	
+	/*
+	 * Cumulative command counter.
+	 */
+	u_long			commands_issued;
+
+	/*
+	 * The number of tagged transactions when
+	 * running at our current opening level
+	 * that have been successfully received by
+	 * this device since the last QUEUE FULL.
+	 */
+	u_int			tag_success_count;
+#define AHD_TAG_SUCCESS_INTERVAL 50
+
+	ahd_linux_dev_flags	flags;
+
+	/*
+	 * Per device timer.
+	 */
+	struct timer_list	timer;
+
+	/*
+	 * The high limit for the tags variable.
+	 */
+	u_int			maxtags;
+
+	/*
+	 * The computed number of tags outstanding
+	 * at the time of the last QUEUE FULL event.
+	 */
+	u_int			tags_on_last_queuefull;
+
+	/*
+	 * How many times we have seen a queue full
+	 * with the same number of tags.  This is used
+	 * to stop our adaptive queue depth algorithm
+	 * on devices with a fixed number of tags.
+	 */
+	u_int			last_queuefull_same_count;
+#define AHD_LOCK_TAGS_COUNT 50
+
+	/*
+	 * How many transactions have been queued
+	 * without the device going idle.  We use
+	 * this statistic to determine when to issue
+	 * an ordered tag to prevent transaction
+	 * starvation.  This statistic is only updated
+	 * if the AHD_DEV_PERIODIC_OTAG flag is set
+	 * on this device.
+	 */
+	u_int			commands_since_idle_or_otag;
+#define AHD_OTAG_THRESH	500
+
+	int			lun;
+	Scsi_Device	       *scsi_device;
+	struct			ahd_linux_target *target;
+};
+
+typedef enum {
+	AHD_DV_REQUIRED		 = 0x01,
+	AHD_INQ_VALID		 = 0x02,
+	AHD_BASIC_DV		 = 0x04,
+	AHD_ENHANCED_DV		 = 0x08
+} ahd_linux_targ_flags;
+
+/* DV States */
+typedef enum {
+	AHD_DV_STATE_EXIT = 0,
+	AHD_DV_STATE_INQ_SHORT_ASYNC,
+	AHD_DV_STATE_INQ_ASYNC,
+	AHD_DV_STATE_INQ_ASYNC_VERIFY,
+	AHD_DV_STATE_TUR,
+	AHD_DV_STATE_REBD,
+	AHD_DV_STATE_INQ_VERIFY,
+	AHD_DV_STATE_WEB,
+	AHD_DV_STATE_REB,
+	AHD_DV_STATE_SU,
+	AHD_DV_STATE_BUSY
+} ahd_dv_state;
+
+struct ahd_linux_target {
+	struct ahd_linux_device	 *devices[AHD_NUM_LUNS];
+	int			  channel;
+	int			  target;
+	int			  refcount;
+	struct ahd_transinfo	  last_tinfo;
+	struct ahd_softc	 *ahd;
+	ahd_linux_targ_flags	  flags;
+	struct scsi_inquiry_data *inq_data;
+	/*
+	 * The next "fallback" period to use for narrow/wide transfers.
+	 */
+	uint8_t			  dv_next_narrow_period;
+	uint8_t			  dv_next_wide_period;
+	uint8_t			  dv_max_width;
+	uint8_t			  dv_max_ppr_options;
+	uint8_t			  dv_last_ppr_options;
+	u_int			  dv_echo_size;
+	ahd_dv_state		  dv_state;
+	u_int			  dv_state_retry;
+	uint8_t			 *dv_buffer;
+	uint8_t			 *dv_buffer1;
+
+	/*
+	 * Cumulative counter of errors.
+	 */
+	u_long			errors_detected;
+	u_long			cmds_since_error;
+};
+
+/********************* Definitions Required by the Core ***********************/
+/*
+ * Number of SG segments we require.  So long as the S/G segments for
+ * a particular transaction are allocated in a physically contiguous
+ * manner and are allocated below 4GB, the number of S/G segments is
+ * unrestricted.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+/*
+ * We dynamically adjust the number of segments in pre-2.5 kernels to
+ * avoid fragmentation issues in the SCSI mid-layer's private memory
+ * allocator.  See aic79xx_osm.c ahd_linux_size_nseg() for details.
+ */
+extern u_int ahd_linux_nseg;
+#define	AHD_NSEG ahd_linux_nseg
+#define	AHD_LINUX_MIN_NSEG 64
+#else
+#define	AHD_NSEG 128
+#endif
+
+/*
+ * Per-SCB OSM storage.
+ */
+typedef enum {
+	AHD_SCB_UP_EH_SEM = 0x1
+} ahd_linux_scb_flags;
+
+struct scb_platform_data {
+	struct ahd_linux_device	*dev;
+	dma_addr_t		 buf_busaddr;
+	uint32_t		 xfer_len;
+	uint32_t		 sense_resid;	/* Auto-Sense residual */
+	ahd_linux_scb_flags	 flags;
+};
+
+/*
+ * Define a structure used for each host adapter.  All members are
+ * aligned on a boundary >= the size of the member to honor the
+ * alignment restrictions of the various platforms supported by
+ * this driver.
+ */
+typedef enum {
+	AHD_DV_WAIT_SIMQ_EMPTY	 = 0x01,
+	AHD_DV_WAIT_SIMQ_RELEASE = 0x02,
+	AHD_DV_ACTIVE		 = 0x04,
+	AHD_DV_SHUTDOWN		 = 0x08,
+	AHD_RUN_CMPLT_Q_TIMER	 = 0x10
+} ahd_linux_softc_flags;
+
+TAILQ_HEAD(ahd_completeq, ahd_cmd);
+
+struct ahd_platform_data {
+	/*
+	 * Fields accessed from interrupt context.
+	 */
+	struct ahd_linux_target *targets[AHD_NUM_TARGETS]; 
+	TAILQ_HEAD(, ahd_linux_device) device_runq;
+	struct ahd_completeq	 completeq;
+
+	spinlock_t		 spin_lock;
+	struct tasklet_struct	 runq_tasklet;
+	u_int			 qfrozen;
+	pid_t			 dv_pid;
+	struct timer_list	 completeq_timer;
+	struct timer_list	 reset_timer;
+	struct timer_list	 stats_timer;
+	struct semaphore	 eh_sem;
+	struct semaphore	 dv_sem;
+	struct semaphore	 dv_cmd_sem;	/* XXX This needs to be in
+						 * the target struct
+						 */
+	struct scsi_device	*dv_scsi_dev;
+	struct Scsi_Host        *host;		/* pointer to scsi host */
+#define AHD_LINUX_NOIRQ	((uint32_t)~0)
+	uint32_t		 irq;		/* IRQ for this adapter */
+	uint32_t		 bios_address;
+	uint32_t		 mem_busaddr;	/* Mem Base Addr */
+	uint64_t		 hw_dma_mask;
+	ahd_linux_softc_flags	 flags;
+};
+
+/************************** OS Utility Wrappers *******************************/
+#define printf printk
+#define M_NOWAIT GFP_ATOMIC
+#define M_WAITOK 0
+#define malloc(size, type, flags) kmalloc(size, flags)
+#define free(ptr, type) kfree(ptr)
+
+static __inline void ahd_delay(long);
+static __inline void
+ahd_delay(long usec)
+{
+	/*
+	 * udelay on Linux can have problems for
+	 * multi-millisecond waits.  Wait at most
+	 * 1024us per call.
+	 */
+	while (usec > 0) {
+		udelay(usec > 1024 ? 1024 : usec);
+		usec -= 1024;
+	}
+}
+
+
+/***************************** Low Level I/O **********************************/
+static __inline uint8_t ahd_inb(struct ahd_softc * ahd, long port);
+static __inline uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port);
+static __inline void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
+static __inline void ahd_outw_atomic(struct ahd_softc * ahd,
+				     long port, uint16_t val);
+static __inline void ahd_outsb(struct ahd_softc * ahd, long port,
+			       uint8_t *, int count);
+static __inline void ahd_insb(struct ahd_softc * ahd, long port,
+			       uint8_t *, int count);
+
+static __inline uint8_t
+ahd_inb(struct ahd_softc * ahd, long port)
+{
+	uint8_t x;
+
+	if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+		x = readb(ahd->bshs[0].maddr + port);
+	} else {
+		x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
+	}
+	mb();
+	return (x);
+}
+
+static __inline uint16_t
+ahd_inw_atomic(struct ahd_softc * ahd, long port)
+{
+	uint16_t x;
+
+	if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+		x = readw(ahd->bshs[0].maddr + port);
+	} else {
+		x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
+	}
+	mb();
+	return (x);
+}
+
+static __inline void
+ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
+{
+	if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+		writeb(val, ahd->bshs[0].maddr + port);
+	} else {
+		outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
+	}
+	mb();
+}
+
+static __inline void
+ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
+{
+	if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+		writew(val, ahd->bshs[0].maddr + port);
+	} else {
+		outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
+	}
+	mb();
+}
+
+static __inline void
+ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
+{
+	int i;
+
+	/*
+	 * There is probably a more efficient way to do this on Linux
+	 * but we don't use this for anything speed critical and this
+	 * should work.
+	 */
+	for (i = 0; i < count; i++)
+		ahd_outb(ahd, port, *array++);
+}
+
+static __inline void
+ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
+{
+	int i;
+
+	/*
+	 * There is probably a more efficient way to do this on Linux
+	 * but we don't use this for anything speed critical and this
+	 * should work.
+	 */
+	for (i = 0; i < count; i++)
+		*array++ = ahd_inb(ahd, port);
+}
+
+/**************************** Initialization **********************************/
+int		ahd_linux_register_host(struct ahd_softc *,
+					Scsi_Host_Template *);
+
+uint64_t	ahd_linux_get_memsize(void);
+
+/*************************** Pretty Printing **********************************/
+struct info_str {
+	char *buffer;
+	int length;
+	off_t offset;
+	int pos;
+};
+
+void	ahd_format_transinfo(struct info_str *info,
+			     struct ahd_transinfo *tinfo);
+
+/******************************** Locking *************************************/
+/* Lock protecting internal data structures */
+static __inline void ahd_lockinit(struct ahd_softc *);
+static __inline void ahd_lock(struct ahd_softc *, unsigned long *flags);
+static __inline void ahd_unlock(struct ahd_softc *, unsigned long *flags);
+
+/* Lock acquisition and release of the above lock in midlayer entry points. */
+static __inline void ahd_midlayer_entrypoint_lock(struct ahd_softc *,
+						  unsigned long *flags);
+static __inline void ahd_midlayer_entrypoint_unlock(struct ahd_softc *,
+						    unsigned long *flags);
+
+/* Lock held during command completion to the upper layer */
+static __inline void ahd_done_lockinit(struct ahd_softc *);
+static __inline void ahd_done_lock(struct ahd_softc *, unsigned long *flags);
+static __inline void ahd_done_unlock(struct ahd_softc *, unsigned long *flags);
+
+/* Lock held during ahd_list manipulation and ahd softc frees */
+extern spinlock_t ahd_list_spinlock;
+static __inline void ahd_list_lockinit(void);
+static __inline void ahd_list_lock(unsigned long *flags);
+static __inline void ahd_list_unlock(unsigned long *flags);
+
+static __inline void
+ahd_lockinit(struct ahd_softc *ahd)
+{
+	spin_lock_init(&ahd->platform_data->spin_lock);
+}
+
+static __inline void
+ahd_lock(struct ahd_softc *ahd, unsigned long *flags)
+{
+	spin_lock_irqsave(&ahd->platform_data->spin_lock, *flags);
+}
+
+static __inline void
+ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
+{
+	spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags);
+}
+
+static __inline void
+ahd_midlayer_entrypoint_lock(struct ahd_softc *ahd, unsigned long *flags)
+{
+	/*
+	 * In 2.5.X and some 2.4.X versions, the midlayer takes our
+	 * lock just before calling us, so we avoid locking again.
+	 * For other kernel versions, the io_request_lock is taken
+	 * just before our entry point is called.  In this case, we
+	 * trade the io_request_lock for our per-softc lock.
+	 */
+#if AHD_SCSI_HAS_HOST_LOCK == 0
+	spin_unlock(&io_request_lock);
+	spin_lock(&ahd->platform_data->spin_lock);
+#endif
+}
+
+static __inline void
+ahd_midlayer_entrypoint_unlock(struct ahd_softc *ahd, unsigned long *flags)
+{
+#if AHD_SCSI_HAS_HOST_LOCK == 0
+	spin_unlock(&ahd->platform_data->spin_lock);
+	spin_lock(&io_request_lock);
+#endif
+}
+
+static __inline void
+ahd_done_lockinit(struct ahd_softc *ahd)
+{
+	/*
+	 * In 2.5.X, our own lock is held during completions.
+	 * In previous versions, the io_request_lock is used.
+	 * In either case, the lock is initialized elsewhere, so there
+	 * is nothing for us to do here.
+	 */
+}
+
+static __inline void
+ahd_done_lock(struct ahd_softc *ahd, unsigned long *flags)
+{
+#if AHD_SCSI_HAS_HOST_LOCK == 0
+	spin_lock(&io_request_lock);
+#endif
+}
+
+static __inline void
+ahd_done_unlock(struct ahd_softc *ahd, unsigned long *flags)
+{
+#if AHD_SCSI_HAS_HOST_LOCK == 0
+	spin_unlock(&io_request_lock);
+#endif
+}
+
+static __inline void
+ahd_list_lockinit(void)
+{
+	spin_lock_init(&ahd_list_spinlock);
+}
+
+static __inline void
+ahd_list_lock(unsigned long *flags)
+{
+	spin_lock_irqsave(&ahd_list_spinlock, *flags);
+}
+
+static __inline void
+ahd_list_unlock(unsigned long *flags)
+{
+	spin_unlock_irqrestore(&ahd_list_spinlock, *flags);
+}
+
+/******************************* PCI Definitions ******************************/
+/*
+ * PCIM_xxx: mask to locate subfield in register
+ * PCIR_xxx: config register offset
+ * PCIC_xxx: device class
+ * PCIS_xxx: device subclass
+ * PCIP_xxx: device programming interface
+ * PCIV_xxx: PCI vendor ID (only required to fixup ancient devices)
+ * PCID_xxx: device ID
+ */
+#define PCIR_DEVVENDOR		0x00
+#define PCIR_VENDOR		0x00
+#define PCIR_DEVICE		0x02
+#define PCIR_COMMAND		0x04
+#define PCIM_CMD_PORTEN		0x0001
+#define PCIM_CMD_MEMEN		0x0002
+#define PCIM_CMD_BUSMASTEREN	0x0004
+#define PCIM_CMD_MWRICEN	0x0010
+#define PCIM_CMD_PERRESPEN	0x0040
+#define	PCIM_CMD_SERRESPEN	0x0100
+#define PCIR_STATUS		0x06
+#define PCIR_REVID		0x08
+#define PCIR_PROGIF		0x09
+#define PCIR_SUBCLASS		0x0a
+#define PCIR_CLASS		0x0b
+#define PCIR_CACHELNSZ		0x0c
+#define PCIR_LATTIMER		0x0d
+#define PCIR_HEADERTYPE		0x0e
+#define PCIM_MFDEV		0x80
+#define PCIR_BIST		0x0f
+#define PCIR_CAP_PTR		0x34
+
+/* config registers for header type 0 devices */
+#define PCIR_MAPS	0x10
+#define PCIR_SUBVEND_0	0x2c
+#define PCIR_SUBDEV_0	0x2e
+
+/****************************** PCI-X definitions *****************************/
+#define PCIXR_COMMAND	0x96
+#define PCIXR_DEVADDR	0x98
+#define PCIXM_DEVADDR_FNUM	0x0003	/* Function Number */
+#define PCIXM_DEVADDR_DNUM	0x00F8	/* Device Number */
+#define PCIXM_DEVADDR_BNUM	0xFF00	/* Bus Number */
+#define PCIXR_STATUS	0x9A
+#define PCIXM_STATUS_64BIT	0x0001	/* Active 64bit connection to device. */
+#define PCIXM_STATUS_133CAP	0x0002	/* Device is 133MHz capable */
+#define PCIXM_STATUS_SCDISC	0x0004	/* Split Completion Discarded */
+#define PCIXM_STATUS_UNEXPSC	0x0008	/* Unexpected Split Completion */
+#define PCIXM_STATUS_CMPLEXDEV	0x0010	/* Device Complexity (set == bridge) */
+#define PCIXM_STATUS_MAXMRDBC	0x0060	/* Maximum Burst Read Count */
+#define PCIXM_STATUS_MAXSPLITS	0x0380	/* Maximum Split Transactions */
+#define PCIXM_STATUS_MAXCRDS	0x1C00	/* Maximum Cumulative Read Size */
+#define PCIXM_STATUS_RCVDSCEM	0x2000	/* Received a Split Comp w/Error msg */
+
+extern struct pci_driver aic79xx_pci_driver;
+
+typedef enum
+{
+	AHD_POWER_STATE_D0,
+	AHD_POWER_STATE_D1,
+	AHD_POWER_STATE_D2,
+	AHD_POWER_STATE_D3
+} ahd_power_state;
+
+void ahd_power_state_change(struct ahd_softc *ahd,
+			    ahd_power_state new_state);
+
+/******************************* PCI Routines *********************************/
+int			 ahd_linux_pci_init(void);
+void			 ahd_linux_pci_exit(void);
+int			 ahd_pci_map_registers(struct ahd_softc *ahd);
+int			 ahd_pci_map_int(struct ahd_softc *ahd);
+
+static __inline uint32_t ahd_pci_read_config(ahd_dev_softc_t pci,
+					     int reg, int width);
+
+static __inline uint32_t
+ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
+{
+	switch (width) {
+	case 1:
+	{
+		uint8_t retval;
+
+		pci_read_config_byte(pci, reg, &retval);
+		return (retval);
+	}
+	case 2:
+	{
+		uint16_t retval;
+		pci_read_config_word(pci, reg, &retval);
+		return (retval);
+	}
+	case 4:
+	{
+		uint32_t retval;
+		pci_read_config_dword(pci, reg, &retval);
+		return (retval);
+	}
+	default:
+		panic("ahd_pci_read_config: Read size too big");
+		/* NOTREACHED */
+		return (0);
+	}
+}
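+
+/*
+ * For example, the probe code reads the 16-bit vendor ID with
+ * ahd_pci_read_config(pci, PCIR_VENDOR, 2).
+ */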
+
+static __inline void ahd_pci_write_config(ahd_dev_softc_t pci,
+					  int reg, uint32_t value,
+					  int width);
+
+static __inline void
+ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
+{
+	switch (width) {
+	case 1:
+		pci_write_config_byte(pci, reg, value);
+		break;
+	case 2:
+		pci_write_config_word(pci, reg, value);
+		break;
+	case 4:
+		pci_write_config_dword(pci, reg, value);
+		break;
+	default:
+		panic("ahd_pci_write_config: Write size too big");
+		/* NOTREACHED */
+	}
+}
+
+static __inline int ahd_get_pci_function(ahd_dev_softc_t);
+static __inline int
+ahd_get_pci_function(ahd_dev_softc_t pci)
+{
+	return (PCI_FUNC(pci->devfn));
+}
+
+static __inline int ahd_get_pci_slot(ahd_dev_softc_t);
+static __inline int
+ahd_get_pci_slot(ahd_dev_softc_t pci)
+{
+	return (PCI_SLOT(pci->devfn));
+}
+
+static __inline int ahd_get_pci_bus(ahd_dev_softc_t);
+static __inline int
+ahd_get_pci_bus(ahd_dev_softc_t pci)
+{
+	return (pci->bus->number);
+}
+
+static __inline void ahd_flush_device_writes(struct ahd_softc *);
+static __inline void
+ahd_flush_device_writes(struct ahd_softc *ahd)
+{
+	/* XXX Is this sufficient for all architectures??? */
+	ahd_inb(ahd, INTSTAT);
+}
+
+/**************************** Proc FS Support *********************************/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+int	ahd_linux_proc_info(char *, char **, off_t, int, int, int);
+#else
+int	ahd_linux_proc_info(struct Scsi_Host *, char *, char **,
+			    off_t, int, int);
+#endif
+
+/*************************** Domain Validation ********************************/
+#define AHD_DV_CMD(cmd) ((cmd)->scsi_done == ahd_linux_dv_complete)
+#define AHD_DV_SIMQ_FROZEN(ahd)					\
+	((((ahd)->platform_data->flags & AHD_DV_ACTIVE) != 0)	\
+	 && (ahd)->platform_data->qfrozen == 1)
+
+/*********************** Transaction Access Wrappers **************************/
+static __inline void ahd_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t);
+static __inline void ahd_set_transaction_status(struct scb *, uint32_t);
+static __inline void ahd_cmd_set_scsi_status(Scsi_Cmnd *, uint32_t);
+static __inline void ahd_set_scsi_status(struct scb *, uint32_t);
+static __inline uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd);
+static __inline uint32_t ahd_get_transaction_status(struct scb *);
+static __inline uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd);
+static __inline uint32_t ahd_get_scsi_status(struct scb *);
+static __inline void ahd_set_transaction_tag(struct scb *, int, u_int);
+static __inline u_long ahd_get_transfer_length(struct scb *);
+static __inline int ahd_get_transfer_dir(struct scb *);
+static __inline void ahd_set_residual(struct scb *, u_long);
+static __inline void ahd_set_sense_residual(struct scb *scb, u_long resid);
+static __inline u_long ahd_get_residual(struct scb *);
+static __inline u_long ahd_get_sense_residual(struct scb *);
+static __inline int ahd_perform_autosense(struct scb *);
+static __inline uint32_t ahd_get_sense_bufsize(struct ahd_softc *,
+					       struct scb *);
+static __inline void ahd_notify_xfer_settings_change(struct ahd_softc *,
+						     struct ahd_devinfo *);
+static __inline void ahd_platform_scb_free(struct ahd_softc *ahd,
+					   struct scb *scb);
+static __inline void ahd_freeze_scb(struct scb *scb);
+
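+/*
+ * These wrappers treat the Linux cmd->result word as two fields: the CAM
+ * transaction status lives in bits 16 and up (masked with CAM_STATUS_MASK),
+ * and the SCSI status lives in the low 16 bits.
+ */
+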
+static __inline
+void ahd_cmd_set_transaction_status(Scsi_Cmnd *cmd, uint32_t status)
+{
+	cmd->result &= ~(CAM_STATUS_MASK << 16);
+	cmd->result |= status << 16;
+}
+
+static __inline
+void ahd_set_transaction_status(struct scb *scb, uint32_t status)
+{
+	ahd_cmd_set_transaction_status(scb->io_ctx,status);
+}
+
+static __inline
+void ahd_cmd_set_scsi_status(Scsi_Cmnd *cmd, uint32_t status)
+{
+	cmd->result &= ~0xFFFF;
+	cmd->result |= status;
+}
+
+static __inline
+void ahd_set_scsi_status(struct scb *scb, uint32_t status)
+{
+	ahd_cmd_set_scsi_status(scb->io_ctx, status);
+}
+
+static __inline
+uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd)
+{
+	return ((cmd->result >> 16) & CAM_STATUS_MASK);
+}
+
+static __inline
+uint32_t ahd_get_transaction_status(struct scb *scb)
+{
+	return (ahd_cmd_get_transaction_status(scb->io_ctx));
+}
+
+static __inline
+uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd)
+{
+	return (cmd->result & 0xFFFF);
+}
+
+static __inline
+uint32_t ahd_get_scsi_status(struct scb *scb)
+{
+	return (ahd_cmd_get_scsi_status(scb->io_ctx));
+}
+
+static __inline
+void ahd_set_transaction_tag(struct scb *scb, int enabled, u_int type)
+{
+	/*
+	 * Nothing to do for Linux as the incoming transaction
+	 * has no concept of tagged/non-tagged, etc.
+	 */
+}
+
+static __inline
+u_long ahd_get_transfer_length(struct scb *scb)
+{
+	return (scb->platform_data->xfer_len);
+}
+
+static __inline
+int ahd_get_transfer_dir(struct scb *scb)
+{
+	return (scb->io_ctx->sc_data_direction);
+}
+
+static __inline
+void ahd_set_residual(struct scb *scb, u_long resid)
+{
+	scb->io_ctx->resid = resid;
+}
+
+static __inline
+void ahd_set_sense_residual(struct scb *scb, u_long resid)
+{
+	scb->platform_data->sense_resid = resid;
+}
+
+static __inline
+u_long ahd_get_residual(struct scb *scb)
+{
+	return (scb->io_ctx->resid);
+}
+
+static __inline
+u_long ahd_get_sense_residual(struct scb *scb)
+{
+	return (scb->platform_data->sense_resid);
+}
+
+static __inline
+int ahd_perform_autosense(struct scb *scb)
+{
+	/*
+	 * We always perform autosense in Linux.
+	 * On other platforms this is set on a
+	 * per-transaction basis.
+	 */
+	return (1);
+}
+
+static __inline uint32_t
+ahd_get_sense_bufsize(struct ahd_softc *ahd, struct scb *scb)
+{
+	return (sizeof(struct scsi_sense_data));
+}
+
+static __inline void
+ahd_notify_xfer_settings_change(struct ahd_softc *ahd,
+				struct ahd_devinfo *devinfo)
+{
+	/* Nothing to do here for linux */
+}
+
+static __inline void
+ahd_platform_scb_free(struct ahd_softc *ahd, struct scb *scb)
+{
+	ahd->flags &= ~AHD_RESOURCE_SHORTAGE;
+}
+
+int	ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg);
+void	ahd_platform_free(struct ahd_softc *ahd);
+void	ahd_platform_init(struct ahd_softc *ahd);
+void	ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb);
+void	ahd_freeze_simq(struct ahd_softc *ahd);
+void	ahd_release_simq(struct ahd_softc *ahd);
+
+static __inline void
+ahd_freeze_scb(struct scb *scb)
+{
+	if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
+                scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
+                scb->platform_data->dev->qfrozen++;
+        }
+}
+
+void	ahd_platform_set_tags(struct ahd_softc *ahd,
+			      struct ahd_devinfo *devinfo, ahd_queue_alg);
+int	ahd_platform_abort_scbs(struct ahd_softc *ahd, int target,
+				char channel, int lun, u_int tag,
+				role_t role, uint32_t status);
+irqreturn_t
+	ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs);
+void	ahd_platform_flushwork(struct ahd_softc *ahd);
+int	ahd_softc_comp(struct ahd_softc *, struct ahd_softc *);
+void	ahd_done(struct ahd_softc*, struct scb*);
+void	ahd_send_async(struct ahd_softc *, char channel,
+		       u_int target, u_int lun, ac_code, void *);
+void	ahd_print_path(struct ahd_softc *, struct scb *);
+void	ahd_platform_dump_card_state(struct ahd_softc *ahd);
+
+#ifdef CONFIG_PCI
+#define AHD_PCI_CONFIG 1
+#else
+#define AHD_PCI_CONFIG 0
+#endif
+#define bootverbose aic79xx_verbose
+extern uint32_t aic79xx_verbose;
+
+#endif /* _AIC79XX_LINUX_H_ */
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
new file mode 100644
index 0000000..91daf0c
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -0,0 +1,368 @@
+/*
+ * Linux driver attachment glue for PCI based U320 controllers.
+ *
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm_pci.c#25 $
+ */
+
+#include "aic79xx_osm.h"
+#include "aic79xx_inline.h"
+#include "aic79xx_pci.h"
+
+static int	ahd_linux_pci_dev_probe(struct pci_dev *pdev,
+					const struct pci_device_id *ent);
+static int	ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd,
+						 u_long *base, u_long *base2);
+static int	ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
+						 u_long *bus_addr,
+						 uint8_t __iomem **maddr);
+static void	ahd_linux_pci_dev_remove(struct pci_dev *pdev);
+
+/* Define the macro locally since it differs between chip classes. */
+#define ID(x)            \
+	ID2C(x),         \
+	ID2C(IDIROC(x))
+
+static struct pci_device_id ahd_linux_pci_id_table[] = {
+	/* aic7901 based controllers */
+	ID(ID_AHA_29320A),
+	ID(ID_AHA_29320ALP),
+	/* aic7902 based controllers */
+	ID(ID_AHA_29320),
+	ID(ID_AHA_29320B),
+	ID(ID_AHA_29320LP),
+	ID(ID_AHA_39320),
+	ID(ID_AHA_39320_B),
+	ID(ID_AHA_39320A),
+	ID(ID_AHA_39320D),
+	ID(ID_AHA_39320D_HP),
+	ID(ID_AHA_39320D_B),
+	ID(ID_AHA_39320D_B_HP),
+	/* Generic chip probes for devices we don't know exactly. */
+	ID16(ID_AIC7901 & ID_9005_GENERIC_MASK),
+	ID(ID_AIC7901A & ID_DEV_VENDOR_MASK),
+	ID16(ID_AIC7902 & ID_9005_GENERIC_MASK),
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, ahd_linux_pci_id_table);
+
+struct pci_driver aic79xx_pci_driver = {
+	.name		= "aic79xx",
+	.probe		= ahd_linux_pci_dev_probe,
+	.remove		= ahd_linux_pci_dev_remove,
+	.id_table	= ahd_linux_pci_id_table
+};
+
+static void
+ahd_linux_pci_dev_remove(struct pci_dev *pdev)
+{
+	struct ahd_softc *ahd;
+	u_long l;
+
+	/*
+	 * We should be able to just perform
+	 * the free directly, but check our
+	 * list for extra sanity.
+	 */
+	ahd_list_lock(&l);
+	ahd = ahd_find_softc((struct ahd_softc *)pci_get_drvdata(pdev));
+	if (ahd != NULL) {
+		u_long s;
+
+		TAILQ_REMOVE(&ahd_tailq, ahd, links);
+		ahd_list_unlock(&l);
+		ahd_lock(ahd, &s);
+		ahd_intr_enable(ahd, FALSE);
+		ahd_unlock(ahd, &s);
+		ahd_free(ahd);
+	} else
+		ahd_list_unlock(&l);
+}
+
+static int
+ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	char		 buf[80];
+	struct		 ahd_softc *ahd;
+	ahd_dev_softc_t	 pci;
+	struct		 ahd_pci_identity *entry;
+	char		*name;
+	int		 error;
+
+	/*
+	 * Some BIOSen report the same device multiple times.
+	 */
+	TAILQ_FOREACH(ahd, &ahd_tailq, links) {
+		struct pci_dev *probed_pdev;
+
+		probed_pdev = ahd->dev_softc;
+		if (probed_pdev->bus->number == pdev->bus->number
+		 && probed_pdev->devfn == pdev->devfn)
+			break;
+	}
+	if (ahd != NULL) {
+		/* Skip duplicate. */
+		return (-ENODEV);
+	}
+
+	pci = pdev;
+	entry = ahd_find_pci_device(pci);
+	if (entry == NULL)
+		return (-ENODEV);
+
+	/*
+	 * Allocate a softc for this card and
+	 * set it up for attachment by our
+	 * common detect routine.
+	 */
+	sprintf(buf, "ahd_pci:%d:%d:%d",
+		ahd_get_pci_bus(pci),
+		ahd_get_pci_slot(pci),
+		ahd_get_pci_function(pci));
+	name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
+	if (name == NULL)
+		return (-ENOMEM);
+	strcpy(name, buf);
+	ahd = ahd_alloc(NULL, name);
+	if (ahd == NULL)
+		return (-ENOMEM);
+	if (pci_enable_device(pdev)) {
+		ahd_free(ahd);
+		return (-ENODEV);
+	}
+	pci_set_master(pdev);
+
+	if (sizeof(dma_addr_t) > 4) {
+		uint64_t   memsize;
+		const uint64_t mask_39bit = 0x7FFFFFFFFFULL;
+
+		memsize = ahd_linux_get_memsize();
+
+		if (memsize >= 0x8000000000ULL
+	 	 && pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
+			ahd->flags |= AHD_64BIT_ADDRESSING;
+			ahd->platform_data->hw_dma_mask = DMA_64BIT_MASK;
+		} else if (memsize > 0x80000000
+			&& pci_set_dma_mask(pdev, mask_39bit) == 0) {
+			ahd->flags |= AHD_39BIT_ADDRESSING;
+			ahd->platform_data->hw_dma_mask = mask_39bit;
+		}
+	} else {
+		pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		ahd->platform_data->hw_dma_mask = DMA_32BIT_MASK;
+	}
+	ahd->dev_softc = pci;
+	error = ahd_pci_config(ahd, entry);
+	if (error != 0) {
+		ahd_free(ahd);
+		return (-error);
+	}
+	pci_set_drvdata(pdev, ahd);
+	if (aic79xx_detect_complete) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+		ahd_linux_register_host(ahd, &aic79xx_driver_template);
+#else
+		printf("aic79xx: ignoring PCI device found after "
+		       "initialization\n");
+		return (-ENODEV);
+#endif
+	}
+	return (0);
+}
+
+int
+ahd_linux_pci_init(void)
+{
+	return (pci_module_init(&aic79xx_pci_driver));
+}
+
+void
+ahd_linux_pci_exit(void)
+{
+	pci_unregister_driver(&aic79xx_pci_driver);
+}
+
+static int
+ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, u_long *base,
+				 u_long *base2)
+{
+	*base = pci_resource_start(ahd->dev_softc, 0);
+	/*
+	 * This is really the 3rd BAR and should be at index 2,
+	 * but the Linux PCI code doesn't know how to "count" 64bit
+	 * BARs.
+	 */
+	*base2 = pci_resource_start(ahd->dev_softc, 3);
+	if (*base == 0 || *base2 == 0)
+		return (ENOMEM);
+	if (request_region(*base, 256, "aic79xx") == 0)
+		return (ENOMEM);
+	if (request_region(*base2, 256, "aic79xx") == 0) {
+		release_region(*base, 256);
+		return (ENOMEM);
+	}
+	return (0);
+}
+
+static int
+ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
+				 u_long *bus_addr,
+				 uint8_t __iomem **maddr)
+{
+	u_long	start;
+	u_long	base_page;
+	u_long	base_offset;
+	int	error;
+
+	if (aic79xx_allow_memio == 0)
+		return (ENOMEM);
+
+	if ((ahd->bugs & AHD_PCIX_MMAPIO_BUG) != 0)
+		return (ENOMEM);
+
+	error = 0;
+	start = pci_resource_start(ahd->dev_softc, 1);
+	base_page = start & PAGE_MASK;
+	base_offset = start - base_page;
+	if (start != 0) {
+		*bus_addr = start;
+		if (request_mem_region(start, 0x1000, "aic79xx") == 0)
+			error = ENOMEM;
+		if (error == 0) {
+			*maddr = ioremap_nocache(base_page, base_offset + 256);
+			if (*maddr == NULL) {
+				error = ENOMEM;
+				release_mem_region(start, 0x1000);
+			} else
+				*maddr += base_offset;
+		}
+	} else
+		error = ENOMEM;
+	return (error);
+}
+
+int
+ahd_pci_map_registers(struct ahd_softc *ahd)
+{
+	uint32_t command;
+	u_long	 base;
+	uint8_t	__iomem *maddr;
+	int	 error;
+
+	/*
+	 * If it's allowed, we prefer memory mapped access.
+	 */
+	command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, 4);
+	command &= ~(PCIM_CMD_PORTEN|PCIM_CMD_MEMEN);
+	base = 0;
+	maddr = NULL;
+	error = ahd_linux_pci_reserve_mem_region(ahd, &base, &maddr);
+	if (error == 0) {
+		ahd->platform_data->mem_busaddr = base;
+		ahd->tags[0] = BUS_SPACE_MEMIO;
+		ahd->bshs[0].maddr = maddr;
+		ahd->tags[1] = BUS_SPACE_MEMIO;
+		ahd->bshs[1].maddr = maddr + 0x100;
+		ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
+				     command | PCIM_CMD_MEMEN, 4);
+
+		if (ahd_pci_test_register_access(ahd) != 0) {
+
+			printf("aic79xx: PCI Device %d:%d:%d "
+			       "failed memory mapped test.  Using PIO.\n",
+			       ahd_get_pci_bus(ahd->dev_softc),
+			       ahd_get_pci_slot(ahd->dev_softc),
+			       ahd_get_pci_function(ahd->dev_softc));
+			iounmap(maddr);
+			release_mem_region(ahd->platform_data->mem_busaddr,
+					   0x1000);
+			ahd->bshs[0].maddr = NULL;
+			maddr = NULL;
+		} else
+			command |= PCIM_CMD_MEMEN;
+	} else if (bootverbose) {
+		printf("aic79xx: PCI%d:%d:%d MEM region 0x%lx "
+		       "unavailable. Cannot memory map device.\n",
+		       ahd_get_pci_bus(ahd->dev_softc),
+		       ahd_get_pci_slot(ahd->dev_softc),
+		       ahd_get_pci_function(ahd->dev_softc),
+		       base);
+	}
+
+	if (maddr == NULL) {
+		u_long	 base2;
+
+		error = ahd_linux_pci_reserve_io_regions(ahd, &base, &base2);
+		if (error == 0) {
+			ahd->tags[0] = BUS_SPACE_PIO;
+			ahd->tags[1] = BUS_SPACE_PIO;
+			ahd->bshs[0].ioport = base;
+			ahd->bshs[1].ioport = base2;
+			command |= PCIM_CMD_PORTEN;
+		} else {
+			printf("aic79xx: PCI%d:%d:%d IO regions 0x%lx and 0x%lx "
+			       "unavailable. Cannot map device.\n",
+			       ahd_get_pci_bus(ahd->dev_softc),
+			       ahd_get_pci_slot(ahd->dev_softc),
+			       ahd_get_pci_function(ahd->dev_softc),
+			       base, base2);
+		}
+	}
+	ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, 4);
+	return (error);
+}
+
+int
+ahd_pci_map_int(struct ahd_softc *ahd)
+{
+	int error;
+
+	error = request_irq(ahd->dev_softc->irq, ahd_linux_isr,
+			    SA_SHIRQ, "aic79xx", ahd);
+	if (error == 0)
+		ahd->platform_data->irq = ahd->dev_softc->irq;
+	
+	return (-error);
+}
+
+void
+ahd_power_state_change(struct ahd_softc *ahd, ahd_power_state new_state)
+{
+	pci_set_power_state(ahd->dev_softc, new_state);
+}
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
new file mode 100644
index 0000000..4c3bb7b
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -0,0 +1,987 @@
+/*
+ * Product specific probe and attach routines for:
+ *	aic7901 and aic7902 SCSI controllers
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#77 $
+ *
+ * $FreeBSD$
+ */
+
+#ifdef __linux__
+#include "aic79xx_osm.h"
+#include "aic79xx_inline.h"
+#else
+#include <dev/aic7xxx/aic79xx_osm.h>
+#include <dev/aic7xxx/aic79xx_inline.h>
+#endif
+
+#include "aic79xx_pci.h"
+
+static __inline uint64_t
+ahd_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor)
+{
+	uint64_t id;
+
+	id = subvendor
+	   | (subdevice << 16)
+	   | ((uint64_t)vendor << 32)
+	   | ((uint64_t)device << 48);
+
+	return (id);
+}
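+
+/*
+ * The composed ID therefore packs the PCI identifiers as
+ * device[63:48] | vendor[47:32] | subdevice[31:16] | subvendor[15:0],
+ * matching the layout of the full_id/id_mask fields used in the
+ * ahd_pci_ident_table entries below.
+ */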
+
+#define ID_AIC7902_PCI_REV_A4		0x3
+#define ID_AIC7902_PCI_REV_B0		0x10
+#define SUBID_HP			0x0E11
+
+#define DEVID_9005_HOSTRAID(id) ((id) & 0x80)
+
+#define DEVID_9005_TYPE(id) ((id) & 0xF)
+#define		DEVID_9005_TYPE_HBA		0x0	/* Standard Card */
+#define		DEVID_9005_TYPE_HBA_2EXT	0x1	/* 2 External Ports */
+#define		DEVID_9005_TYPE_IROC		0x8	/* Raid(0,1,10) Card */
+#define		DEVID_9005_TYPE_MB		0xF	/* On Motherboard */
+
+#define DEVID_9005_MFUNC(id) ((id) & 0x10)
+
+#define DEVID_9005_PACKETIZED(id) ((id) & 0x8000)
+
+#define SUBID_9005_TYPE(id) ((id) & 0xF)
+#define		SUBID_9005_TYPE_HBA		0x0	/* Standard Card */
+#define		SUBID_9005_TYPE_MB		0xF	/* On Motherboard */
+
+#define SUBID_9005_AUTOTERM(id)	(((id) & 0x10) == 0)
+
+#define SUBID_9005_LEGACYCONN_FUNC(id) ((id) & 0x20)
+
+#define SUBID_9005_SEEPTYPE(id) (((id) & 0x0C0) >> 6)
+#define		SUBID_9005_SEEPTYPE_NONE	0x0
+#define		SUBID_9005_SEEPTYPE_4K		0x1
+
+static ahd_device_setup_t ahd_aic7901_setup;
+static ahd_device_setup_t ahd_aic7901A_setup;
+static ahd_device_setup_t ahd_aic7902_setup;
+static ahd_device_setup_t ahd_aic790X_setup;
+
+struct ahd_pci_identity ahd_pci_ident_table [] =
+{
+	/* aic7901 based controllers */
+	{
+		ID_AHA_29320A,
+		ID_ALL_MASK,
+		"Adaptec 29320A Ultra320 SCSI adapter",
+		ahd_aic7901_setup
+	},
+	{
+		ID_AHA_29320ALP,
+		ID_ALL_MASK,
+		"Adaptec 29320ALP Ultra320 SCSI adapter",
+		ahd_aic7901_setup
+	},
+	/* aic7902 based controllers */	
+	{
+		ID_AHA_29320,
+		ID_ALL_MASK,
+		"Adaptec 29320 Ultra320 SCSI adapter",
+		ahd_aic7902_setup
+	},
+	{
+		ID_AHA_29320B,
+		ID_ALL_MASK,
+		"Adaptec 29320B Ultra320 SCSI adapter",
+		ahd_aic7902_setup
+	},
+	{
+		ID_AHA_29320LP,
+		ID_ALL_MASK,
+		"Adaptec 29320LP Ultra320 SCSI adapter",
+		ahd_aic7901A_setup
+	},
+	{
+		ID_AHA_39320,
+		ID_ALL_MASK,
+		"Adaptec 39320 Ultra320 SCSI adapter",
+		ahd_aic7902_setup
+	},
+	{
+		ID_AHA_39320_B,
+		ID_ALL_MASK,
+		"Adaptec 39320 Ultra320 SCSI adapter",
+		ahd_aic7902_setup
+	},
+	{
+		ID_AHA_39320A,
+		ID_ALL_MASK,
+		"Adaptec 39320A Ultra320 SCSI adapter",
+		ahd_aic7902_setup
+	},
+	{
+		ID_AHA_39320D,
+		ID_ALL_MASK,
+		"Adaptec 39320D Ultra320 SCSI adapter",
+		ahd_aic7902_setup
+	},
+	{
+		ID_AHA_39320D_HP,
+		ID_ALL_MASK,
+		"Adaptec (HP OEM) 39320D Ultra320 SCSI adapter",
+		ahd_aic7902_setup
+	},
+	{
+		ID_AHA_39320D_B,
+		ID_ALL_MASK,
+		"Adaptec 39320D Ultra320 SCSI adapter",
+		ahd_aic7902_setup
+	},
+	{
+		ID_AHA_39320D_B_HP,
+		ID_ALL_MASK,
+		"Adaptec (HP OEM) 39320D Ultra320 SCSI adapter",
+		ahd_aic7902_setup
+	},
+	/* Generic chip probes for devices we don't know 'exactly' */
+	{
+		ID_AIC7901 & ID_9005_GENERIC_MASK,
+		ID_9005_GENERIC_MASK,
+		"Adaptec AIC7901 Ultra320 SCSI adapter",
+		ahd_aic7901_setup
+	},
+	{
+		ID_AIC7901A & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec AIC7901A Ultra320 SCSI adapter",
+		ahd_aic7901A_setup
+	},
+	{
+		ID_AIC7902 & ID_9005_GENERIC_MASK,
+		ID_9005_GENERIC_MASK,
+		"Adaptec AIC7902 Ultra320 SCSI adapter",
+		ahd_aic7902_setup
+	}
+};
+
+const u_int ahd_num_pci_devs = NUM_ELEMENTS(ahd_pci_ident_table);
+		
+#define	DEVCONFIG		0x40
+#define		PCIXINITPAT	0x0000E000ul
+#define			PCIXINIT_PCI33_66	0x0000E000ul
+#define			PCIXINIT_PCIX50_66	0x0000C000ul
+#define			PCIXINIT_PCIX66_100	0x0000A000ul
+#define			PCIXINIT_PCIX100_133	0x00008000ul
+#define	PCI_BUS_MODES_INDEX(devconfig)	\
+	(((devconfig) & PCIXINITPAT) >> 13)
+static const char *pci_bus_modes[] =
+{
+	"PCI bus mode unknown",
+	"PCI bus mode unknown",
+	"PCI bus mode unknown",
+	"PCI bus mode unknown",
+	"PCI-X 101-133MHz",
+	"PCI-X 67-100MHz",
+	"PCI-X 50-66MHz",
+	"PCI 33 or 66MHz"
+};
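+
+/*
+ * Worked example: a DEVCONFIG value with PCIXINIT_PCIX100_133 (0x00008000)
+ * set yields PCI_BUS_MODES_INDEX() = 0x8000 >> 13 = 4, which selects the
+ * "PCI-X 101-133MHz" string above.
+ */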
+
+#define		TESTMODE	0x00000800ul
+#define		IRDY_RST	0x00000200ul
+#define		FRAME_RST	0x00000100ul
+#define		PCI64BIT	0x00000080ul
+#define		MRDCEN		0x00000040ul
+#define		ENDIANSEL	0x00000020ul
+#define		MIXQWENDIANEN	0x00000008ul
+#define		DACEN		0x00000004ul
+#define		STPWLEVEL	0x00000002ul
+#define		QWENDIANSEL	0x00000001ul
+
+#define	DEVCONFIG1		0x44
+#define		PREQDIS		0x01
+
+#define	CSIZE_LATTIME		0x0c
+#define		CACHESIZE	0x000000fful
+#define		LATTIME		0x0000ff00ul
+
+static int	ahd_check_extport(struct ahd_softc *ahd);
+static void	ahd_configure_termination(struct ahd_softc *ahd,
+					  u_int adapter_control);
+static void	ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat);
+
+struct ahd_pci_identity *
+ahd_find_pci_device(ahd_dev_softc_t pci)
+{
+	uint64_t  full_id;
+	uint16_t  device;
+	uint16_t  vendor;
+	uint16_t  subdevice;
+	uint16_t  subvendor;
+	struct	  ahd_pci_identity *entry;
+	u_int	  i;
+
+	vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
+	device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
+	subvendor = ahd_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
+	subdevice = ahd_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+	full_id = ahd_compose_id(device,
+				 vendor,
+				 subdevice,
+				 subvendor);
+
+	/*
+	 * Mask out the IROC/HostRAID bit so that those controllers match
+	 * the same table entries as their standard counterparts.
+	 */
+
+	full_id &= ID_ALL_IROC_MASK;
+
+	for (i = 0; i < ahd_num_pci_devs; i++) {
+		entry = &ahd_pci_ident_table[i];
+		if (entry->full_id == (full_id & entry->id_mask)) {
+			/* Honor exclusion entries. */
+			if (entry->name == NULL)
+				return (NULL);
+			return (entry);
+		}
+	}
+	return (NULL);
+}
+
+int
+ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry)
+{
+	struct scb_data *shared_scb_data;
+	u_long		 l;
+	u_int		 command;
+	uint32_t	 devconfig;
+	uint16_t	 subvendor; 
+	int		 error;
+
+	shared_scb_data = NULL;
+	ahd->description = entry->name;
+	/*
+	 * Record if this is an HP board.
+	 */
+	subvendor = ahd_pci_read_config(ahd->dev_softc,
+					PCIR_SUBVEND_0, /*bytes*/2);
+	if (subvendor == SUBID_HP)
+		ahd->flags |= AHD_HP_BOARD;
+
+	error = entry->setup(ahd);
+	if (error != 0)
+		return (error);
+	
+	devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4);
+	if ((devconfig & PCIXINITPAT) == PCIXINIT_PCI33_66) {
+		ahd->chip |= AHD_PCI;
+		/* Disable PCIX workarounds when running in PCI mode. */
+		ahd->bugs &= ~AHD_PCIX_BUG_MASK;
+	} else {
+		ahd->chip |= AHD_PCIX;
+	}
+	ahd->bus_description = pci_bus_modes[PCI_BUS_MODES_INDEX(devconfig)];
+
+	ahd_power_state_change(ahd, AHD_POWER_STATE_D0);
+
+	error = ahd_pci_map_registers(ahd);
+	if (error != 0)
+		return (error);
+
+	/*
+	 * If we need to support high memory, enable dual
+	 * address cycles.  This bit must be set to enable
+	 * high address bit generation even if we are on a
+	 * 64bit bus (PCI64BIT set in devconfig).
+	 */
+	if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) {
+		uint32_t devconfig;
+
+		if (bootverbose)
+			printf("%s: Enabling 39Bit Addressing\n",
+			       ahd_name(ahd));
+		devconfig = ahd_pci_read_config(ahd->dev_softc,
+						DEVCONFIG, /*bytes*/4);
+		devconfig |= DACEN;
+		ahd_pci_write_config(ahd->dev_softc, DEVCONFIG,
+				     devconfig, /*bytes*/4);
+	}
+	
+	/* Ensure busmastering is enabled */
+	command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
+	command |= PCIM_CMD_BUSMASTEREN;
+	ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, /*bytes*/2);
+
+	error = ahd_softc_init(ahd);
+	if (error != 0)
+		return (error);
+
+	ahd->bus_intr = ahd_pci_intr;
+
+	error = ahd_reset(ahd, /*reinit*/FALSE);
+	if (error != 0)
+		return (ENXIO);
+
+	ahd->pci_cachesize =
+	    ahd_pci_read_config(ahd->dev_softc, CSIZE_LATTIME,
+				/*bytes*/1) & CACHESIZE;
+	ahd->pci_cachesize *= 4;
+
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	/* See if we have a SEEPROM and perform auto-term */
+	error = ahd_check_extport(ahd);
+	if (error != 0)
+		return (error);
+
+	/* Core initialization */
+	error = ahd_init(ahd);
+	if (error != 0)
+		return (error);
+
+	/*
+	 * Allow interrupts now that we are completely setup.
+	 */
+	error = ahd_pci_map_int(ahd);
+	if (error != 0)
+		return (error);
+
+	ahd_list_lock(&l);
+	/*
+	 * Link this softc in with all other ahd instances.
+	 */
+	ahd_softc_insert(ahd);
+	ahd_list_unlock(&l);
+	return (0);
+}
+
+/*
+ * Perform some simple tests that should catch situations where
+ * our registers are invalidly mapped.
+ */
+int
+ahd_pci_test_register_access(struct ahd_softc *ahd)
+{
+	uint32_t cmd;
+	u_int	 targpcistat;
+	u_int	 pci_status1;
+	int	 error;
+	uint8_t	 hcntrl;
+
+	error = EIO;
+
+	/*
+	 * Enable PCI error interrupt status, but suppress NMIs
+	 * generated by SERR raised due to target aborts.
+	 */
+	cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
+	ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
+			     cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2);
+
+	/*
+	 * First a simple test to see if any
+	 * registers can be read.  Reading
+	 * HCNTRL has no side effects and has
+	 * at least one bit that is guaranteed to
+	 * be zero so it is a good register to
+	 * use for this test.
+	 */
+	hcntrl = ahd_inb(ahd, HCNTRL);
+	if (hcntrl == 0xFF)
+		goto fail;
+
+	/*
+	 * Next create a situation where write combining
+	 * or read prefetching could be initiated by the
+	 * CPU or host bridge.  Our device does not support
+	 * either, so look for data corruption and/or flagged
+	 * PCI errors.  First pause without causing another
+	 * chip reset.
+	 */
+	hcntrl &= ~CHIPRST;
+	ahd_outb(ahd, HCNTRL, hcntrl|PAUSE);
+	while (ahd_is_paused(ahd) == 0)
+		;
+
+	/* Clear any PCI errors that occurred before our driver attached. */
+	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+	targpcistat = ahd_inb(ahd, TARGPCISTAT);
+	ahd_outb(ahd, TARGPCISTAT, targpcistat);
+	pci_status1 = ahd_pci_read_config(ahd->dev_softc,
+					  PCIR_STATUS + 1, /*bytes*/1);
+	ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
+			     pci_status1, /*bytes*/1);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	ahd_outb(ahd, CLRINT, CLRPCIINT);
+
+	ahd_outb(ahd, SEQCTL0, PERRORDIS);
+	ahd_outl(ahd, SRAM_BASE, 0x5aa555aa);
+	if (ahd_inl(ahd, SRAM_BASE) != 0x5aa555aa)
+		goto fail;
+
+	if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) {
+		u_int targpcistat;
+
+		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+		targpcistat = ahd_inb(ahd, TARGPCISTAT);
+		if ((targpcistat & STA) != 0)
+			goto fail;
+	}
+
+	error = 0;
+
+fail:
+	if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) {
+
+		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+		targpcistat = ahd_inb(ahd, TARGPCISTAT);
+
+		/* Silently clear any latched errors. */
+		ahd_outb(ahd, TARGPCISTAT, targpcistat);
+		pci_status1 = ahd_pci_read_config(ahd->dev_softc,
+						  PCIR_STATUS + 1, /*bytes*/1);
+		ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
+				     pci_status1, /*bytes*/1);
+		ahd_outb(ahd, CLRINT, CLRPCIINT);
+	}
+	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS);
+	ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2);
+	return (error);
+}
+
+/*
+ * Check the external port logic for a serial EEPROM
+ * and termination/cable detection controls.
+ */
+static int
+ahd_check_extport(struct ahd_softc *ahd)
+{
+	struct	vpd_config vpd;
+	struct	seeprom_config *sc;
+	u_int	adapter_control;
+	int	have_seeprom;
+	int	error;
+
+	sc = ahd->seep_config;
+	have_seeprom = ahd_acquire_seeprom(ahd);
+	if (have_seeprom) {
+		u_int start_addr;
+
+		/*
+		 * Fetch VPD for this function and parse it.
+		 */
+		if (bootverbose) 
+			printf("%s: Reading VPD from SEEPROM...",
+			       ahd_name(ahd));
+
+		/* Address is always in units of 16bit words */
+		start_addr = ((2 * sizeof(*sc))
+			    + (sizeof(vpd) * (ahd->channel - 'A'))) / 2;
+
+		error = ahd_read_seeprom(ahd, (uint16_t *)&vpd,
+					 start_addr, sizeof(vpd)/2,
+					 /*bytestream*/TRUE);
+		if (error == 0)
+			error = ahd_parse_vpddata(ahd, &vpd);
+		if (bootverbose) 
+			printf("%s: VPD parsing %s\n",
+			       ahd_name(ahd),
+			       error == 0 ? "successful" : "failed");
+
+		if (bootverbose) 
+			printf("%s: Reading SEEPROM...", ahd_name(ahd));
+
+		/* Address is always in units of 16bit words */
+		start_addr = (sizeof(*sc) / 2) * (ahd->channel - 'A');
+
+		error = ahd_read_seeprom(ahd, (uint16_t *)sc,
+					 start_addr, sizeof(*sc)/2,
+					 /*bytestream*/FALSE);
+
+		if (error != 0) {
+			printf("Unable to read SEEPROM\n");
+			have_seeprom = 0;
+		} else {
+			have_seeprom = ahd_verify_cksum(sc);
+
+			if (bootverbose) {
+				if (have_seeprom == 0)
+					printf ("checksum error\n");
+				else
+					printf ("done.\n");
+			}
+		}
+		ahd_release_seeprom(ahd);
+	}
+
+	if (!have_seeprom) {
+		u_int	  nvram_scb;
+
+		/*
+		 * Pull scratch ram settings and treat them as
+		 * if they are the contents of a seeprom if
+		 * the 'ADPT', 'BIOS', or 'ASPI' signature is found
+		 * in SCB 0xFF.  We manually compose the data as 16bit
+		 * values to avoid endian issues.
+		 */
+		ahd_set_scbptr(ahd, 0xFF);
+		nvram_scb = ahd_inb_scbram(ahd, SCB_BASE + NVRAM_SCB_OFFSET);
+		if (nvram_scb != 0xFF
+		 && ((ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A'
+		   && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'D'
+		   && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P'
+		   && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'T')
+		  || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'B'
+		   && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'I'
+		   && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'O'
+		   && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'S')
+		  || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A'
+		   && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'S'
+		   && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P'
+		   && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'I'))) {
+			uint16_t *sc_data;
+			int	  i;
+
+			ahd_set_scbptr(ahd, nvram_scb);
+			sc_data = (uint16_t *)sc;
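+			/* Copy the 32-word config image out of SCB RAM. */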
+			for (i = 0; i < 64; i += 2)
+				*sc_data++ = ahd_inw_scbram(ahd, SCB_BASE+i);
+			have_seeprom = ahd_verify_cksum(sc);
+			if (have_seeprom)
+				ahd->flags |= AHD_SCB_CONFIG_USED;
+		}
+	}
+
+#if AHD_DEBUG
+	if (have_seeprom != 0
+	 && (ahd_debug & AHD_DUMP_SEEPROM) != 0) {
+		uint16_t *sc_data;
+		int	  i;
+
+		printf("%s: Seeprom Contents:", ahd_name(ahd));
+		sc_data = (uint16_t *)sc;
+		for (i = 0; i < (sizeof(*sc) / 2); i++)
+			printf("\n\t0x%.4x", sc_data[i]);
+		printf("\n");
+	}
+#endif
+
+	if (!have_seeprom) {
+		if (bootverbose)
+			printf("%s: No SEEPROM available.\n", ahd_name(ahd));
+		ahd->flags |= AHD_USEDEFAULTS;
+		error = ahd_default_config(ahd);
+		adapter_control = CFAUTOTERM|CFSEAUTOTERM;
+		free(ahd->seep_config, M_DEVBUF);
+		ahd->seep_config = NULL;
+	} else {
+		error = ahd_parse_cfgdata(ahd, sc);
+		adapter_control = sc->adapter_control;
+	}
+	if (error != 0)
+		return (error);
+
+	ahd_configure_termination(ahd, adapter_control);
+
+	return (0);
+}
+
+static void
+ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control)
+{
+	int	 error;
+	u_int	 sxfrctl1;
+	uint8_t	 termctl;
+	uint32_t devconfig;
+
+	devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4);
+	devconfig &= ~STPWLEVEL;
+	if ((ahd->flags & AHD_STPWLEVEL_A) != 0)
+		devconfig |= STPWLEVEL;
+	if (bootverbose)
+		printf("%s: STPWLEVEL is %s\n",
+		       ahd_name(ahd), (devconfig & STPWLEVEL) ? "on" : "off");
+	ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, devconfig, /*bytes*/4);
+ 
+	/* Make sure current sensing is off. */
+	if ((ahd->flags & AHD_CURRENT_SENSING) != 0) {
+		(void)ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0);
+	}
+
+	/*
+	 * Read to sense.  Write to set.
+	 */
+	error = ahd_read_flexport(ahd, FLXADDR_TERMCTL, &termctl);
+	if ((adapter_control & CFAUTOTERM) == 0) {
+		if (bootverbose)
+			printf("%s: Manual Primary Termination\n",
+			       ahd_name(ahd));
+		termctl &= ~(FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH);
+		if ((adapter_control & CFSTERM) != 0)
+			termctl |= FLX_TERMCTL_ENPRILOW;
+		if ((adapter_control & CFWSTERM) != 0)
+			termctl |= FLX_TERMCTL_ENPRIHIGH;
+	} else if (error != 0) {
+		printf("%s: Primary Auto-Term Sensing failed! "
+		       "Using Defaults.\n", ahd_name(ahd));
+		termctl = FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH;
+	}
+
+	if ((adapter_control & CFSEAUTOTERM) == 0) {
+		if (bootverbose)
+			printf("%s: Manual Secondary Termination\n",
+			       ahd_name(ahd));
+		termctl &= ~(FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH);
+		if ((adapter_control & CFSELOWTERM) != 0)
+			termctl |= FLX_TERMCTL_ENSECLOW;
+		if ((adapter_control & CFSEHIGHTERM) != 0)
+			termctl |= FLX_TERMCTL_ENSECHIGH;
+	} else if (error != 0) {
+		printf("%s: Secondary Auto-Term Sensing failed! "
+		       "Using Defaults.\n", ahd_name(ahd));
+		termctl |= FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH;
+	}
+
+	/*
+	 * Now set the termination based on what we found.
+	 */
+	sxfrctl1 = ahd_inb(ahd, SXFRCTL1) & ~STPWEN;
+	if ((termctl & FLX_TERMCTL_ENPRILOW) != 0) {
+		ahd->flags |= AHD_TERM_ENB_A;
+		sxfrctl1 |= STPWEN;
+	}
+	/* Must set the latch once in order to be effective. */
+	ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN);
+	ahd_outb(ahd, SXFRCTL1, sxfrctl1);
+
+	error = ahd_write_flexport(ahd, FLXADDR_TERMCTL, termctl);
+	if (error != 0) {
+		printf("%s: Unable to set termination settings!\n",
+		       ahd_name(ahd));
+	} else if (bootverbose) {
+		printf("%s: Primary High byte termination %sabled\n",
+		       ahd_name(ahd),
+		       (termctl & FLX_TERMCTL_ENPRIHIGH) ? "En" : "Dis");
+
+		printf("%s: Primary Low byte termination %sabled\n",
+		       ahd_name(ahd),
+		       (termctl & FLX_TERMCTL_ENPRILOW) ? "En" : "Dis");
+
+		printf("%s: Secondary High byte termination %sabled\n",
+		       ahd_name(ahd),
+		       (termctl & FLX_TERMCTL_ENSECHIGH) ? "En" : "Dis");
+
+		printf("%s: Secondary Low byte termination %sabled\n",
+		       ahd_name(ahd),
+		       (termctl & FLX_TERMCTL_ENSECLOW) ? "En" : "Dis");
+	}
+	return;
+}
+
+#define	DPE	0x80
+#define SSE	0x40
+#define	RMA	0x20
+#define	RTA	0x10
+#define STA	0x08
+#define DPR	0x01
+
+static const char *split_status_source[] =
+{
+	"DFF0",
+	"DFF1",
+	"OVLY",
+	"CMC",
+};
+
+static const char *pci_status_source[] =
+{
+	"DFF0",
+	"DFF1",
+	"SG",
+	"CMC",
+	"OVLY",
+	"NONE",
+	"MSI",
+	"TARG"
+};
+
+static const char *split_status_strings[] =
+{
+	"%s: Received split response in %s.\n",
+	"%s: Received split completion error message in %s\n",
+	"%s: Receive overrun in %s\n",
+	"%s: Count not complete in %s\n",
+	"%s: Split completion data bucket in %s\n",
+	"%s: Split completion address error in %s\n",
+	"%s: Split completion byte count error in %s\n",
+	"%s: Signaled Target-abort to early terminate a split in %s\n"
+};
+
+static const char *pci_status_strings[] =
+{
+	"%s: Data Parity Error has been reported via PERR# in %s\n",
+	"%s: Target initial wait state error in %s\n",
+	"%s: Split completion read data parity error in %s\n",
+	"%s: Split completion address attribute parity error in %s\n",
+	"%s: Received a Target Abort in %s\n",
+	"%s: Received a Master Abort in %s\n",
+	"%s: Signal System Error Detected in %s\n",
+	"%s: Address or Write Phase Parity Error Detected in %s.\n"
+};
+
+void
+ahd_pci_intr(struct ahd_softc *ahd)
+{
+	uint8_t		pci_status[8];
+	ahd_mode_state	saved_modes;
+	u_int		pci_status1;
+	u_int		intstat;
+	u_int		i;
+	u_int		reg;
+	
+	intstat = ahd_inb(ahd, INTSTAT);
+
+	if ((intstat & SPLTINT) != 0)
+		ahd_pci_split_intr(ahd, intstat);
+
+	if ((intstat & PCIINT) == 0)
+		return;
+
+	printf("%s: PCI error Interrupt\n", ahd_name(ahd));
+	saved_modes = ahd_save_modes(ahd);
+	ahd_dump_card_state(ahd);
+	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+	for (i = 0, reg = DF0PCISTAT; i < 8; i++, reg++) {
+
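+		/* Index 5 corresponds to the "NONE" source; skip it. */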
+		if (i == 5)
+			continue;
+		pci_status[i] = ahd_inb(ahd, reg);
+		/* Clear latched errors so that our interrupt deasserts. */
+		ahd_outb(ahd, reg, pci_status[i]);
+	}
+
+	for (i = 0; i < 8; i++) {
+		u_int bit;
+	
+		if (i == 5)
+			continue;
+
+		for (bit = 0; bit < 8; bit++) {
+
+			if ((pci_status[i] & (0x1 << bit)) != 0) {
+				static const char *s;
+
+				s = pci_status_strings[bit];
+				if (i == 7/*TARG*/ && bit == 3)
+					s = "%s: Signaled Target Abort\n";
+				printf(s, ahd_name(ahd), pci_status_source[i]);
+			}
+		}	
+	}
+	pci_status1 = ahd_pci_read_config(ahd->dev_softc,
+					  PCIR_STATUS + 1, /*bytes*/1);
+	ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
+			     pci_status1, /*bytes*/1);
+	ahd_restore_modes(ahd, saved_modes);
+	ahd_outb(ahd, CLRINT, CLRPCIINT);
+	ahd_unpause(ahd);
+}
+
+static void
+ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
+{
+	uint8_t		split_status[4];
+	uint8_t		split_status1[4];
+	uint8_t		sg_split_status[2];
+	uint8_t		sg_split_status1[2];
+	ahd_mode_state	saved_modes;
+	u_int		i;
+	uint16_t	pcix_status;
+
+	/*
+	 * Check for splits in all modes.  Modes 0 and 1
+	 * additionally have SG engine splits to look at.
+	 */
+	pcix_status = ahd_pci_read_config(ahd->dev_softc, PCIXR_STATUS,
+					  /*bytes*/2);
+	printf("%s: PCI Split Interrupt - PCI-X status = 0x%x\n",
+	       ahd_name(ahd), pcix_status);
+	saved_modes = ahd_save_modes(ahd);
+	for (i = 0; i < 4; i++) {
+		ahd_set_modes(ahd, i, i);
+
+		split_status[i] = ahd_inb(ahd, DCHSPLTSTAT0);
+		split_status1[i] = ahd_inb(ahd, DCHSPLTSTAT1);
+		/* Clear latched errors so that our interrupt deasserts. */
+		ahd_outb(ahd, DCHSPLTSTAT0, split_status[i]);
+		ahd_outb(ahd, DCHSPLTSTAT1, split_status1[i]);
+		if (i > 1)
+			continue;
+		sg_split_status[i] = ahd_inb(ahd, SGSPLTSTAT0);
+		sg_split_status1[i] = ahd_inb(ahd, SGSPLTSTAT1);
+		/* Clear latched errors so that our interrupt deasserts. */
+		ahd_outb(ahd, SGSPLTSTAT0, sg_split_status[i]);
+		ahd_outb(ahd, SGSPLTSTAT1, sg_split_status1[i]);
+	}
+
+	for (i = 0; i < 4; i++) {
+		u_int bit;
+
+		for (bit = 0; bit < 8; bit++) {
+
+			if ((split_status[i] & (0x1 << bit)) != 0) {
+				static const char *s;
+
+				s = split_status_strings[bit];
+				printf(s, ahd_name(ahd),
+				       split_status_source[i]);
+			}
+
+			if (i > 1)
+				continue;
+
+			if ((sg_split_status[i] & (0x1 << bit)) != 0) {
+				static const char *s;
+
+				s = split_status_strings[bit];
+				printf(s, ahd_name(ahd), "SG");
+			}
+		}
+	}
+	/*
+	 * Clear PCI-X status bits.
+	 */
+	ahd_pci_write_config(ahd->dev_softc, PCIXR_STATUS,
+			     pcix_status, /*bytes*/2);
+	ahd_outb(ahd, CLRINT, CLRSPLTINT);
+	ahd_restore_modes(ahd, saved_modes);
+}
+
+static int
+ahd_aic7901_setup(struct ahd_softc *ahd)
+{
+
+	ahd->chip = AHD_AIC7901;
+	ahd->features = AHD_AIC7901_FE;
+	return (ahd_aic790X_setup(ahd));
+}
+
+static int
+ahd_aic7901A_setup(struct ahd_softc *ahd)
+{
+
+	ahd->chip = AHD_AIC7901A;
+	ahd->features = AHD_AIC7901A_FE;
+	return (ahd_aic790X_setup(ahd));
+}
+
+static int
+ahd_aic7902_setup(struct ahd_softc *ahd)
+{
+	ahd->chip = AHD_AIC7902;
+	ahd->features = AHD_AIC7902_FE;
+	return (ahd_aic790X_setup(ahd));
+}
+
+static int
+ahd_aic790X_setup(struct ahd_softc *ahd)
+{
+	ahd_dev_softc_t pci;
+	u_int rev;
+
+	pci = ahd->dev_softc;
+	rev = ahd_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
+	if (rev < ID_AIC7902_PCI_REV_A4) {
+		printf("%s: Unable to attach to unsupported chip revision %d\n",
+		       ahd_name(ahd), rev);
+		ahd_pci_write_config(pci, PCIR_COMMAND, 0, /*bytes*/2);
+		return (ENXIO);
+	}
+	ahd->channel = ahd_get_pci_function(pci) + 'A';
+	if (rev < ID_AIC7902_PCI_REV_B0) {
+		/*
+		 * Enable A series workarounds.
+		 */
+		ahd->bugs |= AHD_SENT_SCB_UPDATE_BUG|AHD_ABORT_LQI_BUG
+			  |  AHD_PKT_BITBUCKET_BUG|AHD_LONG_SETIMO_BUG
+			  |  AHD_NLQICRC_DELAYED_BUG|AHD_SCSIRST_BUG
+			  |  AHD_LQO_ATNO_BUG|AHD_AUTOFLUSH_BUG
+			  |  AHD_CLRLQO_AUTOCLR_BUG|AHD_PCIX_MMAPIO_BUG
+			  |  AHD_PCIX_CHIPRST_BUG|AHD_PCIX_SCBRAM_RD_BUG
+			  |  AHD_PKTIZED_STATUS_BUG|AHD_PKT_LUN_BUG
+			  |  AHD_MDFF_WSCBPTR_BUG|AHD_REG_SLOW_SETTLE_BUG
+			  |  AHD_SET_MODE_BUG|AHD_BUSFREEREV_BUG
+			  |  AHD_NONPACKFIFO_BUG|AHD_PACED_NEGTABLE_BUG
+			  |  AHD_FAINT_LED_BUG;
+
+		/*
+		 * IO Cell parameter setup.
+		 */
+		AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29);
+
+		if ((ahd->flags & AHD_HP_BOARD) == 0)
+			AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVA);
+	} else {
+		u_int devconfig1;
+
+		ahd->features |= AHD_RTI|AHD_NEW_IOCELL_OPTS
+			      |  AHD_NEW_DFCNTRL_OPTS|AHD_FAST_CDB_DELIVERY;
+		ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG;
+
+		/*
+		 * Some issues have been resolved in the 7901B.
+		 */
+		if ((ahd->features & AHD_MULTI_FUNC) != 0)
+			ahd->bugs |= AHD_INTCOLLISION_BUG|AHD_ABORT_LQI_BUG;
+
+		/*
+		 * IO Cell parameter setup.
+		 */
+		AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29);
+		AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVB);
+		AHD_SET_AMPLITUDE(ahd, AHD_AMPLITUDE_DEF);
+
+		/*
+		 * Set the PREQDIS bit for H2B which disables some workaround
+		 * that doesn't work on regular PCI busses.
+		 * XXX - Find out exactly what this does from the hardware
+		 * 	 folks!
+		 */
+		devconfig1 = ahd_pci_read_config(pci, DEVCONFIG1, /*bytes*/1);
+		ahd_pci_write_config(pci, DEVCONFIG1,
+				     devconfig1|PREQDIS, /*bytes*/1);
+		devconfig1 = ahd_pci_read_config(pci, DEVCONFIG1, /*bytes*/1);
+	}
+
+	return (0);
+}
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.h b/drivers/scsi/aic7xxx/aic79xx_pci.h
new file mode 100644
index 0000000..b5cfeab
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.h
@@ -0,0 +1,70 @@
+/*
+ * Adaptec AIC79xx device driver for Linux.
+ *
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id$
+ *
+ */
+#ifndef _AIC79XX_PCI_H_
+#define _AIC79XX_PCI_H_
+
+#define ID_ALL_MASK			0xFFFFFFFFFFFFFFFFull
+#define ID_ALL_IROC_MASK		0xFF7FFFFFFFFFFFFFull
+#define ID_DEV_VENDOR_MASK		0xFFFFFFFF00000000ull
+#define ID_9005_GENERIC_MASK		0xFFF0FFFF00000000ull
+#define ID_9005_GENERIC_IROC_MASK	0xFF70FFFF00000000ull
+
+#define ID_AIC7901			0x800F9005FFFF9005ull
+#define ID_AHA_29320A			0x8000900500609005ull
+#define ID_AHA_29320ALP			0x8017900500449005ull
+
+#define ID_AIC7901A			0x801E9005FFFF9005ull
+#define ID_AHA_29320			0x8012900500429005ull
+#define ID_AHA_29320B			0x8013900500439005ull
+#define ID_AHA_29320LP			0x8014900500449005ull
+
+#define ID_AIC7902			0x801F9005FFFF9005ull
+#define ID_AIC7902_B			0x801D9005FFFF9005ull
+#define ID_AHA_39320			0x8010900500409005ull
+#define ID_AHA_39320_B			0x8015900500409005ull
+#define ID_AHA_39320A			0x8016900500409005ull
+#define ID_AHA_39320D			0x8011900500419005ull
+#define ID_AHA_39320D_B			0x801C900500419005ull
+#define ID_AHA_39320D_HP		0x8011900500AC0E11ull
+#define ID_AHA_39320D_B_HP		0x801C900500AC0E11ull
+
+#endif /* _AIC79XX_PCI_H_ */
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
new file mode 100644
index 0000000..e01cd61
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * String handling code courtesy of Gerard Roudier's <groudier@club-internet.fr>
+ * sym driver.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_proc.c#19 $
+ */
+#include "aic79xx_osm.h"
+#include "aic79xx_inline.h"
+
+static void	copy_mem_info(struct info_str *info, char *data, int len);
+static int	copy_info(struct info_str *info, char *fmt, ...);
+static void	ahd_dump_target_state(struct ahd_softc *ahd,
+				      struct info_str *info,
+				      u_int our_id, char channel,
+				      u_int target_id, u_int target_offset);
+static void	ahd_dump_device_state(struct info_str *info,
+				      struct ahd_linux_device *dev);
+static int	ahd_proc_write_seeprom(struct ahd_softc *ahd,
+				       char *buffer, int length);
+
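+/*
+ * Copy only the portion of "data" that falls within the caller's
+ * requested window (info->offset to info->offset + info->length).
+ */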
+static void
+copy_mem_info(struct info_str *info, char *data, int len)
+{
+	if (info->pos + len > info->offset + info->length)
+		len = info->offset + info->length - info->pos;
+
+	if (info->pos + len < info->offset) {
+		info->pos += len;
+		return;
+	}
+
+	if (info->pos < info->offset) {
+		off_t partial;
+
+		partial = info->offset - info->pos;
+		data += partial;
+		info->pos += partial;
+		len  -= partial;
+	}
+
+	if (len > 0) {
+		memcpy(info->buffer, data, len);
+		info->pos += len;
+		info->buffer += len;
+	}
+}
+
+static int
+copy_info(struct info_str *info, char *fmt, ...)
+{
+	va_list args;
+	char buf[256];
+	int len;
+
+	va_start(args, fmt);
+	len = vsnprintf(buf, sizeof(buf), fmt, args);
+	va_end(args);
+
+	copy_mem_info(info, buf, len);
+	return (len);
+}
+
+void
+ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo)
+{
+	u_int speed;
+	u_int freq;
+	u_int mb;
+
+	if (tinfo->period == AHD_PERIOD_UNKNOWN) {
+		copy_info(info, "Renegotiation Pending\n");
+		return;
+	}
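+	/*
+	 * Default to the 3.3MB/s asynchronous rate.  This is overridden
+	 * below when a sync offset has been negotiated.
+	 */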
+        speed = 3300;
+        freq = 0;
+	if (tinfo->offset != 0) {
+		freq = aic_calc_syncsrate(tinfo->period);
+		speed = freq;
+	}
+	speed *= (0x01 << tinfo->width);
+        mb = speed / 1000;
+        if (mb > 0)
+		copy_info(info, "%d.%03dMB/s transfers", mb, speed % 1000);
+        else
+		copy_info(info, "%dKB/s transfers", speed);
+
+	if (freq != 0) {
+		int	printed_options;
+
+		printed_options = 0;
+		copy_info(info, " (%d.%03dMHz", freq / 1000, freq % 1000);
+		if ((tinfo->ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
+			copy_info(info, " RDSTRM");
+			printed_options++;
+		}
+		if ((tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
+			copy_info(info, "%s", printed_options ? "|DT" : " DT");
+			printed_options++;
+		}
+		if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
+			copy_info(info, "%s", printed_options ? "|IU" : " IU");
+			printed_options++;
+		}
+		if ((tinfo->ppr_options & MSG_EXT_PPR_RTI) != 0) {
+			copy_info(info, "%s",
+				  printed_options ? "|RTI" : " RTI");
+			printed_options++;
+		}
+		if ((tinfo->ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
+			copy_info(info, "%s",
+				  printed_options ? "|QAS" : " QAS");
+			printed_options++;
+		}
+	}
+
+	if (tinfo->width > 0) {
+		if (freq != 0) {
+			copy_info(info, ", ");
+		} else {
+			copy_info(info, " (");
+		}
+		copy_info(info, "%dbit)", 8 * (0x01 << tinfo->width));
+	} else if (freq != 0) {
+		copy_info(info, ")");
+	}
+	copy_info(info, "\n");
+}
+
+static void
+ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
+		      u_int our_id, char channel, u_int target_id,
+		      u_int target_offset)
+{
+	struct	ahd_linux_target *targ;
+	struct	ahd_initiator_tinfo *tinfo;
+	struct	ahd_tmode_tstate *tstate;
+	int	lun;
+
+	tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
+				    target_id, &tstate);
+	copy_info(info, "Target %d Negotiation Settings\n", target_id);
+	copy_info(info, "\tUser: ");
+	ahd_format_transinfo(info, &tinfo->user);
+	targ = ahd->platform_data->targets[target_offset];
+	if (targ == NULL)
+		return;
+
+	copy_info(info, "\tGoal: ");
+	ahd_format_transinfo(info, &tinfo->goal);
+	copy_info(info, "\tCurr: ");
+	ahd_format_transinfo(info, &tinfo->curr);
+	copy_info(info, "\tTransmission Errors %ld\n", targ->errors_detected);
+
+	for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
+		struct ahd_linux_device *dev;
+
+		dev = targ->devices[lun];
+
+		if (dev == NULL)
+			continue;
+
+		ahd_dump_device_state(info, dev);
+	}
+}
+
+static void
+ahd_dump_device_state(struct info_str *info, struct ahd_linux_device *dev)
+{
+	copy_info(info, "\tChannel %c Target %d Lun %d Settings\n",
+		  dev->target->channel + 'A', dev->target->target, dev->lun);
+
+	copy_info(info, "\t\tCommands Queued %ld\n", dev->commands_issued);
+	copy_info(info, "\t\tCommands Active %d\n", dev->active);
+	copy_info(info, "\t\tCommand Openings %d\n", dev->openings);
+	copy_info(info, "\t\tMax Tagged Openings %d\n", dev->maxtags);
+	copy_info(info, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen);
+}
+
+static int
+ahd_proc_write_seeprom(struct ahd_softc *ahd, char *buffer, int length)
+{
+	ahd_mode_state saved_modes;
+	int have_seeprom;
+	u_long s;
+	int paused;
+	int written;
+
+	/* Default to failure. */
+	written = -EINVAL;
+	ahd_lock(ahd, &s);
+	paused = ahd_is_paused(ahd);
+	if (!paused)
+		ahd_pause(ahd);
+
+	saved_modes = ahd_save_modes(ahd);
+	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+	if (length != sizeof(struct seeprom_config)) {
+		printf("ahd_proc_write_seeprom: incorrect buffer size\n");
+		goto done;
+	}
+
+	have_seeprom = ahd_verify_cksum((struct seeprom_config*)buffer);
+	if (have_seeprom == 0) {
+		printf("ahd_proc_write_seeprom: cksum verification failed\n");
+		goto done;
+	}
+
+	have_seeprom = ahd_acquire_seeprom(ahd);
+	if (!have_seeprom) {
+		printf("ahd_proc_write_seeprom: No Serial EEPROM\n");
+		goto done;
+	} else {
+		u_int start_addr;
+
+		if (ahd->seep_config == NULL) {
+			ahd->seep_config = malloc(sizeof(*ahd->seep_config),
+						  M_DEVBUF, M_NOWAIT);
+			if (ahd->seep_config == NULL) {
+				printf("aic79xx: Unable to allocate serial "
+				       "eeprom buffer.  Write failing\n");
+				goto done;
+			}
+		}
+		printf("aic79xx: Writing Serial EEPROM\n");
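+		/* Each function's config occupies 32 words in the SEEPROM. */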
+		start_addr = 32 * (ahd->channel - 'A');
+		ahd_write_seeprom(ahd, (u_int16_t *)buffer, start_addr,
+				  sizeof(struct seeprom_config)/2);
+		ahd_read_seeprom(ahd, (uint16_t *)ahd->seep_config,
+				 start_addr, sizeof(struct seeprom_config)/2,
+				 /*ByteStream*/FALSE);
+		ahd_release_seeprom(ahd);
+		written = length;
+	}
+
+done:
+	ahd_restore_modes(ahd, saved_modes);
+	if (!paused)
+		ahd_unpause(ahd);
+	ahd_unlock(ahd, &s);
+	return (written);
+}
+/*
+ * Return information to handle /proc support for the driver.
+ */
+int
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ahd_linux_proc_info(char *buffer, char **start, off_t offset,
+		    int length, int hostno, int inout)
+#else
+ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
+		    off_t offset, int length, int inout)
+#endif
+{
+	struct	ahd_softc *ahd;
+	struct	info_str info;
+	char	ahd_info[256];
+	u_long	l;
+	u_int	max_targ;
+	u_int	i;
+	int	retval;
+
+	retval = -EINVAL;
+	ahd_list_lock(&l);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+	TAILQ_FOREACH(ahd, &ahd_tailq, links) {
+		if (ahd->platform_data->host->host_no == hostno)
+			break;
+	}
+#else
+	ahd = ahd_find_softc(*(struct ahd_softc **)shost->hostdata);
+#endif
+
+	if (ahd == NULL)
+		goto done;
+
+	 /* Has data been written to the file? */ 
+	if (inout == TRUE) {
+		retval = ahd_proc_write_seeprom(ahd, buffer, length);
+		goto done;
+	}
+
+	if (start)
+		*start = buffer;
+
+	info.buffer	= buffer;
+	info.length	= length;
+	info.offset	= offset;
+	info.pos	= 0;
+
+	copy_info(&info, "Adaptec AIC79xx driver version: %s\n",
+		  AIC79XX_DRIVER_VERSION);
+	copy_info(&info, "%s\n", ahd->description);
+	ahd_controller_info(ahd, ahd_info);
+	copy_info(&info, "%s\n", ahd_info);
+	copy_info(&info, "Allocated SCBs: %d, SG List Length: %d\n\n",
+		  ahd->scb_data.numscbs, AHD_NSEG);
+
+	max_targ = 15;
+
+	if (ahd->seep_config == NULL)
+		copy_info(&info, "No Serial EEPROM\n");
+	else {
+		copy_info(&info, "Serial EEPROM:\n");
+		for (i = 0; i < sizeof(*ahd->seep_config)/2; i++) {
+			if (((i % 8) == 0) && (i != 0)) {
+				copy_info(&info, "\n");
+			}
+			copy_info(&info, "0x%.4x ",
+				  ((uint16_t*)ahd->seep_config)[i]);
+		}
+		copy_info(&info, "\n");
+	}
+	copy_info(&info, "\n");
+
+	if ((ahd->features & AHD_WIDE) == 0)
+		max_targ = 7;
+
+	for (i = 0; i <= max_targ; i++) {
+
+		ahd_dump_target_state(ahd, &info, ahd->our_id, 'A',
+				      /*target_id*/i, /*target_offset*/i);
+	}
+	retval = info.pos > info.offset ? info.pos - info.offset : 0;
+done:
+	ahd_list_unlock(&l);
+	return (retval);
+}
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
new file mode 100644
index 0000000..c01ac39
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
@@ -0,0 +1,3776 @@
+/*
+ * DO NOT EDIT - This file is automatically generated
+ *		 from the following source files:
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#94 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#70 $
+ */
+typedef int (ahd_reg_print_t)(u_int, u_int *, u_int);
+typedef struct ahd_reg_parse_entry {
+	char	*name;
+	uint8_t	 value;
+	uint8_t	 mask;
+} ahd_reg_parse_entry_t;
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_mode_ptr_print;
+#else
+#define ahd_mode_ptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MODE_PTR", 0x00, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_intstat_print;
+#else
+#define ahd_intstat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "INTSTAT", 0x01, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqintcode_print;
+#else
+#define ahd_seqintcode_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQINTCODE", 0x02, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrint_print;
+#else
+#define ahd_clrint_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRINT", 0x03, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_error_print;
+#else
+#define ahd_error_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ERROR", 0x04, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrerr_print;
+#else
+#define ahd_clrerr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRERR", 0x04, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_hcntrl_print;
+#else
+#define ahd_hcntrl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "HCNTRL", 0x05, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_hnscb_qoff_print;
+#else
+#define ahd_hnscb_qoff_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "HNSCB_QOFF", 0x06, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_hescb_qoff_print;
+#else
+#define ahd_hescb_qoff_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "HESCB_QOFF", 0x08, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_hs_mailbox_print;
+#else
+#define ahd_hs_mailbox_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "HS_MAILBOX", 0x0b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrseqintstat_print;
+#else
+#define ahd_clrseqintstat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRSEQINTSTAT", 0x0c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqintstat_print;
+#else
+#define ahd_seqintstat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQINTSTAT", 0x0c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_swtimer_print;
+#else
+#define ahd_swtimer_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SWTIMER", 0x0e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_snscb_qoff_print;
+#else
+#define ahd_snscb_qoff_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SNSCB_QOFF", 0x10, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sescb_qoff_print;
+#else
+#define ahd_sescb_qoff_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SESCB_QOFF", 0x12, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sdscb_qoff_print;
+#else
+#define ahd_sdscb_qoff_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SDSCB_QOFF", 0x14, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_qoff_ctlsta_print;
+#else
+#define ahd_qoff_ctlsta_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "QOFF_CTLSTA", 0x16, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_intctl_print;
+#else
+#define ahd_intctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "INTCTL", 0x18, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dfcntrl_print;
+#else
+#define ahd_dfcntrl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFCNTRL", 0x19, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dscommand0_print;
+#else
+#define ahd_dscommand0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DSCOMMAND0", 0x19, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dfstatus_print;
+#else
+#define ahd_dfstatus_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFSTATUS", 0x1a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sg_cache_shadow_print;
+#else
+#define ahd_sg_cache_shadow_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SG_CACHE_SHADOW", 0x1b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_arbctl_print;
+#else
+#define ahd_arbctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ARBCTL", 0x1b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sg_cache_pre_print;
+#else
+#define ahd_sg_cache_pre_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SG_CACHE_PRE", 0x1b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqin_print;
+#else
+#define ahd_lqin_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQIN", 0x20, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_typeptr_print;
+#else
+#define ahd_typeptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "TYPEPTR", 0x20, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_tagptr_print;
+#else
+#define ahd_tagptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "TAGPTR", 0x21, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lunptr_print;
+#else
+#define ahd_lunptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LUNPTR", 0x22, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_datalenptr_print;
+#else
+#define ahd_datalenptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DATALENPTR", 0x23, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_statlenptr_print;
+#else
+#define ahd_statlenptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "STATLENPTR", 0x24, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmdlenptr_print;
+#else
+#define ahd_cmdlenptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMDLENPTR", 0x25, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_attrptr_print;
+#else
+#define ahd_attrptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ATTRPTR", 0x26, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_flagptr_print;
+#else
+#define ahd_flagptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "FLAGPTR", 0x27, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmdptr_print;
+#else
+#define ahd_cmdptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMDPTR", 0x28, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_qnextptr_print;
+#else
+#define ahd_qnextptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "QNEXTPTR", 0x29, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_idptr_print;
+#else
+#define ahd_idptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "IDPTR", 0x2a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_abrtbyteptr_print;
+#else
+#define ahd_abrtbyteptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ABRTBYTEPTR", 0x2b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_abrtbitptr_print;
+#else
+#define ahd_abrtbitptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ABRTBITPTR", 0x2c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_maxcmdbytes_print;
+#else
+#define ahd_maxcmdbytes_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MAXCMDBYTES", 0x2d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_maxcmd2rcv_print;
+#else
+#define ahd_maxcmd2rcv_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MAXCMD2RCV", 0x2e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_shortthresh_print;
+#else
+#define ahd_shortthresh_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SHORTTHRESH", 0x2f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lunlen_print;
+#else
+#define ahd_lunlen_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LUNLEN", 0x30, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cdblimit_print;
+#else
+#define ahd_cdblimit_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CDBLIMIT", 0x31, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_maxcmd_print;
+#else
+#define ahd_maxcmd_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MAXCMD", 0x32, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_maxcmdcnt_print;
+#else
+#define ahd_maxcmdcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MAXCMDCNT", 0x33, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqrsvd01_print;
+#else
+#define ahd_lqrsvd01_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQRSVD01", 0x34, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqrsvd16_print;
+#else
+#define ahd_lqrsvd16_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQRSVD16", 0x35, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqrsvd17_print;
+#else
+#define ahd_lqrsvd17_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQRSVD17", 0x36, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmdrsvd0_print;
+#else
+#define ahd_cmdrsvd0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMDRSVD0", 0x37, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqctl0_print;
+#else
+#define ahd_lqctl0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQCTL0", 0x38, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqctl1_print;
+#else
+#define ahd_lqctl1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQCTL1", 0x38, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsbist0_print;
+#else
+#define ahd_scsbist0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSBIST0", 0x39, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqctl2_print;
+#else
+#define ahd_lqctl2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQCTL2", 0x39, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsbist1_print;
+#else
+#define ahd_scsbist1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSBIST1", 0x3a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsiseq0_print;
+#else
+#define ahd_scsiseq0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSISEQ0", 0x3a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsiseq1_print;
+#else
+#define ahd_scsiseq1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSISEQ1", 0x3b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sxfrctl0_print;
+#else
+#define ahd_sxfrctl0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SXFRCTL0", 0x3c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_businitid_print;
+#else
+#define ahd_businitid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "BUSINITID", 0x3c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dlcount_print;
+#else
+#define ahd_dlcount_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DLCOUNT", 0x3c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sxfrctl1_print;
+#else
+#define ahd_sxfrctl1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SXFRCTL1", 0x3d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_bustargid_print;
+#else
+#define ahd_bustargid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "BUSTARGID", 0x3e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sxfrctl2_print;
+#else
+#define ahd_sxfrctl2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SXFRCTL2", 0x3e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dffstat_print;
+#else
+#define ahd_dffstat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFFSTAT", 0x3f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsisigo_print;
+#else
+#define ahd_scsisigo_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_multargid_print;
+#else
+#define ahd_multargid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsisigi_print;
+#else
+#define ahd_scsisigi_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSISIGI", 0x41, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsiphase_print;
+#else
+#define ahd_scsiphase_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSIPHASE", 0x42, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsidat0_img_print;
+#else
+#define ahd_scsidat0_img_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSIDAT0_IMG", 0x43, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsidat_print;
+#else
+#define ahd_scsidat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSIDAT", 0x44, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsibus_print;
+#else
+#define ahd_scsibus_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSIBUS", 0x46, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_targidin_print;
+#else
+#define ahd_targidin_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "TARGIDIN", 0x48, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_selid_print;
+#else
+#define ahd_selid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SELID", 0x49, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sblkctl_print;
+#else
+#define ahd_sblkctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SBLKCTL", 0x4a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_optionmode_print;
+#else
+#define ahd_optionmode_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "OPTIONMODE", 0x4a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sstat0_print;
+#else
+#define ahd_sstat0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SSTAT0", 0x4b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrsint0_print;
+#else
+#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_simode0_print;
+#else
+#define ahd_simode0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SIMODE0", 0x4b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrsint1_print;
+#else
+#define ahd_clrsint1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sstat1_print;
+#else
+#define ahd_sstat1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SSTAT1", 0x4c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sstat2_print;
+#else
+#define ahd_sstat2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrsint2_print;
+#else
+#define ahd_clrsint2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRSINT2", 0x4d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_simode2_print;
+#else
+#define ahd_simode2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SIMODE2", 0x4d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_perrdiag_print;
+#else
+#define ahd_perrdiag_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "PERRDIAG", 0x4e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqistate_print;
+#else
+#define ahd_lqistate_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQISTATE", 0x4e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_soffcnt_print;
+#else
+#define ahd_soffcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SOFFCNT", 0x4f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqostate_print;
+#else
+#define ahd_lqostate_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQOSTATE", 0x4f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqistat0_print;
+#else
+#define ahd_lqistat0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQISTAT0", 0x50, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrlqiint0_print;
+#else
+#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqimode0_print;
+#else
+#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqimode1_print;
+#else
+#define ahd_lqimode1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQIMODE1", 0x51, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqistat1_print;
+#else
+#define ahd_lqistat1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQISTAT1", 0x51, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrlqiint1_print;
+#else
+#define ahd_clrlqiint1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRLQIINT1", 0x51, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqistat2_print;
+#else
+#define ahd_lqistat2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQISTAT2", 0x52, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sstat3_print;
+#else
+#define ahd_sstat3_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SSTAT3", 0x53, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_simode3_print;
+#else
+#define ahd_simode3_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SIMODE3", 0x53, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrsint3_print;
+#else
+#define ahd_clrsint3_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRSINT3", 0x53, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqomode0_print;
+#else
+#define ahd_lqomode0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQOMODE0", 0x54, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqostat0_print;
+#else
+#define ahd_lqostat0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQOSTAT0", 0x54, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrlqoint0_print;
+#else
+#define ahd_clrlqoint0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRLQOINT0", 0x54, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqostat1_print;
+#else
+#define ahd_lqostat1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQOSTAT1", 0x55, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrlqoint1_print;
+#else
+#define ahd_clrlqoint1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRLQOINT1", 0x55, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqomode1_print;
+#else
+#define ahd_lqomode1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQOMODE1", 0x55, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqostat2_print;
+#else
+#define ahd_lqostat2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQOSTAT2", 0x56, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_os_space_cnt_print;
+#else
+#define ahd_os_space_cnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "OS_SPACE_CNT", 0x56, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_simode1_print;
+#else
+#define ahd_simode1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SIMODE1", 0x57, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_gsfifo_print;
+#else
+#define ahd_gsfifo_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "GSFIFO", 0x58, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dffsxfrctl_print;
+#else
+#define ahd_dffsxfrctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFFSXFRCTL", 0x5a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lqoscsctl_print;
+#else
+#define ahd_lqoscsctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQOSCSCTL", 0x5a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_nextscb_print;
+#else
+#define ahd_nextscb_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "NEXTSCB", 0x5a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_clrseqintsrc_print;
+#else
+#define ahd_clrseqintsrc_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRSEQINTSRC", 0x5b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqintsrc_print;
+#else
+#define ahd_seqintsrc_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQINTSRC", 0x5b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_currscb_print;
+#else
+#define ahd_currscb_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqimode_print;
+#else
+#define ahd_seqimode_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQIMODE", 0x5c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_mdffstat_print;
+#else
+#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_crccontrol_print;
+#else
+#define ahd_crccontrol_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CRCCONTROL", 0x5d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dfftag_print;
+#else
+#define ahd_dfftag_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFFTAG", 0x5e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lastscb_print;
+#else
+#define ahd_lastscb_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LASTSCB", 0x5e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsitest_print;
+#else
+#define ahd_scsitest_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSITEST", 0x5e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_iopdnctl_print;
+#else
+#define ahd_iopdnctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "IOPDNCTL", 0x5f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_shaddr_print;
+#else
+#define ahd_shaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SHADDR", 0x60, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_negoaddr_print;
+#else
+#define ahd_negoaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "NEGOADDR", 0x60, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dgrpcrci_print;
+#else
+#define ahd_dgrpcrci_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DGRPCRCI", 0x60, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_negperiod_print;
+#else
+#define ahd_negperiod_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "NEGPERIOD", 0x61, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_packcrci_print;
+#else
+#define ahd_packcrci_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "PACKCRCI", 0x62, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_negoffset_print;
+#else
+#define ahd_negoffset_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "NEGOFFSET", 0x62, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_negppropts_print;
+#else
+#define ahd_negppropts_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "NEGPPROPTS", 0x63, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_negconopts_print;
+#else
+#define ahd_negconopts_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "NEGCONOPTS", 0x64, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_annexcol_print;
+#else
+#define ahd_annexcol_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ANNEXCOL", 0x65, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scschkn_print;
+#else
+#define ahd_scschkn_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSCHKN", 0x66, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_annexdat_print;
+#else
+#define ahd_annexdat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ANNEXDAT", 0x66, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_iownid_print;
+#else
+#define ahd_iownid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "IOWNID", 0x67, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_pll960ctl0_print;
+#else
+#define ahd_pll960ctl0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "PLL960CTL0", 0x68, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_shcnt_print;
+#else
+#define ahd_shcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SHCNT", 0x68, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_townid_print;
+#else
+#define ahd_townid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "TOWNID", 0x69, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_pll960ctl1_print;
+#else
+#define ahd_pll960ctl1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "PLL960CTL1", 0x69, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_pll960cnt0_print;
+#else
+#define ahd_pll960cnt0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "PLL960CNT0", 0x6a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_xsig_print;
+#else
+#define ahd_xsig_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "XSIG", 0x6a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seloid_print;
+#else
+#define ahd_seloid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SELOID", 0x6b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_pll400ctl0_print;
+#else
+#define ahd_pll400ctl0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "PLL400CTL0", 0x6c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_fairness_print;
+#else
+#define ahd_fairness_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "FAIRNESS", 0x6c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_pll400ctl1_print;
+#else
+#define ahd_pll400ctl1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "PLL400CTL1", 0x6d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_pll400cnt0_print;
+#else
+#define ahd_pll400cnt0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "PLL400CNT0", 0x6e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_unfairness_print;
+#else
+#define ahd_unfairness_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "UNFAIRNESS", 0x6e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_haddr_print;
+#else
+#define ahd_haddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "HADDR", 0x70, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_plldelay_print;
+#else
+#define ahd_plldelay_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "PLLDELAY", 0x70, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_hodmaadr_print;
+#else
+#define ahd_hodmaadr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "HODMAADR", 0x70, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_hodmacnt_print;
+#else
+#define ahd_hodmacnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "HODMACNT", 0x78, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_hcnt_print;
+#else
+#define ahd_hcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "HCNT", 0x78, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_hodmaen_print;
+#else
+#define ahd_hodmaen_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "HODMAEN", 0x7a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sghaddr_print;
+#else
+#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scbhaddr_print;
+#else
+#define ahd_scbhaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCBHADDR", 0x7c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sghcnt_print;
+#else
+#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scbhcnt_print;
+#else
+#define ahd_scbhcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCBHCNT", 0x84, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dff_thrsh_print;
+#else
+#define ahd_dff_thrsh_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFF_THRSH", 0x88, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_romaddr_print;
+#else
+#define ahd_romaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ROMADDR", 0x8a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_romcntrl_print;
+#else
+#define ahd_romcntrl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ROMCNTRL", 0x8d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_romdata_print;
+#else
+#define ahd_romdata_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ROMDATA", 0x8e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmcrxmsg0_print;
+#else
+#define ahd_cmcrxmsg0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMCRXMSG0", 0x90, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_roenable_print;
+#else
+#define ahd_roenable_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ROENABLE", 0x90, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ovlyrxmsg0_print;
+#else
+#define ahd_ovlyrxmsg0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "OVLYRXMSG0", 0x90, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dchrxmsg0_print;
+#else
+#define ahd_dchrxmsg0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DCHRXMSG0", 0x90, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ovlyrxmsg1_print;
+#else
+#define ahd_ovlyrxmsg1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "OVLYRXMSG1", 0x91, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_nsenable_print;
+#else
+#define ahd_nsenable_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "NSENABLE", 0x91, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dchrxmsg1_print;
+#else
+#define ahd_dchrxmsg1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DCHRXMSG1", 0x91, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmcrxmsg1_print;
+#else
+#define ahd_cmcrxmsg1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMCRXMSG1", 0x91, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dchrxmsg2_print;
+#else
+#define ahd_dchrxmsg2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DCHRXMSG2", 0x92, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ovlyrxmsg2_print;
+#else
+#define ahd_ovlyrxmsg2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "OVLYRXMSG2", 0x92, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmcrxmsg2_print;
+#else
+#define ahd_cmcrxmsg2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMCRXMSG2", 0x92, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ost_print;
+#else
+#define ahd_ost_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "OST", 0x92, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dchrxmsg3_print;
+#else
+#define ahd_dchrxmsg3_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DCHRXMSG3", 0x93, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmcrxmsg3_print;
+#else
+#define ahd_cmcrxmsg3_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMCRXMSG3", 0x93, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_pcixctl_print;
+#else
+#define ahd_pcixctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "PCIXCTL", 0x93, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ovlyrxmsg3_print;
+#else
+#define ahd_ovlyrxmsg3_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "OVLYRXMSG3", 0x93, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ovlyseqbcnt_print;
+#else
+#define ahd_ovlyseqbcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "OVLYSEQBCNT", 0x94, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmcseqbcnt_print;
+#else
+#define ahd_cmcseqbcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMCSEQBCNT", 0x94, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dchseqbcnt_print;
+#else
+#define ahd_dchseqbcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DCHSEQBCNT", 0x94, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmcspltstat0_print;
+#else
+#define ahd_cmcspltstat0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMCSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ovlyspltstat0_print;
+#else
+#define ahd_ovlyspltstat0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "OVLYSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dchspltstat0_print;
+#else
+#define ahd_dchspltstat0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DCHSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dchspltstat1_print;
+#else
+#define ahd_dchspltstat1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DCHSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmcspltstat1_print;
+#else
+#define ahd_cmcspltstat1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMCSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ovlyspltstat1_print;
+#else
+#define ahd_ovlyspltstat1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "OVLYSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sgrxmsg0_print;
+#else
+#define ahd_sgrxmsg0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGRXMSG0", 0x98, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_slvspltoutadr0_print;
+#else
+#define ahd_slvspltoutadr0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SLVSPLTOUTADR0", 0x98, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sgrxmsg1_print;
+#else
+#define ahd_sgrxmsg1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGRXMSG1", 0x99, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_slvspltoutadr1_print;
+#else
+#define ahd_slvspltoutadr1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SLVSPLTOUTADR1", 0x99, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sgrxmsg2_print;
+#else
+#define ahd_sgrxmsg2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGRXMSG2", 0x9a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_slvspltoutadr2_print;
+#else
+#define ahd_slvspltoutadr2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SLVSPLTOUTADR2", 0x9a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sgrxmsg3_print;
+#else
+#define ahd_sgrxmsg3_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGRXMSG3", 0x9b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_slvspltoutadr3_print;
+#else
+#define ahd_slvspltoutadr3_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SLVSPLTOUTADR3", 0x9b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sgseqbcnt_print;
+#else
+#define ahd_sgseqbcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGSEQBCNT", 0x9c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_slvspltoutattr0_print;
+#else
+#define ahd_slvspltoutattr0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SLVSPLTOUTATTR0", 0x9c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_slvspltoutattr1_print;
+#else
+#define ahd_slvspltoutattr1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SLVSPLTOUTATTR1", 0x9d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_slvspltoutattr2_print;
+#else
+#define ahd_slvspltoutattr2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SLVSPLTOUTATTR2", 0x9e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sgspltstat0_print;
+#else
+#define ahd_sgspltstat0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGSPLTSTAT0", 0x9e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sfunct_print;
+#else
+#define ahd_sfunct_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SFUNCT", 0x9f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sgspltstat1_print;
+#else
+#define ahd_sgspltstat1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGSPLTSTAT1", 0x9f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_df0pcistat_print;
+#else
+#define ahd_df0pcistat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DF0PCISTAT", 0xa0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_reg0_print;
+#else
+#define ahd_reg0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "REG0", 0xa0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_df1pcistat_print;
+#else
+#define ahd_df1pcistat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DF1PCISTAT", 0xa1, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sgpcistat_print;
+#else
+#define ahd_sgpcistat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGPCISTAT", 0xa2, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_reg1_print;
+#else
+#define ahd_reg1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "REG1", 0xa2, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmcpcistat_print;
+#else
+#define ahd_cmcpcistat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMCPCISTAT", 0xa3, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ovlypcistat_print;
+#else
+#define ahd_ovlypcistat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "OVLYPCISTAT", 0xa4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_reg_isr_print;
+#else
+#define ahd_reg_isr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "REG_ISR", 0xa4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sg_state_print;
+#else
+#define ahd_sg_state_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SG_STATE", 0xa6, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_msipcistat_print;
+#else
+#define ahd_msipcistat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MSIPCISTAT", 0xa6, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_targpcistat_print;
+#else
+#define ahd_targpcistat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "TARGPCISTAT", 0xa7, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_data_count_odd_print;
+#else
+#define ahd_data_count_odd_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DATA_COUNT_ODD", 0xa7, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scbptr_print;
+#else
+#define ahd_scbptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCBPTR", 0xa8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ccscbacnt_print;
+#else
+#define ahd_ccscbacnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CCSCBACNT", 0xab, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scbautoptr_print;
+#else
+#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCBAUTOPTR", 0xab, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ccsgaddr_print;
+#else
+#define ahd_ccsgaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CCSGADDR", 0xac, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ccscbaddr_print;
+#else
+#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CCSCBADDR", 0xac, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ccscbadr_bk_print;
+#else
+#define ahd_ccscbadr_bk_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CCSCBADR_BK", 0xac, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmc_rambist_print;
+#else
+#define ahd_cmc_rambist_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMC_RAMBIST", 0xad, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ccsgctl_print;
+#else
+#define ahd_ccsgctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CCSGCTL", 0xad, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ccscbctl_print;
+#else
+#define ahd_ccscbctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CCSCBCTL", 0xad, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ccsgram_print;
+#else
+#define ahd_ccsgram_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CCSGRAM", 0xb0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_flexadr_print;
+#else
+#define ahd_flexadr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "FLEXADR", 0xb0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ccscbram_print;
+#else
+#define ahd_ccscbram_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CCSCBRAM", 0xb0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_flexcnt_print;
+#else
+#define ahd_flexcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "FLEXCNT", 0xb3, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_flexdmastat_print;
+#else
+#define ahd_flexdmastat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "FLEXDMASTAT", 0xb5, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_flexdata_print;
+#else
+#define ahd_flexdata_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "FLEXDATA", 0xb6, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_brddat_print;
+#else
+#define ahd_brddat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "BRDDAT", 0xb8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_brdctl_print;
+#else
+#define ahd_brdctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "BRDCTL", 0xb9, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seeadr_print;
+#else
+#define ahd_seeadr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEEADR", 0xba, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seedat_print;
+#else
+#define ahd_seedat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEEDAT", 0xbc, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seectl_print;
+#else
+#define ahd_seectl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEECTL", 0xbe, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seestat_print;
+#else
+#define ahd_seestat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEESTAT", 0xbe, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scbcnt_print;
+#else
+#define ahd_scbcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCBCNT", 0xbf, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dfwaddr_print;
+#else
+#define ahd_dfwaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFWADDR", 0xc0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dspfltrctl_print;
+#else
+#define ahd_dspfltrctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DSPFLTRCTL", 0xc0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dspdatactl_print;
+#else
+#define ahd_dspdatactl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DSPDATACTL", 0xc1, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dfraddr_print;
+#else
+#define ahd_dfraddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFRADDR", 0xc2, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dspreqctl_print;
+#else
+#define ahd_dspreqctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DSPREQCTL", 0xc2, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dspackctl_print;
+#else
+#define ahd_dspackctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DSPACKCTL", 0xc3, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dfdat_print;
+#else
+#define ahd_dfdat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFDAT", 0xc4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dspselect_print;
+#else
+#define ahd_dspselect_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DSPSELECT", 0xc4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_wrtbiasctl_print;
+#else
+#define ahd_wrtbiasctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "WRTBIASCTL", 0xc5, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_rcvrbiosctl_print;
+#else
+#define ahd_rcvrbiosctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "RCVRBIOSCTL", 0xc6, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_wrtbiascalc_print;
+#else
+#define ahd_wrtbiascalc_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "WRTBIASCALC", 0xc7, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dfptrs_print;
+#else
+#define ahd_dfptrs_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFPTRS", 0xc8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_rcvrbiascalc_print;
+#else
+#define ahd_rcvrbiascalc_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "RCVRBIASCALC", 0xc8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dfbkptr_print;
+#else
+#define ahd_dfbkptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFBKPTR", 0xc9, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_skewcalc_print;
+#else
+#define ahd_skewcalc_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SKEWCALC", 0xc9, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dfdbctl_print;
+#else
+#define ahd_dfdbctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFDBCTL", 0xcb, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dfscnt_print;
+#else
+#define ahd_dfscnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFSCNT", 0xcc, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dfbcnt_print;
+#else
+#define ahd_dfbcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DFBCNT", 0xce, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ovlyaddr_print;
+#else
+#define ahd_ovlyaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "OVLYADDR", 0xd4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqctl0_print;
+#else
+#define ahd_seqctl0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQCTL0", 0xd6, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqctl1_print;
+#else
+#define ahd_seqctl1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQCTL1", 0xd7, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_flags_print;
+#else
+#define ahd_flags_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "FLAGS", 0xd8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqintctl_print;
+#else
+#define ahd_seqintctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQINTCTL", 0xd9, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seqram_print;
+#else
+#define ahd_seqram_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQRAM", 0xda, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_prgmcnt_print;
+#else
+#define ahd_prgmcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "PRGMCNT", 0xde, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_accum_print;
+#else
+#define ahd_accum_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ACCUM", 0xe0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sindex_print;
+#else
+#define ahd_sindex_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SINDEX", 0xe2, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dindex_print;
+#else
+#define ahd_dindex_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DINDEX", 0xe4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_brkaddr1_print;
+#else
+#define ahd_brkaddr1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "BRKADDR1", 0xe6, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_brkaddr0_print;
+#else
+#define ahd_brkaddr0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "BRKADDR0", 0xe6, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_allones_print;
+#else
+#define ahd_allones_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ALLONES", 0xe8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_allzeros_print;
+#else
+#define ahd_allzeros_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ALLZEROS", 0xea, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_none_print;
+#else
+#define ahd_none_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "NONE", 0xea, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sindir_print;
+#else
+#define ahd_sindir_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SINDIR", 0xec, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dindir_print;
+#else
+#define ahd_dindir_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DINDIR", 0xed, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_function1_print;
+#else
+#define ahd_function1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "FUNCTION1", 0xf0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_stack_print;
+#else
+#define ahd_stack_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "STACK", 0xf2, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_curaddr_print;
+#else
+#define ahd_curaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CURADDR", 0xf4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_intvec1_addr_print;
+#else
+#define ahd_intvec1_addr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "INTVEC1_ADDR", 0xf4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_intvec2_addr_print;
+#else
+#define ahd_intvec2_addr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "INTVEC2_ADDR", 0xf6, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lastaddr_print;
+#else
+#define ahd_lastaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LASTADDR", 0xf6, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_longjmp_addr_print;
+#else
+#define ahd_longjmp_addr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LONGJMP_ADDR", 0xf8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_accum_save_print;
+#else
+#define ahd_accum_save_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ACCUM_SAVE", 0xfa, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_waiting_scb_tails_print;
+#else
+#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_ahd_pci_config_base_print;
+#else
+#define ahd_ahd_pci_config_base_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "AHD_PCI_CONFIG_BASE", 0x100, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_sram_base_print;
+#else
+#define ahd_sram_base_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_waiting_tid_head_print;
+#else
+#define ahd_waiting_tid_head_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "WAITING_TID_HEAD", 0x120, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_waiting_tid_tail_print;
+#else
+#define ahd_waiting_tid_tail_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "WAITING_TID_TAIL", 0x122, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_next_queued_scb_addr_print;
+#else
+#define ahd_next_queued_scb_addr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "NEXT_QUEUED_SCB_ADDR", 0x124, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_complete_scb_head_print;
+#else
+#define ahd_complete_scb_head_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "COMPLETE_SCB_HEAD", 0x128, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_complete_scb_dmainprog_head_print;
+#else
+#define ahd_complete_scb_dmainprog_head_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "COMPLETE_SCB_DMAINPROG_HEAD", 0x12a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_complete_dma_scb_head_print;
+#else
+#define ahd_complete_dma_scb_head_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_HEAD", 0x12c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_qfreeze_count_print;
+#else
+#define ahd_qfreeze_count_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "QFREEZE_COUNT", 0x12e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_saved_mode_print;
+#else
+#define ahd_saved_mode_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SAVED_MODE", 0x130, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_msg_out_print;
+#else
+#define ahd_msg_out_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MSG_OUT", 0x131, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_dmaparams_print;
+#else
+#define ahd_dmaparams_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DMAPARAMS", 0x132, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seq_flags_print;
+#else
+#define ahd_seq_flags_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQ_FLAGS", 0x133, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_saved_scsiid_print;
+#else
+#define ahd_saved_scsiid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SAVED_SCSIID", 0x134, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_saved_lun_print;
+#else
+#define ahd_saved_lun_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SAVED_LUN", 0x135, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_lastphase_print;
+#else
+#define ahd_lastphase_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LASTPHASE", 0x136, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_qoutfifo_entry_valid_tag_print;
+#else
+#define ahd_qoutfifo_entry_valid_tag_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "QOUTFIFO_ENTRY_VALID_TAG", 0x137, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_shared_data_addr_print;
+#else
+#define ahd_shared_data_addr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SHARED_DATA_ADDR", 0x138, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_qoutfifo_next_addr_print;
+#else
+#define ahd_qoutfifo_next_addr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "QOUTFIFO_NEXT_ADDR", 0x13c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_kernel_tqinpos_print;
+#else
+#define ahd_kernel_tqinpos_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "KERNEL_TQINPOS", 0x140, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_tqinpos_print;
+#else
+#define ahd_tqinpos_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "TQINPOS", 0x141, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_arg_1_print;
+#else
+#define ahd_arg_1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ARG_1", 0x142, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_arg_2_print;
+#else
+#define ahd_arg_2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ARG_2", 0x143, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_last_msg_print;
+#else
+#define ahd_last_msg_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LAST_MSG", 0x144, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scsiseq_template_print;
+#else
+#define ahd_scsiseq_template_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSISEQ_TEMPLATE", 0x145, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_initiator_tag_print;
+#else
+#define ahd_initiator_tag_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "INITIATOR_TAG", 0x146, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_seq_flags2_print;
+#else
+#define ahd_seq_flags2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SEQ_FLAGS2", 0x147, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_allocfifo_scbptr_print;
+#else
+#define ahd_allocfifo_scbptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "ALLOCFIFO_SCBPTR", 0x148, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_int_coalescing_timer_print;
+#else
+#define ahd_int_coalescing_timer_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "INT_COALESCING_TIMER", 0x14a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_int_coalescing_maxcmds_print;
+#else
+#define ahd_int_coalescing_maxcmds_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "INT_COALESCING_MAXCMDS", 0x14c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_int_coalescing_mincmds_print;
+#else
+#define ahd_int_coalescing_mincmds_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "INT_COALESCING_MINCMDS", 0x14d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmds_pending_print;
+#else
+#define ahd_cmds_pending_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMDS_PENDING", 0x14e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_int_coalescing_cmdcount_print;
+#else
+#define ahd_int_coalescing_cmdcount_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "INT_COALESCING_CMDCOUNT", 0x150, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_local_hs_mailbox_print;
+#else
+#define ahd_local_hs_mailbox_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LOCAL_HS_MAILBOX", 0x151, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_cmdsize_table_print;
+#else
+#define ahd_cmdsize_table_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CMDSIZE_TABLE", 0x152, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_base_print;
+#else
+#define ahd_scb_base_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_residual_datacnt_print;
+#else
+#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_residual_sgptr_print;
+#else
+#define ahd_scb_residual_sgptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_RESIDUAL_SGPTR", 0x184, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_scsi_status_print;
+#else
+#define ahd_scb_scsi_status_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_SCSI_STATUS", 0x188, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_target_phases_print;
+#else
+#define ahd_scb_target_phases_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_TARGET_PHASES", 0x189, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_target_data_dir_print;
+#else
+#define ahd_scb_target_data_dir_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_TARGET_DATA_DIR", 0x18a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_target_itag_print;
+#else
+#define ahd_scb_target_itag_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_TARGET_ITAG", 0x18b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_sense_busaddr_print;
+#else
+#define ahd_scb_sense_busaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR", 0x18c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_tag_print;
+#else
+#define ahd_scb_tag_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_TAG", 0x190, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_control_print;
+#else
+#define ahd_scb_control_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_CONTROL", 0x192, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_scsiid_print;
+#else
+#define ahd_scb_scsiid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_SCSIID", 0x193, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_lun_print;
+#else
+#define ahd_scb_lun_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_LUN", 0x194, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_task_attribute_print;
+#else
+#define ahd_scb_task_attribute_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_TASK_ATTRIBUTE", 0x195, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_cdb_len_print;
+#else
+#define ahd_scb_cdb_len_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_CDB_LEN", 0x196, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_task_management_print;
+#else
+#define ahd_scb_task_management_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_TASK_MANAGEMENT", 0x197, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_dataptr_print;
+#else
+#define ahd_scb_dataptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_DATAPTR", 0x198, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_datacnt_print;
+#else
+#define ahd_scb_datacnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_DATACNT", 0x1a0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_sgptr_print;
+#else
+#define ahd_scb_sgptr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_SGPTR", 0x1a4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_busaddr_print;
+#else
+#define ahd_scb_busaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_BUSADDR", 0x1a8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_next_print;
+#else
+#define ahd_scb_next_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_NEXT", 0x1ac, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_next2_print;
+#else
+#define ahd_scb_next2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_NEXT2", 0x1ae, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_spare_print;
+#else
+#define ahd_scb_spare_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_SPARE", 0x1b0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_scb_disconnected_lists_print;
+#else
+#define ahd_scb_disconnected_lists_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS", 0x1b8, regvalue, cur_col, wrap)
+#endif
+
+
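+/*
+ * Register addresses and bit-field definitions.  Several registers
+ * share the same address: the register file is banked, and which
+ * register a given access reaches depends on the mode currently
+ * selected through MODE_PTR (SRC_MODE/DST_MODE below) and, for some
+ * status/clear pairs, on whether the location is read or written.
+ */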
+#define	MODE_PTR        		0x00
+#define		DST_MODE        	0x70
+#define		SRC_MODE        	0x07
+
+#define	INTSTAT         		0x01
+#define		INT_PEND        	0xff
+#define		HWERRINT        	0x80
+#define		BRKADRINT       	0x40
+#define		SWTMINT         	0x20
+#define		PCIINT          	0x10
+#define		SCSIINT         	0x08
+#define		SEQINT          	0x04
+#define		CMDCMPLT        	0x02
+#define		SPLTINT         	0x01
+
+#define	SEQINTCODE      		0x02
+#define		BAD_SCB_STATUS  	0x1a
+#define		SAW_HWERR       	0x19
+#define		TRACEPOINT3     	0x18
+#define		TRACEPOINT2     	0x17
+#define		TRACEPOINT1     	0x16
+#define		TRACEPOINT0     	0x15
+#define		TASKMGMT_CMD_CMPLT_OKAY	0x14
+#define		TASKMGMT_FUNC_COMPLETE	0x13
+#define		ENTERING_NONPACK	0x12
+#define		CFG4OVERRUN     	0x11
+#define		STATUS_OVERRUN  	0x10
+#define		CFG4ISTAT_INTR  	0x0f
+#define		INVALID_SEQINT  	0x0e
+#define		ILLEGAL_PHASE   	0x0d
+#define		DUMP_CARD_STATE 	0x0c
+#define		MISSED_BUSFREE  	0x0b
+#define		MKMSG_FAILED    	0x0a
+#define		DATA_OVERRUN    	0x09
+#define		BAD_STATUS      	0x08
+#define		HOST_MSG_LOOP   	0x07
+#define		PDATA_REINIT    	0x06
+#define		IGN_WIDE_RES    	0x05
+#define		NO_MATCH        	0x04
+#define		PROTO_VIOLATION 	0x03
+#define		SEND_REJECT     	0x02
+#define		BAD_PHASE       	0x01
+#define		NO_SEQINT       	0x00
+
+#define	CLRINT          		0x03
+#define		CLRHWERRINT     	0x80
+#define		CLRBRKADRINT    	0x40
+#define		CLRSWTMINT      	0x20
+#define		CLRPCIINT       	0x10
+#define		CLRSCSIINT      	0x08
+#define		CLRSEQINT       	0x04
+#define		CLRCMDINT       	0x02
+#define		CLRSPLTINT      	0x01
+
+#define	ERROR           		0x04
+#define		CIOPARERR       	0x80
+#define		CIOACCESFAIL    	0x40
+#define		MPARERR         	0x20
+#define		DPARERR         	0x10
+#define		SQPARERR        	0x08
+#define		ILLOPCODE       	0x04
+#define		DSCTMOUT        	0x02
+
+#define	CLRERR          		0x04
+#define		CLRCIOPARERR    	0x80
+#define		CLRCIOACCESFAIL 	0x40
+#define		CLRMPARERR      	0x20
+#define		CLRDPARERR      	0x10
+#define		CLRSQPARERR     	0x08
+#define		CLRILLOPCODE    	0x04
+#define		CLRDSCTMOUT     	0x02
+
+#define	HCNTRL          		0x05
+#define		SEQ_RESET       	0x80
+#define		POWRDN          	0x40
+#define		SWINT           	0x10
+#define		SWTIMER_START_B 	0x08
+#define		PAUSE           	0x04
+#define		INTEN           	0x02
+#define		CHIPRST         	0x01
+#define		CHIPRSTACK      	0x01
+
+#define	HNSCB_QOFF      		0x06
+
+#define	HESCB_QOFF      		0x08
+
+#define	HS_MAILBOX      		0x0b
+#define		HOST_TQINPOS    	0x80
+#define		ENINT_COALESCE  	0x40
+
+#define	CLRSEQINTSTAT   		0x0c
+#define		CLRSEQ_SWTMRTO  	0x10
+#define		CLRSEQ_SEQINT   	0x08
+#define		CLRSEQ_SCSIINT  	0x04
+#define		CLRSEQ_PCIINT   	0x02
+#define		CLRSEQ_SPLTINT  	0x01
+
+#define	SEQINTSTAT      		0x0c
+#define		SEQ_SWTMRTO     	0x10
+#define		SEQ_SEQINT      	0x08
+#define		SEQ_SCSIINT     	0x04
+#define		SEQ_PCIINT      	0x02
+#define		SEQ_SPLTINT     	0x01
+
+#define	SWTIMER         		0x0e
+
+#define	SNSCB_QOFF      		0x10
+
+#define	SESCB_QOFF      		0x12
+
+#define	SDSCB_QOFF      		0x14
+
+#define	QOFF_CTLSTA     		0x16
+#define		EMPTY_SCB_AVAIL 	0x80
+#define		NEW_SCB_AVAIL   	0x40
+#define		SDSCB_ROLLOVR   	0x20
+#define		HS_MAILBOX_ACT  	0x10
+#define		SCB_QSIZE       	0x0f
+#define		SCB_QSIZE_16384 	0x0c
+#define		SCB_QSIZE_8192  	0x0b
+#define		SCB_QSIZE_4096  	0x0a
+#define		SCB_QSIZE_2048  	0x09
+#define		SCB_QSIZE_1024  	0x08
+#define		SCB_QSIZE_512   	0x07
+#define		SCB_QSIZE_256   	0x06
+#define		SCB_QSIZE_128   	0x05
+#define		SCB_QSIZE_64    	0x04
+#define		SCB_QSIZE_32    	0x03
+#define		SCB_QSIZE_16    	0x02
+#define		SCB_QSIZE_8     	0x01
+#define		SCB_QSIZE_4     	0x00
+
+#define	INTCTL          		0x18
+#define		SWTMINTMASK     	0x80
+#define		SWTMINTEN       	0x40
+#define		SWTIMER_START   	0x20
+#define		AUTOCLRCMDINT   	0x10
+#define		PCIINTEN        	0x08
+#define		SCSIINTEN       	0x04
+#define		SEQINTEN        	0x02
+#define		SPLTINTEN       	0x01
+
+#define	DFCNTRL         		0x19
+#define		SCSIENWRDIS     	0x40
+#define		SCSIENACK       	0x20
+#define		DIRECTIONACK    	0x04
+#define		FIFOFLUSHACK    	0x02
+#define		DIRECTIONEN     	0x01
+
+#define	DSCOMMAND0      		0x19
+#define		CACHETHEN       	0x80
+#define		DPARCKEN        	0x40
+#define		MPARCKEN        	0x20
+#define		EXTREQLCK       	0x10
+#define		DISABLE_TWATE   	0x02
+#define		CIOPARCKEN      	0x01
+
+#define	DFSTATUS        		0x1a
+#define		PRELOAD_AVAIL   	0x80
+#define		PKT_PRELOAD_AVAIL	0x40
+#define		MREQPEND        	0x10
+#define		HDONE           	0x08
+#define		DFTHRESH        	0x04
+#define		FIFOFULL        	0x02
+#define		FIFOEMP         	0x01
+
+#define	SG_CACHE_SHADOW 		0x1b
+#define		ODD_SEG         	0x04
+#define		LAST_SEG        	0x02
+#define		LAST_SEG_DONE   	0x01
+
+#define	ARBCTL          		0x1b
+#define		RESET_HARB      	0x80
+#define		RETRY_SWEN      	0x08
+#define		USE_TIME        	0x07
+
+#define	SG_CACHE_PRE    		0x1b
+
+#define	LQIN            		0x20
+
+#define	TYPEPTR         		0x20
+
+#define	TAGPTR          		0x21
+
+#define	LUNPTR          		0x22
+
+#define	DATALENPTR      		0x23
+
+#define	STATLENPTR      		0x24
+
+#define	CMDLENPTR       		0x25
+
+#define	ATTRPTR         		0x26
+
+#define	FLAGPTR         		0x27
+
+#define	CMDPTR          		0x28
+
+#define	QNEXTPTR        		0x29
+
+#define	IDPTR           		0x2a
+
+#define	ABRTBYTEPTR     		0x2b
+
+#define	ABRTBITPTR      		0x2c
+
+#define	MAXCMDBYTES     		0x2d
+
+#define	MAXCMD2RCV      		0x2e
+
+#define	SHORTTHRESH     		0x2f
+
+#define	LUNLEN          		0x30
+#define		TLUNLEN         	0xf0
+#define		ILUNLEN         	0x0f
+
+#define	CDBLIMIT        		0x31
+
+#define	MAXCMD          		0x32
+
+#define	MAXCMDCNT       		0x33
+
+#define	LQRSVD01        		0x34
+
+#define	LQRSVD16        		0x35
+
+#define	LQRSVD17        		0x36
+
+#define	CMDRSVD0        		0x37
+
+#define	LQCTL0          		0x38
+#define		LQITARGCLT      	0xc0
+#define		LQIINITGCLT     	0x30
+#define		LQ0TARGCLT      	0x0c
+#define		LQ0INITGCLT     	0x03
+
+#define	LQCTL1          		0x38
+#define		PCI2PCI         	0x04
+#define		SINGLECMD       	0x02
+#define		ABORTPENDING    	0x01
+
+#define	SCSBIST0        		0x39
+#define		GSBISTERR       	0x40
+#define		GSBISTDONE      	0x20
+#define		GSBISTRUN       	0x10
+#define		OSBISTERR       	0x04
+#define		OSBISTDONE      	0x02
+#define		OSBISTRUN       	0x01
+
+#define	LQCTL2          		0x39
+#define		LQIRETRY        	0x80
+#define		LQICONTINUE     	0x40
+#define		LQITOIDLE       	0x20
+#define		LQIPAUSE        	0x10
+#define		LQORETRY        	0x08
+#define		LQOCONTINUE     	0x04
+#define		LQOTOIDLE       	0x02
+#define		LQOPAUSE        	0x01
+
+#define	SCSBIST1        		0x3a
+#define		NTBISTERR       	0x04
+#define		NTBISTDONE      	0x02
+#define		NTBISTRUN       	0x01
+
+#define	SCSISEQ0        		0x3a
+#define		TEMODEO         	0x80
+#define		ENSELO          	0x40
+#define		ENARBO          	0x20
+#define		FORCEBUSFREE    	0x10
+#define		SCSIRSTO        	0x01
+
+#define	SCSISEQ1        		0x3b
+
+#define	SXFRCTL0        		0x3c
+#define		DFON            	0x80
+#define		DFPEXP          	0x40
+#define		BIOSCANCELEN    	0x10
+#define		SPIOEN          	0x08
+
+#define	BUSINITID       		0x3c
+
+#define	DLCOUNT         		0x3c
+
+#define	SXFRCTL1        		0x3d
+#define		BITBUCKET       	0x80
+#define		ENSACHK         	0x40
+#define		ENSPCHK         	0x20
+#define		STIMESEL        	0x18
+#define		ENSTIMER        	0x04
+#define		ACTNEGEN        	0x02
+#define		STPWEN          	0x01
+
+#define	BUSTARGID       		0x3e
+
+#define	SXFRCTL2        		0x3e
+#define		AUTORSTDIS      	0x10
+#define		CMDDMAEN        	0x08
+#define		ASU             	0x07
+
+#define	DFFSTAT         		0x3f
+#define		CURRFIFO        	0x03
+#define		FIFO1FREE       	0x20
+#define		FIFO0FREE       	0x10
+#define		CURRFIFO_NONE   	0x03
+#define		CURRFIFO_1      	0x01
+#define		CURRFIFO_0      	0x00
+
+#define	SCSISIGO        		0x40
+#define		CDO             	0x80
+#define		IOO             	0x40
+#define		MSGO            	0x20
+#define		ATNO            	0x10
+#define		SELO            	0x08
+#define		BSYO            	0x04
+#define		REQO            	0x02
+#define		ACKO            	0x01
+
+#define	MULTARGID       		0x40
+
+#define	SCSISIGI        		0x41
+#define		ATNI            	0x10
+#define		SELI            	0x08
+#define		BSYI            	0x04
+#define		REQI            	0x02
+#define		ACKI            	0x01
+
+#define	SCSIPHASE       		0x42
+#define		STATUS_PHASE    	0x20
+#define		COMMAND_PHASE   	0x10
+#define		MSG_IN_PHASE    	0x08
+#define		MSG_OUT_PHASE   	0x04
+#define		DATA_PHASE_MASK 	0x03
+#define		DATA_IN_PHASE   	0x02
+#define		DATA_OUT_PHASE  	0x01
+
+#define	SCSIDAT0_IMG    		0x43
+
+#define	SCSIDAT         		0x44
+
+#define	SCSIBUS         		0x46
+
+#define	TARGIDIN        		0x48
+#define		CLKOUT          	0x80
+#define		TARGID          	0x0f
+
+#define	SELID           		0x49
+#define		SELID_MASK      	0xf0
+#define		ONEBIT          	0x08
+
+#define	SBLKCTL         		0x4a
+#define		DIAGLEDEN       	0x80
+#define		DIAGLEDON       	0x40
+#define		ENAB40          	0x08
+#define		ENAB20          	0x04
+#define		SELWIDE         	0x02
+
+#define	OPTIONMODE      		0x4a
+#define		OPTIONMODE_DEFAULTS	0x02
+#define		BIOSCANCTL      	0x80
+#define		AUTOACKEN       	0x40
+#define		BIASCANCTL      	0x20
+#define		BUSFREEREV      	0x10
+#define		ENDGFORMCHK     	0x04
+#define		AUTO_MSGOUT_DE  	0x02
+
+#define	SSTAT0          		0x4b
+#define		TARGET          	0x80
+#define		SELDO           	0x40
+#define		SELDI           	0x20
+#define		SELINGO         	0x10
+#define		IOERR           	0x08
+#define		OVERRUN         	0x04
+#define		SPIORDY         	0x02
+#define		ARBDO           	0x01
+
+#define	CLRSINT0        		0x4b
+#define		CLRSELDO        	0x40
+#define		CLRSELDI        	0x20
+#define		CLRSELINGO      	0x10
+#define		CLRIOERR        	0x08
+#define		CLROVERRUN      	0x04
+#define		CLRSPIORDY      	0x02
+#define		CLRARBDO        	0x01
+
+#define	SIMODE0         		0x4b
+#define		ENSELDO         	0x40
+#define		ENSELDI         	0x20
+#define		ENSELINGO       	0x10
+#define		ENIOERR         	0x08
+#define		ENOVERRUN       	0x04
+#define		ENSPIORDY       	0x02
+#define		ENARBDO         	0x01
+
+#define	CLRSINT1        		0x4c
+#define		CLRSELTIMEO     	0x80
+#define		CLRATNO         	0x40
+#define		CLRSCSIRSTI     	0x20
+#define		CLRBUSFREE      	0x08
+#define		CLRSCSIPERR     	0x04
+#define		CLRSTRB2FAST    	0x02
+#define		CLRREQINIT      	0x01
+
+#define	SSTAT1          		0x4c
+#define		SELTO           	0x80
+#define		ATNTARG         	0x40
+#define		SCSIRSTI        	0x20
+#define		PHASEMIS        	0x10
+#define		BUSFREE         	0x08
+#define		SCSIPERR        	0x04
+#define		STRB2FAST       	0x02
+#define		REQINIT         	0x01
+
+#define	SSTAT2          		0x4d
+#define		BUSFREETIME     	0xc0
+#define		NONPACKREQ      	0x20
+#define		EXP_ACTIVE      	0x10
+#define		BSYX            	0x08
+#define		WIDE_RES        	0x04
+#define		SDONE           	0x02
+#define		DMADONE         	0x01
+#define		BUSFREE_DFF1    	0xc0
+#define		BUSFREE_DFF0    	0x80
+#define		BUSFREE_LQO     	0x40
+
+#define	CLRSINT2        		0x4d
+#define		CLRNONPACKREQ   	0x20
+#define		CLRWIDE_RES     	0x04
+#define		CLRSDONE        	0x02
+#define		CLRDMADONE      	0x01
+
+#define	SIMODE2         		0x4d
+#define		ENWIDE_RES      	0x04
+#define		ENSDONE         	0x02
+#define		ENDMADONE       	0x01
+
+#define	PERRDIAG        		0x4e
+#define		HIZERO          	0x80
+#define		HIPERR          	0x40
+#define		PREVPHASE       	0x20
+#define		PARITYERR       	0x10
+#define		AIPERR          	0x08
+#define		CRCERR          	0x04
+#define		DGFORMERR       	0x02
+#define		DTERR           	0x01
+
+#define	LQISTATE        		0x4e
+
+#define	SOFFCNT         		0x4f
+
+#define	LQOSTATE        		0x4f
+
+#define	LQISTAT0        		0x50
+#define		LQIATNQAS       	0x20
+#define		LQICRCT1        	0x10
+#define		LQICRCT2        	0x08
+#define		LQIBADLQT       	0x04
+#define		LQIATNLQ        	0x02
+#define		LQIATNCMD       	0x01
+
+#define	CLRLQIINT0      		0x50
+#define		CLRLQIATNQAS    	0x20
+#define		CLRLQICRCT1     	0x10
+#define		CLRLQICRCT2     	0x08
+#define		CLRLQIBADLQT    	0x04
+#define		CLRLQIATNLQ     	0x02
+#define		CLRLQIATNCMD    	0x01
+
+#define	LQIMODE0        		0x50
+#define		ENLQIATNQASK    	0x20
+#define		ENLQICRCT1      	0x10
+#define		ENLQICRCT2      	0x08
+#define		ENLQIBADLQT     	0x04
+#define		ENLQIATNLQ      	0x02
+#define		ENLQIATNCMD     	0x01
+
+#define	LQIMODE1        		0x51
+#define		ENLQIPHASE_LQ   	0x80
+#define		ENLQIPHASE_NLQ  	0x40
+#define		ENLIQABORT      	0x20
+#define		ENLQICRCI_LQ    	0x10
+#define		ENLQICRCI_NLQ   	0x08
+#define		ENLQIBADLQI     	0x04
+#define		ENLQIOVERI_LQ   	0x02
+#define		ENLQIOVERI_NLQ  	0x01
+
+#define	LQISTAT1        		0x51
+#define		LQIPHASE_LQ     	0x80
+#define		LQIPHASE_NLQ    	0x40
+#define		LQIABORT        	0x20
+#define		LQICRCI_LQ      	0x10
+#define		LQICRCI_NLQ     	0x08
+#define		LQIBADLQI       	0x04
+#define		LQIOVERI_LQ     	0x02
+#define		LQIOVERI_NLQ    	0x01
+
+#define	CLRLQIINT1      		0x51
+#define		CLRLQIPHASE_LQ  	0x80
+#define		CLRLQIPHASE_NLQ 	0x40
+#define		CLRLIQABORT     	0x20
+#define		CLRLQICRCI_LQ   	0x10
+#define		CLRLQICRCI_NLQ  	0x08
+#define		CLRLQIBADLQI    	0x04
+#define		CLRLQIOVERI_LQ  	0x02
+#define		CLRLQIOVERI_NLQ 	0x01
+
+#define	LQISTAT2        		0x52
+#define		PACKETIZED      	0x80
+#define		LQIPHASE_OUTPKT 	0x40
+#define		LQIWORKONLQ     	0x20
+#define		LQIWAITFIFO     	0x10
+#define		LQISTOPPKT      	0x08
+#define		LQISTOPLQ       	0x04
+#define		LQISTOPCMD      	0x02
+#define		LQIGSAVAIL      	0x01
+
+#define	SSTAT3          		0x53
+#define		NTRAMPERR       	0x02
+#define		OSRAMPERR       	0x01
+
+#define	SIMODE3         		0x53
+#define		ENNTRAMPERR     	0x02
+#define		ENOSRAMPERR     	0x01
+
+#define	CLRSINT3        		0x53
+#define		CLRNTRAMPERR    	0x02
+#define		CLROSRAMPERR    	0x01
+
+#define	LQOMODE0        		0x54
+#define		ENLQOTARGSCBPERR	0x10
+#define		ENLQOSTOPT2     	0x08
+#define		ENLQOATNLQ      	0x04
+#define		ENLQOATNPKT     	0x02
+#define		ENLQOTCRC       	0x01
+
+#define	LQOSTAT0        		0x54
+#define		LQOTARGSCBPERR  	0x10
+#define		LQOSTOPT2       	0x08
+#define		LQOATNLQ        	0x04
+#define		LQOATNPKT       	0x02
+#define		LQOTCRC         	0x01
+
+#define	CLRLQOINT0      		0x54
+#define		CLRLQOTARGSCBPERR	0x10
+#define		CLRLQOSTOPT2    	0x08
+#define		CLRLQOATNLQ     	0x04
+#define		CLRLQOATNPKT    	0x02
+#define		CLRLQOTCRC      	0x01
+
+#define	LQOSTAT1        		0x55
+#define		LQOINITSCBPERR  	0x10
+#define		LQOSTOPI2       	0x08
+#define		LQOBADQAS       	0x04
+#define		LQOBUSFREE      	0x02
+#define		LQOPHACHGINPKT  	0x01
+
+#define	CLRLQOINT1      		0x55
+#define		CLRLQOINITSCBPERR	0x10
+#define		CLRLQOSTOPI2    	0x08
+#define		CLRLQOBADQAS    	0x04
+#define		CLRLQOBUSFREE   	0x02
+#define		CLRLQOPHACHGINPKT	0x01
+
+#define	LQOMODE1        		0x55
+#define		ENLQOINITSCBPERR	0x10
+#define		ENLQOSTOPI2     	0x08
+#define		ENLQOBADQAS     	0x04
+#define		ENLQOBUSFREE    	0x02
+#define		ENLQOPHACHGINPKT	0x01
+
+#define	LQOSTAT2        		0x56
+#define		LQOPKT          	0xe0
+#define		LQOWAITFIFO     	0x10
+#define		LQOPHACHGOUTPKT 	0x02
+#define		LQOSTOP0        	0x01
+
+#define	OS_SPACE_CNT    		0x56
+
+#define	SIMODE1         		0x57
+#define		ENSELTIMO       	0x80
+#define		ENATNTARG       	0x40
+#define		ENSCSIRST       	0x20
+#define		ENPHASEMIS      	0x10
+#define		ENBUSFREE       	0x08
+#define		ENSCSIPERR      	0x04
+#define		ENSTRB2FAST     	0x02
+#define		ENREQINIT       	0x01
+
+#define	GSFIFO          		0x58
+
+#define	DFFSXFRCTL      		0x5a
+#define		DFFBITBUCKET    	0x08
+#define		CLRSHCNT        	0x04
+#define		CLRCHN          	0x02
+#define		RSTCHN          	0x01
+
+#define	LQOSCSCTL       		0x5a
+#define		LQOH2A_VERSION  	0x80
+#define		LQONOCHKOVER    	0x01
+
+#define	NEXTSCB         		0x5a
+
+#define	CLRSEQINTSRC    		0x5b
+#define		CLRCTXTDONE     	0x40
+#define		CLRSAVEPTRS     	0x20
+#define		CLRCFG4DATA     	0x10
+#define		CLRCFG4ISTAT    	0x08
+#define		CLRCFG4TSTAT    	0x04
+#define		CLRCFG4ICMD     	0x02
+#define		CLRCFG4TCMD     	0x01
+
+#define	SEQINTSRC       		0x5b
+#define		CTXTDONE        	0x40
+#define		SAVEPTRS        	0x20
+#define		CFG4DATA        	0x10
+#define		CFG4ISTAT       	0x08
+#define		CFG4TSTAT       	0x04
+#define		CFG4ICMD        	0x02
+#define		CFG4TCMD        	0x01
+
+#define	CURRSCB         		0x5c
+
+#define	SEQIMODE        		0x5c
+#define		ENCTXTDONE      	0x40
+#define		ENSAVEPTRS      	0x20
+#define		ENCFG4DATA      	0x10
+#define		ENCFG4ISTAT     	0x08
+#define		ENCFG4TSTAT     	0x04
+#define		ENCFG4ICMD      	0x02
+#define		ENCFG4TCMD      	0x01
+
+#define	MDFFSTAT        		0x5d
+#define		SHCNTNEGATIVE   	0x40
+#define		SHCNTMINUS1     	0x20
+#define		LASTSDONE       	0x10
+#define		SHVALID         	0x08
+#define		DLZERO          	0x04
+#define		DATAINFIFO      	0x02
+#define		FIFOFREE        	0x01
+
+#define	CRCCONTROL      		0x5d
+#define		CRCVALCHKEN     	0x40
+
+#define	DFFTAG          		0x5e
+
+#define	LASTSCB         		0x5e
+
+#define	SCSITEST        		0x5e
+#define		CNTRTEST        	0x08
+#define		SEL_TXPLL_DEBUG 	0x04
+
+#define	IOPDNCTL        		0x5f
+#define		DISABLE_OE      	0x80
+#define		PDN_IDIST       	0x04
+#define		PDN_DIFFSENSE   	0x01
+
+#define	SHADDR          		0x60
+
+#define	NEGOADDR        		0x60
+
+#define	DGRPCRCI        		0x60
+
+#define	NEGPERIOD       		0x61
+
+#define	PACKCRCI        		0x62
+
+#define	NEGOFFSET       		0x62
+
+#define	NEGPPROPTS      		0x63
+#define		PPROPT_PACE     	0x08
+#define		PPROPT_QAS      	0x04
+#define		PPROPT_DT       	0x02
+#define		PPROPT_IUT      	0x01
+
+#define	NEGCONOPTS      		0x64
+#define		ENSNAPSHOT      	0x40
+#define		RTI_WRTDIS      	0x20
+#define		RTI_OVRDTRN     	0x10
+#define		ENSLOWCRC       	0x08
+#define		ENAUTOATNI      	0x04
+#define		ENAUTOATNO      	0x02
+#define		WIDEXFER        	0x01
+
+#define	ANNEXCOL        		0x65
+
+#define	SCSCHKN         		0x66
+#define		STSELSKIDDIS    	0x40
+#define		CURRFIFODEF     	0x20
+#define		WIDERESEN       	0x10
+#define		SDONEMSKDIS     	0x08
+#define		DFFACTCLR       	0x04
+#define		SHVALIDSTDIS    	0x02
+#define		LSTSGCLRDIS     	0x01
+
+#define	ANNEXDAT        		0x66
+
+#define	IOWNID          		0x67
+
+#define	PLL960CTL0      		0x68
+
+#define	SHCNT           		0x68
+
+#define	TOWNID          		0x69
+
+#define	PLL960CTL1      		0x69
+
+#define	PLL960CNT0      		0x6a
+
+#define	XSIG            		0x6a
+
+#define	SELOID          		0x6b
+
+#define	PLL400CTL0      		0x6c
+#define		PLL_VCOSEL      	0x80
+#define		PLL_PWDN        	0x40
+#define		PLL_NS          	0x30
+#define		PLL_ENLUD       	0x08
+#define		PLL_ENLPF       	0x04
+#define		PLL_DLPF        	0x02
+#define		PLL_ENFBM       	0x01
+
+#define	FAIRNESS        		0x6c
+
+#define	PLL400CTL1      		0x6d
+#define		PLL_CNTEN       	0x80
+#define		PLL_CNTCLR      	0x40
+#define		PLL_RST         	0x01
+
+#define	PLL400CNT0      		0x6e
+
+#define	UNFAIRNESS      		0x6e
+
+#define	HADDR           		0x70
+
+#define	PLLDELAY        		0x70
+#define		SPLIT_DROP_REQ  	0x80
+
+#define	HODMAADR        		0x70
+
+#define	HODMACNT        		0x78
+
+#define	HCNT            		0x78
+
+#define	HODMAEN         		0x7a
+
+#define	SGHADDR         		0x7c
+
+#define	SCBHADDR        		0x7c
+
+#define	SGHCNT          		0x84
+
+#define	SCBHCNT         		0x84
+
+#define	DFF_THRSH       		0x88
+#define		WR_DFTHRSH      	0x70
+#define		RD_DFTHRSH      	0x07
+#define		WR_DFTHRSH_MAX  	0x70
+#define		WR_DFTHRSH_90   	0x60
+#define		WR_DFTHRSH_85   	0x50
+#define		WR_DFTHRSH_75   	0x40
+#define		WR_DFTHRSH_63   	0x30
+#define		WR_DFTHRSH_50   	0x20
+#define		WR_DFTHRSH_25   	0x10
+#define		RD_DFTHRSH_MAX  	0x07
+#define		RD_DFTHRSH_90   	0x06
+#define		RD_DFTHRSH_85   	0x05
+#define		RD_DFTHRSH_75   	0x04
+#define		RD_DFTHRSH_63   	0x03
+#define		RD_DFTHRSH_50   	0x02
+#define		RD_DFTHRSH_25   	0x01
+#define		WR_DFTHRSH_MIN  	0x00
+#define		RD_DFTHRSH_MIN  	0x00
+
+#define	ROMADDR         		0x8a
+
+#define	ROMCNTRL        		0x8d
+#define		ROMOP           	0xe0
+#define		ROMSPD          	0x18
+#define		REPEAT          	0x02
+#define		RDY             	0x01
+
+#define	ROMDATA         		0x8e
+
+#define	CMCRXMSG0       		0x90
+
+#define	ROENABLE        		0x90
+#define		MSIROEN         	0x20
+#define		OVLYROEN        	0x10
+#define		CMCROEN         	0x08
+#define		SGROEN          	0x04
+#define		DCH1ROEN        	0x02
+#define		DCH0ROEN        	0x01
+
+#define	OVLYRXMSG0      		0x90
+
+#define	DCHRXMSG0       		0x90
+
+#define	OVLYRXMSG1      		0x91
+
+#define	NSENABLE        		0x91
+#define		MSINSEN         	0x20
+#define		OVLYNSEN        	0x10
+#define		CMCNSEN         	0x08
+#define		SGNSEN          	0x04
+#define		DCH1NSEN        	0x02
+#define		DCH0NSEN        	0x01
+
+#define	DCHRXMSG1       		0x91
+
+#define	CMCRXMSG1       		0x91
+
+#define	DCHRXMSG2       		0x92
+
+#define	OVLYRXMSG2      		0x92
+
+#define	CMCRXMSG2       		0x92
+
+#define	OST             		0x92
+
+#define	DCHRXMSG3       		0x93
+
+#define	CMCRXMSG3       		0x93
+
+#define	PCIXCTL         		0x93
+#define		SERRPULSE       	0x80
+#define		UNEXPSCIEN      	0x20
+#define		SPLTSMADIS      	0x10
+#define		SPLTSTADIS      	0x08
+#define		SRSPDPEEN       	0x04
+#define		TSCSERREN       	0x02
+#define		CMPABCDIS       	0x01
+
+#define	OVLYRXMSG3      		0x93
+
+#define	OVLYSEQBCNT     		0x94
+
+#define	CMCSEQBCNT      		0x94
+
+#define	DCHSEQBCNT      		0x94
+
+#define	CMCSPLTSTAT0    		0x96
+
+#define	OVLYSPLTSTAT0   		0x96
+
+#define	DCHSPLTSTAT0    		0x96
+
+#define	DCHSPLTSTAT1    		0x97
+
+#define	CMCSPLTSTAT1    		0x97
+
+#define	OVLYSPLTSTAT1   		0x97
+
+#define	SGRXMSG0        		0x98
+#define		CDNUM           	0xf8
+#define		CFNUM           	0x07
+
+#define	SLVSPLTOUTADR0  		0x98
+#define		LOWER_ADDR      	0x7f
+
+#define	SGRXMSG1        		0x99
+#define		CBNUM           	0xff
+
+#define	SLVSPLTOUTADR1  		0x99
+#define		REQ_DNUM        	0xf8
+#define		REQ_FNUM        	0x07
+
+#define	SGRXMSG2        		0x9a
+#define		MINDEX          	0xff
+
+#define	SLVSPLTOUTADR2  		0x9a
+#define		REQ_BNUM        	0xff
+
+#define	SGRXMSG3        		0x9b
+#define		MCLASS          	0x0f
+
+#define	SLVSPLTOUTADR3  		0x9b
+#define		TAG_NUM         	0x1f
+#define		RLXORD          	0x10
+
+#define	SGSEQBCNT       		0x9c
+
+#define	SLVSPLTOUTATTR0 		0x9c
+#define		LOWER_BCNT      	0xff
+
+#define	SLVSPLTOUTATTR1 		0x9d
+#define		CMPLT_DNUM      	0xf8
+#define		CMPLT_FNUM      	0x07
+
+#define	SLVSPLTOUTATTR2 		0x9e
+#define		CMPLT_BNUM      	0xff
+
+#define	SGSPLTSTAT0     		0x9e
+#define		STAETERM        	0x80
+#define		SCBCERR         	0x40
+#define		SCADERR         	0x20
+#define		SCDATBUCKET     	0x10
+#define		CNTNOTCMPLT     	0x08
+#define		RXOVRUN         	0x04
+#define		RXSCEMSG        	0x02
+#define		RXSPLTRSP       	0x01
+
+#define	SFUNCT          		0x9f
+#define		TEST_GROUP      	0xf0
+#define		TEST_NUM        	0x0f
+
+#define	SGSPLTSTAT1     		0x9f
+#define		RXDATABUCKET    	0x01
+
+#define	DF0PCISTAT      		0xa0
+
+#define	REG0            		0xa0
+
+#define	DF1PCISTAT      		0xa1
+
+#define	SGPCISTAT       		0xa2
+
+#define	REG1            		0xa2
+
+#define	CMCPCISTAT      		0xa3
+
+#define	OVLYPCISTAT     		0xa4
+#define		SCAAPERR        	0x08
+#define		RDPERR          	0x04
+
+#define	REG_ISR         		0xa4
+
+#define	SG_STATE        		0xa6
+#define		FETCH_INPROG    	0x04
+#define		LOADING_NEEDED  	0x02
+#define		SEGS_AVAIL      	0x01
+
+#define	MSIPCISTAT      		0xa6
+#define		RMA             	0x20
+#define		RTA             	0x10
+#define		CLRPENDMSI      	0x08
+#define		DPR             	0x01
+
+#define	TARGPCISTAT     		0xa7
+#define		DPE             	0x80
+#define		SSE             	0x40
+#define		STA             	0x08
+#define		TWATERR         	0x02
+
+#define	DATA_COUNT_ODD  		0xa7
+
+#define	SCBPTR          		0xa8
+
+#define	CCSCBACNT       		0xab
+
+#define	SCBAUTOPTR      		0xab
+#define		AUSCBPTR_EN     	0x80
+#define		SCBPTR_ADDR     	0x38
+#define		SCBPTR_OFF      	0x07
+
+#define	CCSGADDR        		0xac
+
+#define	CCSCBADDR       		0xac
+
+#define	CCSCBADR_BK     		0xac
+
+#define	CMC_RAMBIST     		0xad
+#define		SG_ELEMENT_SIZE 	0x80
+#define		SCBRAMBIST_FAIL 	0x40
+#define		SG_BIST_FAIL    	0x20
+#define		SG_BIST_EN      	0x10
+#define		CMC_BUFFER_BIST_FAIL	0x02
+#define		CMC_BUFFER_BIST_EN	0x01
+
+#define	CCSGCTL         		0xad
+#define		CCSGEN          	0x0c
+#define		CCSGDONE        	0x80
+#define		SG_CACHE_AVAIL  	0x10
+#define		CCSGENACK       	0x08
+#define		SG_FETCH_REQ    	0x02
+#define		CCSGRESET       	0x01
+
+#define	CCSCBCTL        		0xad
+#define		CCSCBDONE       	0x80
+#define		ARRDONE         	0x40
+#define		CCARREN         	0x10
+#define		CCSCBEN         	0x08
+#define		CCSCBDIR        	0x04
+#define		CCSCBRESET      	0x01
+
+#define	CCSGRAM         		0xb0
+
+#define	FLEXADR         		0xb0
+
+#define	CCSCBRAM        		0xb0
+
+#define	FLEXCNT         		0xb3
+
+#define	FLEXDMASTAT     		0xb5
+#define		FLEXDMAERR      	0x02
+#define		FLEXDMADONE     	0x01
+
+#define	FLEXDATA        		0xb6
+
+#define	BRDDAT          		0xb8
+
+#define	BRDCTL          		0xb9
+#define		FLXARBACK       	0x80
+#define		FLXARBREQ       	0x40
+#define		BRDADDR         	0x38
+#define		BRDEN           	0x04
+#define		BRDRW           	0x02
+#define		BRDSTB          	0x01
+
+#define	SEEADR          		0xba
+
+#define	SEEDAT          		0xbc
+
+#define	SEECTL          		0xbe
+#define		SEEOP_EWEN      	0x40
+#define		SEEOP_WALL      	0x40
+#define		SEEOP_EWDS      	0x40
+#define		SEEOPCODE       	0x70
+#define		SEERST          	0x02
+#define		SEESTART        	0x01
+#define		SEEOP_ERASE     	0x70
+#define		SEEOP_READ      	0x60
+#define		SEEOP_WRITE     	0x50
+#define		SEEOP_ERAL      	0x40
+
+#define	SEESTAT         		0xbe
+#define		INIT_DONE       	0x80
+#define		LDALTID_L       	0x08
+#define		SEEARBACK       	0x04
+#define		SEEBUSY         	0x02
+
+#define	SCBCNT          		0xbf
+
+#define	DFWADDR         		0xc0
+
+#define	DSPFLTRCTL      		0xc0
+#define		FLTRDISABLE     	0x20
+#define		EDGESENSE       	0x10
+#define		DSPFCNTSEL      	0x0f
+
+#define	DSPDATACTL      		0xc1
+#define		BYPASSENAB      	0x80
+#define		DESQDIS         	0x10
+#define		RCVROFFSTDIS    	0x04
+#define		XMITOFFSTDIS    	0x02
+
+#define	DFRADDR         		0xc2
+
+#define	DSPREQCTL       		0xc2
+#define		MANREQCTL       	0xc0
+#define		MANREQDLY       	0x3f
+
+#define	DSPACKCTL       		0xc3
+#define		MANACKCTL       	0xc0
+#define		MANACKDLY       	0x3f
+
+#define	DFDAT           		0xc4
+
+#define	DSPSELECT       		0xc4
+#define		AUTOINCEN       	0x80
+#define		DSPSEL          	0x1f
+
+#define	WRTBIASCTL      		0xc5
+#define		AUTOXBCDIS      	0x80
+#define		XMITMANVAL      	0x3f
+
+#define	RCVRBIOSCTL     		0xc6
+#define		AUTORBCDIS      	0x80
+#define		RCVRMANVAL      	0x3f
+
+#define	WRTBIASCALC     		0xc7
+
+#define	DFPTRS          		0xc8
+
+#define	RCVRBIASCALC    		0xc8
+
+#define	DFBKPTR         		0xc9
+
+#define	SKEWCALC        		0xc9
+
+#define	DFDBCTL         		0xcb
+#define		DFF_CIO_WR_RDY  	0x20
+#define		DFF_CIO_RD_RDY  	0x10
+#define		DFF_DIR_ERR     	0x08
+#define		DFF_RAMBIST_FAIL	0x04
+#define		DFF_RAMBIST_DONE	0x02
+#define		DFF_RAMBIST_EN  	0x01
+
+#define	DFSCNT          		0xcc
+
+#define	DFBCNT          		0xce
+
+#define	OVLYADDR        		0xd4
+
+#define	SEQCTL0         		0xd6
+#define		PERRORDIS       	0x80
+#define		PAUSEDIS        	0x40
+#define		FAILDIS         	0x20
+#define		FASTMODE        	0x10
+#define		BRKADRINTEN     	0x08
+#define		STEP            	0x04
+#define		SEQRESET        	0x02
+#define		LOADRAM         	0x01
+
+#define	SEQCTL1         		0xd7
+#define		OVRLAY_DATA_CHK 	0x08
+#define		RAMBIST_DONE    	0x04
+#define		RAMBIST_FAIL    	0x02
+#define		RAMBIST_EN      	0x01
+
+#define	FLAGS           		0xd8
+#define		ZERO            	0x02
+#define		CARRY           	0x01
+
+#define	SEQINTCTL       		0xd9
+#define		INTVEC1DSL      	0x80
+#define		INT1_CONTEXT    	0x20
+#define		SCS_SEQ_INT1M1  	0x10
+#define		SCS_SEQ_INT1M0  	0x08
+#define		INTMASK2        	0x04
+#define		INTMASK1        	0x02
+#define		IRET            	0x01
+
+#define	SEQRAM          		0xda
+
+#define	PRGMCNT         		0xde
+
+#define	ACCUM           		0xe0
+
+#define	SINDEX          		0xe2
+
+#define	DINDEX          		0xe4
+
+#define	BRKADDR1        		0xe6
+#define		BRKDIS          	0x80
+
+#define	BRKADDR0        		0xe6
+
+#define	ALLONES         		0xe8
+
+#define	ALLZEROS        		0xea
+
+#define	NONE            		0xea
+
+#define	SINDIR          		0xec
+
+#define	DINDIR          		0xed
+
+#define	FUNCTION1       		0xf0
+
+#define	STACK           		0xf2
+
+#define	CURADDR         		0xf4
+
+#define	INTVEC1_ADDR    		0xf4
+
+#define	INTVEC2_ADDR    		0xf6
+
+#define	LASTADDR        		0xf6
+
+#define	LONGJMP_ADDR    		0xf8
+
+#define	ACCUM_SAVE      		0xfa
+
+#define	WAITING_SCB_TAILS		0x100
+
+#define	AHD_PCI_CONFIG_BASE		0x100
+
+#define	SRAM_BASE       		0x100
+
+#define	WAITING_TID_HEAD		0x120
+
+#define	WAITING_TID_TAIL		0x122
+
+#define	NEXT_QUEUED_SCB_ADDR		0x124
+
+#define	COMPLETE_SCB_HEAD		0x128
+
+#define	COMPLETE_SCB_DMAINPROG_HEAD		0x12a
+
+#define	COMPLETE_DMA_SCB_HEAD		0x12c
+
+#define	QFREEZE_COUNT   		0x12e
+
+#define	SAVED_MODE      		0x130
+
+#define	MSG_OUT         		0x131
+
+#define	DMAPARAMS       		0x132
+#define		PRELOADEN       	0x80
+#define		WIDEODD         	0x40
+#define		SCSIEN          	0x20
+#define		SDMAEN          	0x10
+#define		SDMAENACK       	0x10
+#define		HDMAENACK       	0x08
+#define		HDMAEN          	0x08
+#define		DIRECTION       	0x04
+#define		FIFOFLUSH       	0x02
+#define		FIFORESET       	0x01
+
+#define	SEQ_FLAGS       		0x133
+#define		NOT_IDENTIFIED  	0x80
+#define		NO_CDB_SENT     	0x40
+#define		TARGET_CMD_IS_TAGGED	0x40
+#define		DPHASE          	0x20
+#define		TARG_CMD_PENDING	0x10
+#define		CMDPHASE_PENDING	0x08
+#define		DPHASE_PENDING  	0x04
+#define		SPHASE_PENDING  	0x02
+#define		NO_DISCONNECT   	0x01
+
+#define	SAVED_SCSIID    		0x134
+
+#define	SAVED_LUN       		0x135
+
+#define	LASTPHASE       		0x136
+#define		PHASE_MASK      	0xe0
+#define		CDI             	0x80
+#define		IOI             	0x40
+#define		MSGI            	0x20
+#define		P_BUSFREE       	0x01
+#define		P_MESGIN        	0xe0
+#define		P_STATUS        	0xc0
+#define		P_MESGOUT       	0xa0
+#define		P_COMMAND       	0x80
+#define		P_DATAIN_DT     	0x60
+#define		P_DATAIN        	0x40
+#define		P_DATAOUT_DT    	0x20
+#define		P_DATAOUT       	0x00
+
+#define	QOUTFIFO_ENTRY_VALID_TAG		0x137
+
+#define	SHARED_DATA_ADDR		0x138
+
+#define	QOUTFIFO_NEXT_ADDR		0x13c
+
+#define	KERNEL_TQINPOS  		0x140
+
+#define	TQINPOS         		0x141
+
+#define	ARG_1           		0x142
+#define	RETURN_1        		0x142
+#define		SEND_MSG        	0x80
+#define		SEND_SENSE      	0x40
+#define		SEND_REJ        	0x20
+#define		MSGOUT_PHASEMIS 	0x10
+#define		EXIT_MSG_LOOP   	0x08
+#define		CONT_MSG_LOOP_WRITE	0x04
+#define		CONT_MSG_LOOP_READ	0x03
+#define		CONT_MSG_LOOP_TARG	0x02
+
+#define	ARG_2           		0x143
+#define	RETURN_2        		0x143
+
+#define	LAST_MSG        		0x144
+
+#define	SCSISEQ_TEMPLATE		0x145
+#define		MANUALCTL       	0x40
+#define		ENSELI          	0x20
+#define		ENRSELI         	0x10
+#define		MANUALP         	0x0c
+#define		ENAUTOATNP      	0x02
+#define		ALTSTIM         	0x01
+
+#define	INITIATOR_TAG   		0x146
+
+#define	SEQ_FLAGS2      		0x147
+#define		SELECTOUT_QFROZEN	0x04
+#define		TARGET_MSG_PENDING	0x02
+
+#define	ALLOCFIFO_SCBPTR		0x148
+
+#define	INT_COALESCING_TIMER		0x14a
+
+#define	INT_COALESCING_MAXCMDS		0x14c
+
+#define	INT_COALESCING_MINCMDS		0x14d
+
+#define	CMDS_PENDING    		0x14e
+
+#define	INT_COALESCING_CMDCOUNT		0x150
+
+#define	LOCAL_HS_MAILBOX		0x151
+
+#define	CMDSIZE_TABLE   		0x152
+
+#define	SCB_BASE        		0x180
+
+#define	SCB_RESIDUAL_DATACNT		0x180
+#define	SCB_CDB_STORE   		0x180
+#define	SCB_HOST_CDB_PTR		0x180
+
+#define	SCB_RESIDUAL_SGPTR		0x184
+#define		SG_ADDR_MASK    	0xf8
+#define		SG_OVERRUN_RESID	0x02
+
+#define	SCB_SCSI_STATUS 		0x188
+#define	SCB_HOST_CDB_LEN		0x188
+
+#define	SCB_TARGET_PHASES		0x189
+
+#define	SCB_TARGET_DATA_DIR		0x18a
+
+#define	SCB_TARGET_ITAG 		0x18b
+
+#define	SCB_SENSE_BUSADDR		0x18c
+#define	SCB_NEXT_COMPLETE		0x18c
+
+#define	SCB_TAG         		0x190
+#define	SCB_FIFO_USE_COUNT		0x190
+
+#define	SCB_CONTROL     		0x192
+#define		TARGET_SCB      	0x80
+#define		DISCENB         	0x40
+#define		TAG_ENB         	0x20
+#define		MK_MESSAGE      	0x10
+#define		STATUS_RCVD     	0x08
+#define		DISCONNECTED    	0x04
+#define		SCB_TAG_TYPE    	0x03
+
+#define	SCB_SCSIID      		0x193
+#define		TID             	0xf0
+#define		OID             	0x0f
+
+#define	SCB_LUN         		0x194
+#define		LID             	0xff
+
+#define	SCB_TASK_ATTRIBUTE		0x195
+#define		SCB_XFERLEN_ODD 	0x01
+
+#define	SCB_CDB_LEN     		0x196
+#define		SCB_CDB_LEN_PTR 	0x80
+
+#define	SCB_TASK_MANAGEMENT		0x197
+
+#define	SCB_DATAPTR     		0x198
+
+#define	SCB_DATACNT     		0x1a0
+#define		SG_LAST_SEG     	0x80
+#define		SG_HIGH_ADDR_BITS	0x7f
+
+#define	SCB_SGPTR       		0x1a4
+#define		SG_STATUS_VALID 	0x04
+#define		SG_FULL_RESID   	0x02
+#define		SG_LIST_NULL    	0x01
+
+#define	SCB_BUSADDR     		0x1a8
+
+#define	SCB_NEXT        		0x1ac
+#define	SCB_NEXT_SCB_BUSADDR		0x1ac
+
+#define	SCB_NEXT2       		0x1ae
+
+#define	SCB_SPARE       		0x1b0
+#define	SCB_PKT_LUN     		0x1b0
+
+#define	SCB_DISCONNECTED_LISTS		0x1b8
+
+
+#define	AHD_TIMER_US_PER_TICK	0x19
+#define	SCB_TRANSFER_SIZE_FULL_LUN	0x38
+#define	STATUS_QUEUE_FULL	0x28
+#define	STATUS_BUSY	0x08
+#define	MAX_OFFSET_NON_PACED	0x7f
+#define	MAX_OFFSET_PACED	0xfe
+#define	BUS_32_BIT	0x02
+#define	CCSGADDR_MAX	0x80
+#define	TID_SHIFT	0x04
+#define	MK_MESSAGE_BIT_OFFSET	0x04
+#define	WRTBIASCTL_HP_DEFAULT	0x00
+#define	SEEOP_EWDS_ADDR	0x00
+#define	AHD_AMPLITUDE_SHIFT	0x00
+#define	AHD_AMPLITUDE_MASK	0x07
+#define	AHD_ANNEXCOL_AMPLITUDE	0x06
+#define	AHD_SLEWRATE_DEF_REVA	0x08
+#define	AHD_SLEWRATE_SHIFT	0x03
+#define	AHD_SLEWRATE_MASK	0x78
+#define	AHD_PRECOMP_CUTBACK_29	0x06
+#define	AHD_NUM_PER_DEV_ANNEXCOLS	0x04
+#define	B_CURRFIFO_0	0x02
+#define	LUNLEN_SINGLE_LEVEL_LUN	0x0f
+#define	NVRAM_SCB_OFFSET	0x2c
+#define	AHD_TIMER_MAX_US	0x18ffe7
+#define	AHD_TIMER_MAX_TICKS	0xffff
+#define	STATUS_PKT_SENSE	0xff
+#define	CMD_GROUP_CODE_SHIFT	0x05
+#define	AHD_SENSE_BUFSIZE	0x100
+#define	MAX_OFFSET_PACED_BUG	0x7f
+#define	BUS_8_BIT	0x00
+#define	STIMESEL_BUG_ADJ	0x08
+#define	STIMESEL_MIN	0x18
+#define	STIMESEL_SHIFT	0x03
+#define	CCSGRAM_MAXSEGS	0x10
+#define	INVALID_ADDR	0x80
+#define	TARGET_CMD_CMPLT	0xfe
+#define	SEEOP_WRAL_ADDR	0x40
+#define	SEEOP_ERAL_ADDR	0x80
+#define	AHD_AMPLITUDE_DEF	0x07
+#define	AHD_SLEWRATE_DEF_REVB	0x08
+#define	AHD_PRECOMP_CUTBACK_37	0x07
+#define	AHD_PRECOMP_CUTBACK_17	0x04
+#define	AHD_PRECOMP_SHIFT	0x00
+#define	AHD_PRECOMP_MASK	0x07
+#define	AHD_ANNEXCOL_PRECOMP_SLEW	0x04
+#define	SRC_MODE_SHIFT	0x00
+#define	PKT_OVERRUN_BUFSIZE	0x200
+#define	SCB_TRANSFER_SIZE_1BYTE_LUN	0x30
+#define	TARGET_DATA_IN	0x01
+#define	HOST_MSG	0xff
+#define	MAX_OFFSET	0xfe
+#define	BUS_16_BIT	0x01
+#define	CCSCBADDR_MAX	0x80
+#define	NUMDSPS 	0x14
+#define	SEEOP_EWEN_ADDR	0xc0
+#define	AHD_ANNEXCOL_PER_DEV0	0x04
+#define	DST_MODE_SHIFT	0x04
+
+
+/* Downloaded Constant Definitions */
+#define	SCB_TRANSFER_SIZE	0x06
+#define	PKT_OVERRUN_BUFOFFSET	0x05
+#define	SG_SIZEOF	0x04
+#define	SG_PREFETCH_ADDR_MASK	0x03
+#define	SG_PREFETCH_ALIGN_MASK	0x02
+#define	SG_PREFETCH_CNT_LIMIT	0x01
+#define	SG_PREFETCH_CNT	0x00
+#define	DOWNLOAD_CONST_COUNT	0x07
+
+
+/* Exported Labels */
+#define	LABEL_seq_isr 	0x269
+#define	LABEL_timer_isr	0x265
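
The generated header above pairs each register address with the bit masks and shifts that sub-divide it. As a minimal illustrative sketch (not taken from the driver itself), the snippet below shows how such a mask/shift pair is typically combined to unpack a field: the TID/OID masks and TID_SHIFT values are copied from the SCB_SCSIID definitions above, while the decode helper is hypothetical.

	#include <stdio.h>
	#include <stdint.h>

	/* Values copied from the generated register definitions above. */
	#define TID        0xf0   /* target ID field of SCB_SCSIID        */
	#define OID        0x0f   /* our (initiator) ID field             */
	#define TID_SHIFT  0x04   /* shift that right-aligns the TID bits */

	/* Hypothetical helper: unpack a raw SCB_SCSIID byte into its fields. */
	static void decode_scsiid(uint8_t scsiid)
	{
		uint8_t target_id    = (scsiid & TID) >> TID_SHIFT;
		uint8_t initiator_id = scsiid & OID;

		printf("SCB_SCSIID 0x%02x -> target %u, initiator %u\n",
		       scsiid, target_id, initiator_id);
	}

	int main(void)
	{
		decode_scsiid(0x57);	/* prints: target 5, initiator 7 */
		return 0;
	}
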
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
new file mode 100644
index 0000000..3098a75
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
@@ -0,0 +1,3635 @@
+/*
+ * DO NOT EDIT - This file is automatically generated
+ *		 from the following source files:
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#94 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#70 $
+ */
+
+#include "aic79xx_osm.h"
+
+static ahd_reg_parse_entry_t MODE_PTR_parse_table[] = {
+	{ "SRC_MODE",		0x07, 0x07 },
+	{ "DST_MODE",		0x70, 0x70 }
+};
+
+int
+ahd_mode_ptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(MODE_PTR_parse_table, 2, "MODE_PTR",
+	    0x00, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t INTSTAT_parse_table[] = {
+	{ "SPLTINT",		0x01, 0x01 },
+	{ "CMDCMPLT",		0x02, 0x02 },
+	{ "SEQINT",		0x04, 0x04 },
+	{ "SCSIINT",		0x08, 0x08 },
+	{ "PCIINT",		0x10, 0x10 },
+	{ "SWTMINT",		0x20, 0x20 },
+	{ "BRKADRINT",		0x40, 0x40 },
+	{ "HWERRINT",		0x80, 0x80 },
+	{ "INT_PEND",		0xff, 0xff }
+};
+
+int
+ahd_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(INTSTAT_parse_table, 9, "INTSTAT",
+	    0x01, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = {
+	{ "NO_SEQINT",		0x00, 0xff },
+	{ "BAD_PHASE",		0x01, 0xff },
+	{ "SEND_REJECT",	0x02, 0xff },
+	{ "PROTO_VIOLATION",	0x03, 0xff },
+	{ "NO_MATCH",		0x04, 0xff },
+	{ "IGN_WIDE_RES",	0x05, 0xff },
+	{ "PDATA_REINIT",	0x06, 0xff },
+	{ "HOST_MSG_LOOP",	0x07, 0xff },
+	{ "BAD_STATUS",		0x08, 0xff },
+	{ "DATA_OVERRUN",	0x09, 0xff },
+	{ "MKMSG_FAILED",	0x0a, 0xff },
+	{ "MISSED_BUSFREE",	0x0b, 0xff },
+	{ "DUMP_CARD_STATE",	0x0c, 0xff },
+	{ "ILLEGAL_PHASE",	0x0d, 0xff },
+	{ "INVALID_SEQINT",	0x0e, 0xff },
+	{ "CFG4ISTAT_INTR",	0x0f, 0xff },
+	{ "STATUS_OVERRUN",	0x10, 0xff },
+	{ "CFG4OVERRUN",	0x11, 0xff },
+	{ "ENTERING_NONPACK",	0x12, 0xff },
+	{ "TASKMGMT_FUNC_COMPLETE",0x13, 0xff },
+	{ "TASKMGMT_CMD_CMPLT_OKAY",0x14, 0xff },
+	{ "TRACEPOINT0",	0x15, 0xff },
+	{ "TRACEPOINT1",	0x16, 0xff },
+	{ "TRACEPOINT2",	0x17, 0xff },
+	{ "TRACEPOINT3",	0x18, 0xff },
+	{ "SAW_HWERR",		0x19, 0xff },
+	{ "BAD_SCB_STATUS",	0x1a, 0xff }
+};
+
+int
+ahd_seqintcode_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SEQINTCODE_parse_table, 27, "SEQINTCODE",
+	    0x02, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CLRINT_parse_table[] = {
+	{ "CLRSPLTINT",		0x01, 0x01 },
+	{ "CLRCMDINT",		0x02, 0x02 },
+	{ "CLRSEQINT",		0x04, 0x04 },
+	{ "CLRSCSIINT",		0x08, 0x08 },
+	{ "CLRPCIINT",		0x10, 0x10 },
+	{ "CLRSWTMINT",		0x20, 0x20 },
+	{ "CLRBRKADRINT",	0x40, 0x40 },
+	{ "CLRHWERRINT",	0x80, 0x80 }
+};
+
+int
+ahd_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CLRINT_parse_table, 8, "CLRINT",
+	    0x03, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t ERROR_parse_table[] = {
+	{ "DSCTMOUT",		0x02, 0x02 },
+	{ "ILLOPCODE",		0x04, 0x04 },
+	{ "SQPARERR",		0x08, 0x08 },
+	{ "DPARERR",		0x10, 0x10 },
+	{ "MPARERR",		0x20, 0x20 },
+	{ "CIOACCESFAIL",	0x40, 0x40 },
+	{ "CIOPARERR",		0x80, 0x80 }
+};
+
+int
+ahd_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(ERROR_parse_table, 7, "ERROR",
+	    0x04, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CLRERR_parse_table[] = {
+	{ "CLRDSCTMOUT",	0x02, 0x02 },
+	{ "CLRILLOPCODE",	0x04, 0x04 },
+	{ "CLRSQPARERR",	0x08, 0x08 },
+	{ "CLRDPARERR",		0x10, 0x10 },
+	{ "CLRMPARERR",		0x20, 0x20 },
+	{ "CLRCIOACCESFAIL",	0x40, 0x40 },
+	{ "CLRCIOPARERR",	0x80, 0x80 }
+};
+
+int
+ahd_clrerr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CLRERR_parse_table, 7, "CLRERR",
+	    0x04, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
+	{ "CHIPRST",		0x01, 0x01 },
+	{ "CHIPRSTACK",		0x01, 0x01 },
+	{ "INTEN",		0x02, 0x02 },
+	{ "PAUSE",		0x04, 0x04 },
+	{ "SWTIMER_START_B",	0x08, 0x08 },
+	{ "SWINT",		0x10, 0x10 },
+	{ "POWRDN",		0x40, 0x40 },
+	{ "SEQ_RESET",		0x80, 0x80 }
+};
+
+int
+ahd_hcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(HCNTRL_parse_table, 8, "HCNTRL",
+	    0x05, regvalue, cur_col, wrap));
+}
+
+int
+ahd_hnscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "HNSCB_QOFF",
+	    0x06, regvalue, cur_col, wrap));
+}
+
+int
+ahd_hescb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "HESCB_QOFF",
+	    0x08, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
+	{ "ENINT_COALESCE",	0x40, 0x40 },
+	{ "HOST_TQINPOS",	0x80, 0x80 }
+};
+
+int
+ahd_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(HS_MAILBOX_parse_table, 2, "HS_MAILBOX",
+	    0x0b, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = {
+	{ "CLRSEQ_SPLTINT",	0x01, 0x01 },
+	{ "CLRSEQ_PCIINT",	0x02, 0x02 },
+	{ "CLRSEQ_SCSIINT",	0x04, 0x04 },
+	{ "CLRSEQ_SEQINT",	0x08, 0x08 },
+	{ "CLRSEQ_SWTMRTO",	0x10, 0x10 }
+};
+
+int
+ahd_clrseqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CLRSEQINTSTAT_parse_table, 5, "CLRSEQINTSTAT",
+	    0x0c, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SEQINTSTAT_parse_table[] = {
+	{ "SEQ_SPLTINT",	0x01, 0x01 },
+	{ "SEQ_PCIINT",		0x02, 0x02 },
+	{ "SEQ_SCSIINT",	0x04, 0x04 },
+	{ "SEQ_SEQINT",		0x08, 0x08 },
+	{ "SEQ_SWTMRTO",	0x10, 0x10 }
+};
+
+int
+ahd_seqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SEQINTSTAT_parse_table, 5, "SEQINTSTAT",
+	    0x0c, regvalue, cur_col, wrap));
+}
+
+int
+ahd_swtimer_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SWTIMER",
+	    0x0e, regvalue, cur_col, wrap));
+}
+
+int
+ahd_snscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SNSCB_QOFF",
+	    0x10, regvalue, cur_col, wrap));
+}
+
+int
+ahd_sescb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SESCB_QOFF",
+	    0x12, regvalue, cur_col, wrap));
+}
+
+int
+ahd_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SDSCB_QOFF",
+	    0x14, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
+	{ "SCB_QSIZE_4",	0x00, 0x0f },
+	{ "SCB_QSIZE_8",	0x01, 0x0f },
+	{ "SCB_QSIZE_16",	0x02, 0x0f },
+	{ "SCB_QSIZE_32",	0x03, 0x0f },
+	{ "SCB_QSIZE_64",	0x04, 0x0f },
+	{ "SCB_QSIZE_128",	0x05, 0x0f },
+	{ "SCB_QSIZE_256",	0x06, 0x0f },
+	{ "SCB_QSIZE_512",	0x07, 0x0f },
+	{ "SCB_QSIZE_1024",	0x08, 0x0f },
+	{ "SCB_QSIZE_2048",	0x09, 0x0f },
+	{ "SCB_QSIZE_4096",	0x0a, 0x0f },
+	{ "SCB_QSIZE_8192",	0x0b, 0x0f },
+	{ "SCB_QSIZE_16384",	0x0c, 0x0f },
+	{ "SCB_QSIZE",		0x0f, 0x0f },
+	{ "HS_MAILBOX_ACT",	0x10, 0x10 },
+	{ "SDSCB_ROLLOVR",	0x20, 0x20 },
+	{ "NEW_SCB_AVAIL",	0x40, 0x40 },
+	{ "EMPTY_SCB_AVAIL",	0x80, 0x80 }
+};
+
+int
+ahd_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(QOFF_CTLSTA_parse_table, 18, "QOFF_CTLSTA",
+	    0x16, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t INTCTL_parse_table[] = {
+	{ "SPLTINTEN",		0x01, 0x01 },
+	{ "SEQINTEN",		0x02, 0x02 },
+	{ "SCSIINTEN",		0x04, 0x04 },
+	{ "PCIINTEN",		0x08, 0x08 },
+	{ "AUTOCLRCMDINT",	0x10, 0x10 },
+	{ "SWTIMER_START",	0x20, 0x20 },
+	{ "SWTMINTEN",		0x40, 0x40 },
+	{ "SWTMINTMASK",	0x80, 0x80 }
+};
+
+int
+ahd_intctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(INTCTL_parse_table, 8, "INTCTL",
+	    0x18, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DFCNTRL_parse_table[] = {
+	{ "DIRECTIONEN",	0x01, 0x01 },
+	{ "FIFOFLUSH",		0x02, 0x02 },
+	{ "FIFOFLUSHACK",	0x02, 0x02 },
+	{ "DIRECTION",		0x04, 0x04 },
+	{ "DIRECTIONACK",	0x04, 0x04 },
+	{ "HDMAEN",		0x08, 0x08 },
+	{ "HDMAENACK",		0x08, 0x08 },
+	{ "SCSIEN",		0x20, 0x20 },
+	{ "SCSIENACK",		0x20, 0x20 },
+	{ "SCSIENWRDIS",	0x40, 0x40 },
+	{ "PRELOADEN",		0x80, 0x80 }
+};
+
+int
+ahd_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DFCNTRL_parse_table, 11, "DFCNTRL",
+	    0x19, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
+	{ "CIOPARCKEN",		0x01, 0x01 },
+	{ "DISABLE_TWATE",	0x02, 0x02 },
+	{ "EXTREQLCK",		0x10, 0x10 },
+	{ "MPARCKEN",		0x20, 0x20 },
+	{ "DPARCKEN",		0x40, 0x40 },
+	{ "CACHETHEN",		0x80, 0x80 }
+};
+
+int
+ahd_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DSCOMMAND0_parse_table, 6, "DSCOMMAND0",
+	    0x19, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DFSTATUS_parse_table[] = {
+	{ "FIFOEMP",		0x01, 0x01 },
+	{ "FIFOFULL",		0x02, 0x02 },
+	{ "DFTHRESH",		0x04, 0x04 },
+	{ "HDONE",		0x08, 0x08 },
+	{ "MREQPEND",		0x10, 0x10 },
+	{ "PKT_PRELOAD_AVAIL",	0x40, 0x40 },
+	{ "PRELOAD_AVAIL",	0x80, 0x80 }
+};
+
+int
+ahd_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DFSTATUS_parse_table, 7, "DFSTATUS",
+	    0x1a, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
+	{ "LAST_SEG_DONE",	0x01, 0x01 },
+	{ "LAST_SEG",		0x02, 0x02 },
+	{ "ODD_SEG",		0x04, 0x04 },
+	{ "SG_ADDR_MASK",	0xf8, 0xf8 }
+};
+
+int
+ahd_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SG_CACHE_SHADOW_parse_table, 4, "SG_CACHE_SHADOW",
+	    0x1b, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t ARBCTL_parse_table[] = {
+	{ "USE_TIME",		0x07, 0x07 },
+	{ "RETRY_SWEN",		0x08, 0x08 },
+	{ "RESET_HARB",		0x80, 0x80 }
+};
+
+int
+ahd_arbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(ARBCTL_parse_table, 3, "ARBCTL",
+	    0x1b, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
+	{ "LAST_SEG",		0x02, 0x02 },
+	{ "ODD_SEG",		0x04, 0x04 },
+	{ "SG_ADDR_MASK",	0xf8, 0xf8 }
+};
+
+int
+ahd_sg_cache_pre_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SG_CACHE_PRE_parse_table, 3, "SG_CACHE_PRE",
+	    0x1b, regvalue, cur_col, wrap));
+}
+
+int
+ahd_lqin_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "LQIN",
+	    0x20, regvalue, cur_col, wrap));
+}
+
+int
+ahd_typeptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "TYPEPTR",
+	    0x20, regvalue, cur_col, wrap));
+}
+
+int
+ahd_tagptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "TAGPTR",
+	    0x21, regvalue, cur_col, wrap));
+}
+
+int
+ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "LUNPTR",
+	    0x22, regvalue, cur_col, wrap));
+}
+
+int
+ahd_datalenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DATALENPTR",
+	    0x23, regvalue, cur_col, wrap));
+}
+
+int
+ahd_statlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "STATLENPTR",
+	    0x24, regvalue, cur_col, wrap));
+}
+
+int
+ahd_cmdlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CMDLENPTR",
+	    0x25, regvalue, cur_col, wrap));
+}
+
+int
+ahd_attrptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "ATTRPTR",
+	    0x26, regvalue, cur_col, wrap));
+}
+
+int
+ahd_flagptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "FLAGPTR",
+	    0x27, regvalue, cur_col, wrap));
+}
+
+int
+ahd_cmdptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CMDPTR",
+	    0x28, regvalue, cur_col, wrap));
+}
+
+int
+ahd_qnextptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "QNEXTPTR",
+	    0x29, regvalue, cur_col, wrap));
+}
+
+int
+ahd_idptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "IDPTR",
+	    0x2a, regvalue, cur_col, wrap));
+}
+
+int
+ahd_abrtbyteptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "ABRTBYTEPTR",
+	    0x2b, regvalue, cur_col, wrap));
+}
+
+int
+ahd_abrtbitptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "ABRTBITPTR",
+	    0x2c, regvalue, cur_col, wrap));
+}
+
+int
+ahd_maxcmdbytes_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "MAXCMDBYTES",
+	    0x2d, regvalue, cur_col, wrap));
+}
+
+int
+ahd_maxcmd2rcv_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "MAXCMD2RCV",
+	    0x2e, regvalue, cur_col, wrap));
+}
+
+int
+ahd_shortthresh_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SHORTTHRESH",
+	    0x2f, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
+	{ "ILUNLEN",		0x0f, 0x0f },
+	{ "TLUNLEN",		0xf0, 0xf0 }
+};
+
+int
+ahd_lunlen_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LUNLEN_parse_table, 2, "LUNLEN",
+	    0x30, regvalue, cur_col, wrap));
+}
+
+int
+ahd_cdblimit_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CDBLIMIT",
+	    0x31, regvalue, cur_col, wrap));
+}
+
+int
+ahd_maxcmd_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "MAXCMD",
+	    0x32, regvalue, cur_col, wrap));
+}
+
+int
+ahd_maxcmdcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "MAXCMDCNT",
+	    0x33, regvalue, cur_col, wrap));
+}
+
+int
+ahd_lqrsvd01_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "LQRSVD01",
+	    0x34, regvalue, cur_col, wrap));
+}
+
+int
+ahd_lqrsvd16_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "LQRSVD16",
+	    0x35, regvalue, cur_col, wrap));
+}
+
+int
+ahd_lqrsvd17_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "LQRSVD17",
+	    0x36, regvalue, cur_col, wrap));
+}
+
+int
+ahd_cmdrsvd0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CMDRSVD0",
+	    0x37, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQCTL0_parse_table[] = {
+	{ "LQ0INITGCLT",	0x03, 0x03 },
+	{ "LQ0TARGCLT",		0x0c, 0x0c },
+	{ "LQIINITGCLT",	0x30, 0x30 },
+	{ "LQITARGCLT",		0xc0, 0xc0 }
+};
+
+int
+ahd_lqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQCTL0_parse_table, 4, "LQCTL0",
+	    0x38, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
+	{ "ABORTPENDING",	0x01, 0x01 },
+	{ "SINGLECMD",		0x02, 0x02 },
+	{ "PCI2PCI",		0x04, 0x04 }
+};
+
+int
+ahd_lqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQCTL1_parse_table, 3, "LQCTL1",
+	    0x38, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCSBIST0_parse_table[] = {
+	{ "OSBISTRUN",		0x01, 0x01 },
+	{ "OSBISTDONE",		0x02, 0x02 },
+	{ "OSBISTERR",		0x04, 0x04 },
+	{ "GSBISTRUN",		0x10, 0x10 },
+	{ "GSBISTDONE",		0x20, 0x20 },
+	{ "GSBISTERR",		0x40, 0x40 }
+};
+
+int
+ahd_scsbist0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCSBIST0_parse_table, 6, "SCSBIST0",
+	    0x39, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
+	{ "LQOPAUSE",		0x01, 0x01 },
+	{ "LQOTOIDLE",		0x02, 0x02 },
+	{ "LQOCONTINUE",	0x04, 0x04 },
+	{ "LQORETRY",		0x08, 0x08 },
+	{ "LQIPAUSE",		0x10, 0x10 },
+	{ "LQITOIDLE",		0x20, 0x20 },
+	{ "LQICONTINUE",	0x40, 0x40 },
+	{ "LQIRETRY",		0x80, 0x80 }
+};
+
+int
+ahd_lqctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQCTL2_parse_table, 8, "LQCTL2",
+	    0x39, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCSBIST1_parse_table[] = {
+	{ "NTBISTRUN",		0x01, 0x01 },
+	{ "NTBISTDONE",		0x02, 0x02 },
+	{ "NTBISTERR",		0x04, 0x04 }
+};
+
+int
+ahd_scsbist1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCSBIST1_parse_table, 3, "SCSBIST1",
+	    0x3a, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
+	{ "SCSIRSTO",		0x01, 0x01 },
+	{ "FORCEBUSFREE",	0x10, 0x10 },
+	{ "ENARBO",		0x20, 0x20 },
+	{ "ENSELO",		0x40, 0x40 },
+	{ "TEMODEO",		0x80, 0x80 }
+};
+
+int
+ahd_scsiseq0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCSISEQ0_parse_table, 5, "SCSISEQ0",
+	    0x3a, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCSISEQ1_parse_table[] = {
+	{ "ALTSTIM",		0x01, 0x01 },
+	{ "ENAUTOATNP",		0x02, 0x02 },
+	{ "MANUALP",		0x0c, 0x0c },
+	{ "ENRSELI",		0x10, 0x10 },
+	{ "ENSELI",		0x20, 0x20 },
+	{ "MANUALCTL",		0x40, 0x40 }
+};
+
+int
+ahd_scsiseq1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCSISEQ1_parse_table, 6, "SCSISEQ1",
+	    0x3b, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = {
+	{ "SPIOEN",		0x08, 0x08 },
+	{ "BIOSCANCELEN",	0x10, 0x10 },
+	{ "DFPEXP",		0x40, 0x40 },
+	{ "DFON",		0x80, 0x80 }
+};
+
+int
+ahd_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SXFRCTL0_parse_table, 4, "SXFRCTL0",
+	    0x3c, regvalue, cur_col, wrap));
+}
+
+int
+ahd_businitid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "BUSINITID",
+	    0x3c, regvalue, cur_col, wrap));
+}
+
+int
+ahd_dlcount_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DLCOUNT",
+	    0x3c, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
+	{ "STPWEN",		0x01, 0x01 },
+	{ "ACTNEGEN",		0x02, 0x02 },
+	{ "ENSTIMER",		0x04, 0x04 },
+	{ "STIMESEL",		0x18, 0x18 },
+	{ "ENSPCHK",		0x20, 0x20 },
+	{ "ENSACHK",		0x40, 0x40 },
+	{ "BITBUCKET",		0x80, 0x80 }
+};
+
+int
+ahd_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SXFRCTL1_parse_table, 7, "SXFRCTL1",
+	    0x3d, regvalue, cur_col, wrap));
+}
+
+int
+ahd_bustargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "BUSTARGID",
+	    0x3e, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SXFRCTL2_parse_table[] = {
+	{ "ASU",		0x07, 0x07 },
+	{ "CMDDMAEN",		0x08, 0x08 },
+	{ "AUTORSTDIS",		0x10, 0x10 }
+};
+
+int
+ahd_sxfrctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SXFRCTL2_parse_table, 3, "SXFRCTL2",
+	    0x3e, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
+	{ "CURRFIFO_0",		0x00, 0x03 },
+	{ "CURRFIFO_1",		0x01, 0x03 },
+	{ "CURRFIFO_NONE",	0x03, 0x03 },
+	{ "FIFO0FREE",		0x10, 0x10 },
+	{ "FIFO1FREE",		0x20, 0x20 },
+	{ "CURRFIFO",		0x03, 0x03 }
+};
+
+int
+ahd_dffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DFFSTAT_parse_table, 6, "DFFSTAT",
+	    0x3f, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCSISIGO_parse_table[] = {
+	{ "P_DATAOUT",		0x00, 0xe0 },
+	{ "P_DATAOUT_DT",	0x20, 0xe0 },
+	{ "P_DATAIN",		0x40, 0xe0 },
+	{ "P_DATAIN_DT",	0x60, 0xe0 },
+	{ "P_COMMAND",		0x80, 0xe0 },
+	{ "P_MESGOUT",		0xa0, 0xe0 },
+	{ "P_STATUS",		0xc0, 0xe0 },
+	{ "P_MESGIN",		0xe0, 0xe0 },
+	{ "ACKO",		0x01, 0x01 },
+	{ "REQO",		0x02, 0x02 },
+	{ "BSYO",		0x04, 0x04 },
+	{ "SELO",		0x08, 0x08 },
+	{ "ATNO",		0x10, 0x10 },
+	{ "MSGO",		0x20, 0x20 },
+	{ "IOO",		0x40, 0x40 },
+	{ "CDO",		0x80, 0x80 },
+	{ "PHASE_MASK",		0xe0, 0xe0 }
+};
+
+int
+ahd_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCSISIGO_parse_table, 17, "SCSISIGO",
+	    0x40, regvalue, cur_col, wrap));
+}
+
+int
+ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "MULTARGID",
+	    0x40, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
+	{ "P_DATAOUT",		0x00, 0xe0 },
+	{ "P_DATAOUT_DT",	0x20, 0xe0 },
+	{ "P_DATAIN",		0x40, 0xe0 },
+	{ "P_DATAIN_DT",	0x60, 0xe0 },
+	{ "P_COMMAND",		0x80, 0xe0 },
+	{ "P_MESGOUT",		0xa0, 0xe0 },
+	{ "P_STATUS",		0xc0, 0xe0 },
+	{ "P_MESGIN",		0xe0, 0xe0 },
+	{ "ACKI",		0x01, 0x01 },
+	{ "REQI",		0x02, 0x02 },
+	{ "BSYI",		0x04, 0x04 },
+	{ "SELI",		0x08, 0x08 },
+	{ "ATNI",		0x10, 0x10 },
+	{ "MSGI",		0x20, 0x20 },
+	{ "IOI",		0x40, 0x40 },
+	{ "CDI",		0x80, 0x80 },
+	{ "PHASE_MASK",		0xe0, 0xe0 }
+};
+
+int
+ahd_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCSISIGI_parse_table, 17, "SCSISIGI",
+	    0x41, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCSIPHASE_parse_table[] = {
+	{ "DATA_OUT_PHASE",	0x01, 0x03 },
+	{ "DATA_IN_PHASE",	0x02, 0x03 },
+	{ "DATA_PHASE_MASK",	0x03, 0x03 },
+	{ "MSG_OUT_PHASE",	0x04, 0x04 },
+	{ "MSG_IN_PHASE",	0x08, 0x08 },
+	{ "COMMAND_PHASE",	0x10, 0x10 },
+	{ "STATUS_PHASE",	0x20, 0x20 }
+};
+
+int
+ahd_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCSIPHASE_parse_table, 7, "SCSIPHASE",
+	    0x42, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scsidat0_img_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCSIDAT0_IMG",
+	    0x43, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scsidat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCSIDAT",
+	    0x44, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scsibus_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCSIBUS",
+	    0x46, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t TARGIDIN_parse_table[] = {
+	{ "TARGID",		0x0f, 0x0f },
+	{ "CLKOUT",		0x80, 0x80 }
+};
+
+int
+ahd_targidin_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(TARGIDIN_parse_table, 2, "TARGIDIN",
+	    0x48, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SELID_parse_table[] = {
+	{ "ONEBIT",		0x08, 0x08 },
+	{ "SELID_MASK",		0xf0, 0xf0 }
+};
+
+int
+ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SELID_parse_table, 2, "SELID",
+	    0x49, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SBLKCTL_parse_table[] = {
+	{ "SELWIDE",		0x02, 0x02 },
+	{ "ENAB20",		0x04, 0x04 },
+	{ "ENAB40",		0x08, 0x08 },
+	{ "DIAGLEDON",		0x40, 0x40 },
+	{ "DIAGLEDEN",		0x80, 0x80 }
+};
+
+int
+ahd_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SBLKCTL_parse_table, 5, "SBLKCTL",
+	    0x4a, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = {
+	{ "AUTO_MSGOUT_DE",	0x02, 0x02 },
+	{ "ENDGFORMCHK",	0x04, 0x04 },
+	{ "BUSFREEREV",		0x10, 0x10 },
+	{ "BIASCANCTL",		0x20, 0x20 },
+	{ "AUTOACKEN",		0x40, 0x40 },
+	{ "BIOSCANCTL",		0x80, 0x80 },
+	{ "OPTIONMODE_DEFAULTS",0x02, 0x02 }
+};
+
+int
+ahd_optionmode_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(OPTIONMODE_parse_table, 7, "OPTIONMODE",
+	    0x4a, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
+	{ "ARBDO",		0x01, 0x01 },
+	{ "SPIORDY",		0x02, 0x02 },
+	{ "OVERRUN",		0x04, 0x04 },
+	{ "IOERR",		0x08, 0x08 },
+	{ "SELINGO",		0x10, 0x10 },
+	{ "SELDI",		0x20, 0x20 },
+	{ "SELDO",		0x40, 0x40 },
+	{ "TARGET",		0x80, 0x80 }
+};
+
+int
+ahd_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SSTAT0_parse_table, 8, "SSTAT0",
+	    0x4b, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CLRSINT0_parse_table[] = {
+	{ "CLRARBDO",		0x01, 0x01 },
+	{ "CLRSPIORDY",		0x02, 0x02 },
+	{ "CLROVERRUN",		0x04, 0x04 },
+	{ "CLRIOERR",		0x08, 0x08 },
+	{ "CLRSELINGO",		0x10, 0x10 },
+	{ "CLRSELDI",		0x20, 0x20 },
+	{ "CLRSELDO",		0x40, 0x40 }
+};
+
+int
+ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
+	    0x4b, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
+	{ "ENARBDO",		0x01, 0x01 },
+	{ "ENSPIORDY",		0x02, 0x02 },
+	{ "ENOVERRUN",		0x04, 0x04 },
+	{ "ENIOERR",		0x08, 0x08 },
+	{ "ENSELINGO",		0x10, 0x10 },
+	{ "ENSELDI",		0x20, 0x20 },
+	{ "ENSELDO",		0x40, 0x40 }
+};
+
+int
+ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SIMODE0_parse_table, 7, "SIMODE0",
+	    0x4b, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CLRSINT1_parse_table[] = {
+	{ "CLRREQINIT",		0x01, 0x01 },
+	{ "CLRSTRB2FAST",	0x02, 0x02 },
+	{ "CLRSCSIPERR",	0x04, 0x04 },
+	{ "CLRBUSFREE",		0x08, 0x08 },
+	{ "CLRSCSIRSTI",	0x20, 0x20 },
+	{ "CLRATNO",		0x40, 0x40 },
+	{ "CLRSELTIMEO",	0x80, 0x80 }
+};
+
+int
+ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
+	    0x4c, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
+	{ "REQINIT",		0x01, 0x01 },
+	{ "STRB2FAST",		0x02, 0x02 },
+	{ "SCSIPERR",		0x04, 0x04 },
+	{ "BUSFREE",		0x08, 0x08 },
+	{ "PHASEMIS",		0x10, 0x10 },
+	{ "SCSIRSTI",		0x20, 0x20 },
+	{ "ATNTARG",		0x40, 0x40 },
+	{ "SELTO",		0x80, 0x80 }
+};
+
+int
+ahd_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SSTAT1_parse_table, 8, "SSTAT1",
+	    0x4c, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
+	{ "BUSFREE_LQO",	0x40, 0xc0 },
+	{ "BUSFREE_DFF0",	0x80, 0xc0 },
+	{ "BUSFREE_DFF1",	0xc0, 0xc0 },
+	{ "DMADONE",		0x01, 0x01 },
+	{ "SDONE",		0x02, 0x02 },
+	{ "WIDE_RES",		0x04, 0x04 },
+	{ "BSYX",		0x08, 0x08 },
+	{ "EXP_ACTIVE",		0x10, 0x10 },
+	{ "NONPACKREQ",		0x20, 0x20 },
+	{ "BUSFREETIME",	0xc0, 0xc0 }
+};
+
+int
+ahd_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SSTAT2_parse_table, 10, "SSTAT2",
+	    0x4d, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
+	{ "CLRDMADONE",		0x01, 0x01 },
+	{ "CLRSDONE",		0x02, 0x02 },
+	{ "CLRWIDE_RES",	0x04, 0x04 },
+	{ "CLRNONPACKREQ",	0x20, 0x20 }
+};
+
+int
+ahd_clrsint2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CLRSINT2_parse_table, 4, "CLRSINT2",
+	    0x4d, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SIMODE2_parse_table[] = {
+	{ "ENDMADONE",		0x01, 0x01 },
+	{ "ENSDONE",		0x02, 0x02 },
+	{ "ENWIDE_RES",		0x04, 0x04 }
+};
+
+int
+ahd_simode2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SIMODE2_parse_table, 3, "SIMODE2",
+	    0x4d, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
+	{ "DTERR",		0x01, 0x01 },
+	{ "DGFORMERR",		0x02, 0x02 },
+	{ "CRCERR",		0x04, 0x04 },
+	{ "AIPERR",		0x08, 0x08 },
+	{ "PARITYERR",		0x10, 0x10 },
+	{ "PREVPHASE",		0x20, 0x20 },
+	{ "HIPERR",		0x40, 0x40 },
+	{ "HIZERO",		0x80, 0x80 }
+};
+
+int
+ahd_perrdiag_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(PERRDIAG_parse_table, 8, "PERRDIAG",
+	    0x4e, regvalue, cur_col, wrap));
+}
+
+int
+ahd_lqistate_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "LQISTATE",
+	    0x4e, regvalue, cur_col, wrap));
+}
+
+int
+ahd_soffcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SOFFCNT",
+	    0x4f, regvalue, cur_col, wrap));
+}
+
+int
+ahd_lqostate_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "LQOSTATE",
+	    0x4f, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
+	{ "LQIATNCMD",		0x01, 0x01 },
+	{ "LQIATNLQ",		0x02, 0x02 },
+	{ "LQIBADLQT",		0x04, 0x04 },
+	{ "LQICRCT2",		0x08, 0x08 },
+	{ "LQICRCT1",		0x10, 0x10 },
+	{ "LQIATNQAS",		0x20, 0x20 }
+};
+
+int
+ahd_lqistat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQISTAT0_parse_table, 6, "LQISTAT0",
+	    0x50, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = {
+	{ "CLRLQIATNCMD",	0x01, 0x01 },
+	{ "CLRLQIATNLQ",	0x02, 0x02 },
+	{ "CLRLQIBADLQT",	0x04, 0x04 },
+	{ "CLRLQICRCT2",	0x08, 0x08 },
+	{ "CLRLQICRCT1",	0x10, 0x10 },
+	{ "CLRLQIATNQAS",	0x20, 0x20 }
+};
+
+int
+ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
+	    0x50, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
+	{ "ENLQIATNCMD",	0x01, 0x01 },
+	{ "ENLQIATNLQ",		0x02, 0x02 },
+	{ "ENLQIBADLQT",	0x04, 0x04 },
+	{ "ENLQICRCT2",		0x08, 0x08 },
+	{ "ENLQICRCT1",		0x10, 0x10 },
+	{ "ENLQIATNQASK",	0x20, 0x20 }
+};
+
+int
+ahd_lqimode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQIMODE0_parse_table, 6, "LQIMODE0",
+	    0x50, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQIMODE1_parse_table[] = {
+	{ "ENLQIOVERI_NLQ",	0x01, 0x01 },
+	{ "ENLQIOVERI_LQ",	0x02, 0x02 },
+	{ "ENLQIBADLQI",	0x04, 0x04 },
+	{ "ENLQICRCI_NLQ",	0x08, 0x08 },
+	{ "ENLQICRCI_LQ",	0x10, 0x10 },
+	{ "ENLIQABORT",		0x20, 0x20 },
+	{ "ENLQIPHASE_NLQ",	0x40, 0x40 },
+	{ "ENLQIPHASE_LQ",	0x80, 0x80 }
+};
+
+int
+ahd_lqimode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQIMODE1_parse_table, 8, "LQIMODE1",
+	    0x51, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
+	{ "LQIOVERI_NLQ",	0x01, 0x01 },
+	{ "LQIOVERI_LQ",	0x02, 0x02 },
+	{ "LQIBADLQI",		0x04, 0x04 },
+	{ "LQICRCI_NLQ",	0x08, 0x08 },
+	{ "LQICRCI_LQ",		0x10, 0x10 },
+	{ "LQIABORT",		0x20, 0x20 },
+	{ "LQIPHASE_NLQ",	0x40, 0x40 },
+	{ "LQIPHASE_LQ",	0x80, 0x80 }
+};
+
+int
+ahd_lqistat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQISTAT1_parse_table, 8, "LQISTAT1",
+	    0x51, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = {
+	{ "CLRLQIOVERI_NLQ",	0x01, 0x01 },
+	{ "CLRLQIOVERI_LQ",	0x02, 0x02 },
+	{ "CLRLQIBADLQI",	0x04, 0x04 },
+	{ "CLRLQICRCI_NLQ",	0x08, 0x08 },
+	{ "CLRLQICRCI_LQ",	0x10, 0x10 },
+	{ "CLRLIQABORT",	0x20, 0x20 },
+	{ "CLRLQIPHASE_NLQ",	0x40, 0x40 },
+	{ "CLRLQIPHASE_LQ",	0x80, 0x80 }
+};
+
+int
+ahd_clrlqiint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CLRLQIINT1_parse_table, 8, "CLRLQIINT1",
+	    0x51, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
+	{ "LQIGSAVAIL",		0x01, 0x01 },
+	{ "LQISTOPCMD",		0x02, 0x02 },
+	{ "LQISTOPLQ",		0x04, 0x04 },
+	{ "LQISTOPPKT",		0x08, 0x08 },
+	{ "LQIWAITFIFO",	0x10, 0x10 },
+	{ "LQIWORKONLQ",	0x20, 0x20 },
+	{ "LQIPHASE_OUTPKT",	0x40, 0x40 },
+	{ "PACKETIZED",		0x80, 0x80 }
+};
+
+int
+ahd_lqistat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQISTAT2_parse_table, 8, "LQISTAT2",
+	    0x52, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SSTAT3_parse_table[] = {
+	{ "OSRAMPERR",		0x01, 0x01 },
+	{ "NTRAMPERR",		0x02, 0x02 }
+};
+
+int
+ahd_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SSTAT3_parse_table, 2, "SSTAT3",
+	    0x53, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SIMODE3_parse_table[] = {
+	{ "ENOSRAMPERR",	0x01, 0x01 },
+	{ "ENNTRAMPERR",	0x02, 0x02 }
+};
+
+int
+ahd_simode3_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SIMODE3_parse_table, 2, "SIMODE3",
+	    0x53, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CLRSINT3_parse_table[] = {
+	{ "CLROSRAMPERR",	0x01, 0x01 },
+	{ "CLRNTRAMPERR",	0x02, 0x02 }
+};
+
+int
+ahd_clrsint3_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CLRSINT3_parse_table, 2, "CLRSINT3",
+	    0x53, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQOMODE0_parse_table[] = {
+	{ "ENLQOTCRC",		0x01, 0x01 },
+	{ "ENLQOATNPKT",	0x02, 0x02 },
+	{ "ENLQOATNLQ",		0x04, 0x04 },
+	{ "ENLQOSTOPT2",	0x08, 0x08 },
+	{ "ENLQOTARGSCBPERR",	0x10, 0x10 }
+};
+
+int
+ahd_lqomode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQOMODE0_parse_table, 5, "LQOMODE0",
+	    0x54, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
+	{ "LQOTCRC",		0x01, 0x01 },
+	{ "LQOATNPKT",		0x02, 0x02 },
+	{ "LQOATNLQ",		0x04, 0x04 },
+	{ "LQOSTOPT2",		0x08, 0x08 },
+	{ "LQOTARGSCBPERR",	0x10, 0x10 }
+};
+
+int
+ahd_lqostat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQOSTAT0_parse_table, 5, "LQOSTAT0",
+	    0x54, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = {
+	{ "CLRLQOTCRC",		0x01, 0x01 },
+	{ "CLRLQOATNPKT",	0x02, 0x02 },
+	{ "CLRLQOATNLQ",	0x04, 0x04 },
+	{ "CLRLQOSTOPT2",	0x08, 0x08 },
+	{ "CLRLQOTARGSCBPERR",	0x10, 0x10 }
+};
+
+int
+ahd_clrlqoint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CLRLQOINT0_parse_table, 5, "CLRLQOINT0",
+	    0x54, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
+	{ "LQOPHACHGINPKT",	0x01, 0x01 },
+	{ "LQOBUSFREE",		0x02, 0x02 },
+	{ "LQOBADQAS",		0x04, 0x04 },
+	{ "LQOSTOPI2",		0x08, 0x08 },
+	{ "LQOINITSCBPERR",	0x10, 0x10 }
+};
+
+int
+ahd_lqostat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQOSTAT1_parse_table, 5, "LQOSTAT1",
+	    0x55, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = {
+	{ "CLRLQOPHACHGINPKT",	0x01, 0x01 },
+	{ "CLRLQOBUSFREE",	0x02, 0x02 },
+	{ "CLRLQOBADQAS",	0x04, 0x04 },
+	{ "CLRLQOSTOPI2",	0x08, 0x08 },
+	{ "CLRLQOINITSCBPERR",	0x10, 0x10 }
+};
+
+int
+ahd_clrlqoint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CLRLQOINT1_parse_table, 5, "CLRLQOINT1",
+	    0x55, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQOMODE1_parse_table[] = {
+	{ "ENLQOPHACHGINPKT",	0x01, 0x01 },
+	{ "ENLQOBUSFREE",	0x02, 0x02 },
+	{ "ENLQOBADQAS",	0x04, 0x04 },
+	{ "ENLQOSTOPI2",	0x08, 0x08 },
+	{ "ENLQOINITSCBPERR",	0x10, 0x10 }
+};
+
+int
+ahd_lqomode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQOMODE1_parse_table, 5, "LQOMODE1",
+	    0x55, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
+	{ "LQOSTOP0",		0x01, 0x01 },
+	{ "LQOPHACHGOUTPKT",	0x02, 0x02 },
+	{ "LQOWAITFIFO",	0x10, 0x10 },
+	{ "LQOPKT",		0xe0, 0xe0 }
+};
+
+int
+ahd_lqostat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQOSTAT2_parse_table, 4, "LQOSTAT2",
+	    0x56, regvalue, cur_col, wrap));
+}
+
+int
+ahd_os_space_cnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "OS_SPACE_CNT",
+	    0x56, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
+	{ "ENREQINIT",		0x01, 0x01 },
+	{ "ENSTRB2FAST",	0x02, 0x02 },
+	{ "ENSCSIPERR",		0x04, 0x04 },
+	{ "ENBUSFREE",		0x08, 0x08 },
+	{ "ENPHASEMIS",		0x10, 0x10 },
+	{ "ENSCSIRST",		0x20, 0x20 },
+	{ "ENATNTARG",		0x40, 0x40 },
+	{ "ENSELTIMO",		0x80, 0x80 }
+};
+
+int
+ahd_simode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SIMODE1_parse_table, 8, "SIMODE1",
+	    0x57, regvalue, cur_col, wrap));
+}
+
+int
+ahd_gsfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "GSFIFO",
+	    0x58, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
+	{ "RSTCHN",		0x01, 0x01 },
+	{ "CLRCHN",		0x02, 0x02 },
+	{ "CLRSHCNT",		0x04, 0x04 },
+	{ "DFFBITBUCKET",	0x08, 0x08 }
+};
+
+int
+ahd_dffsxfrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DFFSXFRCTL_parse_table, 4, "DFFSXFRCTL",
+	    0x5a, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = {
+	{ "LQONOCHKOVER",	0x01, 0x01 },
+	{ "LQOH2A_VERSION",	0x80, 0x80 }
+};
+
+int
+ahd_lqoscsctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LQOSCSCTL_parse_table, 2, "LQOSCSCTL",
+	    0x5a, regvalue, cur_col, wrap));
+}
+
+int
+ahd_nextscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "NEXTSCB",
+	    0x5a, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = {
+	{ "CLRCFG4TCMD",	0x01, 0x01 },
+	{ "CLRCFG4ICMD",	0x02, 0x02 },
+	{ "CLRCFG4TSTAT",	0x04, 0x04 },
+	{ "CLRCFG4ISTAT",	0x08, 0x08 },
+	{ "CLRCFG4DATA",	0x10, 0x10 },
+	{ "CLRSAVEPTRS",	0x20, 0x20 },
+	{ "CLRCTXTDONE",	0x40, 0x40 }
+};
+
+int
+ahd_clrseqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CLRSEQINTSRC_parse_table, 7, "CLRSEQINTSRC",
+	    0x5b, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
+	{ "CFG4TCMD",		0x01, 0x01 },
+	{ "CFG4ICMD",		0x02, 0x02 },
+	{ "CFG4TSTAT",		0x04, 0x04 },
+	{ "CFG4ISTAT",		0x08, 0x08 },
+	{ "CFG4DATA",		0x10, 0x10 },
+	{ "SAVEPTRS",		0x20, 0x20 },
+	{ "CTXTDONE",		0x40, 0x40 }
+};
+
+int
+ahd_seqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SEQINTSRC_parse_table, 7, "SEQINTSRC",
+	    0x5b, regvalue, cur_col, wrap));
+}
+
+int
+ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CURRSCB",
+	    0x5c, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SEQIMODE_parse_table[] = {
+	{ "ENCFG4TCMD",		0x01, 0x01 },
+	{ "ENCFG4ICMD",		0x02, 0x02 },
+	{ "ENCFG4TSTAT",	0x04, 0x04 },
+	{ "ENCFG4ISTAT",	0x08, 0x08 },
+	{ "ENCFG4DATA",		0x10, 0x10 },
+	{ "ENSAVEPTRS",		0x20, 0x20 },
+	{ "ENCTXTDONE",		0x40, 0x40 }
+};
+
+int
+ahd_seqimode_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SEQIMODE_parse_table, 7, "SEQIMODE",
+	    0x5c, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
+	{ "FIFOFREE",		0x01, 0x01 },
+	{ "DATAINFIFO",		0x02, 0x02 },
+	{ "DLZERO",		0x04, 0x04 },
+	{ "SHVALID",		0x08, 0x08 },
+	{ "LASTSDONE",		0x10, 0x10 },
+	{ "SHCNTMINUS1",	0x20, 0x20 },
+	{ "SHCNTNEGATIVE",	0x40, 0x40 }
+};
+
+int
+ahd_mdffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(MDFFSTAT_parse_table, 7, "MDFFSTAT",
+	    0x5d, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CRCCONTROL_parse_table[] = {
+	{ "CRCVALCHKEN",	0x40, 0x40 }
+};
+
+int
+ahd_crccontrol_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CRCCONTROL_parse_table, 1, "CRCCONTROL",
+	    0x5d, regvalue, cur_col, wrap));
+}
+
+int
+ahd_dfftag_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DFFTAG",
+	    0x5e, regvalue, cur_col, wrap));
+}
+
+int
+ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "LASTSCB",
+	    0x5e, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCSITEST_parse_table[] = {
+	{ "SEL_TXPLL_DEBUG",	0x04, 0x04 },
+	{ "CNTRTEST",		0x08, 0x08 }
+};
+
+int
+ahd_scsitest_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCSITEST_parse_table, 2, "SCSITEST",
+	    0x5e, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t IOPDNCTL_parse_table[] = {
+	{ "PDN_DIFFSENSE",	0x01, 0x01 },
+	{ "PDN_IDIST",		0x04, 0x04 },
+	{ "DISABLE_OE",		0x80, 0x80 }
+};
+
+int
+ahd_iopdnctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(IOPDNCTL_parse_table, 3, "IOPDNCTL",
+	    0x5f, regvalue, cur_col, wrap));
+}
+
+int
+ahd_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SHADDR",
+	    0x60, regvalue, cur_col, wrap));
+}
+
+int
+ahd_negoaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "NEGOADDR",
+	    0x60, regvalue, cur_col, wrap));
+}
+
+int
+ahd_dgrpcrci_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DGRPCRCI",
+	    0x60, regvalue, cur_col, wrap));
+}
+
+int
+ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "NEGPERIOD",
+	    0x61, regvalue, cur_col, wrap));
+}
+
+int
+ahd_packcrci_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "PACKCRCI",
+	    0x62, regvalue, cur_col, wrap));
+}
+
+int
+ahd_negoffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "NEGOFFSET",
+	    0x62, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = {
+	{ "PPROPT_IUT",		0x01, 0x01 },
+	{ "PPROPT_DT",		0x02, 0x02 },
+	{ "PPROPT_QAS",		0x04, 0x04 },
+	{ "PPROPT_PACE",	0x08, 0x08 }
+};
+
+int
+ahd_negppropts_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NEGPPROPTS_parse_table, 4, "NEGPPROPTS",
+	    0x63, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = {
+	{ "WIDEXFER",		0x01, 0x01 },
+	{ "ENAUTOATNO",		0x02, 0x02 },
+	{ "ENAUTOATNI",		0x04, 0x04 },
+	{ "ENSLOWCRC",		0x08, 0x08 },
+	{ "RTI_OVRDTRN",	0x10, 0x10 },
+	{ "RTI_WRTDIS",		0x20, 0x20 },
+	{ "ENSNAPSHOT",		0x40, 0x40 }
+};
+
+int
+ahd_negconopts_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NEGCONOPTS_parse_table, 7, "NEGCONOPTS",
+	    0x64, regvalue, cur_col, wrap));
+}
+
+int
+ahd_annexcol_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "ANNEXCOL",
+	    0x65, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
+	{ "LSTSGCLRDIS",	0x01, 0x01 },
+	{ "SHVALIDSTDIS",	0x02, 0x02 },
+	{ "DFFACTCLR",		0x04, 0x04 },
+	{ "SDONEMSKDIS",	0x08, 0x08 },
+	{ "WIDERESEN",		0x10, 0x10 },
+	{ "CURRFIFODEF",	0x20, 0x20 },
+	{ "STSELSKIDDIS",	0x40, 0x40 }
+};
+
+int
+ahd_scschkn_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCSCHKN_parse_table, 7, "SCSCHKN",
+	    0x66, regvalue, cur_col, wrap));
+}
+
+int
+ahd_annexdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "ANNEXDAT",
+	    0x66, regvalue, cur_col, wrap));
+}
+
+int
+ahd_iownid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "IOWNID",
+	    0x67, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t PLL960CTL0_parse_table[] = {
+	{ "PLL_ENFBM",		0x01, 0x01 },
+	{ "PLL_DLPF",		0x02, 0x02 },
+	{ "PLL_ENLPF",		0x04, 0x04 },
+	{ "PLL_ENLUD",		0x08, 0x08 },
+	{ "PLL_NS",		0x30, 0x30 },
+	{ "PLL_PWDN",		0x40, 0x40 },
+	{ "PLL_VCOSEL",		0x80, 0x80 }
+};
+
+int
+ahd_pll960ctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(PLL960CTL0_parse_table, 7, "PLL960CTL0",
+	    0x68, regvalue, cur_col, wrap));
+}
+
+int
+ahd_shcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SHCNT",
+	    0x68, regvalue, cur_col, wrap));
+}
+
+int
+ahd_townid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "TOWNID",
+	    0x69, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t PLL960CTL1_parse_table[] = {
+	{ "PLL_RST",		0x01, 0x01 },
+	{ "PLL_CNTCLR",		0x40, 0x40 },
+	{ "PLL_CNTEN",		0x80, 0x80 }
+};
+
+int
+ahd_pll960ctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(PLL960CTL1_parse_table, 3, "PLL960CTL1",
+	    0x69, regvalue, cur_col, wrap));
+}
+
+int
+ahd_pll960cnt0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "PLL960CNT0",
+	    0x6a, regvalue, cur_col, wrap));
+}
+
+int
+ahd_xsig_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "XSIG",
+	    0x6a, regvalue, cur_col, wrap));
+}
+
+int
+ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SELOID",
+	    0x6b, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t PLL400CTL0_parse_table[] = {
+	{ "PLL_ENFBM",		0x01, 0x01 },
+	{ "PLL_DLPF",		0x02, 0x02 },
+	{ "PLL_ENLPF",		0x04, 0x04 },
+	{ "PLL_ENLUD",		0x08, 0x08 },
+	{ "PLL_NS",		0x30, 0x30 },
+	{ "PLL_PWDN",		0x40, 0x40 },
+	{ "PLL_VCOSEL",		0x80, 0x80 }
+};
+
+int
+ahd_pll400ctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(PLL400CTL0_parse_table, 7, "PLL400CTL0",
+	    0x6c, regvalue, cur_col, wrap));
+}
+
+int
+ahd_fairness_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "FAIRNESS",
+	    0x6c, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t PLL400CTL1_parse_table[] = {
+	{ "PLL_RST",		0x01, 0x01 },
+	{ "PLL_CNTCLR",		0x40, 0x40 },
+	{ "PLL_CNTEN",		0x80, 0x80 }
+};
+
+int
+ahd_pll400ctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(PLL400CTL1_parse_table, 3, "PLL400CTL1",
+	    0x6d, regvalue, cur_col, wrap));
+}
+
+int
+ahd_pll400cnt0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "PLL400CNT0",
+	    0x6e, regvalue, cur_col, wrap));
+}
+
+int
+ahd_unfairness_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "UNFAIRNESS",
+	    0x6e, regvalue, cur_col, wrap));
+}
+
+int
+ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "HADDR",
+	    0x70, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t PLLDELAY_parse_table[] = {
+	{ "SPLIT_DROP_REQ",	0x80, 0x80 }
+};
+
+int
+ahd_plldelay_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(PLLDELAY_parse_table, 1, "PLLDELAY",
+	    0x70, regvalue, cur_col, wrap));
+}
+
+int
+ahd_hodmaadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "HODMAADR",
+	    0x70, regvalue, cur_col, wrap));
+}
+
+int
+ahd_hodmacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "HODMACNT",
+	    0x78, regvalue, cur_col, wrap));
+}
+
+int
+ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "HCNT",
+	    0x78, regvalue, cur_col, wrap));
+}
+
+int
+ahd_hodmaen_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "HODMAEN",
+	    0x7a, regvalue, cur_col, wrap));
+}
+
+int
+ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SGHADDR",
+	    0x7c, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scbhaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCBHADDR",
+	    0x7c, regvalue, cur_col, wrap));
+}
+
+int
+ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SGHCNT",
+	    0x84, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scbhcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCBHCNT",
+	    0x84, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
+	{ "WR_DFTHRSH_MIN",	0x00, 0x70 },
+	{ "RD_DFTHRSH_MIN",	0x00, 0x07 },
+	{ "RD_DFTHRSH_25",	0x01, 0x07 },
+	{ "RD_DFTHRSH_50",	0x02, 0x07 },
+	{ "RD_DFTHRSH_63",	0x03, 0x07 },
+	{ "RD_DFTHRSH_75",	0x04, 0x07 },
+	{ "RD_DFTHRSH_85",	0x05, 0x07 },
+	{ "RD_DFTHRSH_90",	0x06, 0x07 },
+	{ "RD_DFTHRSH_MAX",	0x07, 0x07 },
+	{ "WR_DFTHRSH_25",	0x10, 0x70 },
+	{ "WR_DFTHRSH_50",	0x20, 0x70 },
+	{ "WR_DFTHRSH_63",	0x30, 0x70 },
+	{ "WR_DFTHRSH_75",	0x40, 0x70 },
+	{ "WR_DFTHRSH_85",	0x50, 0x70 },
+	{ "WR_DFTHRSH_90",	0x60, 0x70 },
+	{ "WR_DFTHRSH_MAX",	0x70, 0x70 },
+	{ "RD_DFTHRSH",		0x07, 0x07 },
+	{ "WR_DFTHRSH",		0x70, 0x70 }
+};
+
+int
+ahd_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DFF_THRSH_parse_table, 18, "DFF_THRSH",
+	    0x88, regvalue, cur_col, wrap));
+}
+
+int
+ahd_romaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "ROMADDR",
+	    0x8a, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t ROMCNTRL_parse_table[] = {
+	{ "RDY",		0x01, 0x01 },
+	{ "REPEAT",		0x02, 0x02 },
+	{ "ROMSPD",		0x18, 0x18 },
+	{ "ROMOP",		0xe0, 0xe0 }
+};
+
+int
+ahd_romcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(ROMCNTRL_parse_table, 4, "ROMCNTRL",
+	    0x8d, regvalue, cur_col, wrap));
+}
+
+int
+ahd_romdata_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "ROMDATA",
+	    0x8e, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CMCRXMSG0_parse_table[] = {
+	{ "CFNUM",		0x07, 0x07 },
+	{ "CDNUM",		0xf8, 0xf8 }
+};
+
+int
+ahd_cmcrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CMCRXMSG0_parse_table, 2, "CMCRXMSG0",
+	    0x90, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t ROENABLE_parse_table[] = {
+	{ "DCH0ROEN",		0x01, 0x01 },
+	{ "DCH1ROEN",		0x02, 0x02 },
+	{ "SGROEN",		0x04, 0x04 },
+	{ "CMCROEN",		0x08, 0x08 },
+	{ "OVLYROEN",		0x10, 0x10 },
+	{ "MSIROEN",		0x20, 0x20 }
+};
+
+int
+ahd_roenable_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(ROENABLE_parse_table, 6, "ROENABLE",
+	    0x90, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t OVLYRXMSG0_parse_table[] = {
+	{ "CFNUM",		0x07, 0x07 },
+	{ "CDNUM",		0xf8, 0xf8 }
+};
+
+int
+ahd_ovlyrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(OVLYRXMSG0_parse_table, 2, "OVLYRXMSG0",
+	    0x90, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DCHRXMSG0_parse_table[] = {
+	{ "CFNUM",		0x07, 0x07 },
+	{ "CDNUM",		0xf8, 0xf8 }
+};
+
+int
+ahd_dchrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DCHRXMSG0_parse_table, 2, "DCHRXMSG0",
+	    0x90, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t OVLYRXMSG1_parse_table[] = {
+	{ "CBNUM",		0xff, 0xff }
+};
+
+int
+ahd_ovlyrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(OVLYRXMSG1_parse_table, 1, "OVLYRXMSG1",
+	    0x91, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t NSENABLE_parse_table[] = {
+	{ "DCH0NSEN",		0x01, 0x01 },
+	{ "DCH1NSEN",		0x02, 0x02 },
+	{ "SGNSEN",		0x04, 0x04 },
+	{ "CMCNSEN",		0x08, 0x08 },
+	{ "OVLYNSEN",		0x10, 0x10 },
+	{ "MSINSEN",		0x20, 0x20 }
+};
+
+int
+ahd_nsenable_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NSENABLE_parse_table, 6, "NSENABLE",
+	    0x91, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DCHRXMSG1_parse_table[] = {
+	{ "CBNUM",		0xff, 0xff }
+};
+
+int
+ahd_dchrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DCHRXMSG1_parse_table, 1, "DCHRXMSG1",
+	    0x91, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CMCRXMSG1_parse_table[] = {
+	{ "CBNUM",		0xff, 0xff }
+};
+
+int
+ahd_cmcrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CMCRXMSG1_parse_table, 1, "CMCRXMSG1",
+	    0x91, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DCHRXMSG2_parse_table[] = {
+	{ "MINDEX",		0xff, 0xff }
+};
+
+int
+ahd_dchrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DCHRXMSG2_parse_table, 1, "DCHRXMSG2",
+	    0x92, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t OVLYRXMSG2_parse_table[] = {
+	{ "MINDEX",		0xff, 0xff }
+};
+
+int
+ahd_ovlyrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(OVLYRXMSG2_parse_table, 1, "OVLYRXMSG2",
+	    0x92, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CMCRXMSG2_parse_table[] = {
+	{ "MINDEX",		0xff, 0xff }
+};
+
+int
+ahd_cmcrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CMCRXMSG2_parse_table, 1, "CMCRXMSG2",
+	    0x92, regvalue, cur_col, wrap));
+}
+
+int
+ahd_ost_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "OST",
+	    0x92, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DCHRXMSG3_parse_table[] = {
+	{ "MCLASS",		0x0f, 0x0f }
+};
+
+int
+ahd_dchrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DCHRXMSG3_parse_table, 1, "DCHRXMSG3",
+	    0x93, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CMCRXMSG3_parse_table[] = {
+	{ "MCLASS",		0x0f, 0x0f }
+};
+
+int
+ahd_cmcrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CMCRXMSG3_parse_table, 1, "CMCRXMSG3",
+	    0x93, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
+	{ "CMPABCDIS",		0x01, 0x01 },
+	{ "TSCSERREN",		0x02, 0x02 },
+	{ "SRSPDPEEN",		0x04, 0x04 },
+	{ "SPLTSTADIS",		0x08, 0x08 },
+	{ "SPLTSMADIS",		0x10, 0x10 },
+	{ "UNEXPSCIEN",		0x20, 0x20 },
+	{ "SERRPULSE",		0x80, 0x80 }
+};
+
+int
+ahd_pcixctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(PCIXCTL_parse_table, 7, "PCIXCTL",
+	    0x93, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t OVLYRXMSG3_parse_table[] = {
+	{ "MCLASS",		0x0f, 0x0f }
+};
+
+int
+ahd_ovlyrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(OVLYRXMSG3_parse_table, 1, "OVLYRXMSG3",
+	    0x93, regvalue, cur_col, wrap));
+}
+
+int
+ahd_ovlyseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "OVLYSEQBCNT",
+	    0x94, regvalue, cur_col, wrap));
+}
+
+int
+ahd_cmcseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CMCSEQBCNT",
+	    0x94, regvalue, cur_col, wrap));
+}
+
+int
+ahd_dchseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DCHSEQBCNT",
+	    0x94, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CMCSPLTSTAT0_parse_table[] = {
+	{ "RXSPLTRSP",		0x01, 0x01 },
+	{ "RXSCEMSG",		0x02, 0x02 },
+	{ "RXOVRUN",		0x04, 0x04 },
+	{ "CNTNOTCMPLT",	0x08, 0x08 },
+	{ "SCDATBUCKET",	0x10, 0x10 },
+	{ "SCADERR",		0x20, 0x20 },
+	{ "SCBCERR",		0x40, 0x40 },
+	{ "STAETERM",		0x80, 0x80 }
+};
+
+int
+ahd_cmcspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CMCSPLTSTAT0_parse_table, 8, "CMCSPLTSTAT0",
+	    0x96, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t OVLYSPLTSTAT0_parse_table[] = {
+	{ "RXSPLTRSP",		0x01, 0x01 },
+	{ "RXSCEMSG",		0x02, 0x02 },
+	{ "RXOVRUN",		0x04, 0x04 },
+	{ "CNTNOTCMPLT",	0x08, 0x08 },
+	{ "SCDATBUCKET",	0x10, 0x10 },
+	{ "SCADERR",		0x20, 0x20 },
+	{ "SCBCERR",		0x40, 0x40 },
+	{ "STAETERM",		0x80, 0x80 }
+};
+
+int
+ahd_ovlyspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(OVLYSPLTSTAT0_parse_table, 8, "OVLYSPLTSTAT0",
+	    0x96, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
+	{ "RXSPLTRSP",		0x01, 0x01 },
+	{ "RXSCEMSG",		0x02, 0x02 },
+	{ "RXOVRUN",		0x04, 0x04 },
+	{ "CNTNOTCMPLT",	0x08, 0x08 },
+	{ "SCDATBUCKET",	0x10, 0x10 },
+	{ "SCADERR",		0x20, 0x20 },
+	{ "SCBCERR",		0x40, 0x40 },
+	{ "STAETERM",		0x80, 0x80 }
+};
+
+int
+ahd_dchspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DCHSPLTSTAT0_parse_table, 8, "DCHSPLTSTAT0",
+	    0x96, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
+	{ "RXDATABUCKET",	0x01, 0x01 }
+};
+
+int
+ahd_dchspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DCHSPLTSTAT1_parse_table, 1, "DCHSPLTSTAT1",
+	    0x97, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CMCSPLTSTAT1_parse_table[] = {
+	{ "RXDATABUCKET",	0x01, 0x01 }
+};
+
+int
+ahd_cmcspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CMCSPLTSTAT1_parse_table, 1, "CMCSPLTSTAT1",
+	    0x97, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t OVLYSPLTSTAT1_parse_table[] = {
+	{ "RXDATABUCKET",	0x01, 0x01 }
+};
+
+int
+ahd_ovlyspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(OVLYSPLTSTAT1_parse_table, 1, "OVLYSPLTSTAT1",
+	    0x97, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SGRXMSG0_parse_table[] = {
+	{ "CFNUM",		0x07, 0x07 },
+	{ "CDNUM",		0xf8, 0xf8 }
+};
+
+int
+ahd_sgrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SGRXMSG0_parse_table, 2, "SGRXMSG0",
+	    0x98, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SLVSPLTOUTADR0_parse_table[] = {
+	{ "LOWER_ADDR",		0x7f, 0x7f }
+};
+
+int
+ahd_slvspltoutadr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SLVSPLTOUTADR0_parse_table, 1, "SLVSPLTOUTADR0",
+	    0x98, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SGRXMSG1_parse_table[] = {
+	{ "CBNUM",		0xff, 0xff }
+};
+
+int
+ahd_sgrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SGRXMSG1_parse_table, 1, "SGRXMSG1",
+	    0x99, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SLVSPLTOUTADR1_parse_table[] = {
+	{ "REQ_FNUM",		0x07, 0x07 },
+	{ "REQ_DNUM",		0xf8, 0xf8 }
+};
+
+int
+ahd_slvspltoutadr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SLVSPLTOUTADR1_parse_table, 2, "SLVSPLTOUTADR1",
+	    0x99, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SGRXMSG2_parse_table[] = {
+	{ "MINDEX",		0xff, 0xff }
+};
+
+int
+ahd_sgrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SGRXMSG2_parse_table, 1, "SGRXMSG2",
+	    0x9a, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SLVSPLTOUTADR2_parse_table[] = {
+	{ "REQ_BNUM",		0xff, 0xff }
+};
+
+int
+ahd_slvspltoutadr2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SLVSPLTOUTADR2_parse_table, 1, "SLVSPLTOUTADR2",
+	    0x9a, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SGRXMSG3_parse_table[] = {
+	{ "MCLASS",		0x0f, 0x0f }
+};
+
+int
+ahd_sgrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SGRXMSG3_parse_table, 1, "SGRXMSG3",
+	    0x9b, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SLVSPLTOUTADR3_parse_table[] = {
+	{ "RLXORD",		0x10, 0x10 },
+	{ "TAG_NUM",		0x1f, 0x1f }
+};
+
+int
+ahd_slvspltoutadr3_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SLVSPLTOUTADR3_parse_table, 2, "SLVSPLTOUTADR3",
+	    0x9b, regvalue, cur_col, wrap));
+}
+
+int
+ahd_sgseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SGSEQBCNT",
+	    0x9c, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SLVSPLTOUTATTR0_parse_table[] = {
+	{ "LOWER_BCNT",		0xff, 0xff }
+};
+
+int
+ahd_slvspltoutattr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SLVSPLTOUTATTR0_parse_table, 1, "SLVSPLTOUTATTR0",
+	    0x9c, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SLVSPLTOUTATTR1_parse_table[] = {
+	{ "CMPLT_FNUM",		0x07, 0x07 },
+	{ "CMPLT_DNUM",		0xf8, 0xf8 }
+};
+
+int
+ahd_slvspltoutattr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SLVSPLTOUTATTR1_parse_table, 2, "SLVSPLTOUTATTR1",
+	    0x9d, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SLVSPLTOUTATTR2_parse_table[] = {
+	{ "CMPLT_BNUM",		0xff, 0xff }
+};
+
+int
+ahd_slvspltoutattr2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SLVSPLTOUTATTR2_parse_table, 1, "SLVSPLTOUTATTR2",
+	    0x9e, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
+	{ "RXSPLTRSP",		0x01, 0x01 },
+	{ "RXSCEMSG",		0x02, 0x02 },
+	{ "RXOVRUN",		0x04, 0x04 },
+	{ "CNTNOTCMPLT",	0x08, 0x08 },
+	{ "SCDATBUCKET",	0x10, 0x10 },
+	{ "SCADERR",		0x20, 0x20 },
+	{ "SCBCERR",		0x40, 0x40 },
+	{ "STAETERM",		0x80, 0x80 }
+};
+
+int
+ahd_sgspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SGSPLTSTAT0_parse_table, 8, "SGSPLTSTAT0",
+	    0x9e, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SFUNCT_parse_table[] = {
+	{ "TEST_NUM",		0x0f, 0x0f },
+	{ "TEST_GROUP",		0xf0, 0xf0 }
+};
+
+int
+ahd_sfunct_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SFUNCT_parse_table, 2, "SFUNCT",
+	    0x9f, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = {
+	{ "RXDATABUCKET",	0x01, 0x01 }
+};
+
+int
+ahd_sgspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SGSPLTSTAT1_parse_table, 1, "SGSPLTSTAT1",
+	    0x9f, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
+	{ "DPR",		0x01, 0x01 },
+	{ "TWATERR",		0x02, 0x02 },
+	{ "RDPERR",		0x04, 0x04 },
+	{ "SCAAPERR",		0x08, 0x08 },
+	{ "RTA",		0x10, 0x10 },
+	{ "RMA",		0x20, 0x20 },
+	{ "SSE",		0x40, 0x40 },
+	{ "DPE",		0x80, 0x80 }
+};
+
+int
+ahd_df0pcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DF0PCISTAT_parse_table, 8, "DF0PCISTAT",
+	    0xa0, regvalue, cur_col, wrap));
+}
+
+int
+ahd_reg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "REG0",
+	    0xa0, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DF1PCISTAT_parse_table[] = {
+	{ "DPR",		0x01, 0x01 },
+	{ "TWATERR",		0x02, 0x02 },
+	{ "RDPERR",		0x04, 0x04 },
+	{ "SCAAPERR",		0x08, 0x08 },
+	{ "RTA",		0x10, 0x10 },
+	{ "RMA",		0x20, 0x20 },
+	{ "SSE",		0x40, 0x40 },
+	{ "DPE",		0x80, 0x80 }
+};
+
+int
+ahd_df1pcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DF1PCISTAT_parse_table, 8, "DF1PCISTAT",
+	    0xa1, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SGPCISTAT_parse_table[] = {
+	{ "DPR",		0x01, 0x01 },
+	{ "RDPERR",		0x04, 0x04 },
+	{ "SCAAPERR",		0x08, 0x08 },
+	{ "RTA",		0x10, 0x10 },
+	{ "RMA",		0x20, 0x20 },
+	{ "SSE",		0x40, 0x40 },
+	{ "DPE",		0x80, 0x80 }
+};
+
+int
+ahd_sgpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SGPCISTAT_parse_table, 7, "SGPCISTAT",
+	    0xa2, regvalue, cur_col, wrap));
+}
+
+int
+ahd_reg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "REG1",
+	    0xa2, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CMCPCISTAT_parse_table[] = {
+	{ "DPR",		0x01, 0x01 },
+	{ "TWATERR",		0x02, 0x02 },
+	{ "RDPERR",		0x04, 0x04 },
+	{ "SCAAPERR",		0x08, 0x08 },
+	{ "RTA",		0x10, 0x10 },
+	{ "RMA",		0x20, 0x20 },
+	{ "SSE",		0x40, 0x40 },
+	{ "DPE",		0x80, 0x80 }
+};
+
+int
+ahd_cmcpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CMCPCISTAT_parse_table, 8, "CMCPCISTAT",
+	    0xa3, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t OVLYPCISTAT_parse_table[] = {
+	{ "DPR",		0x01, 0x01 },
+	{ "RDPERR",		0x04, 0x04 },
+	{ "SCAAPERR",		0x08, 0x08 },
+	{ "RTA",		0x10, 0x10 },
+	{ "RMA",		0x20, 0x20 },
+	{ "SSE",		0x40, 0x40 },
+	{ "DPE",		0x80, 0x80 }
+};
+
+int
+ahd_ovlypcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(OVLYPCISTAT_parse_table, 7, "OVLYPCISTAT",
+	    0xa4, regvalue, cur_col, wrap));
+}
+
+int
+ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "REG_ISR",
+	    0xa4, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
+	{ "SEGS_AVAIL",		0x01, 0x01 },
+	{ "LOADING_NEEDED",	0x02, 0x02 },
+	{ "FETCH_INPROG",	0x04, 0x04 }
+};
+
+int
+ahd_sg_state_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SG_STATE_parse_table, 3, "SG_STATE",
+	    0xa6, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t MSIPCISTAT_parse_table[] = {
+	{ "DPR",		0x01, 0x01 },
+	{ "TWATERR",		0x02, 0x02 },
+	{ "CLRPENDMSI",		0x08, 0x08 },
+	{ "RTA",		0x10, 0x10 },
+	{ "RMA",		0x20, 0x20 },
+	{ "SSE",		0x40, 0x40 }
+};
+
+int
+ahd_msipcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(MSIPCISTAT_parse_table, 6, "MSIPCISTAT",
+	    0xa6, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
+	{ "TWATERR",		0x02, 0x02 },
+	{ "STA",		0x08, 0x08 },
+	{ "SSE",		0x40, 0x40 },
+	{ "DPE",		0x80, 0x80 }
+};
+
+int
+ahd_targpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(TARGPCISTAT_parse_table, 4, "TARGPCISTAT",
+	    0xa7, regvalue, cur_col, wrap));
+}
+
+int
+ahd_data_count_odd_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DATA_COUNT_ODD",
+	    0xa7, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCBPTR",
+	    0xa8, regvalue, cur_col, wrap));
+}
+
+int
+ahd_ccscbacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CCSCBACNT",
+	    0xab, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
+	{ "SCBPTR_OFF",		0x07, 0x07 },
+	{ "SCBPTR_ADDR",	0x38, 0x38 },
+	{ "AUSCBPTR_EN",	0x80, 0x80 }
+};
+
+int
+ahd_scbautoptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCBAUTOPTR_parse_table, 3, "SCBAUTOPTR",
+	    0xab, regvalue, cur_col, wrap));
+}
+
+int
+ahd_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CCSGADDR",
+	    0xac, regvalue, cur_col, wrap));
+}
+
+int
+ahd_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CCSCBADDR",
+	    0xac, regvalue, cur_col, wrap));
+}
+
+int
+ahd_ccscbadr_bk_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CCSCBADR_BK",
+	    0xac, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CMC_RAMBIST_parse_table[] = {
+	{ "CMC_BUFFER_BIST_EN",	0x01, 0x01 },
+	{ "CMC_BUFFER_BIST_FAIL",0x02, 0x02 },
+	{ "SG_BIST_EN",		0x10, 0x10 },
+	{ "SG_BIST_FAIL",	0x20, 0x20 },
+	{ "SCBRAMBIST_FAIL",	0x40, 0x40 },
+	{ "SG_ELEMENT_SIZE",	0x80, 0x80 }
+};
+
+int
+ahd_cmc_rambist_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CMC_RAMBIST_parse_table, 6, "CMC_RAMBIST",
+	    0xad, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CCSGCTL_parse_table[] = {
+	{ "CCSGRESET",		0x01, 0x01 },
+	{ "SG_FETCH_REQ",	0x02, 0x02 },
+	{ "CCSGENACK",		0x08, 0x08 },
+	{ "SG_CACHE_AVAIL",	0x10, 0x10 },
+	{ "CCSGDONE",		0x80, 0x80 },
+	{ "CCSGEN",		0x0c, 0x0c }
+};
+
+int
+ahd_ccsgctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CCSGCTL_parse_table, 6, "CCSGCTL",
+	    0xad, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
+	{ "CCSCBRESET",		0x01, 0x01 },
+	{ "CCSCBDIR",		0x04, 0x04 },
+	{ "CCSCBEN",		0x08, 0x08 },
+	{ "CCARREN",		0x10, 0x10 },
+	{ "ARRDONE",		0x40, 0x40 },
+	{ "CCSCBDONE",		0x80, 0x80 }
+};
+
+int
+ahd_ccscbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(CCSCBCTL_parse_table, 6, "CCSCBCTL",
+	    0xad, regvalue, cur_col, wrap));
+}
+
+int
+ahd_ccsgram_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CCSGRAM",
+	    0xb0, regvalue, cur_col, wrap));
+}
+
+int
+ahd_flexadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "FLEXADR",
+	    0xb0, regvalue, cur_col, wrap));
+}
+
+int
+ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CCSCBRAM",
+	    0xb0, regvalue, cur_col, wrap));
+}
+
+int
+ahd_flexcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "FLEXCNT",
+	    0xb3, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t FLEXDMASTAT_parse_table[] = {
+	{ "FLEXDMADONE",	0x01, 0x01 },
+	{ "FLEXDMAERR",		0x02, 0x02 }
+};
+
+int
+ahd_flexdmastat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(FLEXDMASTAT_parse_table, 2, "FLEXDMASTAT",
+	    0xb5, regvalue, cur_col, wrap));
+}
+
+int
+ahd_flexdata_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "FLEXDATA",
+	    0xb6, regvalue, cur_col, wrap));
+}
+
+int
+ahd_brddat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "BRDDAT",
+	    0xb8, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t BRDCTL_parse_table[] = {
+	{ "BRDSTB",		0x01, 0x01 },
+	{ "BRDRW",		0x02, 0x02 },
+	{ "BRDEN",		0x04, 0x04 },
+	{ "BRDADDR",		0x38, 0x38 },
+	{ "FLXARBREQ",		0x40, 0x40 },
+	{ "FLXARBACK",		0x80, 0x80 }
+};
+
+int
+ahd_brdctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(BRDCTL_parse_table, 6, "BRDCTL",
+	    0xb9, regvalue, cur_col, wrap));
+}
+
+int
+ahd_seeadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SEEADR",
+	    0xba, regvalue, cur_col, wrap));
+}
+
+int
+ahd_seedat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SEEDAT",
+	    0xbc, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SEECTL_parse_table[] = {
+	{ "SEEOP_ERAL",		0x40, 0x70 },
+	{ "SEEOP_WRITE",	0x50, 0x70 },
+	{ "SEEOP_READ",		0x60, 0x70 },
+	{ "SEEOP_ERASE",	0x70, 0x70 },
+	{ "SEESTART",		0x01, 0x01 },
+	{ "SEERST",		0x02, 0x02 },
+	{ "SEEOPCODE",		0x70, 0x70 },
+	{ "SEEOP_EWEN",		0x40, 0x40 },
+	{ "SEEOP_WALL",		0x40, 0x40 },
+	{ "SEEOP_EWDS",		0x40, 0x40 }
+};
+
+int
+ahd_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SEECTL_parse_table, 10, "SEECTL",
+	    0xbe, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SEESTAT_parse_table[] = {
+	{ "SEESTART",		0x01, 0x01 },
+	{ "SEEBUSY",		0x02, 0x02 },
+	{ "SEEARBACK",		0x04, 0x04 },
+	{ "LDALTID_L",		0x08, 0x08 },
+	{ "SEEOPCODE",		0x70, 0x70 },
+	{ "INIT_DONE",		0x80, 0x80 }
+};
+
+int
+ahd_seestat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SEESTAT_parse_table, 6, "SEESTAT",
+	    0xbe, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCBCNT",
+	    0xbf, regvalue, cur_col, wrap));
+}
+
+int
+ahd_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DFWADDR",
+	    0xc0, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DSPFLTRCTL_parse_table[] = {
+	{ "DSPFCNTSEL",		0x0f, 0x0f },
+	{ "EDGESENSE",		0x10, 0x10 },
+	{ "FLTRDISABLE",	0x20, 0x20 }
+};
+
+int
+ahd_dspfltrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DSPFLTRCTL_parse_table, 3, "DSPFLTRCTL",
+	    0xc0, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
+	{ "XMITOFFSTDIS",	0x02, 0x02 },
+	{ "RCVROFFSTDIS",	0x04, 0x04 },
+	{ "DESQDIS",		0x10, 0x10 },
+	{ "BYPASSENAB",		0x80, 0x80 }
+};
+
+int
+ahd_dspdatactl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DSPDATACTL_parse_table, 4, "DSPDATACTL",
+	    0xc1, regvalue, cur_col, wrap));
+}
+
+int
+ahd_dfraddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DFRADDR",
+	    0xc2, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DSPREQCTL_parse_table[] = {
+	{ "MANREQDLY",		0x3f, 0x3f },
+	{ "MANREQCTL",		0xc0, 0xc0 }
+};
+
+int
+ahd_dspreqctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DSPREQCTL_parse_table, 2, "DSPREQCTL",
+	    0xc2, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DSPACKCTL_parse_table[] = {
+	{ "MANACKDLY",		0x3f, 0x3f },
+	{ "MANACKCTL",		0xc0, 0xc0 }
+};
+
+int
+ahd_dspackctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DSPACKCTL_parse_table, 2, "DSPACKCTL",
+	    0xc3, regvalue, cur_col, wrap));
+}
+
+int
+ahd_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DFDAT",
+	    0xc4, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DSPSELECT_parse_table[] = {
+	{ "DSPSEL",		0x1f, 0x1f },
+	{ "AUTOINCEN",		0x80, 0x80 }
+};
+
+int
+ahd_dspselect_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DSPSELECT_parse_table, 2, "DSPSELECT",
+	    0xc4, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = {
+	{ "XMITMANVAL",		0x3f, 0x3f },
+	{ "AUTOXBCDIS",		0x80, 0x80 }
+};
+
+int
+ahd_wrtbiasctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(WRTBIASCTL_parse_table, 2, "WRTBIASCTL",
+	    0xc5, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t RCVRBIOSCTL_parse_table[] = {
+	{ "RCVRMANVAL",		0x3f, 0x3f },
+	{ "AUTORBCDIS",		0x80, 0x80 }
+};
+
+int
+ahd_rcvrbiosctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(RCVRBIOSCTL_parse_table, 2, "RCVRBIOSCTL",
+	    0xc6, regvalue, cur_col, wrap));
+}
+
+int
+ahd_wrtbiascalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "WRTBIASCALC",
+	    0xc7, regvalue, cur_col, wrap));
+}
+
+int
+ahd_dfptrs_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DFPTRS",
+	    0xc8, regvalue, cur_col, wrap));
+}
+
+int
+ahd_rcvrbiascalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "RCVRBIASCALC",
+	    0xc8, regvalue, cur_col, wrap));
+}
+
+int
+ahd_dfbkptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DFBKPTR",
+	    0xc9, regvalue, cur_col, wrap));
+}
+
+int
+ahd_skewcalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SKEWCALC",
+	    0xc9, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DFDBCTL_parse_table[] = {
+	{ "DFF_RAMBIST_EN",	0x01, 0x01 },
+	{ "DFF_RAMBIST_DONE",	0x02, 0x02 },
+	{ "DFF_RAMBIST_FAIL",	0x04, 0x04 },
+	{ "DFF_DIR_ERR",	0x08, 0x08 },
+	{ "DFF_CIO_RD_RDY",	0x10, 0x10 },
+	{ "DFF_CIO_WR_RDY",	0x20, 0x20 }
+};
+
+int
+ahd_dfdbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DFDBCTL_parse_table, 6, "DFDBCTL",
+	    0xcb, regvalue, cur_col, wrap));
+}
+
+int
+ahd_dfscnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DFSCNT",
+	    0xcc, regvalue, cur_col, wrap));
+}
+
+int
+ahd_dfbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DFBCNT",
+	    0xce, regvalue, cur_col, wrap));
+}
+
+int
+ahd_ovlyaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "OVLYADDR",
+	    0xd4, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
+	{ "LOADRAM",		0x01, 0x01 },
+	{ "SEQRESET",		0x02, 0x02 },
+	{ "STEP",		0x04, 0x04 },
+	{ "BRKADRINTEN",	0x08, 0x08 },
+	{ "FASTMODE",		0x10, 0x10 },
+	{ "FAILDIS",		0x20, 0x20 },
+	{ "PAUSEDIS",		0x40, 0x40 },
+	{ "PERRORDIS",		0x80, 0x80 }
+};
+
+int
+ahd_seqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SEQCTL0_parse_table, 8, "SEQCTL0",
+	    0xd6, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SEQCTL1_parse_table[] = {
+	{ "RAMBIST_EN",		0x01, 0x01 },
+	{ "RAMBIST_FAIL",	0x02, 0x02 },
+	{ "RAMBIST_DONE",	0x04, 0x04 },
+	{ "OVRLAY_DATA_CHK",	0x08, 0x08 }
+};
+
+int
+ahd_seqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SEQCTL1_parse_table, 4, "SEQCTL1",
+	    0xd7, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t FLAGS_parse_table[] = {
+	{ "CARRY",		0x01, 0x01 },
+	{ "ZERO",		0x02, 0x02 }
+};
+
+int
+ahd_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(FLAGS_parse_table, 2, "FLAGS",
+	    0xd8, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = {
+	{ "IRET",		0x01, 0x01 },
+	{ "INTMASK1",		0x02, 0x02 },
+	{ "INTMASK2",		0x04, 0x04 },
+	{ "SCS_SEQ_INT1M0",	0x08, 0x08 },
+	{ "SCS_SEQ_INT1M1",	0x10, 0x10 },
+	{ "INT1_CONTEXT",	0x20, 0x20 },
+	{ "INTVEC1DSL",		0x80, 0x80 }
+};
+
+int
+ahd_seqintctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SEQINTCTL_parse_table, 7, "SEQINTCTL",
+	    0xd9, regvalue, cur_col, wrap));
+}
+
+int
+ahd_seqram_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SEQRAM",
+	    0xda, regvalue, cur_col, wrap));
+}
+
+int
+ahd_prgmcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "PRGMCNT",
+	    0xde, regvalue, cur_col, wrap));
+}
+
+int
+ahd_accum_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "ACCUM",
+	    0xe0, regvalue, cur_col, wrap));
+}
+
+int
+ahd_sindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SINDEX",
+	    0xe2, regvalue, cur_col, wrap));
+}
+
+int
+ahd_dindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DINDEX",
+	    0xe4, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t BRKADDR1_parse_table[] = {
+	{ "BRKDIS",		0x80, 0x80 }
+};
+
+int
+ahd_brkaddr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(BRKADDR1_parse_table, 1, "BRKADDR1",
+	    0xe6, regvalue, cur_col, wrap));
+}
+
+int
+ahd_brkaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "BRKADDR0",
+	    0xe6, regvalue, cur_col, wrap));
+}
+
+int
+ahd_allones_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "ALLONES",
+	    0xe8, regvalue, cur_col, wrap));
+}
+
+int
+ahd_allzeros_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "ALLZEROS",
+	    0xea, regvalue, cur_col, wrap));
+}
+
+int
+ahd_none_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "NONE",
+	    0xea, regvalue, cur_col, wrap));
+}
+
+int
+ahd_sindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SINDIR",
+	    0xec, regvalue, cur_col, wrap));
+}
+
+int
+ahd_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "DINDIR",
+	    0xed, regvalue, cur_col, wrap));
+}
+
+int
+ahd_function1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "FUNCTION1",
+	    0xf0, regvalue, cur_col, wrap));
+}
+
+int
+ahd_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "STACK",
+	    0xf2, regvalue, cur_col, wrap));
+}
+
+int
+ahd_curaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CURADDR",
+	    0xf4, regvalue, cur_col, wrap));
+}
+
+int
+ahd_intvec1_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "INTVEC1_ADDR",
+	    0xf4, regvalue, cur_col, wrap));
+}
+
+int
+ahd_intvec2_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "INTVEC2_ADDR",
+	    0xf6, regvalue, cur_col, wrap));
+}
+
+int
+ahd_lastaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "LASTADDR",
+	    0xf6, regvalue, cur_col, wrap));
+}
+
+int
+ahd_longjmp_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "LONGJMP_ADDR",
+	    0xf8, regvalue, cur_col, wrap));
+}
+
+int
+ahd_accum_save_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "ACCUM_SAVE",
+	    0xfa, regvalue, cur_col, wrap));
+}
+
+int
+ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
+	    0x100, regvalue, cur_col, wrap));
+}
+
+int
+ahd_ahd_pci_config_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "AHD_PCI_CONFIG_BASE",
+	    0x100, regvalue, cur_col, wrap));
+}
+
+int
+ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SRAM_BASE",
+	    0x100, regvalue, cur_col, wrap));
+}
+
+int
+ahd_waiting_tid_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "WAITING_TID_HEAD",
+	    0x120, regvalue, cur_col, wrap));
+}
+
+int
+ahd_waiting_tid_tail_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "WAITING_TID_TAIL",
+	    0x122, regvalue, cur_col, wrap));
+}
+
+int
+ahd_next_queued_scb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "NEXT_QUEUED_SCB_ADDR",
+	    0x124, regvalue, cur_col, wrap));
+}
+
+int
+ahd_complete_scb_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "COMPLETE_SCB_HEAD",
+	    0x128, regvalue, cur_col, wrap));
+}
+
+int
+ahd_complete_scb_dmainprog_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "COMPLETE_SCB_DMAINPROG_HEAD",
+	    0x12a, regvalue, cur_col, wrap));
+}
+
+int
+ahd_complete_dma_scb_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_HEAD",
+	    0x12c, regvalue, cur_col, wrap));
+}
+
+int
+ahd_qfreeze_count_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "QFREEZE_COUNT",
+	    0x12e, regvalue, cur_col, wrap));
+}
+
+int
+ahd_saved_mode_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SAVED_MODE",
+	    0x130, regvalue, cur_col, wrap));
+}
+
+int
+ahd_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "MSG_OUT",
+	    0x131, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = {
+	{ "FIFORESET",		0x01, 0x01 },
+	{ "FIFOFLUSH",		0x02, 0x02 },
+	{ "DIRECTION",		0x04, 0x04 },
+	{ "HDMAEN",		0x08, 0x08 },
+	{ "HDMAENACK",		0x08, 0x08 },
+	{ "SDMAEN",		0x10, 0x10 },
+	{ "SDMAENACK",		0x10, 0x10 },
+	{ "SCSIEN",		0x20, 0x20 },
+	{ "WIDEODD",		0x40, 0x40 },
+	{ "PRELOADEN",		0x80, 0x80 }
+};
+
+int
+ahd_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(DMAPARAMS_parse_table, 10, "DMAPARAMS",
+	    0x132, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
+	{ "NO_DISCONNECT",	0x01, 0x01 },
+	{ "SPHASE_PENDING",	0x02, 0x02 },
+	{ "DPHASE_PENDING",	0x04, 0x04 },
+	{ "CMDPHASE_PENDING",	0x08, 0x08 },
+	{ "TARG_CMD_PENDING",	0x10, 0x10 },
+	{ "DPHASE",		0x20, 0x20 },
+	{ "NO_CDB_SENT",	0x40, 0x40 },
+	{ "TARGET_CMD_IS_TAGGED",0x40, 0x40 },
+	{ "NOT_IDENTIFIED",	0x80, 0x80 }
+};
+
+int
+ahd_seq_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SEQ_FLAGS_parse_table, 9, "SEQ_FLAGS",
+	    0x133, regvalue, cur_col, wrap));
+}
+
+int
+ahd_saved_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SAVED_SCSIID",
+	    0x134, regvalue, cur_col, wrap));
+}
+
+int
+ahd_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SAVED_LUN",
+	    0x135, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t LASTPHASE_parse_table[] = {
+	{ "P_DATAOUT",		0x00, 0xe0 },
+	{ "P_DATAOUT_DT",	0x20, 0xe0 },
+	{ "P_DATAIN",		0x40, 0xe0 },
+	{ "P_DATAIN_DT",	0x60, 0xe0 },
+	{ "P_COMMAND",		0x80, 0xe0 },
+	{ "P_MESGOUT",		0xa0, 0xe0 },
+	{ "P_STATUS",		0xc0, 0xe0 },
+	{ "P_MESGIN",		0xe0, 0xe0 },
+	{ "P_BUSFREE",		0x01, 0x01 },
+	{ "MSGI",		0x20, 0x20 },
+	{ "IOI",		0x40, 0x40 },
+	{ "CDI",		0x80, 0x80 },
+	{ "PHASE_MASK",		0xe0, 0xe0 }
+};
+
+int
+ahd_lastphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(LASTPHASE_parse_table, 13, "LASTPHASE",
+	    0x136, regvalue, cur_col, wrap));
+}
+
+int
+ahd_qoutfifo_entry_valid_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "QOUTFIFO_ENTRY_VALID_TAG",
+	    0x137, regvalue, cur_col, wrap));
+}
+
+int
+ahd_shared_data_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SHARED_DATA_ADDR",
+	    0x138, regvalue, cur_col, wrap));
+}
+
+int
+ahd_qoutfifo_next_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "QOUTFIFO_NEXT_ADDR",
+	    0x13c, regvalue, cur_col, wrap));
+}
+
+int
+ahd_kernel_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "KERNEL_TQINPOS",
+	    0x140, regvalue, cur_col, wrap));
+}
+
+int
+ahd_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "TQINPOS",
+	    0x141, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t ARG_1_parse_table[] = {
+	{ "CONT_MSG_LOOP_TARG",	0x02, 0x02 },
+	{ "CONT_MSG_LOOP_READ",	0x03, 0x03 },
+	{ "CONT_MSG_LOOP_WRITE",0x04, 0x04 },
+	{ "EXIT_MSG_LOOP",	0x08, 0x08 },
+	{ "MSGOUT_PHASEMIS",	0x10, 0x10 },
+	{ "SEND_REJ",		0x20, 0x20 },
+	{ "SEND_SENSE",		0x40, 0x40 },
+	{ "SEND_MSG",		0x80, 0x80 }
+};
+
+int
+ahd_arg_1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(ARG_1_parse_table, 8, "ARG_1",
+	    0x142, regvalue, cur_col, wrap));
+}
+
+int
+ahd_arg_2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "ARG_2",
+	    0x143, regvalue, cur_col, wrap));
+}
+
+int
+ahd_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "LAST_MSG",
+	    0x144, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
+	{ "ALTSTIM",		0x01, 0x01 },
+	{ "ENAUTOATNP",		0x02, 0x02 },
+	{ "MANUALP",		0x0c, 0x0c },
+	{ "ENRSELI",		0x10, 0x10 },
+	{ "ENSELI",		0x20, 0x20 },
+	{ "MANUALCTL",		0x40, 0x40 }
+};
+
+int
+ahd_scsiseq_template_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCSISEQ_TEMPLATE_parse_table, 6, "SCSISEQ_TEMPLATE",
+	    0x145, regvalue, cur_col, wrap));
+}
+
+int
+ahd_initiator_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "INITIATOR_TAG",
+	    0x146, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
+	{ "TARGET_MSG_PENDING",	0x02, 0x02 },
+	{ "SELECTOUT_QFROZEN",	0x04, 0x04 }
+};
+
+int
+ahd_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SEQ_FLAGS2_parse_table, 2, "SEQ_FLAGS2",
+	    0x147, regvalue, cur_col, wrap));
+}
+
+int
+ahd_allocfifo_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "ALLOCFIFO_SCBPTR",
+	    0x148, regvalue, cur_col, wrap));
+}
+
+int
+ahd_int_coalescing_timer_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "INT_COALESCING_TIMER",
+	    0x14a, regvalue, cur_col, wrap));
+}
+
+int
+ahd_int_coalescing_maxcmds_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "INT_COALESCING_MAXCMDS",
+	    0x14c, regvalue, cur_col, wrap));
+}
+
+int
+ahd_int_coalescing_mincmds_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "INT_COALESCING_MINCMDS",
+	    0x14d, regvalue, cur_col, wrap));
+}
+
+int
+ahd_cmds_pending_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CMDS_PENDING",
+	    0x14e, regvalue, cur_col, wrap));
+}
+
+int
+ahd_int_coalescing_cmdcount_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "INT_COALESCING_CMDCOUNT",
+	    0x150, regvalue, cur_col, wrap));
+}
+
+int
+ahd_local_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "LOCAL_HS_MAILBOX",
+	    0x151, regvalue, cur_col, wrap));
+}
+
+int
+ahd_cmdsize_table_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "CMDSIZE_TABLE",
+	    0x152, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_BASE",
+	    0x180, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT",
+	    0x180, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = {
+	{ "SG_LIST_NULL",	0x01, 0x01 },
+	{ "SG_OVERRUN_RESID",	0x02, 0x02 },
+	{ "SG_ADDR_MASK",	0xf8, 0xf8 }
+};
+
+int
+ahd_scb_residual_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCB_RESIDUAL_SGPTR_parse_table, 3, "SCB_RESIDUAL_SGPTR",
+	    0x184, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_scsi_status_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_SCSI_STATUS",
+	    0x188, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_target_phases_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_TARGET_PHASES",
+	    0x189, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_target_data_dir_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_TARGET_DATA_DIR",
+	    0x18a, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_target_itag_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_TARGET_ITAG",
+	    0x18b, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_sense_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR",
+	    0x18c, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_TAG",
+	    0x190, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
+	{ "SCB_TAG_TYPE",	0x03, 0x03 },
+	{ "DISCONNECTED",	0x04, 0x04 },
+	{ "STATUS_RCVD",	0x08, 0x08 },
+	{ "MK_MESSAGE",		0x10, 0x10 },
+	{ "TAG_ENB",		0x20, 0x20 },
+	{ "DISCENB",		0x40, 0x40 },
+	{ "TARGET_SCB",		0x80, 0x80 }
+};
+
+int
+ahd_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCB_CONTROL_parse_table, 7, "SCB_CONTROL",
+	    0x192, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
+	{ "OID",		0x0f, 0x0f },
+	{ "TID",		0xf0, 0xf0 }
+};
+
+int
+ahd_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCB_SCSIID_parse_table, 2, "SCB_SCSIID",
+	    0x193, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCB_LUN_parse_table[] = {
+	{ "LID",		0xff, 0xff }
+};
+
+int
+ahd_scb_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCB_LUN_parse_table, 1, "SCB_LUN",
+	    0x194, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = {
+	{ "SCB_XFERLEN_ODD",	0x01, 0x01 }
+};
+
+int
+ahd_scb_task_attribute_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCB_TASK_ATTRIBUTE_parse_table, 1, "SCB_TASK_ATTRIBUTE",
+	    0x195, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = {
+	{ "SCB_CDB_LEN_PTR",	0x80, 0x80 }
+};
+
+int
+ahd_scb_cdb_len_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCB_CDB_LEN_parse_table, 1, "SCB_CDB_LEN",
+	    0x196, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_task_management_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_TASK_MANAGEMENT",
+	    0x197, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_DATAPTR",
+	    0x198, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
+	{ "SG_HIGH_ADDR_BITS",	0x7f, 0x7f },
+	{ "SG_LAST_SEG",	0x80, 0x80 }
+};
+
+int
+ahd_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCB_DATACNT_parse_table, 2, "SCB_DATACNT",
+	    0x1a0, regvalue, cur_col, wrap));
+}
+
+static ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
+	{ "SG_LIST_NULL",	0x01, 0x01 },
+	{ "SG_FULL_RESID",	0x02, 0x02 },
+	{ "SG_STATUS_VALID",	0x04, 0x04 }
+};
+
+int
+ahd_scb_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(SCB_SGPTR_parse_table, 3, "SCB_SGPTR",
+	    0x1a4, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_BUSADDR",
+	    0x1a8, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_next_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_NEXT",
+	    0x1ac, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_next2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_NEXT2",
+	    0x1ae, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_spare_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_SPARE",
+	    0x1b0, regvalue, cur_col, wrap));
+}
+
+int
+ahd_scb_disconnected_lists_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS",
+	    0x1b8, regvalue, cur_col, wrap));
+}
+
diff --git a/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
new file mode 100644
index 0000000..77c471f
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
@@ -0,0 +1,1139 @@
+/*
+ * DO NOT EDIT - This file is automatically generated
+ *		 from the following source files:
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#94 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#70 $
+ */
+static uint8_t seqprog[] = {
+	0xff, 0x02, 0x06, 0x78,
+	0x00, 0xea, 0x50, 0x59,
+	0x01, 0xea, 0x04, 0x30,
+	0xff, 0x04, 0x0c, 0x78,
+	0x19, 0xea, 0x50, 0x59,
+	0x19, 0xea, 0x04, 0x00,
+	0x33, 0xea, 0x44, 0x59,
+	0x33, 0xea, 0x00, 0x00,
+	0x60, 0x3a, 0x1a, 0x68,
+	0x04, 0x47, 0x1b, 0x68,
+	0xff, 0x21, 0x1b, 0x70,
+	0x40, 0x4b, 0x92, 0x69,
+	0x00, 0xe2, 0x54, 0x59,
+	0x40, 0x4b, 0x92, 0x69,
+	0x20, 0x4b, 0x82, 0x69,
+	0xfc, 0x42, 0x24, 0x78,
+	0x10, 0x40, 0x24, 0x78,
+	0x00, 0xe2, 0xc4, 0x5d,
+	0x20, 0x4d, 0x28, 0x78,
+	0x00, 0xe2, 0xc4, 0x5d,
+	0x30, 0x3f, 0xc0, 0x09,
+	0x30, 0xe0, 0x30, 0x60,
+	0x7f, 0x4a, 0x94, 0x08,
+	0x00, 0xe2, 0x32, 0x40,
+	0xc0, 0x4a, 0x94, 0x00,
+	0x00, 0xe2, 0x3e, 0x58,
+	0x00, 0xe2, 0x56, 0x58,
+	0x00, 0xe2, 0x66, 0x58,
+	0x00, 0xe2, 0x06, 0x40,
+	0x33, 0xea, 0x44, 0x59,
+	0x33, 0xea, 0x00, 0x00,
+	0x01, 0x52, 0x64, 0x78,
+	0x02, 0x58, 0x50, 0x31,
+	0xff, 0xea, 0x10, 0x0b,
+	0xff, 0x97, 0x4f, 0x78,
+	0x50, 0x4b, 0x4a, 0x68,
+	0xbf, 0x3a, 0x74, 0x08,
+	0x14, 0xea, 0x50, 0x59,
+	0x14, 0xea, 0x04, 0x00,
+	0x08, 0x92, 0x25, 0x03,
+	0xff, 0x90, 0x3f, 0x68,
+	0x00, 0xe2, 0x56, 0x5b,
+	0x00, 0xe2, 0x3e, 0x40,
+	0x00, 0xea, 0x44, 0x59,
+	0x01, 0xea, 0x00, 0x30,
+	0x80, 0xf9, 0x5e, 0x68,
+	0x00, 0xe2, 0x42, 0x59,
+	0x11, 0xea, 0x44, 0x59,
+	0x11, 0xea, 0x00, 0x00,
+	0x80, 0xf9, 0x42, 0x79,
+	0xff, 0xea, 0xd4, 0x0d,
+	0x22, 0xea, 0x44, 0x59,
+	0x22, 0xea, 0x00, 0x00,
+	0x10, 0x16, 0x70, 0x78,
+	0x01, 0x0b, 0xa2, 0x32,
+	0x10, 0x16, 0x2c, 0x00,
+	0x18, 0xad, 0x00, 0x79,
+	0x04, 0xad, 0xca, 0x68,
+	0x80, 0xad, 0x64, 0x78,
+	0x10, 0xad, 0x98, 0x78,
+	0xff, 0x88, 0x83, 0x68,
+	0xe7, 0xad, 0x5a, 0x09,
+	0x02, 0x8c, 0x59, 0x32,
+	0x02, 0x28, 0x19, 0x33,
+	0x02, 0xa8, 0x50, 0x36,
+	0x33, 0xea, 0x44, 0x59,
+	0x33, 0xea, 0x00, 0x00,
+	0x40, 0x3a, 0x64, 0x68,
+	0x50, 0x4b, 0x64, 0x68,
+	0x22, 0xea, 0x44, 0x59,
+	0x22, 0xea, 0x00, 0x00,
+	0xe7, 0xad, 0x5a, 0x09,
+	0x02, 0x8c, 0x59, 0x32,
+	0x1a, 0xea, 0x50, 0x59,
+	0x1a, 0xea, 0x04, 0x00,
+	0xff, 0xea, 0xd4, 0x0d,
+	0xe7, 0xad, 0x5a, 0x09,
+	0x00, 0xe2, 0xa6, 0x58,
+	0xff, 0xea, 0x56, 0x02,
+	0x04, 0x7c, 0x78, 0x32,
+	0x20, 0x16, 0x64, 0x78,
+	0x04, 0x38, 0x79, 0x32,
+	0x80, 0x37, 0x6f, 0x16,
+	0xff, 0x2d, 0xb5, 0x60,
+	0xff, 0x29, 0xb5, 0x60,
+	0x40, 0x51, 0xc5, 0x78,
+	0xff, 0x4f, 0xb5, 0x68,
+	0xff, 0x4d, 0xc1, 0x19,
+	0x00, 0x4e, 0xd5, 0x19,
+	0x00, 0xe2, 0xc4, 0x50,
+	0x01, 0x4c, 0xc1, 0x31,
+	0x00, 0x50, 0xd5, 0x19,
+	0x00, 0xe2, 0xc4, 0x48,
+	0x80, 0x18, 0x64, 0x78,
+	0x02, 0x4a, 0x1d, 0x30,
+	0x10, 0xea, 0x18, 0x00,
+	0x60, 0x18, 0x30, 0x00,
+	0x7f, 0x18, 0x30, 0x0c,
+	0x02, 0xea, 0x02, 0x00,
+	0xff, 0xea, 0xa0, 0x0a,
+	0x80, 0x18, 0x30, 0x04,
+	0x40, 0xad, 0x64, 0x78,
+	0xe7, 0xad, 0x5a, 0x09,
+	0x02, 0xa8, 0x40, 0x31,
+	0xff, 0xea, 0xc0, 0x09,
+	0x01, 0x4e, 0x9d, 0x1a,
+	0x00, 0x4f, 0x9f, 0x22,
+	0x01, 0x94, 0x6d, 0x33,
+	0x01, 0xea, 0x20, 0x33,
+	0x04, 0xac, 0x49, 0x32,
+	0xff, 0xea, 0x5a, 0x03,
+	0xff, 0xea, 0x5e, 0x03,
+	0x01, 0x10, 0xd4, 0x31,
+	0x10, 0x92, 0xf5, 0x68,
+	0x3d, 0x93, 0xc5, 0x29,
+	0xfe, 0xe2, 0xc4, 0x09,
+	0x01, 0xea, 0xc6, 0x01,
+	0x02, 0xe2, 0xc8, 0x31,
+	0x02, 0xec, 0x50, 0x31,
+	0x02, 0xa0, 0xda, 0x31,
+	0xff, 0xa9, 0xf4, 0x70,
+	0x02, 0xa0, 0x58, 0x37,
+	0xff, 0x21, 0xfd, 0x70,
+	0x02, 0x22, 0x51, 0x31,
+	0x02, 0xa0, 0x5c, 0x33,
+	0x02, 0xa0, 0x44, 0x36,
+	0x02, 0xa0, 0x40, 0x32,
+	0x02, 0xa0, 0x44, 0x36,
+	0x04, 0x47, 0x05, 0x69,
+	0x40, 0x16, 0x30, 0x69,
+	0xff, 0x2d, 0x35, 0x61,
+	0xff, 0x29, 0x65, 0x70,
+	0x01, 0x37, 0xc1, 0x31,
+	0x02, 0x28, 0x55, 0x32,
+	0x01, 0xea, 0x5a, 0x01,
+	0x04, 0x3c, 0xf9, 0x30,
+	0x02, 0x28, 0x51, 0x31,
+	0x01, 0xa8, 0x60, 0x31,
+	0x00, 0xa9, 0x60, 0x01,
+	0x01, 0x14, 0xd4, 0x31,
+	0x01, 0x50, 0xa1, 0x1a,
+	0xff, 0x4e, 0x9d, 0x1a,
+	0xff, 0x4f, 0x9f, 0x22,
+	0xff, 0x8d, 0x29, 0x71,
+	0x80, 0xac, 0x28, 0x71,
+	0x20, 0x16, 0x28, 0x69,
+	0x02, 0x8c, 0x51, 0x31,
+	0x00, 0xe2, 0x12, 0x41,
+	0x01, 0xac, 0x08, 0x31,
+	0x09, 0xea, 0x5a, 0x01,
+	0x02, 0x8c, 0x51, 0x32,
+	0xff, 0xea, 0x1a, 0x07,
+	0x04, 0x24, 0xf9, 0x30,
+	0x1d, 0xea, 0x3a, 0x41,
+	0x02, 0x2c, 0x51, 0x31,
+	0x04, 0xa8, 0xf9, 0x30,
+	0x19, 0xea, 0x3a, 0x41,
+	0x06, 0xea, 0x08, 0x81,
+	0x01, 0xe2, 0x5a, 0x35,
+	0x02, 0xf2, 0xf0, 0x35,
+	0x02, 0xf2, 0xf0, 0x31,
+	0x02, 0xf8, 0xe4, 0x35,
+	0x80, 0xea, 0xb2, 0x01,
+	0x01, 0xe2, 0x00, 0x30,
+	0xff, 0xea, 0xb2, 0x0d,
+	0x80, 0xea, 0xb2, 0x01,
+	0x11, 0x00, 0x00, 0x10,
+	0xff, 0xea, 0xb2, 0x0d,
+	0x01, 0xe2, 0x04, 0x30,
+	0x01, 0xea, 0x04, 0x34,
+	0x02, 0x20, 0xbd, 0x30,
+	0x02, 0x20, 0xb9, 0x30,
+	0x02, 0x20, 0x51, 0x31,
+	0x4c, 0x93, 0xd7, 0x28,
+	0x10, 0x92, 0x63, 0x79,
+	0x01, 0x6b, 0xc0, 0x30,
+	0x02, 0x64, 0xc8, 0x00,
+	0x40, 0x3a, 0x74, 0x04,
+	0x00, 0xe2, 0x56, 0x58,
+	0x33, 0xea, 0x44, 0x59,
+	0x33, 0xea, 0x00, 0x00,
+	0x30, 0x3f, 0xc0, 0x09,
+	0x30, 0xe0, 0x64, 0x61,
+	0x20, 0x3f, 0x7a, 0x69,
+	0x10, 0x3f, 0x64, 0x79,
+	0x02, 0xea, 0x7e, 0x00,
+	0x00, 0xea, 0x44, 0x59,
+	0x01, 0xea, 0x00, 0x30,
+	0x02, 0x48, 0x51, 0x35,
+	0x01, 0xea, 0x7e, 0x00,
+	0x11, 0xea, 0x44, 0x59,
+	0x11, 0xea, 0x00, 0x00,
+	0x02, 0x48, 0x51, 0x35,
+	0x08, 0xea, 0x98, 0x00,
+	0x08, 0x57, 0xae, 0x00,
+	0x08, 0x3c, 0x78, 0x00,
+	0xf0, 0x49, 0x68, 0x0a,
+	0x0f, 0x67, 0xc0, 0x09,
+	0x00, 0x34, 0x69, 0x02,
+	0x20, 0xea, 0x96, 0x00,
+	0x00, 0xe2, 0xf8, 0x41,
+	0x40, 0x3a, 0xae, 0x69,
+	0x02, 0x55, 0x06, 0x68,
+	0x02, 0x56, 0xae, 0x69,
+	0xff, 0x5b, 0xae, 0x61,
+	0x02, 0x20, 0x51, 0x31,
+	0x80, 0xea, 0xb2, 0x01,
+	0x44, 0xea, 0x00, 0x00,
+	0x01, 0x33, 0xc0, 0x31,
+	0x33, 0xea, 0x00, 0x00,
+	0xff, 0xea, 0xb2, 0x09,
+	0xff, 0xe0, 0xc0, 0x19,
+	0xff, 0xe0, 0xb0, 0x79,
+	0x02, 0xac, 0x51, 0x31,
+	0x00, 0xe2, 0xa6, 0x41,
+	0x02, 0x5e, 0x50, 0x31,
+	0x02, 0xa8, 0xb8, 0x30,
+	0x02, 0x5c, 0x50, 0x31,
+	0xff, 0xad, 0xc1, 0x71,
+	0x02, 0xac, 0x41, 0x31,
+	0x02, 0x22, 0x51, 0x31,
+	0x02, 0xa0, 0x5c, 0x33,
+	0x02, 0xa0, 0x44, 0x32,
+	0x00, 0xe2, 0xca, 0x41,
+	0x10, 0x92, 0xcb, 0x69,
+	0x3d, 0x93, 0xc9, 0x29,
+	0x01, 0xe4, 0xc8, 0x01,
+	0x01, 0xea, 0xca, 0x01,
+	0xff, 0xea, 0xda, 0x01,
+	0x02, 0x20, 0x51, 0x31,
+	0x02, 0xae, 0x41, 0x32,
+	0xff, 0x21, 0xd3, 0x61,
+	0xff, 0xea, 0x46, 0x02,
+	0x02, 0x5c, 0x50, 0x31,
+	0x40, 0xea, 0x96, 0x00,
+	0x02, 0x56, 0xcc, 0x6d,
+	0x01, 0x55, 0xcc, 0x6d,
+	0x10, 0x92, 0xdf, 0x79,
+	0x10, 0x40, 0xe8, 0x69,
+	0x01, 0x56, 0xe8, 0x79,
+	0xff, 0x97, 0x07, 0x78,
+	0x13, 0xea, 0x50, 0x59,
+	0x13, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0x06, 0x40,
+	0xbf, 0x3a, 0x74, 0x08,
+	0x08, 0xea, 0x98, 0x00,
+	0x08, 0x57, 0xae, 0x00,
+	0x01, 0x93, 0x69, 0x32,
+	0x01, 0x94, 0x6b, 0x32,
+	0x40, 0xea, 0x66, 0x02,
+	0x08, 0x3c, 0x78, 0x00,
+	0x80, 0xea, 0x62, 0x02,
+	0x00, 0xe2, 0xb8, 0x5b,
+	0x01, 0x36, 0xc1, 0x31,
+	0x9f, 0xe0, 0x4c, 0x7c,
+	0x80, 0xe0, 0x0c, 0x72,
+	0xa0, 0xe0, 0x44, 0x72,
+	0xc0, 0xe0, 0x3a, 0x72,
+	0xe0, 0xe0, 0x74, 0x72,
+	0x01, 0xea, 0x50, 0x59,
+	0x01, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0xf8, 0x41,
+	0x80, 0x33, 0x13, 0x7a,
+	0x03, 0xea, 0x50, 0x59,
+	0x03, 0xea, 0x04, 0x00,
+	0xee, 0x00, 0x1a, 0x6a,
+	0x05, 0xea, 0xb4, 0x00,
+	0x33, 0xea, 0x44, 0x59,
+	0x33, 0xea, 0x00, 0x00,
+	0x02, 0xa8, 0x90, 0x32,
+	0x00, 0xe2, 0x6a, 0x59,
+	0xef, 0x96, 0xd5, 0x19,
+	0x00, 0xe2, 0x2a, 0x52,
+	0x09, 0x80, 0xe1, 0x30,
+	0x02, 0xea, 0x36, 0x00,
+	0xa8, 0xea, 0x32, 0x00,
+	0x00, 0xe2, 0x30, 0x42,
+	0x01, 0x96, 0xd1, 0x30,
+	0x10, 0x80, 0x89, 0x31,
+	0x20, 0xea, 0x32, 0x00,
+	0xbf, 0x33, 0x67, 0x0a,
+	0x20, 0x19, 0x32, 0x6a,
+	0x02, 0x4d, 0xf8, 0x69,
+	0x40, 0x33, 0x67, 0x02,
+	0x00, 0xe2, 0xf8, 0x41,
+	0x80, 0x33, 0xb5, 0x6a,
+	0x01, 0x44, 0x10, 0x33,
+	0x08, 0x92, 0x25, 0x03,
+	0x00, 0xe2, 0xf8, 0x41,
+	0x10, 0xea, 0x80, 0x00,
+	0x01, 0x31, 0xc5, 0x31,
+	0x80, 0xe2, 0x60, 0x62,
+	0x10, 0x92, 0x85, 0x6a,
+	0xc0, 0x94, 0xc5, 0x01,
+	0x40, 0x92, 0x51, 0x6a,
+	0xbf, 0xe2, 0xc4, 0x09,
+	0x20, 0x92, 0x65, 0x7a,
+	0x01, 0xe2, 0x88, 0x30,
+	0x00, 0xe2, 0xb8, 0x5b,
+	0xa0, 0x36, 0x6d, 0x62,
+	0x23, 0x92, 0x89, 0x08,
+	0x00, 0xe2, 0xb8, 0x5b,
+	0xa0, 0x36, 0x6d, 0x62,
+	0x00, 0xa8, 0x64, 0x42,
+	0xff, 0xe2, 0x64, 0x62,
+	0x00, 0xe2, 0x84, 0x42,
+	0x40, 0xea, 0x98, 0x00,
+	0x01, 0xe2, 0x88, 0x30,
+	0x00, 0xe2, 0xb8, 0x5b,
+	0xa0, 0x36, 0x43, 0x72,
+	0x40, 0xea, 0x98, 0x00,
+	0x01, 0x31, 0x89, 0x32,
+	0x08, 0xea, 0x62, 0x02,
+	0x00, 0xe2, 0xf8, 0x41,
+	0xe0, 0xea, 0xd4, 0x5b,
+	0x80, 0xe0, 0xc0, 0x6a,
+	0x04, 0xe0, 0x66, 0x73,
+	0x02, 0xe0, 0x96, 0x73,
+	0x00, 0xea, 0x1e, 0x73,
+	0x03, 0xe0, 0xa6, 0x73,
+	0x23, 0xe0, 0x96, 0x72,
+	0x08, 0xe0, 0xbc, 0x72,
+	0x00, 0xe2, 0xb8, 0x5b,
+	0x07, 0xea, 0x50, 0x59,
+	0x07, 0xea, 0x04, 0x00,
+	0x08, 0x42, 0xf9, 0x71,
+	0x04, 0x42, 0x93, 0x62,
+	0x01, 0x43, 0x89, 0x30,
+	0x00, 0xe2, 0x84, 0x42,
+	0x01, 0x44, 0xd4, 0x31,
+	0x00, 0xe2, 0x84, 0x42,
+	0x01, 0x00, 0x60, 0x32,
+	0x33, 0xea, 0x44, 0x59,
+	0x33, 0xea, 0x00, 0x00,
+	0x4c, 0x34, 0xc1, 0x28,
+	0x01, 0x64, 0xc0, 0x31,
+	0x00, 0x30, 0x45, 0x59,
+	0x01, 0x30, 0x01, 0x30,
+	0x01, 0xe0, 0xba, 0x7a,
+	0xa0, 0xea, 0xca, 0x5b,
+	0x01, 0xa0, 0xba, 0x62,
+	0x01, 0x84, 0xaf, 0x7a,
+	0x01, 0x95, 0xbd, 0x6a,
+	0x05, 0xea, 0x50, 0x59,
+	0x05, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0xbc, 0x42,
+	0x03, 0xea, 0x50, 0x59,
+	0x03, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0xbc, 0x42,
+	0x07, 0xea, 0xdc, 0x5b,
+	0x01, 0x44, 0xd4, 0x31,
+	0x00, 0xe2, 0xf8, 0x41,
+	0x3f, 0xe0, 0x6a, 0x0a,
+	0xc0, 0x34, 0xc1, 0x09,
+	0x00, 0x35, 0x51, 0x01,
+	0xff, 0xea, 0x52, 0x09,
+	0x30, 0x34, 0xc5, 0x09,
+	0x3d, 0xe2, 0xc4, 0x29,
+	0xb8, 0xe2, 0xc4, 0x19,
+	0x01, 0xea, 0xc6, 0x01,
+	0x02, 0xe2, 0xc8, 0x31,
+	0x02, 0xec, 0x40, 0x31,
+	0xff, 0xa1, 0xdc, 0x72,
+	0x02, 0xe8, 0xda, 0x31,
+	0x02, 0xa0, 0x50, 0x31,
+	0x00, 0xe2, 0xfe, 0x42,
+	0x80, 0x33, 0x67, 0x02,
+	0x01, 0x44, 0xd4, 0x31,
+	0x00, 0xe2, 0xb8, 0x5b,
+	0x01, 0x33, 0x67, 0x02,
+	0xe0, 0x36, 0x19, 0x63,
+	0x02, 0x33, 0x67, 0x02,
+	0x20, 0x46, 0x12, 0x63,
+	0xff, 0xea, 0x52, 0x09,
+	0xa8, 0xea, 0xca, 0x5b,
+	0x04, 0x92, 0xf9, 0x7a,
+	0x01, 0x34, 0xc1, 0x31,
+	0x00, 0x93, 0xf9, 0x62,
+	0x01, 0x35, 0xc1, 0x31,
+	0x00, 0x94, 0x03, 0x73,
+	0x01, 0xa9, 0x52, 0x11,
+	0xff, 0xa9, 0xee, 0x6a,
+	0x00, 0xe2, 0x12, 0x43,
+	0x10, 0x33, 0x67, 0x02,
+	0x04, 0x92, 0x13, 0x7b,
+	0xfb, 0x92, 0x25, 0x0b,
+	0xff, 0xea, 0x66, 0x0a,
+	0x01, 0xa4, 0x0d, 0x6b,
+	0x02, 0xa8, 0x90, 0x32,
+	0x00, 0xe2, 0x6a, 0x59,
+	0x10, 0x92, 0xbd, 0x7a,
+	0xff, 0xea, 0xdc, 0x5b,
+	0x00, 0xe2, 0xbc, 0x42,
+	0x04, 0xea, 0x50, 0x59,
+	0x04, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0xbc, 0x42,
+	0x04, 0xea, 0x50, 0x59,
+	0x04, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0xf8, 0x41,
+	0x08, 0x92, 0xb5, 0x7a,
+	0xc0, 0x33, 0x29, 0x7b,
+	0x80, 0x33, 0xb5, 0x6a,
+	0xff, 0x88, 0x29, 0x6b,
+	0x40, 0x33, 0xb5, 0x6a,
+	0x10, 0x92, 0x2f, 0x7b,
+	0x0a, 0xea, 0x50, 0x59,
+	0x0a, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0x4e, 0x5b,
+	0x00, 0xe2, 0x82, 0x43,
+	0x50, 0x4b, 0x36, 0x6b,
+	0xbf, 0x3a, 0x74, 0x08,
+	0x01, 0xe0, 0xf4, 0x31,
+	0xff, 0xea, 0xc0, 0x09,
+	0x01, 0x2e, 0x5d, 0x1a,
+	0x00, 0x2f, 0x5f, 0x22,
+	0x04, 0x47, 0x8f, 0x02,
+	0x01, 0xfa, 0xc0, 0x35,
+	0x02, 0xa8, 0x84, 0x32,
+	0x02, 0xea, 0xb4, 0x00,
+	0x33, 0xea, 0x44, 0x59,
+	0x33, 0xea, 0x00, 0x00,
+	0x02, 0x42, 0x51, 0x31,
+	0xff, 0x90, 0x65, 0x68,
+	0xff, 0x88, 0x5b, 0x6b,
+	0x01, 0xa4, 0x57, 0x6b,
+	0x02, 0xa4, 0x5f, 0x6b,
+	0x01, 0x84, 0x5f, 0x7b,
+	0x02, 0x28, 0x19, 0x33,
+	0x02, 0xa8, 0x50, 0x36,
+	0xff, 0x88, 0x5f, 0x73,
+	0x00, 0xe2, 0x32, 0x5b,
+	0x02, 0xa8, 0x20, 0x33,
+	0x02, 0x2c, 0x19, 0x33,
+	0x02, 0xa8, 0x58, 0x32,
+	0x04, 0xa4, 0x49, 0x07,
+	0xc0, 0x33, 0xb5, 0x6a,
+	0x04, 0x92, 0x25, 0x03,
+	0x20, 0x92, 0x83, 0x6b,
+	0x02, 0xa8, 0x40, 0x31,
+	0xc0, 0x34, 0xc1, 0x09,
+	0x00, 0x35, 0x51, 0x01,
+	0xff, 0xea, 0x52, 0x09,
+	0x30, 0x34, 0xc5, 0x09,
+	0x3d, 0xe2, 0xc4, 0x29,
+	0xb8, 0xe2, 0xc4, 0x19,
+	0x01, 0xea, 0xc6, 0x01,
+	0x02, 0xe2, 0xc8, 0x31,
+	0x02, 0xa0, 0xda, 0x31,
+	0x02, 0xa0, 0x50, 0x31,
+	0xf7, 0x57, 0xae, 0x08,
+	0x08, 0xea, 0x98, 0x00,
+	0x01, 0x44, 0xd4, 0x31,
+	0xee, 0x00, 0x8c, 0x6b,
+	0x02, 0xea, 0xb4, 0x00,
+	0x00, 0xe2, 0xb4, 0x5b,
+	0x09, 0x4c, 0x8e, 0x7b,
+	0x08, 0x4c, 0x06, 0x68,
+	0x0b, 0xea, 0x50, 0x59,
+	0x0b, 0xea, 0x04, 0x00,
+	0x01, 0x44, 0xd4, 0x31,
+	0x20, 0x33, 0xf9, 0x79,
+	0x00, 0xe2, 0x9e, 0x5b,
+	0x00, 0xe2, 0xf8, 0x41,
+	0x01, 0x84, 0xa3, 0x7b,
+	0x01, 0xa4, 0x49, 0x07,
+	0x08, 0x60, 0x30, 0x33,
+	0x08, 0x80, 0x41, 0x37,
+	0xdf, 0x33, 0x67, 0x0a,
+	0xee, 0x00, 0xb0, 0x6b,
+	0x05, 0xea, 0xb4, 0x00,
+	0x33, 0xea, 0x44, 0x59,
+	0x33, 0xea, 0x00, 0x00,
+	0x00, 0xe2, 0x6a, 0x59,
+	0x00, 0xe2, 0xbc, 0x42,
+	0x01, 0xea, 0x6c, 0x02,
+	0xc0, 0xea, 0x66, 0x06,
+	0xff, 0x42, 0xc4, 0x6b,
+	0x01, 0x41, 0xb8, 0x6b,
+	0x02, 0x41, 0xb8, 0x7b,
+	0xff, 0x42, 0xc4, 0x6b,
+	0x01, 0x41, 0xb8, 0x6b,
+	0x02, 0x41, 0xb8, 0x7b,
+	0xff, 0x42, 0xc4, 0x7b,
+	0x04, 0x4c, 0xb8, 0x6b,
+	0xe0, 0x41, 0x6c, 0x0e,
+	0x01, 0x44, 0xd4, 0x31,
+	0xff, 0x42, 0xcc, 0x7b,
+	0x04, 0x4c, 0xcc, 0x6b,
+	0xe0, 0x41, 0x6c, 0x0a,
+	0xe0, 0x36, 0xf9, 0x61,
+	0xff, 0xea, 0xca, 0x09,
+	0x01, 0xe2, 0xc8, 0x31,
+	0x01, 0x46, 0xda, 0x35,
+	0x01, 0x44, 0xd4, 0x35,
+	0x10, 0xea, 0x80, 0x00,
+	0x01, 0xe2, 0x62, 0x36,
+	0x04, 0xa6, 0xe4, 0x7b,
+	0xff, 0xea, 0x5a, 0x09,
+	0xff, 0xea, 0x4c, 0x0d,
+	0x01, 0xa6, 0x02, 0x6c,
+	0x10, 0xad, 0x64, 0x78,
+	0x80, 0xad, 0xfa, 0x6b,
+	0x08, 0xad, 0x64, 0x68,
+	0x04, 0x84, 0xf9, 0x30,
+	0x00, 0xea, 0x08, 0x81,
+	0xff, 0xea, 0xd4, 0x09,
+	0x02, 0x84, 0xf9, 0x88,
+	0x0d, 0xea, 0x5a, 0x01,
+	0x04, 0xa6, 0x4c, 0x05,
+	0x04, 0xa6, 0x64, 0x78,
+	0xff, 0xea, 0x5a, 0x09,
+	0x03, 0x84, 0x59, 0x89,
+	0x03, 0xea, 0x4c, 0x01,
+	0x80, 0x1a, 0x64, 0x78,
+	0x08, 0x19, 0x64, 0x78,
+	0x08, 0xb0, 0xe0, 0x30,
+	0x04, 0xb0, 0xe0, 0x30,
+	0x03, 0xb0, 0xf0, 0x30,
+	0x01, 0xb0, 0x06, 0x33,
+	0x7f, 0x83, 0xe9, 0x08,
+	0x04, 0xac, 0x58, 0x19,
+	0xff, 0xea, 0xc0, 0x09,
+	0x04, 0x84, 0x09, 0x9b,
+	0x00, 0x85, 0x0b, 0x23,
+	0x00, 0x86, 0x0d, 0x23,
+	0x00, 0x87, 0x0f, 0x23,
+	0x01, 0x84, 0xc5, 0x31,
+	0x80, 0x83, 0x25, 0x7c,
+	0x02, 0xe2, 0xc4, 0x01,
+	0xff, 0xea, 0x4c, 0x09,
+	0x01, 0xe2, 0x36, 0x30,
+	0xc8, 0x19, 0x32, 0x00,
+	0x88, 0x19, 0x32, 0x00,
+	0x01, 0xac, 0xd4, 0x99,
+	0x00, 0xe2, 0x64, 0x50,
+	0xfe, 0xa6, 0x4c, 0x0d,
+	0x0b, 0x98, 0xe1, 0x30,
+	0xfd, 0xa4, 0x49, 0x09,
+	0x80, 0xa3, 0x39, 0x7c,
+	0x02, 0xa4, 0x48, 0x01,
+	0x01, 0xa4, 0x36, 0x30,
+	0xa8, 0xea, 0x32, 0x00,
+	0xfd, 0xa4, 0x49, 0x0b,
+	0x05, 0xa3, 0x07, 0x33,
+	0x80, 0x83, 0x45, 0x6c,
+	0x02, 0xea, 0x4c, 0x05,
+	0xff, 0xea, 0x4c, 0x0d,
+	0x00, 0xe2, 0x3e, 0x59,
+	0x02, 0xa6, 0xe6, 0x6b,
+	0x80, 0xf9, 0xf2, 0x05,
+	0xc0, 0x33, 0x53, 0x7c,
+	0x03, 0xea, 0x50, 0x59,
+	0x03, 0xea, 0x04, 0x00,
+	0x20, 0x33, 0x77, 0x7c,
+	0x01, 0x84, 0x5d, 0x6c,
+	0x06, 0xea, 0x50, 0x59,
+	0x06, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0x7a, 0x44,
+	0x01, 0x00, 0x60, 0x32,
+	0xee, 0x00, 0x66, 0x6c,
+	0x05, 0xea, 0xb4, 0x00,
+	0x33, 0xea, 0x44, 0x59,
+	0x33, 0xea, 0x00, 0x00,
+	0x80, 0x3d, 0x7a, 0x00,
+	0xfc, 0x42, 0x68, 0x7c,
+	0x7f, 0x3d, 0x7a, 0x08,
+	0x00, 0x30, 0x45, 0x59,
+	0x01, 0x30, 0x01, 0x30,
+	0x09, 0xea, 0x50, 0x59,
+	0x09, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0xf8, 0x41,
+	0x01, 0xa4, 0x5d, 0x6c,
+	0x00, 0xe2, 0x30, 0x5c,
+	0x20, 0x33, 0x67, 0x02,
+	0x01, 0x00, 0x60, 0x32,
+	0x02, 0xa6, 0x82, 0x7c,
+	0x00, 0xe2, 0x46, 0x5c,
+	0x00, 0xe2, 0x56, 0x58,
+	0x00, 0xe2, 0x66, 0x58,
+	0x00, 0xe2, 0x3a, 0x58,
+	0x00, 0x30, 0x45, 0x59,
+	0x01, 0x30, 0x01, 0x30,
+	0x20, 0x19, 0x82, 0x6c,
+	0x00, 0xe2, 0xb2, 0x5c,
+	0x04, 0x19, 0x9c, 0x6c,
+	0x02, 0x19, 0x32, 0x00,
+	0x01, 0x84, 0x9d, 0x7c,
+	0x01, 0x1b, 0x96, 0x7c,
+	0x01, 0x1a, 0x9c, 0x6c,
+	0x00, 0xe2, 0x4c, 0x44,
+	0x80, 0x4b, 0xa2, 0x6c,
+	0x01, 0x4c, 0x9e, 0x7c,
+	0x03, 0x42, 0x4c, 0x6c,
+	0x00, 0xe2, 0xe0, 0x5b,
+	0x80, 0xf9, 0xf2, 0x01,
+	0x04, 0x33, 0xf9, 0x79,
+	0x00, 0xe2, 0xf8, 0x41,
+	0x08, 0x5d, 0xba, 0x6c,
+	0x00, 0xe2, 0x56, 0x58,
+	0x00, 0x30, 0x45, 0x59,
+	0x01, 0x30, 0x01, 0x30,
+	0x02, 0x1b, 0xaa, 0x7c,
+	0x08, 0x5d, 0xb8, 0x7c,
+	0x03, 0x68, 0x00, 0x37,
+	0x01, 0x84, 0x09, 0x07,
+	0x80, 0x1b, 0xc4, 0x7c,
+	0x80, 0x84, 0xc5, 0x6c,
+	0xff, 0x85, 0x0b, 0x1b,
+	0xff, 0x86, 0x0d, 0x23,
+	0xff, 0x87, 0x0f, 0x23,
+	0xf8, 0x1b, 0x08, 0x0b,
+	0xff, 0xea, 0x06, 0x0b,
+	0x03, 0x68, 0x00, 0x37,
+	0x00, 0xe2, 0xc4, 0x58,
+	0x10, 0xea, 0x18, 0x00,
+	0xf9, 0xd9, 0xb2, 0x0d,
+	0x01, 0xd9, 0xb2, 0x05,
+	0x01, 0x52, 0x48, 0x31,
+	0x20, 0xa4, 0xee, 0x7c,
+	0x20, 0x5b, 0xee, 0x7c,
+	0x80, 0xf9, 0xfc, 0x7c,
+	0x02, 0xea, 0xb4, 0x00,
+	0x11, 0x00, 0x00, 0x10,
+	0x04, 0x19, 0x08, 0x7d,
+	0xdf, 0x19, 0x32, 0x08,
+	0x60, 0x5b, 0xe6, 0x6c,
+	0x01, 0x4c, 0xe2, 0x7c,
+	0x20, 0x19, 0x32, 0x00,
+	0x01, 0xd9, 0xb2, 0x05,
+	0x02, 0xea, 0xb4, 0x00,
+	0x01, 0xd9, 0xb2, 0x05,
+	0x10, 0x5b, 0x00, 0x6d,
+	0x08, 0x5b, 0x0a, 0x6d,
+	0x20, 0x5b, 0xfa, 0x6c,
+	0x02, 0x5b, 0x2a, 0x6d,
+	0x0e, 0xea, 0x50, 0x59,
+	0x0e, 0xea, 0x04, 0x00,
+	0x80, 0xf9, 0xea, 0x6c,
+	0xdf, 0x5c, 0xb8, 0x08,
+	0x01, 0xd9, 0xb2, 0x05,
+	0x01, 0xa4, 0xe5, 0x6d,
+	0x00, 0xe2, 0x30, 0x5c,
+	0x00, 0xe2, 0x34, 0x5d,
+	0x01, 0x90, 0x21, 0x1b,
+	0x01, 0xd9, 0xb2, 0x05,
+	0x00, 0xe2, 0x32, 0x5b,
+	0xf3, 0x96, 0xd5, 0x19,
+	0x00, 0xe2, 0x18, 0x55,
+	0x80, 0x96, 0x19, 0x6d,
+	0x0f, 0xea, 0x50, 0x59,
+	0x0f, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0x20, 0x45,
+	0x04, 0x8c, 0xe1, 0x30,
+	0x01, 0xea, 0xf2, 0x00,
+	0x02, 0xea, 0x36, 0x00,
+	0xa8, 0xea, 0x32, 0x00,
+	0xff, 0x97, 0x27, 0x7d,
+	0x14, 0xea, 0x50, 0x59,
+	0x14, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0x96, 0x5d,
+	0x01, 0xd9, 0xb2, 0x05,
+	0x09, 0x80, 0xe1, 0x30,
+	0x02, 0xea, 0x36, 0x00,
+	0xa8, 0xea, 0x32, 0x00,
+	0x00, 0xe2, 0x8e, 0x5d,
+	0x01, 0xd9, 0xb2, 0x05,
+	0x02, 0xa6, 0x44, 0x7d,
+	0x00, 0xe2, 0x3e, 0x59,
+	0x20, 0x5b, 0x52, 0x6d,
+	0xfc, 0x42, 0x3e, 0x7d,
+	0x10, 0x40, 0x40, 0x6d,
+	0x20, 0x4d, 0x42, 0x7d,
+	0x08, 0x5d, 0x52, 0x6d,
+	0x02, 0xa6, 0xe6, 0x6b,
+	0x00, 0xe2, 0x3e, 0x59,
+	0x20, 0x5b, 0x52, 0x6d,
+	0x01, 0x1b, 0x72, 0x6d,
+	0xfc, 0x42, 0x4e, 0x7d,
+	0x10, 0x40, 0x50, 0x6d,
+	0x20, 0x4d, 0x64, 0x78,
+	0x08, 0x5d, 0x64, 0x78,
+	0x02, 0x19, 0x32, 0x00,
+	0x01, 0x5b, 0x40, 0x31,
+	0x00, 0xe2, 0xb2, 0x5c,
+	0x00, 0xe2, 0x9e, 0x5b,
+	0x20, 0xea, 0xb6, 0x00,
+	0x00, 0xe2, 0xe0, 0x5b,
+	0x20, 0x5c, 0xb8, 0x00,
+	0x04, 0x19, 0x68, 0x6d,
+	0x01, 0x1a, 0x68, 0x6d,
+	0x00, 0xe2, 0x3e, 0x59,
+	0x01, 0x1a, 0x64, 0x78,
+	0x80, 0xf9, 0xf2, 0x01,
+	0x20, 0xa0, 0xcc, 0x7d,
+	0xff, 0x90, 0x21, 0x1b,
+	0x08, 0x92, 0x43, 0x6b,
+	0x02, 0xea, 0xb4, 0x04,
+	0x01, 0xa4, 0x49, 0x03,
+	0x40, 0x5b, 0x82, 0x6d,
+	0x00, 0xe2, 0x3e, 0x59,
+	0x40, 0x5b, 0x82, 0x6d,
+	0x04, 0x5d, 0xe6, 0x7d,
+	0x01, 0x1a, 0xe6, 0x7d,
+	0x20, 0x4d, 0x64, 0x78,
+	0x40, 0x5b, 0xcc, 0x7d,
+	0x04, 0x5d, 0xe6, 0x7d,
+	0x01, 0x1a, 0xe6, 0x7d,
+	0x80, 0xf9, 0xf2, 0x01,
+	0xff, 0x90, 0x21, 0x1b,
+	0x08, 0x92, 0x43, 0x6b,
+	0x02, 0xea, 0xb4, 0x04,
+	0x00, 0xe2, 0x3e, 0x59,
+	0x01, 0x1b, 0x64, 0x78,
+	0x80, 0xf9, 0xf2, 0x01,
+	0x02, 0xea, 0xb4, 0x04,
+	0x00, 0xe2, 0x3e, 0x59,
+	0x01, 0x1b, 0xaa, 0x6d,
+	0x40, 0x5b, 0xb8, 0x7d,
+	0x01, 0x1b, 0xaa, 0x6d,
+	0x02, 0x19, 0x32, 0x00,
+	0x01, 0x1a, 0x64, 0x78,
+	0x80, 0xf9, 0xf2, 0x01,
+	0xff, 0xea, 0x10, 0x03,
+	0x08, 0x92, 0x25, 0x03,
+	0x00, 0xe2, 0x42, 0x43,
+	0x01, 0x1a, 0xb4, 0x7d,
+	0x40, 0x5b, 0xb0, 0x7d,
+	0x01, 0x1a, 0x9e, 0x6d,
+	0xfc, 0x42, 0x64, 0x78,
+	0x01, 0x1a, 0xb8, 0x6d,
+	0x10, 0xea, 0x50, 0x59,
+	0x10, 0xea, 0x04, 0x00,
+	0xfc, 0x42, 0x64, 0x78,
+	0x10, 0x40, 0xbe, 0x6d,
+	0x20, 0x4d, 0x64, 0x78,
+	0x40, 0x5b, 0x9e, 0x6d,
+	0x01, 0x1a, 0x64, 0x78,
+	0x01, 0x90, 0x21, 0x1b,
+	0x30, 0x3f, 0xc0, 0x09,
+	0x30, 0xe0, 0x64, 0x60,
+	0x40, 0x4b, 0x64, 0x68,
+	0xff, 0xea, 0x52, 0x01,
+	0xee, 0x00, 0xd2, 0x6d,
+	0x80, 0xf9, 0xf2, 0x01,
+	0xff, 0x90, 0x21, 0x1b,
+	0x02, 0xea, 0xb4, 0x00,
+	0x20, 0xea, 0x9a, 0x00,
+	0xf3, 0x42, 0xde, 0x6d,
+	0x12, 0xea, 0x50, 0x59,
+	0x12, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0xf8, 0x41,
+	0x0d, 0xea, 0x50, 0x59,
+	0x0d, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0xf8, 0x41,
+	0x01, 0x90, 0x21, 0x1b,
+	0x11, 0xea, 0x50, 0x59,
+	0x11, 0xea, 0x04, 0x00,
+	0x00, 0xe2, 0x32, 0x5b,
+	0x08, 0x5a, 0xb4, 0x00,
+	0x00, 0xe2, 0x0c, 0x5e,
+	0xa8, 0xea, 0x32, 0x00,
+	0x00, 0xe2, 0x3e, 0x59,
+	0x80, 0x1a, 0xfa, 0x7d,
+	0x00, 0xe2, 0x0c, 0x5e,
+	0x80, 0x19, 0x32, 0x00,
+	0x40, 0x5b, 0x00, 0x6e,
+	0x08, 0x5a, 0x00, 0x7e,
+	0x20, 0x4d, 0x64, 0x78,
+	0x02, 0x84, 0x09, 0x03,
+	0x40, 0x5b, 0xcc, 0x7d,
+	0xff, 0x90, 0x21, 0x1b,
+	0x80, 0xf9, 0xf2, 0x01,
+	0x08, 0x92, 0x43, 0x6b,
+	0x02, 0xea, 0xb4, 0x04,
+	0x01, 0x38, 0xe1, 0x30,
+	0x05, 0x39, 0xe3, 0x98,
+	0x01, 0xe0, 0xf4, 0x31,
+	0xff, 0xea, 0xc0, 0x09,
+	0x00, 0x3a, 0xe5, 0x20,
+	0x00, 0x3b, 0xe7, 0x20,
+	0x01, 0xfa, 0xc0, 0x31,
+	0x04, 0xea, 0xe8, 0x30,
+	0xff, 0xea, 0xf0, 0x08,
+	0x02, 0xea, 0xf2, 0x00,
+	0xff, 0xea, 0xf4, 0x0c
+};
+
+typedef int ahd_patch_func_t (struct ahd_softc *ahd);
+static ahd_patch_func_t ahd_patch22_func;
+
+static int
+ahd_patch22_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch21_func;
+
+static int
+ahd_patch21_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) == 0);
+}
+
+static ahd_patch_func_t ahd_patch20_func;
+
+static int
+ahd_patch20_func(struct ahd_softc *ahd)
+{
+	return ((ahd->features & AHD_RTI) == 0);
+}
+
+static ahd_patch_func_t ahd_patch19_func;
+
+static int
+ahd_patch19_func(struct ahd_softc *ahd)
+{
+	return ((ahd->flags & AHD_INITIATORROLE) != 0);
+}
+
+static ahd_patch_func_t ahd_patch18_func;
+
+static int
+ahd_patch18_func(struct ahd_softc *ahd)
+{
+	return ((ahd->flags & AHD_TARGETROLE) != 0);
+}
+
+static ahd_patch_func_t ahd_patch17_func;
+
+static int
+ahd_patch17_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch16_func;
+
+static int
+ahd_patch16_func(struct ahd_softc *ahd)
+{
+	return ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0);
+}
+
+static ahd_patch_func_t ahd_patch15_func;
+
+static int
+ahd_patch15_func(struct ahd_softc *ahd)
+{
+	return ((ahd->flags & AHD_39BIT_ADDRESSING) != 0);
+}
+
+static ahd_patch_func_t ahd_patch14_func;
+
+static int
+ahd_patch14_func(struct ahd_softc *ahd)
+{
+	return ((ahd->flags & AHD_64BIT_ADDRESSING) != 0);
+}
+
+static ahd_patch_func_t ahd_patch13_func;
+
+static int
+ahd_patch13_func(struct ahd_softc *ahd)
+{
+	return ((ahd->features & AHD_NEW_DFCNTRL_OPTS) == 0);
+}
+
+static ahd_patch_func_t ahd_patch12_func;
+
+static int
+ahd_patch12_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_REG_SLOW_SETTLE_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch11_func;
+
+static int
+ahd_patch11_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_EARLY_REQ_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch10_func;
+
+static int
+ahd_patch10_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_BUSFREEREV_BUG) == 0);
+}
+
+static ahd_patch_func_t ahd_patch9_func;
+
+static int
+ahd_patch9_func(struct ahd_softc *ahd)
+{
+	return ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch8_func;
+
+static int
+ahd_patch8_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_LQO_ATNO_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch7_func;
+
+static int
+ahd_patch7_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch6_func;
+
+static int
+ahd_patch6_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_NONPACKFIFO_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch5_func;
+
+static int
+ahd_patch5_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch4_func;
+
+static int
+ahd_patch4_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_PKT_LUN_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch3_func;
+
+static int
+ahd_patch3_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_FAINT_LED_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch2_func;
+
+static int
+ahd_patch2_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_SET_MODE_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch1_func;
+
+static int
+ahd_patch1_func(struct ahd_softc *ahd)
+{
+	return ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0);
+}
+
+static ahd_patch_func_t ahd_patch0_func;
+
+static int
+ahd_patch0_func(struct ahd_softc *ahd)
+{
+	return (0);
+}
+
+static struct patch {
+	ahd_patch_func_t		*patch_func;
+	uint32_t		 begin		:10,
+				 skip_instr	:10,
+				 skip_patch	:12;
+} patches[] = {
+	{ ahd_patch1_func, 0, 3, 3 },
+	{ ahd_patch1_func, 1, 1, 2 },
+	{ ahd_patch0_func, 2, 1, 1 },
+	{ ahd_patch1_func, 3, 3, 3 },
+	{ ahd_patch1_func, 4, 1, 2 },
+	{ ahd_patch0_func, 5, 1, 1 },
+	{ ahd_patch2_func, 6, 1, 2 },
+	{ ahd_patch0_func, 7, 1, 1 },
+	{ ahd_patch3_func, 20, 5, 1 },
+	{ ahd_patch2_func, 29, 1, 2 },
+	{ ahd_patch0_func, 30, 1, 1 },
+	{ ahd_patch1_func, 37, 1, 2 },
+	{ ahd_patch0_func, 38, 1, 1 },
+	{ ahd_patch2_func, 43, 1, 2 },
+	{ ahd_patch0_func, 44, 1, 1 },
+	{ ahd_patch2_func, 47, 1, 2 },
+	{ ahd_patch0_func, 48, 1, 1 },
+	{ ahd_patch2_func, 51, 1, 2 },
+	{ ahd_patch0_func, 52, 1, 1 },
+	{ ahd_patch2_func, 65, 1, 2 },
+	{ ahd_patch0_func, 66, 1, 1 },
+	{ ahd_patch2_func, 69, 1, 2 },
+	{ ahd_patch0_func, 70, 1, 1 },
+	{ ahd_patch1_func, 73, 1, 2 },
+	{ ahd_patch0_func, 74, 1, 1 },
+	{ ahd_patch4_func, 107, 1, 1 },
+	{ ahd_patch2_func, 162, 6, 1 },
+	{ ahd_patch1_func, 168, 2, 1 },
+	{ ahd_patch5_func, 170, 1, 1 },
+	{ ahd_patch2_func, 179, 1, 2 },
+	{ ahd_patch0_func, 180, 1, 1 },
+	{ ahd_patch6_func, 181, 2, 2 },
+	{ ahd_patch0_func, 183, 6, 3 },
+	{ ahd_patch2_func, 186, 1, 2 },
+	{ ahd_patch0_func, 187, 1, 1 },
+	{ ahd_patch2_func, 190, 1, 2 },
+	{ ahd_patch0_func, 191, 1, 1 },
+	{ ahd_patch7_func, 193, 2, 1 },
+	{ ahd_patch5_func, 201, 16, 2 },
+	{ ahd_patch0_func, 217, 1, 1 },
+	{ ahd_patch8_func, 237, 2, 1 },
+	{ ahd_patch1_func, 241, 1, 2 },
+	{ ahd_patch0_func, 242, 1, 1 },
+	{ ahd_patch7_func, 245, 2, 1 },
+	{ ahd_patch1_func, 259, 1, 2 },
+	{ ahd_patch0_func, 260, 1, 1 },
+	{ ahd_patch1_func, 263, 1, 2 },
+	{ ahd_patch0_func, 264, 1, 1 },
+	{ ahd_patch2_func, 267, 1, 2 },
+	{ ahd_patch0_func, 268, 1, 1 },
+	{ ahd_patch1_func, 323, 1, 2 },
+	{ ahd_patch0_func, 324, 1, 1 },
+	{ ahd_patch2_func, 332, 1, 2 },
+	{ ahd_patch0_func, 333, 1, 1 },
+	{ ahd_patch2_func, 336, 1, 2 },
+	{ ahd_patch0_func, 337, 1, 1 },
+	{ ahd_patch1_func, 343, 1, 2 },
+	{ ahd_patch0_func, 344, 1, 1 },
+	{ ahd_patch1_func, 346, 1, 2 },
+	{ ahd_patch0_func, 347, 1, 1 },
+	{ ahd_patch9_func, 366, 1, 1 },
+	{ ahd_patch9_func, 369, 1, 1 },
+	{ ahd_patch9_func, 371, 1, 1 },
+	{ ahd_patch9_func, 383, 1, 1 },
+	{ ahd_patch1_func, 393, 1, 2 },
+	{ ahd_patch0_func, 394, 1, 1 },
+	{ ahd_patch1_func, 396, 1, 2 },
+	{ ahd_patch0_func, 397, 1, 1 },
+	{ ahd_patch1_func, 405, 1, 2 },
+	{ ahd_patch0_func, 406, 1, 1 },
+	{ ahd_patch2_func, 419, 1, 2 },
+	{ ahd_patch0_func, 420, 1, 1 },
+	{ ahd_patch10_func, 450, 1, 1 },
+	{ ahd_patch1_func, 457, 1, 2 },
+	{ ahd_patch0_func, 458, 1, 1 },
+	{ ahd_patch2_func, 470, 1, 2 },
+	{ ahd_patch0_func, 471, 1, 1 },
+	{ ahd_patch11_func, 476, 6, 2 },
+	{ ahd_patch0_func, 482, 1, 1 },
+	{ ahd_patch12_func, 505, 1, 1 },
+	{ ahd_patch13_func, 514, 1, 1 },
+	{ ahd_patch14_func, 515, 1, 2 },
+	{ ahd_patch0_func, 516, 1, 1 },
+	{ ahd_patch15_func, 519, 1, 1 },
+	{ ahd_patch14_func, 520, 1, 1 },
+	{ ahd_patch16_func, 531, 1, 2 },
+	{ ahd_patch0_func, 532, 1, 1 },
+	{ ahd_patch1_func, 551, 1, 2 },
+	{ ahd_patch0_func, 552, 1, 1 },
+	{ ahd_patch1_func, 555, 1, 2 },
+	{ ahd_patch0_func, 556, 1, 1 },
+	{ ahd_patch2_func, 561, 1, 2 },
+	{ ahd_patch0_func, 562, 1, 1 },
+	{ ahd_patch2_func, 566, 1, 2 },
+	{ ahd_patch0_func, 567, 1, 1 },
+	{ ahd_patch1_func, 568, 1, 2 },
+	{ ahd_patch0_func, 569, 1, 1 },
+	{ ahd_patch2_func, 580, 1, 2 },
+	{ ahd_patch0_func, 581, 1, 1 },
+	{ ahd_patch17_func, 585, 1, 1 },
+	{ ahd_patch18_func, 590, 1, 1 },
+	{ ahd_patch19_func, 591, 2, 1 },
+	{ ahd_patch18_func, 595, 1, 2 },
+	{ ahd_patch0_func, 596, 1, 1 },
+	{ ahd_patch2_func, 599, 1, 2 },
+	{ ahd_patch0_func, 600, 1, 1 },
+	{ ahd_patch2_func, 615, 1, 2 },
+	{ ahd_patch0_func, 616, 1, 1 },
+	{ ahd_patch20_func, 617, 14, 1 },
+	{ ahd_patch1_func, 635, 1, 2 },
+	{ ahd_patch0_func, 636, 1, 1 },
+	{ ahd_patch20_func, 637, 1, 1 },
+	{ ahd_patch1_func, 649, 1, 2 },
+	{ ahd_patch0_func, 650, 1, 1 },
+	{ ahd_patch1_func, 657, 1, 2 },
+	{ ahd_patch0_func, 658, 1, 1 },
+	{ ahd_patch17_func, 681, 1, 1 },
+	{ ahd_patch17_func, 719, 1, 1 },
+	{ ahd_patch1_func, 730, 1, 2 },
+	{ ahd_patch0_func, 731, 1, 1 },
+	{ ahd_patch1_func, 748, 1, 2 },
+	{ ahd_patch0_func, 749, 1, 1 },
+	{ ahd_patch1_func, 751, 1, 2 },
+	{ ahd_patch0_func, 752, 1, 1 },
+	{ ahd_patch1_func, 755, 1, 2 },
+	{ ahd_patch0_func, 756, 1, 1 },
+	{ ahd_patch21_func, 758, 1, 2 },
+	{ ahd_patch0_func, 759, 2, 1 },
+	{ ahd_patch22_func, 762, 4, 2 },
+	{ ahd_patch0_func, 766, 1, 1 },
+	{ ahd_patch22_func, 774, 11, 1 }
+};
+
+static struct cs {
+	uint16_t	begin;
+	uint16_t	end;
+} critical_sections[] = {
+	{ 11, 12 },
+	{ 13, 14 },
+	{ 29, 42 },
+	{ 56, 59 },
+	{ 101, 128 },
+	{ 129, 157 },
+	{ 159, 162 },
+	{ 170, 178 },
+	{ 201, 250 },
+	{ 681, 697 },
+	{ 697, 711 },
+	{ 721, 725 }
+};
+
+static const int num_critical_sections = sizeof(critical_sections)
+				       / sizeof(*critical_sections);
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
new file mode 100644
index 0000000..8ff16fd
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -0,0 +1,1352 @@
+/*
+ * Core definitions and data structures shareable across OS platforms.
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.h#79 $
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _AIC7XXX_H_
+#define _AIC7XXX_H_
+
+/* Register Definitions */
+#include "aic7xxx_reg.h"
+
+/************************* Forward Declarations *******************************/
+struct ahc_platform_data;
+struct scb_platform_data;
+struct seeprom_descriptor;
+
+/****************************** Useful Macros *********************************/
+#ifndef MAX
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array))
+
+#define ALL_CHANNELS '\0'
+#define ALL_TARGETS_MASK 0xFFFF
+#define INITIATOR_WILDCARD	(~0)
+
+#define SCSIID_TARGET(ahc, scsiid) \
+	(((scsiid) & ((((ahc)->features & AHC_TWIN) != 0) ? TWIN_TID : TID)) \
+	>> TID_SHIFT)
+#define SCSIID_OUR_ID(scsiid) \
+	((scsiid) & OID)
+#define SCSIID_CHANNEL(ahc, scsiid) \
+	((((ahc)->features & AHC_TWIN) != 0) \
+        ? ((((scsiid) & TWIN_CHNLB) != 0) ? 'B' : 'A') \
+       : 'A')
+#define	SCB_IS_SCSIBUS_B(ahc, scb) \
+	(SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid) == 'B')
+#define	SCB_GET_OUR_ID(scb) \
+	SCSIID_OUR_ID((scb)->hscb->scsiid)
+#define	SCB_GET_TARGET(ahc, scb) \
+	SCSIID_TARGET((ahc), (scb)->hscb->scsiid)
+#define	SCB_GET_CHANNEL(ahc, scb) \
+	SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid)
+#define	SCB_GET_LUN(scb) \
+	((scb)->hscb->lun & LID)
+#define SCB_GET_TARGET_OFFSET(ahc, scb)	\
+	(SCB_GET_TARGET(ahc, scb) + (SCB_IS_SCSIBUS_B(ahc, scb) ? 8 : 0))
+#define SCB_GET_TARGET_MASK(ahc, scb) \
+	(0x01 << (SCB_GET_TARGET_OFFSET(ahc, scb)))
+#ifdef AHC_DEBUG
+#define SCB_IS_SILENT(scb)					\
+	((ahc_debug & AHC_SHOW_MASKED_ERRORS) == 0		\
+      && (((scb)->flags & SCB_SILENT) != 0))
+#else
+#define SCB_IS_SILENT(scb)					\
+	(((scb)->flags & SCB_SILENT) != 0)
+#endif
+#define TCL_TARGET_OFFSET(tcl) \
+	((((tcl) >> 4) & TID) >> 4)
+#define TCL_LUN(tcl) \
+	(tcl & (AHC_NUM_LUNS - 1))
+#define BUILD_TCL(scsiid, lun) \
+	((lun) | (((scsiid) & TID) << 4))
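+
+/*
+ * Editor's illustrative sketch (not part of the driver, compiled out):
+ * how the TCL helpers above compose.  BUILD_TCL() packs the lun into the
+ * low bits and the target id bits of scsiid (masked by TID, assumed to
+ * come from aic7xxx_reg.h) into the upper nibbles; TCL_LUN() and
+ * TCL_TARGET_OFFSET() recover them.  The helper name is hypothetical.
+ */
+#if 0
+static inline u_int
+ahc_example_tcl_lun(u_int scsiid, u_int lun)
+{
+	u_int tcl = BUILD_TCL(scsiid, lun);
+
+	/* TCL_TARGET_OFFSET(tcl) yields the target portion of scsiid. */
+	return (TCL_LUN(tcl));	/* == (lun & (AHC_NUM_LUNS - 1)) */
+}
+#endif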
+
+#ifndef	AHC_TARGET_MODE
+#undef	AHC_TMODE_ENABLE
+#define	AHC_TMODE_ENABLE 0
+#endif
+
+/**************************** Driver Constants ********************************/
+/*
+ * The maximum number of supported targets.
+ */
+#define AHC_NUM_TARGETS 16
+
+/*
+ * The maximum number of supported luns.
+ * The identify message only supports 64 luns in SPI3.
+ * You can have 2^64 luns when information unit transfers are enabled,
+ * but it is doubtful this driver will ever support IUTs.
+ */
+#define AHC_NUM_LUNS 64
+
+/*
+ * The maximum transfer per S/G segment.
+ */
+#define AHC_MAXTRANSFER_SIZE	 0x00ffffff	/* limited by 24bit counter */
+
+/*
+ * The maximum amount of SCB storage in hardware on a controller.
+ * This value represents an upper bound.  Controllers vary in the number
+ * they actually support.
+ */
+#define AHC_SCB_MAX	255
+
+/*
+ * The maximum number of concurrent transactions supported per driver instance.
+ * Sequencer Control Blocks (SCBs) store per-transaction information.  Although
+ * the space for SCBs on the host adapter varies by model, the driver will
+ * page the SCBs between host and controller memory as needed.  We are limited
+ * to 253 because:
+ * 	1) The 8bit nature of the RISC engine holds us to an 8bit value.
+ * 	2) We reserve one value, 255, to represent the invalid element.
+ *	3) Our input queue scheme requires one SCB to always be reserved
+ *	   in advance of queuing any SCBs.  This takes us down to 254.
+ *	4) To handle our output queue correctly on machines that only
+ * 	   support 32bit stores, we must clear the array 4 bytes at a
+ *	   time.  To avoid colliding with a DMA write from the sequencer,
+ *	   we must be sure that 4 slots are empty when we write to clear
+ *	   the queue.  This reduces us to 253 SCBs: 1 that just completed
+ *	   and the known three additional empty slots in the queue that
+ *	   precede it.
+ */
+#define AHC_MAX_QUEUE	253
+
+/*
+ * The maximum amount of SCB storage we allocate in host memory.  This
+ * number should reflect the 1 additional SCB we require to handle our
+ * qinfifo mechanism.
+ */
+#define AHC_SCB_MAX_ALLOC (AHC_MAX_QUEUE+1)
+
+/*
+ * Ring Buffer of incoming target commands.
+ * We allocate 256 to simplify the logic in the sequencer
+ * by using the natural wrap point of an 8bit counter.
+ */
+#define AHC_TMODE_CMDS	256
+
+/* Reset line assertion time in us */
+#define AHC_BUSRESET_DELAY	25
+
+/******************* Chip Characteristics/Operating Settings  *****************/
+/*
+ * Chip Type
+ * The chip order is from least sophisticated to most sophisticated.
+ */
+typedef enum {
+	AHC_NONE	= 0x0000,
+	AHC_CHIPID_MASK	= 0x00FF,
+	AHC_AIC7770	= 0x0001,
+	AHC_AIC7850	= 0x0002,
+	AHC_AIC7855	= 0x0003,
+	AHC_AIC7859	= 0x0004,
+	AHC_AIC7860	= 0x0005,
+	AHC_AIC7870	= 0x0006,
+	AHC_AIC7880	= 0x0007,
+	AHC_AIC7895	= 0x0008,
+	AHC_AIC7895C	= 0x0009,
+	AHC_AIC7890	= 0x000a,
+	AHC_AIC7896	= 0x000b,
+	AHC_AIC7892	= 0x000c,
+	AHC_AIC7899	= 0x000d,
+	AHC_VL		= 0x0100,	/* Bus type VL */
+	AHC_EISA	= 0x0200,	/* Bus type EISA */
+	AHC_PCI		= 0x0400,	/* Bus type PCI */
+	AHC_BUS_MASK	= 0x0F00
+} ahc_chip;
+
+/*
+ * Features available in each chip type.
+ */
+typedef enum {
+	AHC_FENONE	= 0x00000,
+	AHC_ULTRA	= 0x00001,	/* Supports 20MHz Transfers */
+	AHC_ULTRA2	= 0x00002,	/* Supports 40MHz Transfers */
+	AHC_WIDE  	= 0x00004,	/* Wide Channel */
+	AHC_TWIN	= 0x00008,	/* Twin Channel */
+	AHC_MORE_SRAM	= 0x00010,	/* 80 bytes instead of 64 */
+	AHC_CMD_CHAN	= 0x00020,	/* Has a Command DMA Channel */
+	AHC_QUEUE_REGS	= 0x00040,	/* Has Queue management registers */
+	AHC_SG_PRELOAD	= 0x00080,	/* Can perform auto-SG preload */
+	AHC_SPIOCAP	= 0x00100,	/* Has a Serial Port I/O Cap Register */
+	AHC_MULTI_TID	= 0x00200,	/* Has bitmask of TIDs for select-in */
+	AHC_HS_MAILBOX	= 0x00400,	/* Has HS_MAILBOX register */
+	AHC_DT		= 0x00800,	/* Double Transition transfers */
+	AHC_NEW_TERMCTL	= 0x01000,	/* Newer termination scheme */
+	AHC_MULTI_FUNC	= 0x02000,	/* Multi-Function Twin Channel Device */
+	AHC_LARGE_SCBS	= 0x04000,	/* 64byte SCBs */
+	AHC_AUTORATE	= 0x08000,	/* Automatic update of SCSIRATE/OFFSET*/
+	AHC_AUTOPAUSE	= 0x10000,	/* Automatic pause on register access */
+	AHC_TARGETMODE	= 0x20000,	/* Has tested target mode support */
+	AHC_MULTIROLE	= 0x40000,	/* Space for two roles at a time */
+	AHC_REMOVABLE	= 0x80000,	/* Hot-Swap supported */
+	AHC_AIC7770_FE	= AHC_FENONE,
+	/*
+	 * The real 7850 does not support Ultra modes, but there are
+	 * several cards that use the generic 7850 PCI ID even though
+	 * they are using an Ultra capable chip (7859/7860).  We start
+	 * out with the AHC_ULTRA feature set and then check the DEVSTATUS
+	 * register to determine if the capability is really present.
+	 */
+	AHC_AIC7850_FE	= AHC_SPIOCAP|AHC_AUTOPAUSE|AHC_TARGETMODE|AHC_ULTRA,
+	AHC_AIC7860_FE	= AHC_AIC7850_FE,
+	AHC_AIC7870_FE	= AHC_TARGETMODE,
+	AHC_AIC7880_FE	= AHC_AIC7870_FE|AHC_ULTRA,
+	/*
+	 * Although we have space for both the initiator and
+	 * target roles on ULTRA2 chips, we currently disable
+	 * the initiator role to allow multi-scsi-id target mode
+	 * configurations.  We can only respond on the same SCSI
+	 * ID as our initiator role if we allow initiator operation.
+	 * At some point, we should add a configuration knob to
+	 * allow both roles to be loaded.
+	 */
+	AHC_AIC7890_FE	= AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA2
+			  |AHC_QUEUE_REGS|AHC_SG_PRELOAD|AHC_MULTI_TID
+			  |AHC_HS_MAILBOX|AHC_NEW_TERMCTL|AHC_LARGE_SCBS
+			  |AHC_TARGETMODE,
+	AHC_AIC7892_FE	= AHC_AIC7890_FE|AHC_DT|AHC_AUTORATE|AHC_AUTOPAUSE,
+	AHC_AIC7895_FE	= AHC_AIC7880_FE|AHC_MORE_SRAM|AHC_AUTOPAUSE
+			  |AHC_CMD_CHAN|AHC_MULTI_FUNC|AHC_LARGE_SCBS,
+	AHC_AIC7895C_FE	= AHC_AIC7895_FE|AHC_MULTI_TID,
+	AHC_AIC7896_FE	= AHC_AIC7890_FE|AHC_MULTI_FUNC,
+	AHC_AIC7899_FE	= AHC_AIC7892_FE|AHC_MULTI_FUNC
+} ahc_feature;
+
+/*
+ * Bugs in the silicon that we work around in software.
+ */
+typedef enum {
+	AHC_BUGNONE		= 0x00,
+	/*
+	 * On all chips prior to the U2 product line,
+	 * the WIDEODD S/G segment feature does not
+	 * work during scsi->HostBus transfers.
+	 */
+	AHC_TMODE_WIDEODD_BUG	= 0x01,
+	/*
+	 * On the aic7890/91 Rev 0 chips, the autoflush
+	 * feature does not work.  A manual flush of
+	 * the DMA FIFO is required.
+	 */
+	AHC_AUTOFLUSH_BUG	= 0x02,
+	/*
+	 * On many chips, cacheline streaming does not work.
+	 */
+	AHC_CACHETHEN_BUG	= 0x04,
+	/*
+	 * On the aic7896/97 chips, cacheline
+	 * streaming must be enabled.
+	 */
+	AHC_CACHETHEN_DIS_BUG	= 0x08,
+	/*
+	 * PCI 2.1 Retry failure on non-empty data fifo.
+	 */
+	AHC_PCI_2_1_RETRY_BUG	= 0x10,
+	/*
+	 * Controller does not handle cacheline residuals
+	 * properly on S/G segments if PCI MWI instructions
+	 * are allowed.
+	 */
+	AHC_PCI_MWI_BUG		= 0x20,
+	/*
+	 * An SCB upload using the SCB channel's
+	 * auto array entry copy feature may 
+	 * corrupt data.  This appears to only
+	 * occur on 66MHz systems.
+	 */
+	AHC_SCBCHAN_UPLOAD_BUG	= 0x40
+} ahc_bug;
+
+/*
+ * Configuration specific settings.
+ * The driver determines these settings by probing the
+ * chip/controller's configuration.
+ */
+typedef enum {
+	AHC_FNONE	      = 0x000,
+	AHC_PRIMARY_CHANNEL   = 0x003,  /*
+					 * The channel that should
+					 * be probed first.
+					 */
+	AHC_USEDEFAULTS	      = 0x004,  /*
+					 * For cards without a seeprom
+					 * or a BIOS to initialize the chip's
+					 * SRAM, we use the default target
+					 * settings.
+					 */
+	AHC_SEQUENCER_DEBUG   = 0x008,
+	AHC_SHARED_SRAM	      = 0x010,
+	AHC_LARGE_SEEPROM     = 0x020,  /* Uses C56_66 not C46 */
+	AHC_RESET_BUS_A	      = 0x040,
+	AHC_RESET_BUS_B	      = 0x080,
+	AHC_EXTENDED_TRANS_A  = 0x100,
+	AHC_EXTENDED_TRANS_B  = 0x200,
+	AHC_TERM_ENB_A	      = 0x400,
+	AHC_TERM_ENB_B	      = 0x800,
+	AHC_INITIATORROLE     = 0x1000,  /*
+					  * Allow initiator operations on
+					  * this controller.
+					  */
+	AHC_TARGETROLE	      = 0x2000,  /*
+					  * Allow target operations on this
+					  * controller.
+					  */
+	AHC_NEWEEPROM_FMT     = 0x4000,
+	AHC_RESOURCE_SHORTAGE = 0x8000,
+	AHC_TQINFIFO_BLOCKED  = 0x10000,  /* Blocked waiting for ATIOs */
+	AHC_INT50_SPEEDFLEX   = 0x20000,  /*
+					   * Internal 50pin connector
+					   * sits behind an aic3860
+					   */
+	AHC_SCB_BTT	      = 0x40000,  /*
+					   * The busy targets table is
+					   * stored in SCB space rather
+					   * than SRAM.
+					   */
+	AHC_BIOS_ENABLED      = 0x80000,
+	AHC_ALL_INTERRUPTS    = 0x100000,
+	AHC_PAGESCBS	      = 0x400000,  /* Enable SCB paging */
+	AHC_EDGE_INTERRUPT    = 0x800000,  /* Device uses edge triggered ints */
+	AHC_39BIT_ADDRESSING  = 0x1000000, /* Use 39 bit addressing scheme. */
+	AHC_LSCBS_ENABLED     = 0x2000000, /* 64Byte SCBs enabled */
+	AHC_SCB_CONFIG_USED   = 0x4000000, /* No SEEPROM but SCB2 had info. */
+	AHC_NO_BIOS_INIT      = 0x8000000, /* No BIOS left over settings. */
+	AHC_DISABLE_PCI_PERR  = 0x10000000,
+	AHC_HAS_TERM_LOGIC    = 0x20000000
+} ahc_flag;
+
+/************************* Hardware  SCB Definition ***************************/
+
+/*
+ * The driver keeps up to MAX_SCB scb structures per card in memory.  The SCB
+ * consists of a "hardware SCB" mirroring the fields available on the card
+ * and additional information the kernel stores for each transaction.
+ *
+ * To minimize space utilization, a portion of the hardware scb stores
+ * different data during different portions of a SCSI transaction.
+ * As initialized by the host driver for the initiator role, this area
+ * contains the SCSI cdb (or a pointer to the  cdb) to be executed.  After
+ * the cdb has been presented to the target, this area serves to store
+ * residual transfer information and the SCSI status byte.
+ * For the target role, the contents of this area do not change, but
+ * still serve a different purpose than for the initiator role.  See
+ * struct target_data for details.
+ */
+
+/*
+ * Status information embedded in the shared portion of
+ * an SCB after passing the cdb to the target.  The kernel
+ * driver will only read this data for transactions that
+ * complete abnormally (non-zero status byte).
+ */
+struct status_pkt {
+	uint32_t residual_datacnt;	/* Residual in the current S/G seg */
+	uint32_t residual_sg_ptr;	/* The next S/G for this transfer */
+	uint8_t	 scsi_status;		/* Standard SCSI status byte */
+};
+
+/*
+ * Target mode version of the shared data SCB segment.
+ */
+struct target_data {
+	uint32_t residual_datacnt;	/* Residual in the current S/G seg */
+	uint32_t residual_sg_ptr;	/* The next S/G for this transfer */
+	uint8_t  scsi_status;		/* SCSI status to give to initiator */
+	uint8_t  target_phases;		/* Bitmap of phases to execute */
+	uint8_t  data_phase;		/* Data-In or Data-Out */
+	uint8_t  initiator_tag;		/* Initiator's transaction tag */
+};
+
+struct hardware_scb {
+/*0*/	union {
+		/*
+		 * If the cdb is 12 bytes or less, we embed it directly
+		 * in the SCB.  For longer cdbs, we embed the address
+		 * of the cdb payload as seen by the chip and a DMA
+		 * is used to pull it in.
+		 */
+		uint8_t	 cdb[12];
+		uint32_t cdb_ptr;
+		struct	 status_pkt status;
+		struct	 target_data tdata;
+	} shared_data;
+/*
+ * A word about residuals.
+ * The scb is presented to the sequencer with the dataptr and datacnt
+ * fields initialized to the contents of the first S/G element to
+ * transfer.  The sgptr field is initialized to the bus address for
+ * the S/G element that follows the first in the in core S/G array
+ * or'ed with the SG_FULL_RESID flag.  Sgptr may point to an invalid
+ * S/G entry for this transfer (single S/G element transfer with the
+ * first elements address and length preloaded in the dataptr/datacnt
+ * fields).  If no transfer is to occur, sgptr is set to SG_LIST_NULL.
+ * The SG_FULL_RESID flag ensures that the residual will be correctly
+ * noted even if no data transfers occur.  Once the data phase is entered,
+ * the residual sgptr and datacnt are loaded from the sgptr and the
+ * datacnt fields.  After each S/G element's dataptr and length are
+ * loaded into the hardware, the residual sgptr is advanced.  After
+ * each S/G element is expired, its datacnt field is checked to see
+ * if the LAST_SEG flag is set.  If so, SG_LIST_NULL is set in the
+ * residual sg ptr and the transfer is considered complete.  If the
+ * sequencer determines that there is a residual in the transfer, it
+ * will set the SG_RESID_VALID flag in sgptr and dma the scb back into
+ * host memory.  To summarize:
+ *
+ * Sequencer:
+ *	o A residual has occurred if SG_FULL_RESID is set in sgptr,
+ *	  or residual_sgptr does not have SG_LIST_NULL set.
+ *
+ *	o We are transferring the last segment if residual_datacnt has
+ *	  the SG_LAST_SEG flag set.
+ *
+ * Host:
+ *	o A residual has occurred if a completed scb has the
+ *	  SG_RESID_VALID flag set.
+ *
+ *	o residual_sgptr and sgptr refer to the "next" sg entry
+ *	  and so may point beyond the last valid sg entry for the
+ *	  transfer.
+ */ 
+/*12*/	uint32_t dataptr;
+/*16*/	uint32_t datacnt;		/*
+					 * Byte 3 (numbered from 0) of
+					 * the datacnt is really the
+					 * 4th byte in that data address.
+					 */
+/*20*/	uint32_t sgptr;
+#define SG_PTR_MASK	0xFFFFFFF8
+/*24*/	uint8_t  control;	/* See SCB_CONTROL in aic7xxx.reg for details */
+/*25*/	uint8_t  scsiid;	/* what to load in the SCSIID register */
+/*26*/	uint8_t  lun;
+/*27*/	uint8_t  tag;			/*
+					 * Index into our kernel SCB array.
+					 * Also used as the tag for tagged I/O
+					 */
+/*28*/	uint8_t  cdb_len;
+/*29*/	uint8_t  scsirate;		/* Value for SCSIRATE register */
+/*30*/	uint8_t  scsioffset;		/* Value for SCSIOFFSET register */
+/*31*/	uint8_t  next;			/*
+					 * Used for threading SCBs in the
+					 * "Waiting for Selection" and
+					 * "Disconnected SCB" lists down
+					 * in the sequencer.
+					 */
+/*32*/	uint8_t  cdb32[32];		/*
+					 * CDB storage for cdbs of size
+					 * 13->32.  We store them here
+					 * because hardware scbs are
+					 * allocated from DMA safe
+					 * memory so we are guaranteed
+					 * the controller can access
+					 * this data.
+					 */
+};
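+
+/*
+ * Editor's illustrative sketch (not part of the driver, compiled out):
+ * filling the shared_data union per the cdb-size rule described above.
+ * Short cdbs are embedded directly; longer ones live in cdb32[] and the
+ * chip fetches them via cdb_ptr.  The helper name and the bus address
+ * argument are hypothetical, and byte order handling is omitted.
+ */
+#if 0
+static inline void
+ahc_example_setup_cdb(struct hardware_scb *hscb, const uint8_t *cdb,
+		      u_int cdb_len, uint32_t cdb32_busaddr)
+{
+	hscb->cdb_len = cdb_len;
+	if (cdb_len <= sizeof(hscb->shared_data.cdb)) {
+		/* cdbs of 12 bytes or less ride in the SCB itself. */
+		memcpy(hscb->shared_data.cdb, cdb, cdb_len);
+	} else {
+		/* Longer cdbs are stored in DMA safe cdb32[] storage. */
+		memcpy(hscb->cdb32, cdb, cdb_len);
+		hscb->shared_data.cdb_ptr = cdb32_busaddr;
+	}
+}
+#endif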
+
+/************************ Kernel SCB Definitions ******************************/
+/*
+ * Some fields of the SCB are OS dependent.  Here we collect the
+ * definitions for elements that all OS platforms need to include
+ * in their SCB definition.
+ */
+
+/*
+ * Definition of a scatter/gather element as transferred to the controller.
+ * The aic7xxx chips only support a 24bit length.  We use the top byte of
+ * the length to store additional address bits and a flag to indicate
+ * that a given segment terminates the transfer.  This gives us an
+ * addressable range of 512GB on machines with 64bit PCI or with chips
+ * that can support dual address cycles on 32bit PCI busses.
+ */
+struct ahc_dma_seg {
+	uint32_t	addr;
+	uint32_t	len;
+#define	AHC_DMA_LAST_SEG	0x80000000
+#define	AHC_SG_HIGH_ADDR_MASK	0x7F000000
+#define	AHC_SG_LEN_MASK		0x00FFFFFF
+};
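+
+/*
+ * Editor's illustrative sketch (not part of the driver, compiled out):
+ * packing a segment as described above - the 24bit length in the low
+ * bytes of 'len', the upper address bits under AHC_SG_HIGH_ADDR_MASK,
+ * and AHC_DMA_LAST_SEG marking the final segment.  The helper name is
+ * hypothetical and the byte swapping done by the real driver is omitted.
+ */
+#if 0
+static inline void
+ahc_example_fill_seg(struct ahc_dma_seg *sg, uint64_t paddr,
+		     uint32_t len, int last)
+{
+	sg->addr = (uint32_t)paddr;	/* low 32 address bits */
+	sg->len = (len & AHC_SG_LEN_MASK)
+		| (((uint32_t)(paddr >> 32) << 24) & AHC_SG_HIGH_ADDR_MASK)
+		| (last ? AHC_DMA_LAST_SEG : 0);
+}
+#endif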
+
+struct sg_map_node {
+	bus_dmamap_t		 sg_dmamap;
+	dma_addr_t		 sg_physaddr;
+	struct ahc_dma_seg*	 sg_vaddr;
+	SLIST_ENTRY(sg_map_node) links;
+};
+
+/*
+ * The current state of this SCB.
+ */
+typedef enum {
+	SCB_FREE		= 0x0000,
+	SCB_OTHERTCL_TIMEOUT	= 0x0002,/*
+					  * Another device was active
+					  * during the first timeout for
+					  * this SCB so we gave ourselves
+					  * an additional timeout period
+					  * in case it was hogging the
+					  * bus.
+				          */
+	SCB_DEVICE_RESET	= 0x0004,
+	SCB_SENSE		= 0x0008,
+	SCB_CDB32_PTR		= 0x0010,
+	SCB_RECOVERY_SCB	= 0x0020,
+	SCB_AUTO_NEGOTIATE	= 0x0040,/* Negotiate to achieve goal. */
+	SCB_NEGOTIATE		= 0x0080,/* Negotiation forced for command. */
+	SCB_ABORT		= 0x0100,
+	SCB_UNTAGGEDQ		= 0x0200,
+	SCB_ACTIVE		= 0x0400,
+	SCB_TARGET_IMMEDIATE	= 0x0800,
+	SCB_TRANSMISSION_ERROR	= 0x1000,/*
+					  * We detected a parity or CRC
+					  * error that has affected the
+					  * payload of the command.  This
+					  * flag is checked when normal
+					  * status is returned to catch
+					  * the case of a target not
+					  * responding to our attempt
+					  * to report the error.
+					  */
+	SCB_TARGET_SCB		= 0x2000,
+	SCB_SILENT		= 0x4000 /*
+					  * Be quiet about transmission type
+					  * errors.  They are expected and we
+					  * don't want to upset the user.  This
+					  * flag is typically used during DV.
+					  */
+} scb_flag;
+
+struct scb {
+	struct	hardware_scb	 *hscb;
+	union {
+		SLIST_ENTRY(scb)  sle;
+		TAILQ_ENTRY(scb)  tqe;
+	} links;
+	LIST_ENTRY(scb)		  pending_links;
+	ahc_io_ctx_t		  io_ctx;
+	struct ahc_softc	 *ahc_softc;
+	scb_flag		  flags;
+#ifndef __linux__
+	bus_dmamap_t		  dmamap;
+#endif
+	struct scb_platform_data *platform_data;
+	struct sg_map_node	 *sg_map;
+	struct ahc_dma_seg 	 *sg_list;
+	dma_addr_t		  sg_list_phys;
+	u_int			  sg_count;	/* Number of S/G segments in use */
+};
+
+struct scb_data {
+	SLIST_HEAD(, scb) free_scbs;	/*
+					 * Pool of SCBs ready to be assigned
+					 * commands to execute.
+					 */
+	struct	scb *scbindex[256];	/*
+					 * Mapping from tag to SCB.
+					 * As tag identifiers are an
+					 * 8bit value, we provide space
+					 * for all possible tag values.
+					 * Any lookups to entries at or
+					 * above AHC_SCB_MAX_ALLOC will
+					 * always fail.
+					 */
+	struct	hardware_scb	*hscbs;	/* Array of hardware SCBs */
+	struct	scb *scbarray;		/* Array of kernel SCBs */
+	struct	scsi_sense_data *sense; /* Per SCB sense data */
+
+	/*
+	 * "Bus" addresses of our data structures.
+	 */
+	bus_dma_tag_t	 hscb_dmat;	/* dmat for our hardware SCB array */
+	bus_dmamap_t	 hscb_dmamap;
+	dma_addr_t	 hscb_busaddr;
+	bus_dma_tag_t	 sense_dmat;
+	bus_dmamap_t	 sense_dmamap;
+	dma_addr_t	 sense_busaddr;
+	bus_dma_tag_t	 sg_dmat;	/* dmat for our sg segments */
+	SLIST_HEAD(, sg_map_node) sg_maps;
+	uint8_t	numscbs;
+	uint8_t	maxhscbs;		/* Number of SCBs on the card */
+	uint8_t	init_level;		/*
+					 * How far we've initialized
+					 * this structure.
+					 */
+};
+
+/************************ Target Mode Definitions *****************************/
+
+/*
+ * Connection descriptor for select-in requests in target mode.
+ */
+struct target_cmd {
+	uint8_t scsiid;		/* Our ID and the initiator's ID */
+	uint8_t identify;	/* Identify message */
+	uint8_t bytes[22];	/* 
+				 * Bytes contains any additional message
+				 * bytes terminated by 0xFF.  The remainder
+				 * is the cdb to execute.
+				 */
+	uint8_t cmd_valid;	/*
+				 * When a command is complete, the firmware
+				 * will set cmd_valid to all bits set.
+				 * After the host has seen the command,
+				 * the bits are cleared.  This allows us
+				 * to just peek at host memory to determine
+				 * if more work is complete. cmd_valid is on
+				 * an 8 byte boundary to simplify setting
+				 * it on aic7880 hardware which only has
+				 * limited direct access to the DMA FIFO.
+				 */
+	uint8_t pad[7];
+};
+
+/*
+ * Number of events we can buffer up if we run out
+ * of immediate notify ccbs.
+ */
+#define AHC_TMODE_EVENT_BUFFER_SIZE 8
+struct ahc_tmode_event {
+	uint8_t initiator_id;
+	uint8_t event_type;	/* MSG type or EVENT_TYPE_BUS_RESET */
+#define	EVENT_TYPE_BUS_RESET 0xFF
+	uint8_t event_arg;
+};
+
+/*
+ * Per enabled lun target mode state.
+ * As this state is directly influenced by the host OS'es target mode
+ * environment, we let the OS module define it.  Forward declare the
+ * structure here so we can store arrays of them, etc. in OS neutral
+ * data structures.
+ */
+#ifdef AHC_TARGET_MODE 
+struct ahc_tmode_lstate {
+	struct cam_path *path;
+	struct ccb_hdr_slist accept_tios;
+	struct ccb_hdr_slist immed_notifies;
+	struct ahc_tmode_event event_buffer[AHC_TMODE_EVENT_BUFFER_SIZE];
+	uint8_t event_r_idx;
+	uint8_t event_w_idx;
+};
+#else
+struct ahc_tmode_lstate;
+#endif
+
+/******************** Transfer Negotiation Datastructures *********************/
+#define AHC_TRANS_CUR		0x01	/* Modify current negotiation status */
+#define AHC_TRANS_ACTIVE	0x03	/* Assume this target is on the bus */
+#define AHC_TRANS_GOAL		0x04	/* Modify negotiation goal */
+#define AHC_TRANS_USER		0x08	/* Modify user negotiation settings */
+
+#define AHC_WIDTH_UNKNOWN	0xFF
+#define AHC_PERIOD_UNKNOWN	0xFF
+#define AHC_OFFSET_UNKNOWN	0xFF
+#define AHC_PPR_OPTS_UNKNOWN	0xFF
+
+/*
+ * Transfer Negotiation Information.
+ */
+struct ahc_transinfo {
+	uint8_t protocol_version;	/* SCSI Revision level */
+	uint8_t transport_version;	/* SPI Revision level */
+	uint8_t width;			/* Bus width */
+	uint8_t period;			/* Sync rate factor */
+	uint8_t offset;			/* Sync offset */
+	uint8_t ppr_options;		/* Parallel Protocol Request options */
+};
+
+/*
+ * Per-initiator current, goal and user transfer negotiation information.
+ */
+struct ahc_initiator_tinfo {
+	uint8_t scsirate;		/* Computed value for SCSIRATE reg */
+	struct ahc_transinfo curr;
+	struct ahc_transinfo goal;
+	struct ahc_transinfo user;
+};
+
+/*
+ * Per enabled target ID state.
+ * Pointers to lun target state as well as sync/wide negotiation information
+ * for each initiator<->target mapping.  For the initiator role we pretend
+ * that we are the target and the targets are the initiators since the
+ * negotiation is the same regardless of role.
+ */
+struct ahc_tmode_tstate {
+	struct ahc_tmode_lstate*	enabled_luns[AHC_NUM_LUNS];
+	struct ahc_initiator_tinfo	transinfo[AHC_NUM_TARGETS];
+
+	/*
+	 * Per initiator state bitmasks.
+	 */
+	uint16_t	 auto_negotiate;/* Auto Negotiation Required */
+	uint16_t	 ultraenb;	/* Using ultra sync rate  */
+	uint16_t	 discenable;	/* Disconnection allowed  */
+	uint16_t	 tagenable;	/* Tagged Queuing allowed */
+};
+
+/*
+ * Data structure for our table of allowed synchronous transfer rates.
+ */
+struct ahc_syncrate {
+	u_int sxfr_u2;	/* Value of the SXFR parameter for Ultra2+ Chips */
+	u_int sxfr;	/* Value of the SXFR parameter for <= Ultra Chips */
+#define		ULTRA_SXFR 0x100	/* Rate Requires Ultra Mode set */
+#define		ST_SXFR	   0x010	/* Rate Single Transition Only */
+#define		DT_SXFR	   0x040	/* Rate Double Transition Only */
+	uint8_t period; /* Period to send to SCSI target */
+	char *rate;
+};
+
+/* Safe and valid period for async negotiations. */
+#define	AHC_ASYNC_XFER_PERIOD 0x45
+#define	AHC_ULTRA2_XFER_PERIOD 0x0a
+
+/*
+ * Indexes into our table of synchronous transfer rates.
+ */
+#define AHC_SYNCRATE_DT		0
+#define AHC_SYNCRATE_ULTRA2	1
+#define AHC_SYNCRATE_ULTRA	3
+#define AHC_SYNCRATE_FAST	6
+#define AHC_SYNCRATE_MAX	AHC_SYNCRATE_DT
+#define	AHC_SYNCRATE_MIN	13
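
The indexes above select starting points in a table ordered from the fastest (DT) entry down to the slowest. A simplified lookup is sketched below; the driver's real ahc_find_syncrate(), declared later in this header, additionally filters on chip features and PPR options. The example_syncrates argument is a hypothetical table with at least AHC_SYNCRATE_MIN + 1 entries.

/*
 * Sketch: starting from the fastest index the caller allows (maxsync), pick
 * the first table entry whose period factor is at least as large (i.e. as
 * slow) as the requested period.  NULL means only async transfers fit.
 */
static struct ahc_syncrate *
example_pick_syncrate(struct ahc_syncrate *example_syncrates,
		      u_int period, u_int maxsync)
{
	u_int i;

	for (i = maxsync; i <= AHC_SYNCRATE_MIN; i++) {
		if (example_syncrates[i].period >= period)
			return (&example_syncrates[i]);
	}
	return (NULL);
}
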
+
+/***************************** Lookup Tables **********************************/
+/*
+ * Table mapping each bus phase to its name and the message-out
+ * response to a parity error in that phase.
+ */
+struct ahc_phase_table_entry {
+        uint8_t phase;
+        uint8_t mesg_out; /* Message response to parity errors */
+	char *phasemsg;
+};
+
+/************************** Serial EEPROM Format ******************************/
+
+struct seeprom_config {
+/*
+ * Per SCSI ID Configuration Flags
+ */
+	uint16_t device_flags[16];	/* words 0-15 */
+#define		CFXFER		0x0007	/* synchronous transfer rate */
+#define		CFSYNCH		0x0008	/* enable synchronous transfer */
+#define		CFDISC		0x0010	/* enable disconnection */
+#define		CFWIDEB		0x0020	/* wide bus device */
+#define		CFSYNCHISULTRA	0x0040	/* CFSYNCH is an ultra offset (2940AU)*/
+#define		CFSYNCSINGLE	0x0080	/* Single-Transition signalling */
+#define		CFSTART		0x0100	/* send start unit SCSI command */
+#define		CFINCBIOS	0x0200	/* include in BIOS scan */
+#define		CFRNFOUND	0x0400	/* report even if not found */
+#define		CFMULTILUNDEV	0x0800	/* Probe multiple luns in BIOS scan */
+#define		CFWBCACHEENB	0x4000	/* Enable W-Behind Cache on disks */
+#define		CFWBCACHENOP	0xc000	/* Don't touch W-Behind Cache */
+
+/*
+ * BIOS Control Bits
+ */
+	uint16_t bios_control;		/* word 16 */
+#define		CFSUPREM	0x0001	/* support all removable drives */
+#define		CFSUPREMB	0x0002	/* support removable boot drives */
+#define		CFBIOSEN	0x0004	/* BIOS enabled */
+#define		CFBIOS_BUSSCAN	0x0008	/* Have the BIOS Scan the Bus */
+#define		CFSM2DRV	0x0010	/* support more than two drives */
+#define		CFSTPWLEVEL	0x0010	/* Termination level control */
+#define		CF284XEXTEND	0x0020	/* extended translation (284x cards) */	
+#define		CFCTRL_A	0x0020	/* BIOS displays Ctrl-A message */	
+#define		CFTERM_MENU	0x0040	/* BIOS displays termination menu */	
+#define		CFEXTEND	0x0080	/* extended translation enabled */
+#define		CFSCAMEN	0x0100	/* SCAM enable */
+#define		CFMSG_LEVEL	0x0600	/* BIOS Message Level */
+#define			CFMSG_VERBOSE	0x0000
+#define			CFMSG_SILENT	0x0200
+#define			CFMSG_DIAG	0x0400
+#define		CFBOOTCD	0x0800  /* Support Bootable CD-ROM */
+/*		UNUSED		0xff00	*/
+
+/*
+ * Host Adapter Control Bits
+ */
+	uint16_t adapter_control;	/* word 17 */	
+#define		CFAUTOTERM	0x0001	/* Perform Auto termination */
+#define		CFULTRAEN	0x0002	/* Ultra SCSI speed enable */
+#define		CF284XSELTO     0x0003	/* Selection timeout (284x cards) */
+#define		CF284XFIFO      0x000C	/* FIFO Threshold (284x cards) */
+#define		CFSTERM		0x0004	/* SCSI low byte termination */
+#define		CFWSTERM	0x0008	/* SCSI high byte termination */
+#define		CFSPARITY	0x0010	/* SCSI parity */
+#define		CF284XSTERM     0x0020	/* SCSI low byte term (284x cards) */	
+#define		CFMULTILUN	0x0020
+#define		CFRESETB	0x0040	/* reset SCSI bus at boot */
+#define		CFCLUSTERENB	0x0080	/* Cluster Enable */
+#define		CFBOOTCHAN	0x0300	/* probe this channel first */
+#define		CFBOOTCHANSHIFT 8
+#define		CFSEAUTOTERM	0x0400	/* Ultra2 Perform secondary Auto Term*/
+#define		CFSELOWTERM	0x0800	/* Ultra2 secondary low term */
+#define		CFSEHIGHTERM	0x1000	/* Ultra2 secondary high term */
+#define		CFENABLEDV	0x4000	/* Perform Domain Validation*/
+
+/*
+ * Bus Release Time, Host Adapter ID
+ */
+	uint16_t brtime_id;		/* word 18 */
+#define		CFSCSIID	0x000f	/* host adapter SCSI ID */
+/*		UNUSED		0x00f0	*/
+#define		CFBRTIME	0xff00	/* bus release time */
+
+/*
+ * Maximum targets
+ */
+	uint16_t max_targets;		/* word 19 */	
+#define		CFMAXTARG	0x00ff	/* maximum targets */
+#define		CFBOOTLUN	0x0f00	/* Lun to boot from */
+#define		CFBOOTID	0xf000	/* Target to boot from */
+	uint16_t res_1[10];		/* words 20-29 */
+	uint16_t signature;		/* Signature == 0x250 */
+#define		CFSIGNATURE	0x250
+#define		CFSIGNATURE2	0x300
+	uint16_t checksum;		/* word 31 */
+};
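
Two hedged examples of pulling values out of a seeprom_config image with the masks defined above (the image itself is read through the serial EEPROM logic; see the SEECTL comments in aic7xxx.reg and 93cx6.c):

/* The host adapter's own SCSI ID occupies the low nibble of word 18. */
static u_int
example_seeprom_host_id(struct seeprom_config *sc)
{
	return (sc->brtime_id & CFSCSIID);
}

/* Per-target disconnection permission comes from words 0-15. */
static int
example_seeprom_disc_enabled(struct seeprom_config *sc, u_int target)
{
	return ((sc->device_flags[target] & CFDISC) != 0);
}
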
+
+/****************************  Message Buffer *********************************/
+typedef enum {
+	MSG_TYPE_NONE			= 0x00,
+	MSG_TYPE_INITIATOR_MSGOUT	= 0x01,
+	MSG_TYPE_INITIATOR_MSGIN	= 0x02,
+	MSG_TYPE_TARGET_MSGOUT		= 0x03,
+	MSG_TYPE_TARGET_MSGIN		= 0x04
+} ahc_msg_type;
+
+typedef enum {
+	MSGLOOP_IN_PROG,
+	MSGLOOP_MSGCOMPLETE,
+	MSGLOOP_TERMINATED
+} msg_loop_stat;
+
+/*********************** Software Configuration Structure *********************/
+TAILQ_HEAD(scb_tailq, scb);
+
+struct ahc_aic7770_softc {
+	/*
+	 * Saved register state used for chip_init().
+	 */
+	uint8_t busspd;
+	uint8_t bustime;
+};
+
+struct ahc_pci_softc {
+	/*
+	 * Saved register state used for chip_init().
+	 */
+	uint32_t  devconfig;
+	uint16_t  targcrccnt;
+	uint8_t   command;
+	uint8_t   csize_lattime;
+	uint8_t   optionmode;
+	uint8_t   crccontrol1;
+	uint8_t   dscommand0;
+	uint8_t   dspcistatus;
+	uint8_t   scbbaddr;
+	uint8_t   dff_thrsh;
+};
+
+union ahc_bus_softc {
+	struct ahc_aic7770_softc aic7770_softc;
+	struct ahc_pci_softc pci_softc;
+};
+
+typedef void (*ahc_bus_intr_t)(struct ahc_softc *);
+typedef int (*ahc_bus_chip_init_t)(struct ahc_softc *);
+typedef int (*ahc_bus_suspend_t)(struct ahc_softc *);
+typedef int (*ahc_bus_resume_t)(struct ahc_softc *);
+typedef void ahc_callback_t (void *);
+
+struct ahc_softc {
+	bus_space_tag_t           tag;
+	bus_space_handle_t        bsh;
+#ifndef __linux__
+	bus_dma_tag_t		  buffer_dmat;   /* dmat for buffer I/O */
+#endif
+	struct scb_data		 *scb_data;
+
+	struct scb		 *next_queued_scb;
+
+	/*
+	 * SCBs that have been sent to the controller
+	 */
+	LIST_HEAD(, scb)	  pending_scbs;
+
+	/*
+	 * Counting lock for deferring the release of additional
+	 * untagged transactions from the untagged_queues.  When
+	 * the lock is decremented to 0, all queues in the
+	 * untagged_queues array are run.
+	 */
+	u_int			  untagged_queue_lock;
+
+	/*
+	 * Per-target queue of untagged-transactions.  The
+	 * transaction at the head of the queue is the
+	 * currently pending untagged transaction for the
+	 * target.  The driver only allows a single untagged
+	 * transaction per target.
+	 */
+	struct scb_tailq	  untagged_queues[AHC_NUM_TARGETS];
+
+	/*
+	 * Bus attachment specific data.
+	 */
+	union ahc_bus_softc	  bus_softc;
+
+	/*
+	 * Platform specific data.
+	 */
+	struct ahc_platform_data *platform_data;
+
+	/*
+	 * Platform specific device information.
+	 */
+	ahc_dev_softc_t		  dev_softc;
+
+	/*
+	 * Bus specific device information.
+	 */
+	ahc_bus_intr_t		  bus_intr;
+
+	/*
+	 * Bus specific initialization required
+	 * after a chip reset.
+	 */
+	ahc_bus_chip_init_t	  bus_chip_init;
+
+	/*
+	 * Bus specific suspend routine.
+	 */
+	ahc_bus_suspend_t	  bus_suspend;
+
+	/*
+	 * Bus specific resume routine.
+	 */
+	ahc_bus_resume_t	  bus_resume;
+
+	/*
+	 * Target mode related state kept on a per enabled lun basis.
+	 * Targets that are not enabled will have null entries.
+	 * As an initiator, we keep one target entry for our initiator
+	 * ID to store our sync/wide transfer settings.
+	 */
+	struct ahc_tmode_tstate  *enabled_targets[AHC_NUM_TARGETS];
+
+	/*
+	 * The black hole device responsible for handling requests for
+	 * disabled luns on enabled targets.
+	 */
+	struct ahc_tmode_lstate  *black_hole;
+
+	/*
+	 * Device instance currently on the bus awaiting a continue TIO
+	 * for a command that was not given the disconnect privilege.
+	 */
+	struct ahc_tmode_lstate  *pending_device;
+
+	/*
+	 * Card characteristics
+	 */
+	ahc_chip		  chip;
+	ahc_feature		  features;
+	ahc_bug			  bugs;
+	ahc_flag		  flags;
+	struct seeprom_config	 *seep_config;
+
+	/* Values to store in the SEQCTL register for pause and unpause */
+	uint8_t			  unpause;
+	uint8_t			  pause;
+
+	/* Command Queues */
+	uint8_t			  qoutfifonext;
+	uint8_t			  qinfifonext;
+	uint8_t			 *qoutfifo;
+	uint8_t			 *qinfifo;
+
+	/* Critical Section Data */
+	struct cs		 *critical_sections;
+	u_int			  num_critical_sections;
+
+	/* Links for chaining softcs */
+	TAILQ_ENTRY(ahc_softc)	  links;
+
+	/* Channel Names ('A', 'B', etc.) */
+	char			  channel;
+	char			  channel_b;
+
+	/* Initiator Bus ID */
+	uint8_t			  our_id;
+	uint8_t			  our_id_b;
+
+	/*
+	 * PCI error detection.
+	 */
+	int			  unsolicited_ints;
+
+	/*
+	 * Target incoming command FIFO.
+	 */
+	struct target_cmd	 *targetcmds;
+	uint8_t			  tqinfifonext;
+
+	/*
+	 * Cached copy of the sequencer control register.
+	 */
+	uint8_t			  seqctl;
+
+	/*
+	 * Incoming and outgoing message handling.
+	 */
+	uint8_t			  send_msg_perror;
+	ahc_msg_type		  msg_type;
+	uint8_t			  msgout_buf[12];/* Message we are sending */
+	uint8_t			  msgin_buf[12];/* Message we are receiving */
+	u_int			  msgout_len;	/* Length of message to send */
+	u_int			  msgout_index;	/* Current index in msgout */
+	u_int			  msgin_index;	/* Current index in msgin */
+
+	/*
+	 * Mapping information for data structures shared
+	 * between the sequencer and kernel.
+	 */
+	bus_dma_tag_t		  parent_dmat;
+	bus_dma_tag_t		  shared_data_dmat;
+	bus_dmamap_t		  shared_data_dmamap;
+	dma_addr_t		  shared_data_busaddr;
+
+	/*
+	 * Bus address of the one byte buffer used to
+	 * work-around a DMA bug for chips <= aic7880
+	 * in target mode.
+	 */
+	dma_addr_t		  dma_bug_buf;
+
+	/* Number of enabled target mode devices on this card */
+	u_int			  enabled_luns;
+
+	/* Initialization level of this data structure */
+	u_int			  init_level;
+
+	/* PCI cacheline size. */
+	u_int			  pci_cachesize;
+
+	/*
+	 * Count of parity errors we have seen as a target.
+	 * We auto-disable parity error checking after seeing
+	 * AHC_PCI_TARGET_PERR_THRESH number of errors.
+	 */
+	u_int			  pci_target_perr_count;
+#define		AHC_PCI_TARGET_PERR_THRESH	10
+
+	/* Maximum number of sequencer instructions supported. */
+	u_int			  instruction_ram_size;
+
+	/* Per-Unit descriptive information */
+	const char		 *description;
+	char			 *name;
+	int			  unit;
+
+	/* Selection Timer settings */
+	int			  seltime;
+	int			  seltime_b;
+
+	uint16_t	 	  user_discenable;/* Disconnection allowed  */
+	uint16_t		  user_tagenable;/* Tagged Queuing allowed */
+};
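
The untagged_queue_lock field above is a counting lock rather than a mutex: each "freeze" bumps the count, and only when a matching number of releases brings it back to zero are the per-target untagged queues run again. A minimal sketch of that protocol follows; ahc_run_untagged_queues() is prototyped later in this header, and any interrupt masking around the counter is omitted here.

void ahc_run_untagged_queues(struct ahc_softc *ahc);	/* declared below */

static void
example_freeze_untagged_queues(struct ahc_softc *ahc)
{
	ahc->untagged_queue_lock++;
}

static void
example_release_untagged_queues(struct ahc_softc *ahc)
{
	ahc->untagged_queue_lock--;
	if (ahc->untagged_queue_lock == 0)
		ahc_run_untagged_queues(ahc);
}
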
+
+TAILQ_HEAD(ahc_softc_tailq, ahc_softc);
+extern struct ahc_softc_tailq ahc_tailq;
+
+/************************ Active Device Information ***************************/
+typedef enum {
+	ROLE_UNKNOWN,
+	ROLE_INITIATOR,
+	ROLE_TARGET
+} role_t;
+
+struct ahc_devinfo {
+	int	 our_scsiid;
+	int	 target_offset;
+	uint16_t target_mask;
+	u_int	 target;
+	u_int	 lun;
+	char	 channel;
+	role_t	 role;		/*
+				 * Only guaranteed to be correct if not
+				 * in the busfree state.
+				 */
+};
+
+/****************************** PCI Structures ********************************/
+typedef int (ahc_device_setup_t)(struct ahc_softc *);
+
+struct ahc_pci_identity {
+	uint64_t		 full_id;
+	uint64_t		 id_mask;
+	char			*name;
+	ahc_device_setup_t	*setup;
+};
+extern struct ahc_pci_identity ahc_pci_ident_table[];
+extern const u_int ahc_num_pci_devs;
+
+/***************************** VL/EISA Declarations ***************************/
+struct aic7770_identity {
+	uint32_t		 full_id;
+	uint32_t		 id_mask;
+	const char		*name;
+	ahc_device_setup_t	*setup;
+};
+extern struct aic7770_identity aic7770_ident_table[];
+extern const int ahc_num_aic7770_devs;
+
+#define AHC_EISA_SLOT_OFFSET	0xc00
+#define AHC_EISA_IOSIZE		0x100
+
+/*************************** Function Declarations ****************************/
+/******************************************************************************/
+u_int			ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl);
+void			ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl);
+void			ahc_busy_tcl(struct ahc_softc *ahc,
+				     u_int tcl, u_int busyid);
+
+/***************************** PCI Front End *********************************/
+struct ahc_pci_identity	*ahc_find_pci_device(ahc_dev_softc_t);
+int			 ahc_pci_config(struct ahc_softc *,
+					struct ahc_pci_identity *);
+int			 ahc_pci_test_register_access(struct ahc_softc *);
+
+/*************************** EISA/VL Front End ********************************/
+struct aic7770_identity *aic7770_find_device(uint32_t);
+int			 aic7770_config(struct ahc_softc *ahc,
+					struct aic7770_identity *,
+					u_int port);
+
+/************************** SCB and SCB queue management **********************/
+int		ahc_probe_scbs(struct ahc_softc *);
+void		ahc_run_untagged_queues(struct ahc_softc *ahc);
+void		ahc_run_untagged_queue(struct ahc_softc *ahc,
+				       struct scb_tailq *queue);
+void		ahc_qinfifo_requeue_tail(struct ahc_softc *ahc,
+					 struct scb *scb);
+int		ahc_match_scb(struct ahc_softc *ahc, struct scb *scb,
+			      int target, char channel, int lun,
+			      u_int tag, role_t role);
+
+/****************************** Initialization ********************************/
+struct ahc_softc	*ahc_alloc(void *platform_arg, char *name);
+int			 ahc_softc_init(struct ahc_softc *);
+void			 ahc_controller_info(struct ahc_softc *ahc, char *buf);
+int			 ahc_chip_init(struct ahc_softc *ahc);
+int			 ahc_init(struct ahc_softc *ahc);
+void			 ahc_intr_enable(struct ahc_softc *ahc, int enable);
+void			 ahc_pause_and_flushwork(struct ahc_softc *ahc);
+int			 ahc_suspend(struct ahc_softc *ahc); 
+int			 ahc_resume(struct ahc_softc *ahc);
+void			 ahc_softc_insert(struct ahc_softc *);
+struct ahc_softc	*ahc_find_softc(struct ahc_softc *ahc);
+void			 ahc_set_unit(struct ahc_softc *, int);
+void			 ahc_set_name(struct ahc_softc *, char *);
+void			 ahc_alloc_scbs(struct ahc_softc *ahc);
+void			 ahc_free(struct ahc_softc *ahc);
+int			 ahc_reset(struct ahc_softc *ahc, int reinit);
+void			 ahc_shutdown(void *arg);
+
+/*************************** Interrupt Services *******************************/
+void			ahc_clear_intstat(struct ahc_softc *ahc);
+void			ahc_run_qoutfifo(struct ahc_softc *ahc);
+#ifdef AHC_TARGET_MODE
+void			ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
+#endif
+void			ahc_handle_brkadrint(struct ahc_softc *ahc);
+void			ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
+void			ahc_handle_scsiint(struct ahc_softc *ahc,
+					   u_int intstat);
+void			ahc_clear_critical_section(struct ahc_softc *ahc);
+
+/***************************** Error Recovery *********************************/
+typedef enum {
+	SEARCH_COMPLETE,
+	SEARCH_COUNT,
+	SEARCH_REMOVE
+} ahc_search_action;
+int			ahc_search_qinfifo(struct ahc_softc *ahc, int target,
+					   char channel, int lun, u_int tag,
+					   role_t role, uint32_t status,
+					   ahc_search_action action);
+int			ahc_search_untagged_queues(struct ahc_softc *ahc,
+						   ahc_io_ctx_t ctx,
+						   int target, char channel,
+						   int lun, uint32_t status,
+						   ahc_search_action action);
+int			ahc_search_disc_list(struct ahc_softc *ahc, int target,
+					     char channel, int lun, u_int tag,
+					     int stop_on_first, int remove,
+					     int save_state);
+void			ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
+int			ahc_reset_channel(struct ahc_softc *ahc, char channel,
+					  int initiate_reset);
+int			ahc_abort_scbs(struct ahc_softc *ahc, int target,
+				       char channel, int lun, u_int tag,
+				       role_t role, uint32_t status);
+void			ahc_restart(struct ahc_softc *ahc);
+void			ahc_calc_residual(struct ahc_softc *ahc,
+					  struct scb *scb);
+/*************************** Utility Functions ********************************/
+struct ahc_phase_table_entry*
+			ahc_lookup_phase_entry(int phase);
+void			ahc_compile_devinfo(struct ahc_devinfo *devinfo,
+					    u_int our_id, u_int target,
+					    u_int lun, char channel,
+					    role_t role);
+/************************** Transfer Negotiation ******************************/
+struct ahc_syncrate*	ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
+					  u_int *ppr_options, u_int maxsync);
+u_int			ahc_find_period(struct ahc_softc *ahc,
+					u_int scsirate, u_int maxsync);
+void			ahc_validate_offset(struct ahc_softc *ahc,
+					    struct ahc_initiator_tinfo *tinfo,
+					    struct ahc_syncrate *syncrate,
+					    u_int *offset, int wide,
+					    role_t role);
+void			ahc_validate_width(struct ahc_softc *ahc,
+					   struct ahc_initiator_tinfo *tinfo,
+					   u_int *bus_width,
+					   role_t role);
+/*
+ * Negotiation types.  These are used to qualify if we should renegotiate
+ * even if our goal and current transport parameters are identical.
+ */
+typedef enum {
+	AHC_NEG_TO_GOAL,	/* Renegotiate only if goal and curr differ. */
+	AHC_NEG_IF_NON_ASYNC,	/* Renegotiate so long as goal is non-async. */
+	AHC_NEG_ALWAYS		/* Renegotiate even if goal is async. */
+} ahc_neg_type;
+int			ahc_update_neg_request(struct ahc_softc*,
+					       struct ahc_devinfo*,
+					       struct ahc_tmode_tstate*,
+					       struct ahc_initiator_tinfo*,
+					       ahc_neg_type);
+void			ahc_set_width(struct ahc_softc *ahc,
+				      struct ahc_devinfo *devinfo,
+				      u_int width, u_int type, int paused);
+void			ahc_set_syncrate(struct ahc_softc *ahc,
+					 struct ahc_devinfo *devinfo,
+					 struct ahc_syncrate *syncrate,
+					 u_int period, u_int offset,
+					 u_int ppr_options,
+					 u_int type, int paused);
+typedef enum {
+	AHC_QUEUE_NONE,
+	AHC_QUEUE_BASIC,
+	AHC_QUEUE_TAGGED
+} ahc_queue_alg;
+
+void			ahc_set_tags(struct ahc_softc *ahc,
+				     struct ahc_devinfo *devinfo,
+				     ahc_queue_alg alg);
+
+/**************************** Target Mode *************************************/
+#ifdef AHC_TARGET_MODE
+void		ahc_send_lstate_events(struct ahc_softc *,
+				       struct ahc_tmode_lstate *);
+void		ahc_handle_en_lun(struct ahc_softc *ahc,
+				  struct cam_sim *sim, union ccb *ccb);
+cam_status	ahc_find_tmode_devs(struct ahc_softc *ahc,
+				    struct cam_sim *sim, union ccb *ccb,
+				    struct ahc_tmode_tstate **tstate,
+				    struct ahc_tmode_lstate **lstate,
+				    int notfound_failure);
+#ifndef AHC_TMODE_ENABLE
+#define AHC_TMODE_ENABLE 0
+#endif
+#endif
+/******************************* Debug ***************************************/
+#ifdef AHC_DEBUG
+extern uint32_t ahc_debug;
+#define	AHC_SHOW_MISC		0x0001
+#define	AHC_SHOW_SENSE		0x0002
+#define AHC_DUMP_SEEPROM	0x0004
+#define AHC_SHOW_TERMCTL	0x0008
+#define AHC_SHOW_MEMORY		0x0010
+#define AHC_SHOW_MESSAGES	0x0020
+#define	AHC_SHOW_DV		0x0040
+#define AHC_SHOW_SELTO		0x0080
+#define AHC_SHOW_QFULL		0x0200
+#define AHC_SHOW_QUEUE		0x0400
+#define AHC_SHOW_TQIN		0x0800
+#define AHC_SHOW_MASKED_ERRORS	0x1000
+#define AHC_DEBUG_SEQUENCER	0x2000
+#endif
+void			ahc_print_scb(struct scb *scb);
+void			ahc_print_devinfo(struct ahc_softc *ahc,
+					  struct ahc_devinfo *dev);
+void			ahc_dump_card_state(struct ahc_softc *ahc);
+int			ahc_print_register(ahc_reg_parse_entry_t *table,
+					   u_int num_entries,
+					   const char *name,
+					   u_int address,
+					   u_int value,
+					   u_int *cur_column,
+					   u_int wrap_point);
+/******************************* SEEPROM *************************************/
+int		ahc_acquire_seeprom(struct ahc_softc *ahc,
+				    struct seeprom_descriptor *sd);
+void		ahc_release_seeprom(struct seeprom_descriptor *sd);
+#endif /* _AIC7XXX_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
new file mode 100644
index 0000000..810ec70
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -0,0 +1,1594 @@
+/*
+ * Aic7xxx register and scratch ram definitions.
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $"
+
+/*
+ * This file is processed by the aic7xxx_asm utility for use in assembling
+ * firmware for the aic7xxx family of SCSI host adapters as well as to generate
+ * a C header file for use in the kernel portion of the Aic7xxx driver.
+ *
+ * All page numbers refer to the Adaptec AIC-7770 Data Book available from
+ * Adaptec's Technical Documents Department 1-800-934-2766
+ */
+
+/*
+ * SCSI Sequence Control (p. 3-11).
+ * Each bit, when set, starts a specific SCSI sequence on the bus.
+ */
+register SCSISEQ {
+	address			0x000
+	access_mode RW
+	field	TEMODE		0x80
+	field	ENSELO		0x40
+	field	ENSELI		0x20
+	field	ENRSELI		0x10
+	field	ENAUTOATNO	0x08
+	field	ENAUTOATNI	0x04
+	field	ENAUTOATNP	0x02
+	field	SCSIRSTO	0x01
+}
+
+/*
+ * SCSI Transfer Control 0 Register (pp. 3-13).
+ * Controls the SCSI module data path.
+ */
+register SXFRCTL0 {
+	address			0x001
+	access_mode RW
+	field	DFON		0x80
+	field	DFPEXP		0x40
+	field	FAST20		0x20
+	field	CLRSTCNT	0x10
+	field	SPIOEN		0x08
+	field	SCAMEN		0x04
+	field	CLRCHN		0x02
+}
+
+/*
+ * SCSI Transfer Control 1 Register (pp. 3-14,15).
+ * Controls the SCSI module data path.
+ */
+register SXFRCTL1 {
+	address			0x002
+	access_mode RW
+	field	BITBUCKET	0x80
+	field	SWRAPEN		0x40
+	field	ENSPCHK		0x20
+	mask	STIMESEL	0x18
+	field	ENSTIMER	0x04
+	field	ACTNEGEN	0x02
+	field	STPWEN		0x01	/* Powered Termination */
+}
+
+/*
+ * SCSI Control Signal Read Register (p. 3-15).
+ * Reads the actual state of the SCSI bus pins
+ */
+register SCSISIGI {
+	address			0x003
+	access_mode RO
+	field	CDI		0x80
+	field	IOI		0x40
+	field	MSGI		0x20
+	field	ATNI		0x10
+	field	SELI		0x08
+	field	BSYI		0x04
+	field	REQI		0x02
+	field	ACKI		0x01
+/*
+ * Possible phases in SCSISIGI
+ */
+	mask	PHASE_MASK	CDI|IOI|MSGI
+	mask	P_DATAOUT	0x00
+	mask	P_DATAIN	IOI
+	mask	P_DATAOUT_DT	P_DATAOUT|MSGI
+	mask	P_DATAIN_DT	P_DATAIN|MSGI
+	mask	P_COMMAND	CDI
+	mask	P_MESGOUT	CDI|MSGI
+	mask	P_STATUS	CDI|IOI
+	mask	P_MESGIN	CDI|IOI|MSGI
+}
+
+/*
+ * SCSI Control Signal Write Register (p. 3-16).
+ * Writing to this register modifies the control signals on the bus.  Only
+ * those signals that are allowed in the current mode (Initiator/Target) are
+ * asserted.
+ */
+register SCSISIGO {
+	address			0x003
+	access_mode WO
+	field	CDO		0x80
+	field	IOO		0x40
+	field	MSGO		0x20
+	field	ATNO		0x10
+	field	SELO		0x08
+	field	BSYO		0x04
+	field	REQO		0x02
+	field	ACKO		0x01
+/*
+ * Possible phases to write into SCSISIGO
+ */
+	mask	PHASE_MASK	CDI|IOI|MSGI
+	mask	P_DATAOUT	0x00
+	mask	P_DATAIN	IOI
+	mask	P_COMMAND	CDI
+	mask	P_MESGOUT	CDI|MSGI
+	mask	P_STATUS	CDI|IOI
+	mask	P_MESGIN	CDI|IOI|MSGI
+}
+
+/* 
+ * SCSI Rate Control (p. 3-17).
+ * Contents of this register determine the Synchronous SCSI data transfer
+ * rate and the maximum synchronous Req/Ack offset.  An offset of 0 in the
+ * SOFS (3:0) bits disables synchronous data transfers.  Any offset value
+ * greater than 0 enables synchronous transfers.
+ */
+register SCSIRATE {
+	address			0x004
+	access_mode RW
+	field	WIDEXFER	0x80		/* Wide transfer control */
+	field	ENABLE_CRC	0x40		/* CRC for D-Phases */
+	field	SINGLE_EDGE	0x10		/* Disable DT Transfers */
+	mask	SXFR		0x70		/* Sync transfer rate */
+	mask	SXFR_ULTRA2	0x0f		/* Sync transfer rate */
+	mask	SOFS		0x0f		/* Sync offset */
+}
+
+/*
+ * SCSI ID (p. 3-18).
+ * Contains the ID of the board and the current target on the
+ * selected channel.
+ */
+register SCSIID	{
+	address			0x005
+	access_mode RW
+	mask	TID		0xf0		/* Target ID mask */
+	mask	TWIN_TID	0x70
+	field	TWIN_CHNLB	0x80
+	mask	OID		0x0f		/* Our ID mask */
+	/*
+	 * SCSI Maximum Offset (p. 4-61 aic7890/91 Data Book)
+	 * The aic7890/91 allow an offset of up to 127 transfers in both wide
+	 * and narrow mode.
+	 */
+	alias	SCSIOFFSET
+	mask	SOFS_ULTRA2	0x7f		/* Sync offset U2 chips */
+}
+
+/*
+ * SCSI Latched Data (p. 3-19).
+ * Read/Write latches used to transfer data on the SCSI bus during
+ * Automatic or Manual PIO mode.  SCSIDATH can be used for the
+ * upper byte of a 16bit wide asynchronous data phase transfer.
+ */
+register SCSIDATL {
+	address			0x006
+	access_mode RW
+}
+
+register SCSIDATH {
+	address			0x007
+	access_mode RW
+}
+
+/*
+ * SCSI Transfer Count (pp. 3-19,20)
+ * These registers count down the number of bytes transferred
+ * across the SCSI bus.  The counter is decremented only once
+ * the data has been safely transferred.  SDONE in SSTAT0 is
+ * set when STCNT goes to 0
+ */ 
+register STCNT {
+	address			0x008
+	size	3
+	access_mode RW
+}
+
+/* ALT_MODE registers (Ultra2 and Ultra160 chips) */
+register SXFRCTL2 {
+	address			0x013
+	access_mode RW
+	field	AUTORSTDIS	0x10
+	field	CMDDMAEN	0x08
+	mask	ASYNC_SETUP	0x07
+}
+
+/* ALT_MODE register on Ultra160 chips */
+register OPTIONMODE {
+	address			0x008
+	access_mode RW
+	field	AUTORATEEN		0x80
+	field	AUTOACKEN		0x40
+	field	ATNMGMNTEN		0x20
+	field	BUSFREEREV		0x10
+	field	EXPPHASEDIS		0x08
+	field	SCSIDATL_IMGEN		0x04
+	field	AUTO_MSGOUT_DE		0x02
+	field	DIS_MSGIN_DUALEDGE	0x01
+	mask	OPTIONMODE_DEFAULTS	AUTO_MSGOUT_DE|DIS_MSGIN_DUALEDGE
+}
+
+/* ALT_MODE register on Ultra160 chips */
+register TARGCRCCNT {
+	address			0x00a
+	size	2
+	access_mode RW
+}
+
+/*
+ * Clear SCSI Interrupt 0 (p. 3-20)
+ * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0.
+ */
+register CLRSINT0 {
+	address			0x00b
+	access_mode WO
+	field	CLRSELDO	0x40
+	field	CLRSELDI	0x20
+	field	CLRSELINGO	0x10
+	field	CLRSWRAP	0x08
+	field	CLRIOERR	0x08	/* Ultra2 Only */
+	field	CLRSPIORDY	0x02
+}
+
+/*
+ * SCSI Status 0 (p. 3-21)
+ * Contains one set of SCSI Interrupt codes
+ * These are most likely of interest to the sequencer
+ */
+register SSTAT0	{
+	address			0x00b
+	access_mode RO
+	field	TARGET		0x80	/* Board acting as target */
+	field	SELDO		0x40	/* Selection Done */
+	field	SELDI		0x20	/* Board has been selected */
+	field	SELINGO		0x10	/* Selection In Progress */
+	field	SWRAP		0x08	/* 24bit counter wrap */
+	field	IOERR		0x08	/* LVD Transceiver mode changed */
+	field	SDONE		0x04	/* STCNT = 0x000000 */
+	field	SPIORDY		0x02	/* SCSI PIO Ready */
+	field	DMADONE		0x01	/* DMA transfer completed */
+}
+
+/*
+ * Clear SCSI Interrupt 1 (p. 3-23)
+ * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1.
+ */
+register CLRSINT1 {
+	address			0x00c
+	access_mode WO
+	field	CLRSELTIMEO	0x80
+	field	CLRATNO		0x40
+	field	CLRSCSIRSTI	0x20
+	field	CLRBUSFREE	0x08
+	field	CLRSCSIPERR	0x04
+	field	CLRPHASECHG	0x02
+	field	CLRREQINIT	0x01
+}
+
+/*
+ * SCSI Status 1 (p. 3-24)
+ */
+register SSTAT1	{
+	address			0x00c
+	access_mode RO
+	field	SELTO		0x80
+	field	ATNTARG 	0x40
+	field	SCSIRSTI	0x20
+	field	PHASEMIS	0x10
+	field	BUSFREE		0x08
+	field	SCSIPERR	0x04
+	field	PHASECHG	0x02
+	field	REQINIT		0x01
+}
+
+/*
+ * SCSI Status 2 (pp. 3-25,26)
+ */
+register SSTAT2 {
+	address			0x00d
+	access_mode RO
+	field	OVERRUN		0x80
+	field	SHVALID		0x40	/* Shadow Layer non-zero */
+	field	EXP_ACTIVE	0x10	/* SCSI Expander Active */
+	field	CRCVALERR	0x08	/* CRC doesn't match (U3 only) */
+	field	CRCENDERR	0x04	/* No terminal CRC packet (U3 only) */
+	field	CRCREQERR	0x02	/* Illegal CRC packet req (U3 only) */
+	field	DUAL_EDGE_ERR	0x01	/* Incorrect data phase (U3 only) */
+	mask	SFCNT		0x1f
+}
+
+/*
+ * SCSI Status 3 (p. 3-26)
+ */
+register SSTAT3 {
+	address			0x00e
+	access_mode RO
+	mask	SCSICNT		0xf0
+	mask	OFFCNT		0x0f
+	mask	U2OFFCNT	0x7f
+}
+
+/*
+ * SCSI ID for the aic7890/91 chips
+ */
+register SCSIID_ULTRA2 {
+	address			0x00f
+	access_mode RW
+	mask	TID		0xf0		/* Target ID mask */
+	mask	OID		0x0f		/* Our ID mask */
+}
+
+/*
+ * SCSI Interrupt Mode 0 (p. 3-28)
+ * Setting any bit will enable the corresponding condition
+ * in SSTAT0 to interrupt via the IRQ pin.
+ */
+register SIMODE0 {
+	address			0x010
+	access_mode RW
+	field	ENSELDO		0x40
+	field	ENSELDI		0x20
+	field	ENSELINGO	0x10
+	field	ENSWRAP		0x08
+	field	ENIOERR		0x08	/* LVD Transceiver mode changes */
+	field	ENSDONE		0x04
+	field	ENSPIORDY	0x02
+	field	ENDMADONE	0x01
+}
+
+/*
+ * SCSI Interrupt Mode 1 (pp. 3-28,29)
+ * Setting any bit will enable the corresponding condition
+ * in SSTAT1 to interrupt via the IRQ pin.
+ */
+register SIMODE1 {
+	address			0x011
+	access_mode RW
+	field	ENSELTIMO	0x80
+	field	ENATNTARG	0x40
+	field	ENSCSIRST	0x20
+	field	ENPHASEMIS	0x10
+	field	ENBUSFREE	0x08
+	field	ENSCSIPERR	0x04
+	field	ENPHASECHG	0x02
+	field	ENREQINIT	0x01
+}
+
+/*
+ * SCSI Data Bus (Low and High) (p. 3-29)
+ * These registers read data on the SCSI Data bus directly.
+ */
+register SCSIBUSL {
+	address			0x012
+	access_mode RW
+}
+
+register SCSIBUSH {
+	address			0x013
+	access_mode RW
+}
+
+/*
+ * SCSI/Host Address (p. 3-30)
+ * These registers hold the host address for the byte about to be
+ * transferred on the SCSI bus.  They are counted up in the same
+ * manner as STCNT is counted down.  SHADDR should always be used
+ * to determine the address of the last byte transferred since HADDR
+ * can be skewed by write ahead.
+ */
+register SHADDR {
+	address			0x014
+	size	4
+	access_mode RO
+}
+
+/*
+ * Selection Timeout Timer (p. 3-30)
+ */
+register SELTIMER {
+	address			0x018
+	access_mode RW
+	field	STAGE6		0x20
+	field	STAGE5		0x10
+	field	STAGE4		0x08
+	field	STAGE3		0x04
+	field	STAGE2		0x02
+	field	STAGE1		0x01
+	alias	TARGIDIN
+}
+
+/*
+ * Selection/Reselection ID (p. 3-31)
+ * Upper four bits are the device id.  The ONEBIT is set when the re/selecting
+ * device did not set its own ID.
+ */
+register SELID {
+	address			0x019
+	access_mode RW
+	mask	SELID_MASK	0xf0
+	field	ONEBIT		0x08
+}
+
+register SCAMCTL {
+	address			0x01a
+	access_mode RW
+	field	ENSCAMSELO	0x80
+	field	CLRSCAMSELID	0x40
+	field	ALTSTIM		0x20
+	field	DFLTTID		0x10
+	mask	SCAMLVL		0x03
+}
+
+/*
+ * Target Mode Selecting in ID bitmask (aic7890/91/96/97)
+ */
+register TARGID {
+	address			0x01b
+	size			2
+	access_mode RW
+}
+
+/*
+ * Serial Port I/O Capability register (p. 4-95 aic7860 Data Book)
+ * Indicates if external logic has been attached to the chip to
+ * perform the tasks of accessing a serial eeprom, testing termination
+ * strength, and performing cable detection.  On the aic7860, most of
+ * these features are handled on chip, but on the aic7855 an attached
+ * aic3800 does the grunt work.
+ */
+register SPIOCAP {
+	address			0x01b
+	access_mode RW
+	field	SOFT1		0x80
+	field	SOFT0		0x40
+	field	SOFTCMDEN	0x20	
+	field	EXT_BRDCTL	0x10	/* External Board control */
+	field	SEEPROM		0x08	/* External serial eeprom logic */
+	field	EEPROM		0x04	/* Writable external BIOS ROM */
+	field	ROM		0x02	/* Logic for accessing external ROM */
+	field	SSPIOCPS	0x01	/* Termination and cable detection */
+}
+
+register BRDCTL	{
+	address			0x01d
+	field	BRDDAT7		0x80
+	field	BRDDAT6		0x40
+	field	BRDDAT5		0x20
+	field	BRDSTB		0x10
+	field	BRDCS		0x08
+	field	BRDRW		0x04
+	field	BRDCTL1		0x02
+	field	BRDCTL0		0x01
+	/* 7890 Definitions */
+	field	BRDDAT4		0x10
+	field	BRDDAT3		0x08
+	field	BRDDAT2		0x04
+	field	BRDRW_ULTRA2	0x02
+	field	BRDSTB_ULTRA2	0x01
+}
+
+/*
+ * Serial EEPROM Control (p. 4-92 in 7870 Databook)
+ * Controls the reading and writing of an external serial 1-bit
+ * EEPROM Device.  In order to access the serial EEPROM, you must
+ * first set the SEEMS bit that generates a request to the memory
+ * port for access to the serial EEPROM device.  When the memory
+ * port is not busy servicing another request, it reconfigures
+ * to allow access to the serial EEPROM.  When this happens, SEERDY
+ * gets set high to verify that the memory port access has been
+ * granted.  
+ *
+ * After successful arbitration for the memory port, the SEECS bit of 
+ * the SEECTL register is connected to the chip select.  The SEECK, 
+ * SEEDO, and SEEDI are connected to the clock, data out, and data in 
+ * lines respectively.  The SEERDY bit of SEECTL is useful in that it 
+ * gives us an 800 nsec timer.  After a write to the SEECTL register, 
+ * the SEERDY goes high 800 nsec later.  The one exception to this is 
+ * when we first request access to the memory port.  The SEERDY goes 
+ * high to signify that access has been granted and, for this case, has 
+ * no implied timing.
+ *
+ * See 93cx6.c for detailed information on the protocol necessary to 
+ * read the serial EEPROM.
+ */
+register SEECTL {
+	address			0x01e
+	field	EXTARBACK	0x80
+	field	EXTARBREQ	0x40
+	field	SEEMS		0x20
+	field	SEERDY		0x10
+	field	SEECS		0x08
+	field	SEECK		0x04
+	field	SEEDO		0x02
+	field	SEEDI		0x01
+}
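
The arbitration handshake described above boils down to: assert SEEMS, wait for SEERDY, bit-bang the SEECS/SEECK/SEEDO/SEEDI lines (93cx6.c has the actual protocol), then clear SEEMS to give the memory port back. A rough sketch of the acquire step, assuming the driver's ahc_inb()/ahc_outb() register accessors and ahc_delay() microsecond delay helper; the poll limit is arbitrary:

static int
example_acquire_seeprom_port(struct ahc_softc *ahc)
{
	int wait;

	ahc_outb(ahc, SEECTL, SEEMS);		/* request the memory port */
	wait = 1000;				/* arbitrary poll limit */
	while (--wait > 0 && (ahc_inb(ahc, SEECTL) & SEERDY) == 0)
		ahc_delay(100);
	if ((ahc_inb(ahc, SEECTL) & SEERDY) == 0) {
		ahc_outb(ahc, SEECTL, 0);	/* give up; release the port */
		return (0);
	}
	return (1);				/* SEECS/SEECK/SEEDO now usable */
}
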
+/*
+ * SCSI Block Control (p. 3-32)
+ * Controls Bus type and channel selection.  In a twin channel configuration
+ * addresses 0x00-0x1e are gated to the appropriate channel based on this
+ * register.  SELWIDE allows for the coexistence of 8bit and 16bit devices
+ * on a wide bus.
+ */
+register SBLKCTL {
+	address			0x01f
+	access_mode RW
+	field	DIAGLEDEN	0x80	/* Aic78X0 only */
+	field	DIAGLEDON	0x40	/* Aic78X0 only */
+	field	AUTOFLUSHDIS	0x20
+	field	SELBUSB		0x08
+	field	ENAB40		0x08	/* LVD transceiver active */
+	field	ENAB20		0x04	/* SE/HVD transceiver active */
+	field	SELWIDE		0x02
+	field	XCVR		0x01	/* External transceiver active */
+}
+
+/*
+ * Sequencer Control (p. 3-33)
+ * Error detection mode and speed configuration
+ */
+register SEQCTL {
+	address			0x060
+	access_mode RW
+	field	PERRORDIS	0x80
+	field	PAUSEDIS	0x40
+	field	FAILDIS		0x20
+	field	FASTMODE	0x10
+	field	BRKADRINTEN	0x08
+	field	STEP		0x04
+	field	SEQRESET	0x02
+	field	LOADRAM		0x01
+}
+
+/*
+ * Sequencer RAM Data (p. 3-34)
+ * Single byte window into the Scratch Ram area starting at the address
+ * specified by SEQADDR0 and SEQADDR1.  To write a full word, simply write
+ * four bytes in succession.  The SEQADDRs will increment after the most
+ * significant byte is written
+ */
+register SEQRAM {
+	address			0x061
+	access_mode RW
+}
+
+/*
+ * Sequencer Address Registers (p. 3-35)
+ * Only the first bit of SEQADDR1 holds addressing information
+ */
+register SEQADDR0 {
+	address			0x062
+	access_mode RW
+}
+
+register SEQADDR1 {
+	address			0x063
+	access_mode RW
+	mask	SEQADDR1_MASK	0x01
+}
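
Taken together, SEQCTL's LOADRAM bit, the SEQADDR registers and the auto-incrementing SEQRAM window describe how sequencer code is downloaded: point the address registers at zero, stream the instruction bytes through SEQRAM, then drop LOADRAM. The sketch below illustrates that flow with the driver's ahc_outb() accessor; it is not the driver's own download routine, which also handles conditional patching of the program.

static void
example_load_seq_program(struct ahc_softc *ahc, const uint8_t *image, u_int len)
{
	u_int i;

	/* Enter program-load mode and rewind the sequencer address. */
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	/* SEQADDR advances automatically as bytes are written to SEQRAM. */
	for (i = 0; i < len; i++)
		ahc_outb(ahc, SEQRAM, image[i]);

	/* Leave load mode; the program can then be started. */
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
}
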
+
+/*
+ * Accumulator
+ * We cheat by passing arguments in the Accumulator up to the kernel driver
+ */
+register ACCUM {
+	address			0x064
+	access_mode RW
+	accumulator
+}
+
+register SINDEX	{
+	address			0x065
+	access_mode RW
+	sindex
+}
+
+register DINDEX {
+	address			0x066
+	access_mode RW
+}
+
+register ALLONES {
+	address			0x069
+	access_mode RO
+	allones
+}
+
+register ALLZEROS {
+	address			0x06a
+	access_mode RO
+	allzeros
+}
+
+register NONE {
+	address			0x06a
+	access_mode WO
+	none
+}
+
+register FLAGS {
+	address			0x06b
+	access_mode RO
+	field	ZERO		0x02
+	field	CARRY		0x01
+}
+
+register SINDIR	{
+	address			0x06c
+	access_mode RO
+}
+
+register DINDIR	 {
+	address			0x06d
+	access_mode WO
+}
+
+register FUNCTION1 {
+	address			0x06e
+	access_mode RW
+}
+
+register STACK {
+	address			0x06f
+	access_mode RO
+}
+
+const	STACK_SIZE	4
+
+/*
+ * Board Control (p. 3-43)
+ */
+register BCTL {
+	address			0x084
+	access_mode RW
+	field	ACE		0x08
+	field	ENABLE		0x01
+}
+
+/*
+ * On the aic78X0 chips, Board Control is replaced by the DSCommand
+ * register (p. 4-64)
+ */
+register DSCOMMAND0 {
+	address			0x084
+	access_mode RW
+	field	CACHETHEN	0x80	/* Cache Threshold enable */
+	field	DPARCKEN	0x40	/* Data Parity Check Enable */
+	field	MPARCKEN	0x20	/* Memory Parity Check Enable */
+	field	EXTREQLCK	0x10	/* External Request Lock */
+	/* aic7890/91/96/97 only */
+	field	INTSCBRAMSEL	0x08	/* Internal SCB RAM Select */
+	field	RAMPS		0x04	/* External SCB RAM Present */
+	field	USCBSIZE32	0x02	/* Use 32byte SCB Page Size */
+	field	CIOPARCKEN	0x01	/* Internal bus parity error enable */
+}
+
+register DSCOMMAND1 {
+	address			0x085
+	access_mode RW
+	mask	DSLATT		0xfc	/* PCI latency timer (non-ultra2) */
+	field	HADDLDSEL1	0x02	/* Host Address Load Select Bits */
+	field	HADDLDSEL0	0x01
+}
+
+/*
+ * Bus On/Off Time (p. 3-44) aic7770 only
+ */
+register BUSTIME {
+	address			0x085
+	access_mode RW
+	mask	BOFF		0xf0
+	mask	BON		0x0f
+}
+
+/*
+ * Bus Speed (p. 3-45) aic7770 only
+ */
+register BUSSPD {
+	address			0x086
+	access_mode RW
+	mask	DFTHRSH		0xc0
+	mask	STBOFF		0x38
+	mask	STBON		0x07
+	mask	DFTHRSH_100	0xc0
+	mask	DFTHRSH_75	0x80
+}
+
+/* aic7850/55/60/70/80/95 only */
+register DSPCISTATUS {
+	address			0x086
+	mask	DFTHRSH_100	0xc0
+}
+
+/* aic7890/91/96/97 only */
+register HS_MAILBOX {
+	address			0x086
+	mask	HOST_MAILBOX	0xF0
+	mask	SEQ_MAILBOX	0x0F
+	mask	HOST_TQINPOS	0x80	/* Boundary at either 0 or 128 */
+}
+
+const	HOST_MAILBOX_SHIFT	4
+const	SEQ_MAILBOX_SHIFT	0
+
+/*
+ * Host Control (p. 3-47) R/W
+ * Overall host control of the device.
+ */
+register HCNTRL {
+	address			0x087
+	access_mode RW
+	field	POWRDN		0x40
+	field	SWINT		0x10
+	field	IRQMS		0x08
+	field	PAUSE		0x04
+	field	INTEN		0x02
+	field	CHIPRST		0x01
+	field	CHIPRSTACK	0x01
+}
+
+/*
+ * Host Address (p. 3-48)
+ * This register contains the address of the byte about
+ * to be transferred across the host bus.
+ */
+register HADDR {
+	address			0x088
+	size	4
+	access_mode RW
+}
+
+register HCNT {
+	address			0x08c
+	size	3
+	access_mode RW
+}
+
+/*
+ * SCB Pointer (p. 3-49)
+ * Gate one of the SCBs into the SCBARRAY window.
+ */
+register SCBPTR {
+	address			0x090
+	access_mode RW
+}
+
+/*
+ * Interrupt Status (p. 3-50)
+ * Status for system interrupts
+ */
+register INTSTAT {
+	address			0x091
+	access_mode RW
+	field	BRKADRINT 0x08
+	field	SCSIINT	  0x04
+	field	CMDCMPLT  0x02
+	field	SEQINT    0x01
+	mask	BAD_PHASE	SEQINT		/* unknown scsi bus phase */
+	mask	SEND_REJECT	0x10|SEQINT	/* sending a message reject */
+	mask	PROTO_VIOLATION	0x20|SEQINT	/* SCSI protocol violation */ 
+	mask	NO_MATCH	0x30|SEQINT	/* no cmd match for reconnect */
+	mask	IGN_WIDE_RES	0x40|SEQINT	/* Complex IGN Wide Res Msg */
+	mask	PDATA_REINIT	0x50|SEQINT	/*
+						 * Returned to data phase
+						 * that requires data
+						 * transfer pointers to be
+						 * recalculated from the
+						 * transfer residual.
+						 */
+	mask	HOST_MSG_LOOP	0x60|SEQINT	/*
+						 * The bus is ready for the
+						 * host to perform another
+						 * message transaction.  This
+						 * mechanism is used for things
+						 * like sync/wide negotiation
+						 * that require a kernel based
+						 * message state engine.
+						 */
+	mask	BAD_STATUS	0x70|SEQINT	/* Bad status from target */
+	mask	PERR_DETECTED	0x80|SEQINT	/*
+						 * Either the phase_lock
+						 * or inb_next routine has
+						 * noticed a parity error.
+						 */
+	mask	DATA_OVERRUN	0x90|SEQINT	/*
+						 * Target attempted to write
+						 * beyond the bounds of its
+						 * command.
+						 */
+	mask	MKMSG_FAILED	0xa0|SEQINT	/*
+						 * Target completed command
+						 * without honoring our ATN
+						 * request to issue a message. 
+						 */
+	mask	MISSED_BUSFREE	0xb0|SEQINT	/*
+						 * The sequencer never saw
+						 * the bus go free after
+						 * either a command complete
+						 * or disconnect message.
+						 */
+	mask	SCB_MISMATCH	0xc0|SEQINT	/*
+						 * Downloaded SCB's tag does
+						 * not match the entry we
+						 * intended to download.
+						 */
+	mask	NO_FREE_SCB	0xd0|SEQINT	/*
+						 * get_free_or_disc_scb failed.
+						 */
+	mask	OUT_OF_RANGE	0xe0|SEQINT
+
+	mask	SEQINT_MASK	0xf0|SEQINT	/* SEQINT Status Codes */
+	mask	INT_PEND  (BRKADRINT|SEQINT|SCSIINT|CMDCMPLT)
+}
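
INTSTAT therefore encodes two things at once: which interrupt sources are pending (INT_PEND) and, when SEQINT is set, a status code in the upper nibble (SEQINT_MASK). A sketch of how a host interrupt handler might dispatch on it, using the handler prototypes from aic7xxx.h; pausing the chip and shared-IRQ details are left out:

static void
example_service_intstat(struct ahc_softc *ahc)
{
	u_int intstat;

	intstat = ahc_inb(ahc, INTSTAT);
	if ((intstat & INT_PEND) == 0)
		return;					/* not our interrupt */

	if ((intstat & BRKADRINT) != 0)
		ahc_handle_brkadrint(ahc);
	if ((intstat & SEQINT) != 0)
		ahc_handle_seqint(ahc, intstat);	/* code = intstat & SEQINT_MASK */
	if ((intstat & SCSIINT) != 0)
		ahc_handle_scsiint(ahc, intstat);
	if ((intstat & CMDCMPLT) != 0)
		ahc_run_qoutfifo(ahc);
}
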
+
+/*
+ * Hard Error (p. 3-53)
+ * Reporting of catastrophic errors.  You usually cannot recover from
+ * these without a full board reset.
+ */
+register ERROR {
+	address			0x092
+	access_mode RO
+	field	CIOPARERR	0x80	/* Ultra2 only */
+	field	PCIERRSTAT	0x40	/* PCI only */
+	field	MPARERR		0x20	/* PCI only */
+	field	DPARERR		0x10	/* PCI only */
+	field	SQPARERR	0x08
+	field	ILLOPCODE	0x04
+	field	ILLSADDR	0x02
+	field	ILLHADDR	0x01
+}
+
+/*
+ * Clear Interrupt Status (p. 3-52)
+ */
+register CLRINT {
+	address			0x092
+	access_mode WO
+	field	CLRPARERR	0x10	/* PCI only */
+	field	CLRBRKADRINT	0x08
+	field	CLRSCSIINT      0x04
+	field	CLRCMDINT 	0x02
+	field	CLRSEQINT 	0x01
+}
+
+register DFCNTRL {
+	address			0x093
+	access_mode RW
+	field	PRELOADEN	0x80	/* aic7890 only */
+	field	WIDEODD		0x40
+	field	SCSIEN		0x20
+	field	SDMAEN		0x10
+	field	SDMAENACK	0x10
+	field	HDMAEN		0x08
+	field	HDMAENACK	0x08
+	field	DIRECTION	0x04
+	field	FIFOFLUSH	0x02
+	field	FIFORESET	0x01
+}
+
+register DFSTATUS {
+	address			0x094
+	access_mode RO
+	field	PRELOAD_AVAIL	0x80
+	field	DFCACHETH	0x40
+	field	FIFOQWDEMP	0x20
+	field	MREQPEND	0x10
+	field	HDONE		0x08
+	field	DFTHRESH	0x04
+	field	FIFOFULL	0x02
+	field	FIFOEMP		0x01
+}
+
+register DFWADDR {
+	address			0x95
+	access_mode RW
+}
+
+register DFRADDR {
+	address			0x97
+	access_mode RW
+}
+
+register DFDAT {
+	address			0x099
+	access_mode RW
+}
+
+/*
+ * SCB Auto Increment (p. 3-59)
+ * Byte offset into the SCB Array and an optional bit to allow auto
+ * incrementing of the address during download and upload operations
+ */
+register SCBCNT {
+	address			0x09a
+	access_mode RW
+	field	SCBAUTO		0x80
+	mask	SCBCNT_MASK	0x1f
+}
+
+/*
+ * Queue In FIFO (p. 3-60)
+ * Input queue for queued SCBs (commands that the sequencer has yet to start)
+ */
+register QINFIFO {
+	address			0x09b
+	access_mode RW
+}
+
+/*
+ * Queue In Count (p. 3-60)
+ * Number of queued SCBs
+ */
+register QINCNT	{
+	address			0x09c
+	access_mode RO
+}
+
+/*
+ * Queue Out FIFO (p. 3-61)
+ * Queue of SCBs that have completed and await the host
+ */
+register QOUTFIFO {
+	address			0x09d
+	access_mode WO
+}
+
+register CRCCONTROL1 {
+	address			0x09d
+	access_mode RW
+	field	CRCONSEEN		0x80
+	field	CRCVALCHKEN		0x40
+	field	CRCENDCHKEN		0x20
+	field	CRCREQCHKEN		0x10
+	field	TARGCRCENDEN		0x08
+	field	TARGCRCCNTEN		0x04
+}
+
+
+/*
+ * Queue Out Count (p. 3-61)
+ * Number of queued SCBs in the Out FIFO
+ */
+register QOUTCNT {
+	address			0x09e
+	access_mode RO
+}
+
+register SCSIPHASE {
+	address			0x09e
+	access_mode RO
+	field	STATUS_PHASE	0x20
+	field	COMMAND_PHASE	0x10
+	field	MSG_IN_PHASE	0x08
+	field	MSG_OUT_PHASE	0x04
+	field	DATA_IN_PHASE	0x02
+	field	DATA_OUT_PHASE	0x01
+	mask	DATA_PHASE_MASK	0x03
+}
+
+/*
+ * Special Function
+ */
+register SFUNCT {
+	address			0x09f
+	access_mode RW
+	field	ALT_MODE	0x80
+}
+
+/*
+ * SCB Definition (p. 5-4)
+ */
+scb {
+	address		0x0a0
+	size		64
+
+	SCB_CDB_PTR {
+		size	4
+		alias	SCB_RESIDUAL_DATACNT
+		alias	SCB_CDB_STORE
+	}
+	SCB_RESIDUAL_SGPTR {
+		size	4
+	}
+	SCB_SCSI_STATUS {
+		size	1
+	}
+	SCB_TARGET_PHASES {
+		size	1
+	}
+	SCB_TARGET_DATA_DIR {
+		size	1
+	}
+	SCB_TARGET_ITAG {
+		size	1
+	}
+	SCB_DATAPTR {
+		size	4
+	}
+	SCB_DATACNT {
+		/*
+		 * The last byte is really the high address bits for
+		 * the data address.
+		 */
+		size	4
+		field	SG_LAST_SEG		0x80	/* In the fourth byte */
+		mask	SG_HIGH_ADDR_BITS	0x7F	/* In the fourth byte */
+	}
+	SCB_SGPTR {
+		size	4
+		field	SG_RESID_VALID	0x04	/* In the first byte */
+		field	SG_FULL_RESID	0x02	/* In the first byte */
+		field	SG_LIST_NULL	0x01	/* In the first byte */
+	}
+	SCB_CONTROL {
+		size	1
+		field	TARGET_SCB			0x80
+		field	STATUS_RCVD			0x80
+		field	DISCENB				0x40
+		field	TAG_ENB				0x20
+		field	MK_MESSAGE			0x10
+		field	ULTRAENB			0x08
+		field	DISCONNECTED			0x04
+		mask	SCB_TAG_TYPE			0x03
+	}
+	SCB_SCSIID {
+		size	1
+		field	TWIN_CHNLB			0x80
+		mask	TWIN_TID			0x70
+		mask	TID				0xf0
+		mask	OID				0x0f
+	}
+	SCB_LUN {
+		field	SCB_XFERLEN_ODD			0x80
+		mask	LID				0x3f
+		size	1
+	}
+	SCB_TAG {
+		size	1
+	}
+	SCB_CDB_LEN {
+		size	1
+	}
+	SCB_SCSIRATE {
+		size	1
+	}
+	SCB_SCSIOFFSET {
+		size	1
+	}
+	SCB_NEXT {
+		size	1
+	}
+	SCB_64_SPARE {
+		size	16
+	}
+	SCB_64_BTT {
+		size	16
+	}
+}
+
+const	SCB_UPLOAD_SIZE		32
+const	SCB_DOWNLOAD_SIZE	32
+const	SCB_DOWNLOAD_SIZE_64	48
+
+const	SG_SIZEOF	0x08		/* sizeof(struct ahc_dma) */
+
+/* --------------------- AHA-2840-only definitions -------------------- */
+
+register SEECTL_2840 {
+	address			0x0c0
+	access_mode RW
+	field	CS_2840		0x04
+	field	CK_2840		0x02
+	field	DO_2840		0x01
+}
+
+register STATUS_2840 {
+	address			0x0c1
+	access_mode RW
+	field	EEPROM_TF	0x80
+	mask	BIOS_SEL	0x60
+	mask	ADSEL		0x1e
+	field	DI_2840		0x01
+}
+
+/* --------------------- AIC-7870-only definitions -------------------- */
+
+register CCHADDR {
+	address			0x0E0
+	size 8
+}
+
+register CCHCNT {
+	address			0x0E8
+}
+
+register CCSGRAM {
+	address			0x0E9
+}
+
+register CCSGADDR {
+	address			0x0EA
+}
+
+register CCSGCTL {
+	address			0x0EB
+	field	CCSGDONE	0x80
+	field	CCSGEN		0x08
+	field	SG_FETCH_NEEDED 0x02	/* Bit used for software state */
+	field	CCSGRESET	0x01
+}
+
+register CCSCBCNT {
+	address			0xEF
+}
+
+register CCSCBCTL {
+	address			0x0EE
+	field	CCSCBDONE	0x80
+	field	ARRDONE		0x40	/* SCB Array prefetch done */
+	field	CCARREN		0x10
+	field	CCSCBEN		0x08
+	field	CCSCBDIR	0x04
+	field	CCSCBRESET	0x01
+}
+
+register CCSCBADDR {
+	address			0x0ED
+}
+
+register CCSCBRAM {
+	address			0xEC
+}
+
+/*
+ * SCB bank address (7895/7896/97 only)
+ */
+register SCBBADDR {
+	address			0x0F0
+	access_mode RW
+}
+
+register CCSCBPTR {
+	address			0x0F1
+}
+
+register HNSCB_QOFF {
+	address			0x0F4
+}
+
+register SNSCB_QOFF {
+	address			0x0F6
+}
+
+register SDSCB_QOFF {
+	address			0x0F8
+}
+
+register QOFF_CTLSTA {
+	address			0x0FA
+	field	SCB_AVAIL	0x40
+	field	SNSCB_ROLLOVER	0x20
+	field	SDSCB_ROLLOVER	0x10
+	mask	SCB_QSIZE	0x07
+	mask	SCB_QSIZE_256	0x06
+}
+
+register DFF_THRSH {
+	address			0x0FB
+	mask	WR_DFTHRSH	0x70
+	mask	RD_DFTHRSH	0x07
+	mask	RD_DFTHRSH_MIN	0x00
+	mask	RD_DFTHRSH_25	0x01
+	mask	RD_DFTHRSH_50	0x02
+	mask	RD_DFTHRSH_63	0x03
+	mask	RD_DFTHRSH_75	0x04
+	mask	RD_DFTHRSH_85	0x05
+	mask	RD_DFTHRSH_90	0x06
+	mask	RD_DFTHRSH_MAX	0x07
+	mask	WR_DFTHRSH_MIN	0x00
+	mask	WR_DFTHRSH_25	0x10
+	mask	WR_DFTHRSH_50	0x20
+	mask	WR_DFTHRSH_63	0x30
+	mask	WR_DFTHRSH_75	0x40
+	mask	WR_DFTHRSH_85	0x50
+	mask	WR_DFTHRSH_90	0x60
+	mask	WR_DFTHRSH_MAX	0x70
+}
+
+register SG_CACHE_PRE {
+	access_mode WO
+	address			0x0fc
+	mask	SG_ADDR_MASK	0xf8
+	field	LAST_SEG	0x02
+	field	LAST_SEG_DONE	0x01
+}
+
+register SG_CACHE_SHADOW {
+	access_mode RO
+	address			0x0fc
+	mask	SG_ADDR_MASK	0xf8
+	field	LAST_SEG	0x02
+	field	LAST_SEG_DONE	0x01
+}
+/* ---------------------- Scratch RAM Offsets ------------------------- */
+/*
+ * These offsets are either to values that are initialized by the board's
+ * BIOS or are specified by the sequencer code.
+ *
+ * The host adapter card (at least the BIOS) uses 20-2f for SCSI
+ * device information, 32-33 and 5a-5f as well. As it turns out, the
+ * BIOS trashes 20-2f, writing the synchronous negotiation results
+ * on top of the BIOS values, so we re-use those for our per-target
+ * scratchspace (actually a value that can be copied directly into
+ * SCSIRATE).  The kernel driver will enable synchronous negotiation
+ * for all targets that have a value other than 0 in the lower four
+ * bits of the target scratch space.  This should work regardless of
+ * whether the BIOS has been installed.
+ */
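
The rule in the last two sentences can be expressed directly: a non-zero offset nibble in a target's scratch byte means the BIOS negotiated a synchronous rate there, so the kernel should negotiate sync for that target as well. A sketch, assuming the driver's ahc_inb() accessor and the generated TARG_SCSIRATE scratch offset; 0x0f stands for the offset bits of a SCSIRATE-format byte:

static int
example_bios_negotiated_sync(struct ahc_softc *ahc, u_int target)
{
	uint8_t targ_scratch;

	targ_scratch = ahc_inb(ahc, TARG_SCSIRATE + target);
	return ((targ_scratch & 0x0f) != 0);
}
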
+
+scratch_ram {
+	address		0x020
+	size		58
+
+	/*
+	 * 1 byte per target starting at this address for configuration values
+	 */
+	BUSY_TARGETS {
+		alias		TARG_SCSIRATE
+		size		16
+	}
+	/*
+	 * Bit vector of targets that have ULTRA enabled as set by
+	 * the BIOS.  The Sequencer relies on a per-SCB field to
+	 * control whether to enable Ultra transfers or not.  During
+	 * initialization, we read this field and reuse it for 2
+	 * entries in the busy target table.
+	 */
+	ULTRA_ENB {
+		alias		CMDSIZE_TABLE
+		size		2
+	}
+	/*
+	 * Bit vector of targets that have disconnection disabled as set by
+	 * the BIOS.  The Sequencer relies on a per-SCB field to control the
+	 * disconnect privilege.  During initialization, we read this field
+	 * and reuse it for 2 entries in the busy target table.
+	 */
+	DISC_DSB {
+		size		2
+	}
+	CMDSIZE_TABLE_TAIL {
+		size		4
+	}
+	/*
+	 * Partial transfer past cacheline end to be
+	 * transferred using an extra S/G.
+	 */
+	MWI_RESIDUAL {
+		size		1
+		alias	TARG_IMMEDIATE_SCB
+	}
+	/*
+	 * SCBID of the next SCB to be started by the controller.
+	 */
+	NEXT_QUEUED_SCB {
+		size		1
+	}
+	/*
+	 * Single byte buffer used to designate the type of message
+	 * to send to a target.
+	 */
+	MSG_OUT {
+		size		1
+	}
+	/* Parameters for DMA Logic */
+	DMAPARAMS {
+		size		1
+		field	PRELOADEN	0x80
+		field	WIDEODD		0x40
+		field	SCSIEN		0x20
+		field	SDMAEN		0x10
+		field	SDMAENACK	0x10
+		field	HDMAEN		0x08
+		field	HDMAENACK	0x08
+		field	DIRECTION	0x04	/* Set indicates PCI->SCSI */
+		field	FIFOFLUSH	0x02
+		field	FIFORESET	0x01
+	}
+	SEQ_FLAGS {
+		size		1
+		field	NOT_IDENTIFIED		0x80
+		field	NO_CDB_SENT		0x40
+		field	TARGET_CMD_IS_TAGGED	0x40
+		field	DPHASE			0x20
+		/* Target flags */
+		field	TARG_CMD_PENDING	0x10
+		field	CMDPHASE_PENDING	0x08
+		field	DPHASE_PENDING		0x04
+		field	SPHASE_PENDING		0x02
+		field	NO_DISCONNECT		0x01
+	}
+	/*
+	 * Temporary storage for the
+	 * target/channel/lun of a
+	 * reconnecting target
+	 */
+	SAVED_SCSIID {
+		size		1
+	}
+	SAVED_LUN {
+		size		1
+	}
+	/*
+	 * The last bus phase as seen by the sequencer. 
+	 */
+	LASTPHASE {
+		size		1
+		field	CDI		0x80
+		field	IOI		0x40
+		field	MSGI		0x20
+		mask	PHASE_MASK	CDI|IOI|MSGI
+		mask	P_DATAOUT	0x00
+		mask	P_DATAIN	IOI
+		mask	P_COMMAND	CDI
+		mask	P_MESGOUT	CDI|MSGI
+		mask	P_STATUS	CDI|IOI
+		mask	P_MESGIN	CDI|IOI|MSGI
+		mask	P_BUSFREE	0x01
+	}
+	/*
+	 * head of list of SCBs awaiting
+	 * selection
+	 */
+	WAITING_SCBH {
+		size		1
+	}
+	/*
+	 * head of list of SCBs that are
+	 * disconnected.  Used for SCB
+	 * paging.
+	 */
+	DISCONNECTED_SCBH {
+		size		1
+	}
+	/*
+	 * head of list of SCBs that are
+	 * not in use.  Used for SCB paging.
+	 */
+	FREE_SCBH {
+		size		1
+	}
+	/*
+	 * head of list of SCBs that have
+	 * completed but have not been
+	 * put into the qoutfifo.
+	 */
+	COMPLETE_SCBH {
+		size		1
+	}
+	/*
+	 * Address of the hardware scb array in the host.
+	 */
+	HSCB_ADDR {
+		size		4
+	}
+	/*
+	 * Base address of our shared data with the kernel driver in host
+	 * memory.  This includes the qoutfifo and target mode
+	 * incoming command queue.
+	 */
+	SHARED_DATA_ADDR {
+		size		4
+	}
+	KERNEL_QINPOS {
+		size		1
+	}
+	QINPOS {
+		size		1
+	}
+	QOUTPOS {
+		size		1
+	}
+	/*
+	 * Kernel and sequencer offsets into the queue of
+	 * incoming target mode command descriptors.  The
+	 * queue is full when the KERNEL_TQINPOS == TQINPOS.
+	 */
+	KERNEL_TQINPOS {
+		size		1
+	}
+	TQINPOS {                
+		size		1
+	}
+	ARG_1 {
+		size		1
+		mask	SEND_MSG		0x80
+		mask	SEND_SENSE		0x40
+		mask	SEND_REJ		0x20
+		mask	MSGOUT_PHASEMIS		0x10
+		mask	EXIT_MSG_LOOP		0x08
+		mask	CONT_MSG_LOOP		0x04
+		mask	CONT_TARG_SESSION	0x02
+		alias	RETURN_1
+	}
+	ARG_2 {
+		size		1
+		alias	RETURN_2
+	}
+
+	/*
+	 * Snapshot of MSG_OUT taken after each message is sent.
+	 */
+	LAST_MSG {
+		size		1
+	}
+
+	/*
+	 * Sequences the kernel driver has okayed for us.  This allows
+	 * the driver to do things like prevent initiator or target
+	 * operations.
+	 */
+	SCSISEQ_TEMPLATE {
+		size		1
+		field	ENSELO		0x40
+		field	ENSELI		0x20
+		field	ENRSELI		0x10
+		field	ENAUTOATNO	0x08
+		field	ENAUTOATNI	0x04
+		field	ENAUTOATNP	0x02
+	}
+}
+
+scratch_ram {
+	address		0x056
+	size		4
+	/*
+	 * These scratch ram locations are initialized by the 274X BIOS.
+	 * We reuse them after capturing the BIOS settings during
+	 * initialization.
+	 */
+
+	/*
+	 * The initiator specified tag for this target mode transaction.
+	 */
+	HA_274_BIOSGLOBAL {
+		size	1
+		field	HA_274_EXTENDED_TRANS	0x01
+		alias	INITIATOR_TAG
+	}
+
+	SEQ_FLAGS2 {
+		size	1
+		field	SCB_DMA			0x01
+		field	TARGET_MSG_PENDING	0x02
+	}
+}
+
+scratch_ram {
+	address		0x05a
+	size		6
+	/*
+	 * These are reserved registers in the card's scratch ram on the 2742.
+	 * The EISA configuration chip is mapped here.  On Rev E. of the
+	 * aic7770, the sequencer can use this area for scratch, but the
+	 * host cannot directly access these registers.  On later chips, this
+	 * area can be read and written by both the host and the sequencer.
+	 * Even on later chips, many of these locations are initialized by
+	 * the BIOS.
+	 */
+	SCSICONF {
+		size		1
+		field	TERM_ENB	0x80
+		field	RESET_SCSI	0x40
+		field	ENSPCHK		0x20
+		mask	HSCSIID		0x07	/* our SCSI ID */
+		mask	HWSCSIID	0x0f	/* our SCSI ID if Wide Bus */
+	}
+	INTDEF {
+		address		0x05c
+		size		1
+		field	EDGE_TRIG	0x80
+		mask	VECTOR		0x0f
+	}
+	HOSTCONF {
+		address		0x05d
+		size		1
+	}
+	HA_274_BIOSCTRL	{
+		address		0x05f
+		size		1
+		mask	BIOSMODE		0x30
+		mask	BIOSDISABLED		0x30	
+		field	CHANNEL_B_PRIMARY	0x08
+	}
+}
+
+scratch_ram {
+	address		0x070
+	size		16
+
+	/*
+	 * Per target SCSI offset values for Ultra2 controllers.
+	 */
+	TARG_OFFSET {
+		size		16
+	}
+}
+
+const TID_SHIFT		4
+const SCB_LIST_NULL	0xff
+const TARGET_CMD_CMPLT	0xfe
+
+const CCSGADDR_MAX	0x80
+const CCSGRAM_MAXSEGS	16
+
+/* WDTR Message values */
+const BUS_8_BIT			0x00
+const BUS_16_BIT		0x01
+const BUS_32_BIT		0x02
+
+/* Offset maximums */
+const MAX_OFFSET_8BIT		0x0f
+const MAX_OFFSET_16BIT		0x08
+const MAX_OFFSET_ULTRA2		0x7f
+const MAX_OFFSET		0x7f
+const HOST_MSG			0xff
+
+/* Target mode command processing constants */
+const CMD_GROUP_CODE_SHIFT	0x05
+
+const STATUS_BUSY		0x08
+const STATUS_QUEUE_FULL	0x28
+const TARGET_DATA_IN		1
+
+/*
+ * Downloaded (kernel inserted) constants
+ */
+/* Offsets into the SCBID array where different data is stored */
+const QOUTFIFO_OFFSET download
+const QINFIFO_OFFSET download
+const CACHESIZE_MASK download
+const INVERTED_CACHESIZE_MASK download
+const SG_PREFETCH_CNT download
+const SG_PREFETCH_ALIGN_MASK download
+const SG_PREFETCH_ADDR_MASK download
diff --git a/drivers/scsi/aic7xxx/aic7xxx.seq b/drivers/scsi/aic7xxx/aic7xxx.seq
new file mode 100644
index 0000000..d84b741
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx.seq
@@ -0,0 +1,2398 @@
+/*
+ * Adaptec 274x/284x/294x device driver firmware for Linux and FreeBSD.
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+
+VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $"
+PATCH_ARG_LIST = "struct ahc_softc *ahc"
+PREFIX = "ahc_"
+
+#include "aic7xxx.reg"
+#include "scsi_message.h"
+
+/*
+ * A few words on the waiting SCB list:
+ * After starting the selection hardware, we check for reconnecting targets
+ * as well as for our selection to complete just in case the reselection wins
+ * bus arbitration.  The problem with this is that we must keep track of the
+ * SCB that we've already pulled from the QINFIFO and started the selection
+ * on just in case the reselection wins so that we can retry the selection at
+ * a later time.  This problem cannot be resolved by holding a single entry
+ * in scratch ram since a reconnecting target can request sense and this will
+ * create yet another SCB waiting for selection.  The solution used here is to 
+ * use byte 27 of the SCB as a pseudo-next pointer and to thread a list
+ * of SCBs that are awaiting selection.  Since 0-0xfe are valid SCB indexes, 
+ * SCB_LIST_NULL is 0xff which is out of range.  An entry is also added to
+ * this list every time a request sense occurs or after completing a non-tagged
+ * command for which a second SCB has been queued.  The sequencer will
+ * automatically consume the entries.
+ */
+
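+/*
+ * Purely illustrative sketch (a host's-eye view, not sequencer code) of
+ * how the waiting list is threaded through the hardware SCBs; "hw_scb"
+ * is a hypothetical array standing in for the card's SCB storage:
+ *
+ *	next = WAITING_SCBH;
+ *	while (next != SCB_LIST_NULL) {
+ *		scb  = &hw_scb[next];
+ *		next = scb->next;	(byte 27 of the SCB)
+ *	}
+ */
+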
+bus_free_sel:
+	/*
+	 * Turn off the selection hardware.  We need to reset the
+	 * selection request in order to perform a new selection.
+	 */
+	and	SCSISEQ, TEMODE|ENSELI|ENRSELI|ENAUTOATNP;
+	and	SIMODE1, ~ENBUSFREE;
+poll_for_work:
+	call	clear_target_state;
+	and	SXFRCTL0, ~SPIOEN;
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		clr	SCSIBUSL;
+	}
+	test	SCSISEQ, ENSELO	jnz poll_for_selection;
+	if ((ahc->features & AHC_TWIN) != 0) {
+		xor	SBLKCTL,SELBUSB;	/* Toggle to the other bus */
+		test	SCSISEQ, ENSELO		jnz poll_for_selection;
+	}
+	cmp	WAITING_SCBH,SCB_LIST_NULL jne start_waiting;
+poll_for_work_loop:
+	if ((ahc->features & AHC_TWIN) != 0) {
+		xor	SBLKCTL,SELBUSB;	/* Toggle to the other bus */
+	}
+	test	SSTAT0, SELDO|SELDI	jnz selection;
+test_queue:
+	/* Has the driver posted any work for us? */
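+	/*
+	 * Host side view (illustrative sketch only, field names are the
+	 * kernel driver's): the driver appends the SCB number to its
+	 * QINFIFO array and then publishes the new index, roughly
+	 *
+	 *	qinfifo[qinfifonext++] = scb_tag;
+	 *	ahc_outb(ahc, KERNEL_QINPOS, qinfifonext);
+	 *
+	 * On controllers with host queue registers the index is written
+	 * to a queue offset register instead and pending work is reported
+	 * through SCB_AVAIL, which is what the first test below checks.
+	 */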
+BEGIN_CRITICAL;
+	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+		test	QOFF_CTLSTA, SCB_AVAIL jz poll_for_work_loop;
+	} else {
+		mov	A, QINPOS;
+		cmp	KERNEL_QINPOS, A je poll_for_work_loop;
+	}
+	mov	ARG_1, NEXT_QUEUED_SCB;
+
+	/*
+	 * We have at least one queued SCB now and we don't have any 
+	 * SCBs in the list of SCBs awaiting selection.  Allocate a
+	 * card SCB for the host's SCB and get to work on it.
+	 */
+	if ((ahc->flags & AHC_PAGESCBS) != 0) {
+		mov	ALLZEROS	call	get_free_or_disc_scb;
+	} else {
+		/* In the non-paging case, the SCBID == hardware SCB index */
+		mov	SCBPTR, ARG_1;
+	}
+	or	SEQ_FLAGS2, SCB_DMA;
+END_CRITICAL;
+dma_queued_scb:
+	/*
+	 * DMA the SCB from host ram into the current SCB location.
+	 */
+	mvi	DMAPARAMS, HDMAEN|DIRECTION|FIFORESET;
+	mov	ARG_1	call dma_scb;
+	/*
+	 * Check one last time to see if this SCB was canceled
+	 * before we completed the DMA operation.  If it was,
+	 * the QINFIFO next pointer will not match our saved
+	 * value.
+	 */
+	mov	A, ARG_1;
+BEGIN_CRITICAL;
+	cmp	NEXT_QUEUED_SCB, A jne abort_qinscb;
+	if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+		cmp	SCB_TAG, A je . + 2;
+		mvi	SCB_MISMATCH call set_seqint;
+	}
+	mov	NEXT_QUEUED_SCB, SCB_NEXT;
+	mov	SCB_NEXT,WAITING_SCBH;
+	mov	WAITING_SCBH, SCBPTR;
+	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+		mov	NONE, SNSCB_QOFF;
+	} else {
+		inc	QINPOS;
+	}
+	and	SEQ_FLAGS2, ~SCB_DMA;
+END_CRITICAL;
+start_waiting:
+	/*
+	 * Start the first entry on the waiting SCB list.
+	 */
+	mov	SCBPTR, WAITING_SCBH;
+	call	start_selection;
+
+poll_for_selection:
+	/*
+	 * Twin channel devices cannot handle things like SELTO
+	 * interrupts on the "background" channel.  So, while
+	 * selecting, keep polling the current channel until
+	 * either a selection or reselection occurs.
+	 */
+	test	SSTAT0, SELDO|SELDI	jz poll_for_selection;
+
+selection:
+	/*
+	 * We aren't expecting a bus free, so interrupt
+	 * the kernel driver if it happens.
+	 */
+	mvi	CLRSINT1,CLRBUSFREE;
+	if ((ahc->features & AHC_DT) == 0) {
+		or	SIMODE1, ENBUSFREE;
+	}
+
+	/*
+	 * Guard against a bus free after (re)selection
+	 * but prior to enabling the busfree interrupt.  SELDI
+	 * and SELDO will be cleared in that case.
+	 */
+	test	SSTAT0, SELDI|SELDO	jz bus_free_sel;
+	test	SSTAT0,SELDO	jnz select_out;
+select_in:
+	if ((ahc->flags & AHC_TARGETROLE) != 0) {
+		if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+			test	SSTAT0, TARGET	jz initiator_reselect;
+		}
+		mvi	CLRSINT0, CLRSELDI;
+
+		/*
+		 * We've just been selected.  Assert BSY and
+		 * setup the phase for receiving messages
+		 * from the target.
+		 */
+		mvi	SCSISIGO, P_MESGOUT|BSYO;
+
+		/*
+		 * Setup the DMA for sending the identify and
+		 * command information.
+		 */
+		mvi	SEQ_FLAGS, CMDPHASE_PENDING;
+
+		mov     A, TQINPOS;
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			mvi	DINDEX, CCHADDR;
+			mvi	SHARED_DATA_ADDR call set_32byte_addr;
+			mvi	CCSCBCTL, CCSCBRESET;
+		} else {
+			mvi	DINDEX, HADDR;
+			mvi	SHARED_DATA_ADDR call set_32byte_addr;
+			mvi	DFCNTRL, FIFORESET;
+		}
+
+		/* Initiator that selected us */
+		and	SAVED_SCSIID, SELID_MASK, SELID;
+		/* The Target ID we were selected at */
+		if ((ahc->features & AHC_MULTI_TID) != 0) {
+			and	A, OID, TARGIDIN;
+		} else if ((ahc->features & AHC_ULTRA2) != 0) {
+			and	A, OID, SCSIID_ULTRA2;
+		} else {
+			and	A, OID, SCSIID;
+		}
+		or	SAVED_SCSIID, A;
+		if ((ahc->features & AHC_TWIN) != 0) {
+			test 	SBLKCTL, SELBUSB jz . + 2;
+			or	SAVED_SCSIID, TWIN_CHNLB;
+		}
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			mov	CCSCBRAM, SAVED_SCSIID;
+		} else {
+			mov	DFDAT, SAVED_SCSIID;
+		}
+
+		/*
+		 * If ATN isn't asserted, the target isn't interested
+		 * in talking to us.  Go directly to bus free.
+		 * XXX SCSI-1 may require us to assume lun 0 if
+		 * ATN is false.
+		 */
+		test	SCSISIGI, ATNI	jz	target_busfree;
+
+		/*
+		 * Watch ATN closely now as we pull in messages from the
+		 * initiator.  We follow the guidelines from section 6.5
+		 * of the SCSI-2 spec for what messages are allowed when.
+		 */
+		call	target_inb;
+
+		/*
+		 * Our first message must be one of IDENTIFY, ABORT, or
+		 * BUS_DEVICE_RESET.
+		 */
+		test	DINDEX, MSG_IDENTIFYFLAG jz host_target_message_loop;
+		/* Store for host */
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			mov	CCSCBRAM, DINDEX;
+		} else {
+			mov	DFDAT, DINDEX;
+		}
+		and	SAVED_LUN, MSG_IDENTIFY_LUNMASK, DINDEX;
+
+		/* Remember for disconnection decision */
+		test	DINDEX, MSG_IDENTIFY_DISCFLAG jnz . + 2;
+		/* XXX Honor per target settings too */
+		or	SEQ_FLAGS, NO_DISCONNECT;
+
+		test	SCSISIGI, ATNI	jz	ident_messages_done;
+		call	target_inb;
+		/*
+		 * If this is a tagged request, the tagged message must
+		 * immediately follow the identify.  We test for a valid
+		 * tag message by seeing if it is >= MSG_SIMPLE_Q_TAG and
+		 * < MSG_IGN_WIDE_RESIDUE.
+		 */
+		add	A, -MSG_SIMPLE_Q_TAG, DINDEX;
+		jnc	ident_messages_done_msg_pending;
+		add	A, -MSG_IGN_WIDE_RESIDUE, DINDEX;
+		jc	ident_messages_done_msg_pending;
+
+		/* Store for host */
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			mov	CCSCBRAM, DINDEX;
+		} else {
+			mov	DFDAT, DINDEX;
+		}
+		
+		/*
+		 * If the initiator doesn't feel like providing a tag number,
+		 * we've got a failed selection and must transition to bus
+		 * free.
+		 */
+		test	SCSISIGI, ATNI	jz	target_busfree;
+
+		/*
+		 * Store the tag for the host.
+		 */
+		call	target_inb;
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			mov	CCSCBRAM, DINDEX;
+		} else {
+			mov	DFDAT, DINDEX;
+		}
+		mov	INITIATOR_TAG, DINDEX;
+		or	SEQ_FLAGS, TARGET_CMD_IS_TAGGED;
+
+ident_messages_done:
+		/* Terminate the ident list */
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			mvi	CCSCBRAM, SCB_LIST_NULL;
+		} else {
+			mvi	DFDAT, SCB_LIST_NULL;
+		}
+		or	SEQ_FLAGS, TARG_CMD_PENDING;
+		test	SEQ_FLAGS2, TARGET_MSG_PENDING
+			jnz target_mesgout_pending;
+		test	SCSISIGI, ATNI jnz target_mesgout_continue;
+		jmp	target_ITloop;
+
+
+ident_messages_done_msg_pending:
+		or	SEQ_FLAGS2, TARGET_MSG_PENDING;
+		jmp	ident_messages_done;
+
+		/*
+		 * Pushed message loop to allow the kernel to
+		 * run its own target mode message state engine.
+		 */
+host_target_message_loop:
+		mvi	HOST_MSG_LOOP call set_seqint;
+		cmp	RETURN_1, EXIT_MSG_LOOP	je target_ITloop;
+		test	SSTAT0, SPIORDY jz .;
+		jmp	host_target_message_loop;
+	}
+
+if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+/*
+ * Reselection has been initiated by a target. Make a note that we've been
+ * reselected, but haven't seen an IDENTIFY message from the target yet.
+ */
+initiator_reselect:
+	/* XXX test for and handle ONE BIT condition */
+	or	SXFRCTL0, SPIOEN|CLRSTCNT|CLRCHN;
+	and	SAVED_SCSIID, SELID_MASK, SELID;
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		and	A, OID, SCSIID_ULTRA2;
+	} else {
+		and	A, OID, SCSIID;
+	}
+	or	SAVED_SCSIID, A;
+	if ((ahc->features & AHC_TWIN) != 0) {
+		test	SBLKCTL, SELBUSB	jz . + 2;
+		or	SAVED_SCSIID, TWIN_CHNLB;
+	}
+	mvi	CLRSINT0, CLRSELDI;
+	jmp	ITloop;
+}
+
+abort_qinscb:
+	call	add_scb_to_free_list;
+	jmp	poll_for_work_loop;
+
+start_selection:
+	/*
+	 * If bus reset interrupts have been disabled (from a previous
+	 * reset), re-enable them now.  Resets are only of interest
+	 * when we have outstanding transactions, so we can safely
+	 * defer re-enabling the interrupt until, as an initiator,
+	 * we start sending out transactions again.
+	 */
+	test	SIMODE1, ENSCSIRST	jnz . + 3;
+	mvi	CLRSINT1, CLRSCSIRSTI;
+	or	SIMODE1, ENSCSIRST;
+	if ((ahc->features & AHC_TWIN) != 0) {
+		and	SINDEX,~SELBUSB,SBLKCTL;/* Clear channel select bit */
+		test	SCB_SCSIID, TWIN_CHNLB jz . + 2;
+		or	SINDEX, SELBUSB;
+		mov	SBLKCTL,SINDEX;		/* select channel */
+	}
+initialize_scsiid:
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		mov	SCSIID_ULTRA2, SCB_SCSIID;
+	} else if ((ahc->features & AHC_TWIN) != 0) {
+		and	SCSIID, TWIN_TID|OID, SCB_SCSIID;
+	} else {
+		mov	SCSIID, SCB_SCSIID;
+	}
+	if ((ahc->flags & AHC_TARGETROLE) != 0) {
+		mov	SINDEX, SCSISEQ_TEMPLATE;
+		test	SCB_CONTROL, TARGET_SCB jz . + 2;
+		or	SINDEX, TEMODE;
+		mov	SCSISEQ, SINDEX ret;
+	} else {
+		mov	SCSISEQ, SCSISEQ_TEMPLATE ret;
+	}
+
+/*
+ * Initialize transfer settings with SCB provided settings.
+ */
+set_transfer_settings:
+	if ((ahc->features & AHC_ULTRA) != 0) {
+		test	SCB_CONTROL, ULTRAENB jz . + 2;
+		or	SXFRCTL0, FAST20;
+	} 
+	/*
+	 * Initialize SCSIRATE with the appropriate value for this target.
+	 */
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		bmov	SCSIRATE, SCB_SCSIRATE, 2 ret;
+	} else {
+		mov	SCSIRATE, SCB_SCSIRATE ret;
+	}
+
+if ((ahc->flags & AHC_TARGETROLE) != 0) {
+/*
+ * We carefully toggle SPIOEN to allow us to return the 
+ * message byte we receive so it can be checked prior to
+ * driving REQ on the bus for the next byte.
+ */
+target_inb:
+	/*
+	 * Drive REQ on the bus by enabling SCSI PIO.
+	 */
+	or	SXFRCTL0, SPIOEN;
+	/* Wait for the byte */
+	test	SSTAT0, SPIORDY jz .;
+	/* Prevent our read from triggering another REQ */
+	and	SXFRCTL0, ~SPIOEN;
+	/* Save latched contents */
+	mov	DINDEX, SCSIDATL ret;
+}
+
+/*
+ * After the selection, remove this SCB from the "waiting SCB"
+ * list.  This is achieved by simply moving our "next" pointer into
+ * WAITING_SCBH.  Our next pointer will be set to null the next time this
+ * SCB is used, so don't bother with it now.
+ */
+select_out:
+	/* Turn off the selection hardware */
+	and	SCSISEQ, TEMODE|ENSELI|ENRSELI|ENAUTOATNP, SCSISEQ;
+	mov	SCBPTR, WAITING_SCBH;
+	mov	WAITING_SCBH,SCB_NEXT;
+	mov	SAVED_SCSIID, SCB_SCSIID;
+	and	SAVED_LUN, LID, SCB_LUN;
+	call	set_transfer_settings;
+	if ((ahc->flags & AHC_TARGETROLE) != 0) {
+		test	SSTAT0, TARGET	jz initiator_select;
+
+		or	SXFRCTL0, CLRSTCNT|CLRCHN;
+
+		/*
+		 * Put the tag in a canonical location since not
+		 * all connections have an SCB.
+		 */
+		mov	INITIATOR_TAG, SCB_TARGET_ITAG;
+
+		/*
+		 * We've just re-selected an initiator.
+		 * Assert BSY and setup the phase for
+		 * sending our identify messages.
+		 */
+		mvi	P_MESGIN|BSYO call change_phase;
+		mvi	CLRSINT0, CLRSELDO;
+
+		/*
+		 * Start out with a simple identify message.
+		 */
+		or	SAVED_LUN, MSG_IDENTIFYFLAG call target_outb;
+
+		/*
+		 * If we are the result of a tagged command, send
+		 * a simple Q tag and the tag id.
+		 */
+		test	SCB_CONTROL, TAG_ENB	jz . + 3;
+		mvi	MSG_SIMPLE_Q_TAG call target_outb;
+		mov	SCB_TARGET_ITAG call target_outb;
+target_synccmd:
+		/*
+		 * Now determine what phases the host wants us
+		 * to go through.
+		 */
+		mov	SEQ_FLAGS, SCB_TARGET_PHASES;
+		
+		test	SCB_CONTROL, MK_MESSAGE	jz target_ITloop;
+		mvi	P_MESGIN|BSYO call change_phase;
+		jmp	host_target_message_loop;
+target_ITloop:
+		/*
+		 * Start honoring ATN signals now that
+		 * we properly identified ourselves.
+		 */
+		test	SCSISIGI, ATNI			jnz target_mesgout;
+		test	SEQ_FLAGS, CMDPHASE_PENDING	jnz target_cmdphase;
+		test	SEQ_FLAGS, DPHASE_PENDING	jnz target_dphase;
+		test	SEQ_FLAGS, SPHASE_PENDING	jnz target_sphase;
+
+		/*
+		 * No more work to do.  Either disconnect or not depending
+		 * on the state of NO_DISCONNECT.
+		 */
+		test	SEQ_FLAGS, NO_DISCONNECT jz target_disconnect; 
+		mvi	TARG_IMMEDIATE_SCB, SCB_LIST_NULL;
+		call	complete_target_cmd;
+		if ((ahc->flags & AHC_PAGESCBS) != 0) {
+			mov	ALLZEROS	call	get_free_or_disc_scb;
+		}
+		cmp	TARG_IMMEDIATE_SCB, SCB_LIST_NULL je .;
+		mvi	DMAPARAMS, HDMAEN|DIRECTION|FIFORESET;
+		mov	TARG_IMMEDIATE_SCB call dma_scb;
+		call	set_transfer_settings;
+		or	SXFRCTL0, CLRSTCNT|CLRCHN;
+		jmp	target_synccmd;
+
+target_mesgout:
+		mvi	SCSISIGO, P_MESGOUT|BSYO;
+target_mesgout_continue:
+		call	target_inb;
+target_mesgout_pending:
+		and	SEQ_FLAGS2, ~TARGET_MSG_PENDING;
+		/* Local Processing goes here... */
+		jmp	host_target_message_loop;
+		
+target_disconnect:
+		mvi	P_MESGIN|BSYO call change_phase;
+		test	SEQ_FLAGS, DPHASE	jz . + 2;
+		mvi	MSG_SAVEDATAPOINTER call target_outb;
+		mvi	MSG_DISCONNECT call target_outb;
+
+target_busfree_wait:
+		/* Wait for preceding I/O session to complete. */
+		test	SCSISIGI, ACKI jnz .;
+target_busfree:
+		and	SIMODE1, ~ENBUSFREE;
+		if ((ahc->features & AHC_ULTRA2) != 0) {
+			clr	SCSIBUSL;
+		}
+		clr	SCSISIGO;
+		mvi	LASTPHASE, P_BUSFREE;
+		call	complete_target_cmd;
+		jmp	poll_for_work;
+
+target_cmdphase:
+		/*
+		 * The target has dropped ATN (doesn't want to abort or BDR)
+		 * and we believe this selection to be valid.  If the ring
+		 * buffer for new commands is full, return busy or queue full.
+		 */
+		if ((ahc->features & AHC_HS_MAILBOX) != 0) {
+			and	A, HOST_TQINPOS, HS_MAILBOX;
+		} else {
+			mov	A, KERNEL_TQINPOS;
+		}
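+		/*
+		 * The ring of incoming command descriptors is full when
+		 * our write index (TQINPOS) has caught up with the index
+		 * last published by the kernel, so answer with QUEUE FULL
+		 * or BUSY status below instead of accepting the command.
+		 */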
+		cmp	TQINPOS, A jne tqinfifo_has_space;
+		mvi	P_STATUS|BSYO call change_phase;
+		test	SEQ_FLAGS, TARGET_CMD_IS_TAGGED jz . + 3;
+		mvi	STATUS_QUEUE_FULL call target_outb;
+		jmp	target_busfree_wait;
+		mvi	STATUS_BUSY call target_outb;
+		jmp	target_busfree_wait;
+tqinfifo_has_space:	
+		mvi	P_COMMAND|BSYO call change_phase;
+		call	target_inb;
+		mov	A, DINDEX;
+		/* Store for host */
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			mov	CCSCBRAM, A;
+		} else {
+			mov	DFDAT, A;
+		}
+
+		/*
+		 * Determine the number of bytes to read
+		 * based on the command group code via table lookup.
+		 * We reuse the first 8 bytes of the TARG_SCSIRATE
+		 * BIOS array for this table. Count is one less than
+		 * the total for the command since we've already fetched
+		 * the first byte.
+		 */
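+		/*
+		 * Example: for READ(10), opcode 0x28, the group code is
+		 * 0x28 >> 5 = 1, so the table entry is 9, the remaining
+		 * bytes of a 10 byte CDB.
+		 */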
+		shr	A, CMD_GROUP_CODE_SHIFT;
+		add	SINDEX, CMDSIZE_TABLE, A;
+		mov	A, SINDIR;
+
+		test	A, 0xFF jz command_phase_done;
+		or	SXFRCTL0, SPIOEN;
+command_loop:
+		test	SSTAT0, SPIORDY jz .;
+		cmp	A, 1 jne . + 2;
+		and	SXFRCTL0, ~SPIOEN;	/* Last Byte */
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			mov	CCSCBRAM, SCSIDATL;
+		} else {
+			mov	DFDAT, SCSIDATL;
+		}
+		dec	A;
+		test	A, 0xFF jnz command_loop;
+
+command_phase_done:
+		and	SEQ_FLAGS, ~CMDPHASE_PENDING;
+		jmp	target_ITloop;
+
+target_dphase:
+		/*
+		 * Data phases on the bus are from the
+		 * perspective of the initiator.  The dma
+		 * code looks at LASTPHASE to determine the
+		 * data direction of the DMA.  Toggle it for
+		 * target transfers.
+		 */
+		xor	LASTPHASE, IOI, SCB_TARGET_DATA_DIR;
+		or	SCB_TARGET_DATA_DIR, BSYO call change_phase;
+		jmp	p_data;
+
+target_sphase:
+		mvi	P_STATUS|BSYO call change_phase;
+		mvi	LASTPHASE, P_STATUS;
+		mov	SCB_SCSI_STATUS call target_outb;
+		/* XXX Watch for ATN or parity errors??? */
+		mvi	SCSISIGO, P_MESGIN|BSYO;
+		/* MSG_CMDCMPLT is 0, but we can't do an immediate of 0 */
+		mov	ALLZEROS call target_outb;
+		jmp	target_busfree_wait;
+	
+complete_target_cmd:
+		test	SEQ_FLAGS, TARG_CMD_PENDING	jnz . + 2;
+		mov	SCB_TAG jmp complete_post;
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			/* Set the valid byte */
+			mvi	CCSCBADDR, 24;
+			mov	CCSCBRAM, ALLONES;
+			mvi	CCHCNT, 28;
+			or	CCSCBCTL, CCSCBEN|CCSCBRESET;
+			test	CCSCBCTL, CCSCBDONE jz .;
+			clr	CCSCBCTL;
+		} else {
+			/* Set the valid byte */
+			or	DFCNTRL, FIFORESET;
+			mvi	DFWADDR, 3; /* Third 64bit word or byte 24 */
+			mov	DFDAT, ALLONES;
+			mvi	28	call set_hcnt;
+			or	DFCNTRL, HDMAEN|FIFOFLUSH;
+			call	dma_finish;
+		}
+		inc	TQINPOS;
+		mvi	INTSTAT,CMDCMPLT ret;
+	}
+
+if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+initiator_select:
+	or	SXFRCTL0, SPIOEN|CLRSTCNT|CLRCHN;
+	/*
+	 * As soon as we get a successful selection, the target
+	 * should go into the message out phase since we have ATN
+	 * asserted.
+	 */
+	mvi	MSG_OUT, MSG_IDENTIFYFLAG;
+	mvi	SEQ_FLAGS, NO_CDB_SENT;
+	mvi	CLRSINT0, CLRSELDO;
+
+	/*
+	 * Main loop for information transfer phases.  Wait for the
+	 * target to assert REQ before checking MSG, C/D and I/O for
+	 * the bus phase.
+	 */
+mesgin_phasemis:
+ITloop:
+	call	phase_lock;
+
+	mov	A, LASTPHASE;
+
+	test	A, ~P_DATAIN	jz p_data;
+	cmp	A,P_COMMAND	je p_command;
+	cmp	A,P_MESGOUT	je p_mesgout;
+	cmp	A,P_STATUS	je p_status;
+	cmp	A,P_MESGIN	je p_mesgin;
+
+	mvi	BAD_PHASE call set_seqint;
+	jmp	ITloop;			/* Try reading the bus again. */
+
+await_busfree:
+	and	SIMODE1, ~ENBUSFREE;
+	mov	NONE, SCSIDATL;		/* Ack the last byte */
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		clr	SCSIBUSL;	/* Prevent bit leakage during SELTO */
+	}
+	and	SXFRCTL0, ~SPIOEN;
+	test	SSTAT1,REQINIT|BUSFREE	jz .;
+	test	SSTAT1, BUSFREE jnz poll_for_work;
+	mvi	MISSED_BUSFREE call set_seqint;
+}
+	
+clear_target_state:
+	/*
+	 * We assume that the kernel driver may reset us
+	 * at any time, even in the middle of a DMA, so
+	 * clear DFCNTRL too.
+	 */
+	clr	DFCNTRL;
+	or	SXFRCTL0, CLRSTCNT|CLRCHN;
+
+	/*
+	 * We don't know the target we will connect to,
+	 * so default to narrow transfers to avoid
+	 * parity problems.
+	 */
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		bmov	SCSIRATE, ALLZEROS, 2;
+	} else {
+		clr	SCSIRATE;
+		if ((ahc->features & AHC_ULTRA) != 0) {
+			and	SXFRCTL0, ~(FAST20);
+		}
+	}
+	mvi	LASTPHASE, P_BUSFREE;
+	/* clear target specific flags */
+	mvi	SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT ret;
+
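+/*
+ * Advance SCB_RESIDUAL_SGPTR to the next element of the host's S/G list.
+ * Each element is SG_SIZEOF bytes (an address/length pair), so this is a
+ * 32bit add of SG_SIZEOF carried across the four pointer bytes.
+ */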
+sg_advance:
+	clr	A;			/* add sizeof(struct scatter) */
+	add	SCB_RESIDUAL_SGPTR[0],SG_SIZEOF;
+	adc	SCB_RESIDUAL_SGPTR[1],A;
+	adc	SCB_RESIDUAL_SGPTR[2],A;
+	adc	SCB_RESIDUAL_SGPTR[3],A ret;
+
+if ((ahc->features & AHC_CMD_CHAN) != 0) {
+disable_ccsgen:
+	test	CCSGCTL, CCSGEN jz return;
+	test	CCSGCTL, CCSGDONE jz .;
+disable_ccsgen_fetch_done:
+	clr	CCSGCTL;
+	test	CCSGCTL, CCSGEN jnz .;
+	ret;
+idle_loop:
+	/*
+	 * Do we need any more segments for this transfer?
+	 */
+	test	SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz return;
+
+	/* Did we just finish fetching segs? */
+	cmp	CCSGCTL, CCSGEN|CCSGDONE je idle_sgfetch_complete;
+
+	/* Are we actively fetching segments? */
+	test	CCSGCTL, CCSGEN jnz return;
+
+	/*
+	 * Do we have any prefetch left???
+	 */
+	cmp	CCSGADDR, SG_PREFETCH_CNT jne idle_sg_avail;
+
+	/*
+	 * Need to fetch segments, but we can only do that
+	 * if the command channel is completely idle.  Make
+	 * sure we don't have an SCB prefetch going on.
+	 */
+	test	CCSCBCTL, CCSCBEN jnz return;
+
+	/*
+	 * We fetch a "cacheline aligned" and sized amount of data
+	 * so we don't end up referencing a non-existent page.
+	 * Cacheline aligned is in quotes because the kernel will
+	 * set the prefetch amount to a reasonable level if the
+	 * cacheline size is unknown.
+	 */
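+	/*
+	 * Illustrative example, assuming a 128 byte prefetch:
+	 * SG_PREFETCH_ALIGN_MASK keeps the aligned portion of the low
+	 * address byte and SG_PREFETCH_ADDR_MASK the offset within the
+	 * fetched block, so a residual S/G pointer ending in 0xc8 fetches
+	 * 128 bytes starting at xx80 and later indexes CCSGRAM at 0x48.
+	 */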
+	mvi	CCHCNT, SG_PREFETCH_CNT;
+	and	CCHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR;
+	bmov	CCHADDR[1], SCB_RESIDUAL_SGPTR[1], 3;
+	mvi	CCSGCTL, CCSGEN|CCSGRESET ret;
+idle_sgfetch_complete:
+	call	disable_ccsgen_fetch_done;
+	and	CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR;
+idle_sg_avail:
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		/* Does the hardware have space for another SG entry? */
+		test	DFSTATUS, PRELOAD_AVAIL jz return;
+		bmov 	HADDR, CCSGRAM, 7;
+		bmov	SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1;
+		if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+			mov	SCB_RESIDUAL_DATACNT[3] call set_hhaddr;
+		}
+		call	sg_advance;
+		mov	SINDEX, SCB_RESIDUAL_SGPTR[0];
+		test	SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 2;
+		or	SINDEX, LAST_SEG;
+		mov	SG_CACHE_PRE, SINDEX;
+		/* Load the segment */
+		or	DFCNTRL, PRELOADEN;
+	}
+	ret;
+}
+
+if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) {
+/*
+ * Calculate the trailing portion of this S/G segment that cannot
+ * be transferred using memory write and invalidate PCI transactions.  
+ * XXX Can we optimize this for PCI writes only???
+ */
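+/*
+ * Worked example (illustrative, assuming a 64 byte cacheline so that
+ * CACHESIZE_MASK would be 0x3f): for HADDR = 0x1010 and HCNT = 0x58 the
+ * segment ends at 0x1068, so the trailing 0x28 bytes cannot use MWI.
+ * MWI_RESIDUAL becomes 0x28 and HCNT is trimmed to 0x30, leaving this
+ * DMA to end on the 0x1040 cacheline boundary.
+ */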
+calc_mwi_residual:
+	/*
+	 * If the ending address is on a cacheline boundary,
+	 * there is no need for an extra segment.
+	 */
+	mov	A, HCNT[0];
+	add	A, A, HADDR[0];
+	and	A, CACHESIZE_MASK;
+	test	A, 0xFF jz return;
+
+	/*
+	 * If the transfer is less than a cacheline,
+	 * there is no need for an extra segment.
+	 */
+	test	HCNT[1], 0xFF	jnz calc_mwi_residual_final;
+	test	HCNT[2], 0xFF	jnz calc_mwi_residual_final;
+	add	NONE, INVERTED_CACHESIZE_MASK, HCNT[0];
+	jnc	return;
+
+calc_mwi_residual_final:
+	mov	MWI_RESIDUAL, A;
+	not	A;
+	inc	A;
+	add	HCNT[0], A;
+	adc	HCNT[1], -1;
+	adc	HCNT[2], -1 ret;
+}
+
+p_data:
+	test	SEQ_FLAGS,NOT_IDENTIFIED|NO_CDB_SENT jz p_data_allowed;
+	mvi	PROTO_VIOLATION call set_seqint;
+p_data_allowed:
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		mvi	DMAPARAMS, PRELOADEN|SCSIEN|HDMAEN;
+	} else {
+		mvi	DMAPARAMS, WIDEODD|SCSIEN|SDMAEN|HDMAEN|FIFORESET;
+	}
+	test	LASTPHASE, IOI jnz . + 2;
+	or	DMAPARAMS, DIRECTION;
+	if ((ahc->features & AHC_CMD_CHAN) != 0) {
+		/* We don't have any valid S/G elements */
+		mvi	CCSGADDR, SG_PREFETCH_CNT;
+	}
+	test	SEQ_FLAGS, DPHASE	jz data_phase_initialize;
+
+	/*
+	 * If we re-enter the data phase after going through another
+	 * phase, our transfer location has almost certainly been
+	 * corrupted by the intervening non-data transfers.  Ask
+	 * the host driver to fix us up based on the transfer residual.
+	 */
+	mvi	PDATA_REINIT	call set_seqint;
+	jmp	data_phase_loop;
+
+data_phase_initialize:
+	/* We have seen a data phase for the first time */
+	or	SEQ_FLAGS, DPHASE;
+
+	/*
+	 * Initialize the DMA address and counter from the SCB.
+	 * Also set SCB_RESIDUAL_SGPTR, including the LAST_SEG
+	 * flag in the highest byte of the data count.  We cannot
+	 * modify the saved values in the SCB until we see a save
+	 * data pointers message.
+	 */
+	if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+		/* The lowest address byte must be loaded last. */
+		mov	SCB_DATACNT[3] call set_hhaddr;
+	}
+	if ((ahc->features & AHC_CMD_CHAN) != 0) {
+		bmov	HADDR, SCB_DATAPTR, 7;
+		bmov	SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5;
+	} else {
+		mvi	DINDEX, HADDR;
+		mvi	SCB_DATAPTR	call bcopy_7;
+		mvi	DINDEX, SCB_RESIDUAL_DATACNT + 3;
+		mvi	SCB_DATACNT + 3 call bcopy_5;
+	}
+	if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) {
+		call	calc_mwi_residual;
+	}
+	and	SCB_RESIDUAL_SGPTR[0], ~SG_FULL_RESID;
+
+	if ((ahc->features & AHC_ULTRA2) == 0) {
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			bmov	STCNT, HCNT, 3;
+		} else {
+			call	set_stcnt_from_hcnt;
+		}
+	}
+
+data_phase_loop:
+	/* Guard against overruns */
+	test	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_inbounds;
+
+	/*
+	 * Turn on `Bit Bucket' mode, wait until the target takes
+	 * us to another phase, and then notify the host.
+	 */
+	and	DMAPARAMS, DIRECTION;
+	mov	DFCNTRL, DMAPARAMS;
+	or	SXFRCTL1,BITBUCKET;
+	if ((ahc->features & AHC_DT) == 0) {
+		test	SSTAT1,PHASEMIS	jz .;
+	} else {
+		test	SCSIPHASE, DATA_PHASE_MASK jnz .;
+	}
+	and	SXFRCTL1, ~BITBUCKET;
+	mvi	DATA_OVERRUN call set_seqint;
+	jmp	ITloop;
+
+data_phase_inbounds:
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		mov	SINDEX, SCB_RESIDUAL_SGPTR[0];
+		test	SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 2;
+		or	SINDEX, LAST_SEG;
+		mov	SG_CACHE_PRE, SINDEX;
+		mov	DFCNTRL, DMAPARAMS;
+ultra2_dma_loop:
+		call	idle_loop;
+		/*
+		 * The transfer is complete if either the last segment
+		 * completes or the target changes phase.
+		 */
+		test	SG_CACHE_SHADOW, LAST_SEG_DONE jnz ultra2_dmafinish;
+		if ((ahc->features & AHC_DT) == 0) {
+			if ((ahc->flags & AHC_TARGETROLE) != 0) {
+				 /*
+				  * As a target, we control the phases,
+				  * so ignore PHASEMIS.
+				  */
+				test	SSTAT0, TARGET jnz ultra2_dma_loop;
+			}
+			if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+				test	SSTAT1,PHASEMIS	jz ultra2_dma_loop;
+			}
+		} else {
+			test	DFCNTRL, SCSIEN jnz ultra2_dma_loop;
+		}
+
+ultra2_dmafinish:
+		/*
+		 * The transfer has terminated due to a phase change
+		 * and/or the completion of the last segment.
+		 * We have two goals here.  Do as much other work
+		 * as possible while the data fifo drains on a read
+		 * and respond as quickly as possible to the standard
+		 * messages (save data pointers/disconnect and command
+		 * complete) that usually follow a data phase.
+		 */
+		if ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0) {
+			/*
+			 * On chips with broken auto-flush, start
+			 * the flushing process now.  We'll poke
+			 * the chip from time to time to keep the
+			 * flush process going as we complete the
+			 * data phase.
+			 */
+			or	DFCNTRL, FIFOFLUSH;
+		}
+		/*
+		 * We assume that, even though data may still be
+		 * transferring to the host, that the SCSI side of
+		 * the DMA engine is now in a static state.  This
+		 * allows us to update our notion of where we are
+		 * in this transfer.
+		 *
+		 * If, by chance, we stopped before being able
+		 * to fetch additional segments for this transfer,
+		 * yet the last S/G was completely exhausted,
+		 * call our idle loop until it is able to load
+		 * another segment.  This will allow us to immediately
+		 * pick up on the next segment on the next data phase.
+		 *
+		 * If we happened to stop on the last segment, then
+		 * our residual information is still correct from
+		 * the idle loop and there is no need to perform
+		 * any fixups.
+		 */
+ultra2_ensure_sg:
+		test	SG_CACHE_SHADOW, LAST_SEG jz ultra2_shvalid;
+		/* Record if we've consumed all S/G entries */
+		test	SSTAT2, SHVALID	jnz residuals_correct;
+		or	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL;
+		jmp	residuals_correct;
+
+ultra2_shvalid:
+		test	SSTAT2, SHVALID	jnz sgptr_fixup;
+		call	idle_loop;
+		jmp	ultra2_ensure_sg;
+
+sgptr_fixup:
+		/*
+		 * Fixup the residual next S/G pointer.  The S/G preload
+		 * feature of the chip allows us to load two elements
+		 * in addition to the currently active element.  We
+		 * store the bottom byte of the next S/G pointer in
+		 * the SG_CACHE_PRE register so we can restore the
+		 * correct value when the DMA completes.  If the next
+		 * sg ptr value has advanced to the point where higher
+		 * bytes in the address have been affected, fix them
+		 * too.
+		 */
+		test	SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done;
+		test	SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done;
+		add	SCB_RESIDUAL_SGPTR[1], -1;
+		adc	SCB_RESIDUAL_SGPTR[2], -1; 
+		adc	SCB_RESIDUAL_SGPTR[3], -1;
+sgptr_fixup_done:
+		and	SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW;
+		/* We are not the last seg */
+		and	SCB_RESIDUAL_DATACNT[3], ~SG_LAST_SEG;
+residuals_correct:
+		/*
+		 * Go ahead and shut down the DMA engine now.
+		 * In the future, we'll want to handle end of
+		 * transfer messages prior to doing this, but this
+		 * requires similar restructuring for pre-ULTRA2
+		 * controllers.
+		 */
+		test	DMAPARAMS, DIRECTION jnz ultra2_fifoempty;
+ultra2_fifoflush:
+		if ((ahc->features & AHC_DT) == 0) {
+			if ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0) {
+				/*
+				 * On Rev A of the aic7890, the autoflush
+				 * feature doesn't function correctly.
+				 * Perform an explicit manual flush.  During
+				 * a manual flush, the FIFOEMP bit becomes
+				 * true every time the PCI FIFO empties
+				 * regardless of the state of the SCSI FIFO.
+				 * It can take up to 4 clock cycles for the
+				 * SCSI FIFO to get data into the PCI FIFO
+				 * and for FIFOEMP to de-assert.  Here we
+				 * guard against this condition by making
+				 * sure the FIFOEMP bit stays on for 5 full
+				 * clock cycles.
+				 */
+				or	DFCNTRL, FIFOFLUSH;
+				test	DFSTATUS, FIFOEMP jz ultra2_fifoflush;
+				test	DFSTATUS, FIFOEMP jz ultra2_fifoflush;
+				test	DFSTATUS, FIFOEMP jz ultra2_fifoflush;
+				test	DFSTATUS, FIFOEMP jz ultra2_fifoflush;
+			}
+			test	DFSTATUS, FIFOEMP jz ultra2_fifoflush;
+		} else {
+			/*
+			 * We enable the auto-ack feature on DT capable
+			 * controllers.  This means that the controller may
+			 * have already transferred some overrun bytes into
+			 * the data FIFO and acked them on the bus.  The only
+			 * way to detect this situation is to wait for
+			 * LAST_SEG_DONE to come true on a completed transfer
+			 * and then test to see if the data FIFO is non-empty.
+			 */
+			test	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL
+				jz ultra2_wait_fifoemp;
+			test	SG_CACHE_SHADOW, LAST_SEG_DONE jz .;
+			/*
+			 * FIFOEMP can lag LAST_SEG_DONE.  Wait a few
+			 * clocks before calling this an overrun.
+			 */
+			test	DFSTATUS, FIFOEMP jnz ultra2_fifoempty;
+			test	DFSTATUS, FIFOEMP jnz ultra2_fifoempty;
+			test	DFSTATUS, FIFOEMP jnz ultra2_fifoempty;
+			/* Overrun */
+			jmp	data_phase_loop;
+ultra2_wait_fifoemp:
+			test	DFSTATUS, FIFOEMP jz .;
+		}
+ultra2_fifoempty:
+		/* Don't clobber an in-progress host data transfer */
+		test	DFSTATUS, MREQPEND	jnz ultra2_fifoempty;
+ultra2_dmahalt:
+		and     DFCNTRL, ~(SCSIEN|HDMAEN);
+		test	DFCNTRL, SCSIEN|HDMAEN jnz .;
+		if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+			/*
+			 * Keep HHADDR cleared for future, 32bit addressed
+			 * only, DMA operations.
+			 *
+			 * Due to bayonet style S/G handling, our residual
+			 * data must be "fixed up" once the transfer is halted.
+			 * Here we fix up the HSHADDR stored in the high byte
+			 * of the residual data cnt.  By postponing the fixup,
+			 * we can batch the clearing of HADDR with the fixup.
+			 * If we halted on the last segment, the residual is
+			 * already correct.   If we are not on the last
+			 * segment, copy the high address directly from HSHADDR.
+			 * We don't need to worry about maintaining the
+			 * SG_LAST_SEG flag as it will always be false in the
+			 * case where an update is required.
+			 */
+			or	DSCOMMAND1, HADDLDSEL0;
+			test	SG_CACHE_SHADOW, LAST_SEG jnz . + 2;
+			mov	SCB_RESIDUAL_DATACNT[3], SHADDR;
+			clr	HADDR;
+			and	DSCOMMAND1, ~HADDLDSEL0;
+		}
+	} else {
+		/* If we are the last SG block, tell the hardware. */
+		if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0
+		  && ahc->pci_cachesize != 0) {
+			test	MWI_RESIDUAL, 0xFF jnz dma_mid_sg;
+		}
+		test	SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz dma_mid_sg;
+		if ((ahc->flags & AHC_TARGETROLE) != 0) {
+			test	SSTAT0, TARGET jz dma_last_sg;
+			if ((ahc->flags & AHC_TMODE_WIDEODD_BUG) != 0) {
+				test	DMAPARAMS, DIRECTION jz dma_mid_sg;
+			}
+		}
+dma_last_sg:
+		and	DMAPARAMS, ~WIDEODD;
+dma_mid_sg:
+		/* Start DMA data transfer. */
+		mov	DFCNTRL, DMAPARAMS;
+dma_loop:
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			call	idle_loop;
+		}
+		test	SSTAT0,DMADONE	jnz dma_dmadone;
+		test	SSTAT1,PHASEMIS	jz dma_loop;	/* ie. underrun */
+dma_phasemis:
+		/*
+		 * We will be "done" DMAing when the transfer count goes to
+		 * zero, or the target changes the phase (in light of this,
+		 * it makes sense that the DMA circuitry doesn't ACK when
+		 * PHASEMIS is active).  If we are doing a SCSI->Host transfer,
+		 * the data FIFO should be flushed auto-magically on STCNT=0
+		 * or a phase change, so just wait for FIFO empty status.
+		 */
+dma_checkfifo:
+		test	DFCNTRL,DIRECTION	jnz dma_fifoempty;
+dma_fifoflush:
+		test	DFSTATUS,FIFOEMP	jz dma_fifoflush;
+dma_fifoempty:
+		/* Don't clobber an in-progress host data transfer */
+		test	DFSTATUS, MREQPEND	jnz dma_fifoempty;
+
+		/*
+		 * Now shut off the DMA and make sure that the DMA
+		 * hardware has actually stopped.  Touching the DMA
+		 * counters, etc. while a DMA is active will result
+		 * in an ILLSADDR exception.
+		 */
+dma_dmadone:
+		and	DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN);
+dma_halt:
+		/*
+		 * Some revisions of the aic78XX have a problem where, if the
+		 * data fifo is full, but the PCI input latch is not empty, 
+		 * HDMAEN cannot be cleared.  The fix used here is to drain
+		 * the prefetched but unused data from the data fifo until
+		 * there is space for the input latch to drain.
+		 */
+		if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0) {
+			mov	NONE, DFDAT;
+		}
+		test	DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz dma_halt;
+
+		/* See if we have completed this last segment */
+		test	STCNT[0], 0xff	jnz data_phase_finish;
+		test	STCNT[1], 0xff	jnz data_phase_finish;
+		test	STCNT[2], 0xff	jnz data_phase_finish;
+
+		/*
+		 * Advance the scatter-gather pointers if needed 
+		 */
+		if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0
+		  && ahc->pci_cachesize != 0) {
+			test	MWI_RESIDUAL, 0xFF jz no_mwi_resid;
+			/*
+			 * Reload HADDR from SHADDR and setup the
+			 * count to be the size of our residual.
+			 */
+			if ((ahc->features & AHC_CMD_CHAN) != 0) {
+				bmov	HADDR, SHADDR, 4;
+				mov	HCNT, MWI_RESIDUAL;
+				bmov	HCNT[1], ALLZEROS, 2;
+			} else {
+				mvi	DINDEX, HADDR;
+				mvi	SHADDR call bcopy_4;
+				mov	MWI_RESIDUAL call set_hcnt;
+			}
+			clr	MWI_RESIDUAL;
+			jmp	sg_load_done;
+no_mwi_resid:
+		}
+		test	SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz sg_load;
+		or	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL;
+		jmp	data_phase_finish;
+sg_load:
+		/*
+		 * Load the next SG element's data address and length
+		 * into the DMA engine.  If we don't have hardware
+		 * to perform a prefetch, we'll have to fetch the
+		 * segment from host memory first.
+		 */
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			/* Wait for the idle loop to complete */
+			test	CCSGCTL, CCSGEN jz . + 3;
+			call	idle_loop;
+			test	CCSGCTL, CCSGEN jnz . - 1;
+			bmov 	HADDR, CCSGRAM, 7;
+			/*
+			 * Workaround for flaky external SCB RAM
+			 * on certain aic7895 setups.  It seems
+			 * unable to handle direct transfers from
+			 * S/G ram to certain SCB locations.
+			 */
+			mov	SINDEX, CCSGRAM;
+			mov	SCB_RESIDUAL_DATACNT[3], SINDEX;
+		} else {
+			if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+				mov	ALLZEROS call set_hhaddr;
+			}
+			mvi	DINDEX, HADDR;
+			mvi	SCB_RESIDUAL_SGPTR	call bcopy_4;
+
+			mvi	SG_SIZEOF	call set_hcnt;
+
+			or	DFCNTRL, HDMAEN|DIRECTION|FIFORESET;
+
+			call	dma_finish;
+
+			mvi	DINDEX, HADDR;
+			call	dfdat_in_7;
+			mov	SCB_RESIDUAL_DATACNT[3], DFDAT;
+		}
+
+		if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+			mov	SCB_RESIDUAL_DATACNT[3] call set_hhaddr;
+
+			/*
+			 * The lowest address byte must be loaded
+			 * last as it triggers the computation of
+			 * some items in the PCI block.  The ULTRA2
+			 * chips do this on PRELOAD.
+			 */
+			mov	HADDR, HADDR;
+		}
+		if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0
+		  && ahc->pci_cachesize != 0) {
+			call calc_mwi_residual;
+		}
+
+		/* Point to the new next sg in memory */
+		call	sg_advance;
+
+sg_load_done:
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			bmov	STCNT, HCNT, 3;
+		} else {
+			call	set_stcnt_from_hcnt;
+		}
+
+		if ((ahc->flags & AHC_TARGETROLE) != 0) {
+			test	SSTAT0, TARGET jnz data_phase_loop;
+		}
+	}
+data_phase_finish:
+	/*
+	 * If the target has left us in data phase, loop through
+	 * the dma code again.  In the case of ULTRA2 adapters,
+	 * we should only loop if there is a data overrun.  For
+	 * all other adapters, we'll loop after each S/G element
+	 * is loaded as well as if there is an overrun.
+	 */
+	if ((ahc->flags & AHC_TARGETROLE) != 0) {
+		test	SSTAT0, TARGET jnz data_phase_done;
+	}
+	if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+		test	SSTAT1, REQINIT jz .;
+		if ((ahc->features & AHC_DT) == 0) {
+			test	SSTAT1,PHASEMIS	jz data_phase_loop;
+		} else {
+			test	SCSIPHASE, DATA_PHASE_MASK jnz data_phase_loop;
+		}
+	}
+
+data_phase_done:
+	/*
+	 * After a DMA finishes, save the SG and STCNT residuals back into
+	 * the SCB.  We use STCNT instead of HCNT, since it's a reflection
+	 * of how many bytes were transferred on the SCSI (as opposed to the
+	 * host) bus.
+	 */
+	if ((ahc->features & AHC_CMD_CHAN) != 0) {
+		/* Kill off any pending prefetch */
+		call	disable_ccsgen;
+	}
+
+	if ((ahc->features & AHC_ULTRA2) == 0) {
+		/*
+		 * Clear the high address byte so that all other DMA
+		 * operations, which use 32bit addressing, can assume
+		 * HHADDR is 0.
+		 */
+		if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+			mov	ALLZEROS call set_hhaddr;
+		}
+	}
+
+	/*
+	 * Update our residual information before the information is
+	 * lost by some other type of SCSI I/O (e.g. PIO).  If we have
+	 * transferred all data, no update is needed.
+	 */
+	test	SCB_RESIDUAL_SGPTR, SG_LIST_NULL jnz residual_update_done;
+	if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0
+	  && ahc->pci_cachesize != 0) {
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			test	MWI_RESIDUAL, 0xFF jz bmov_resid;
+		}
+		mov	A, MWI_RESIDUAL;
+		add	SCB_RESIDUAL_DATACNT[0], A, STCNT[0];
+		clr	A;
+		adc	SCB_RESIDUAL_DATACNT[1], A, STCNT[1];
+		adc	SCB_RESIDUAL_DATACNT[2], A, STCNT[2];
+		clr	MWI_RESIDUAL;
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			jmp	. + 2;
+bmov_resid:
+			bmov	SCB_RESIDUAL_DATACNT, STCNT, 3;
+		}
+	} else if ((ahc->features & AHC_CMD_CHAN) != 0) {
+		bmov	SCB_RESIDUAL_DATACNT, STCNT, 3;
+	} else {
+		mov	SCB_RESIDUAL_DATACNT[0], STCNT[0];
+		mov	SCB_RESIDUAL_DATACNT[1], STCNT[1];
+		mov	SCB_RESIDUAL_DATACNT[2], STCNT[2];
+	}
+residual_update_done:
+	/*
+	 * Since we've been through a data phase, the SCB_RESID* fields
+	 * are now initialized.  Clear the full residual flag.
+	 */
+	and	SCB_SGPTR[0], ~SG_FULL_RESID;
+
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		/* Clear the channel in case we return to data phase later */
+		or	SXFRCTL0, CLRSTCNT|CLRCHN;
+		or	SXFRCTL0, CLRSTCNT|CLRCHN;
+	}
+
+	if ((ahc->flags & AHC_TARGETROLE) != 0) {
+		test	SEQ_FLAGS, DPHASE_PENDING jz ITloop;
+		and	SEQ_FLAGS, ~DPHASE_PENDING;
+		/*
+		 * For data-in phases, wait for any pending acks from the
+		 * initiator before changing phase.  We only need to
+		 * send Ignore Wide Residue messages for data-in phases.
+		 */
+		test	DFCNTRL, DIRECTION jz target_ITloop;
+		test	SSTAT1, REQINIT	jnz .;
+		test	SCB_LUN, SCB_XFERLEN_ODD jz target_ITloop;
+		test	SCSIRATE, WIDEXFER jz target_ITloop;
+		/*
+		 * Issue an Ignore Wide Residue Message.
+		 */
+		mvi	P_MESGIN|BSYO call change_phase;
+		mvi	MSG_IGN_WIDE_RESIDUE call target_outb;
+		mvi	1 call target_outb;
+		jmp	target_ITloop;
+	} else {
+		jmp	ITloop;
+	}
+
+if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+/*
+ * Command phase.  Set up the DMA registers and let 'er rip.
+ */
+p_command:
+	test	SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay;
+	mvi	PROTO_VIOLATION call set_seqint;
+p_command_okay:
+
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		bmov	HCNT[0], SCB_CDB_LEN,  1;
+		bmov	HCNT[1], ALLZEROS, 2;
+		mvi	SG_CACHE_PRE, LAST_SEG;
+	} else if ((ahc->features & AHC_CMD_CHAN) != 0) {
+		bmov	STCNT[0], SCB_CDB_LEN, 1;
+		bmov	STCNT[1], ALLZEROS, 2;
+	} else {
+		mov	STCNT[0], SCB_CDB_LEN;
+		clr	STCNT[1];
+		clr	STCNT[2];
+	}
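+	/*
+	 * CDBs of 12 bytes or less are stored directly in the SCB
+	 * (SCB_CDB_STORE); longer CDBs are DMAed from host memory via
+	 * SCB_CDB_PTR.  The add below sets carry only when
+	 * SCB_CDB_LEN >= 13, so jnc takes the embedded path.
+	 */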
+	add	NONE, -13, SCB_CDB_LEN;
+	mvi	SCB_CDB_STORE jnc p_command_embedded;
+p_command_from_host:
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		bmov	HADDR[0], SCB_CDB_PTR, 4;
+		mvi	DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN|DIRECTION);
+	} else {
+		if ((ahc->features & AHC_CMD_CHAN) != 0) {
+			bmov	HADDR[0], SCB_CDB_PTR, 4;
+			bmov	HCNT, STCNT, 3;
+		} else {
+			mvi	DINDEX, HADDR;
+			mvi	SCB_CDB_PTR call bcopy_4;
+			mov	SCB_CDB_LEN call set_hcnt;
+		}
+		mvi	DFCNTRL, (SCSIEN|SDMAEN|HDMAEN|DIRECTION|FIFORESET);
+	}
+	jmp	p_command_xfer;
+p_command_embedded:
+	/*
+	 * The data fifo seems to require 4 byte aligned
+	 * transfers from the sequencer.  Force this to
+	 * be the case by clearing HADDR[0] even though
+	 * we aren't going to touch host memory.
+	 */
+	clr	HADDR[0];
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		mvi	DFCNTRL, (PRELOADEN|SCSIEN|DIRECTION);
+		bmov	DFDAT, SCB_CDB_STORE, 12; 
+	} else if ((ahc->features & AHC_CMD_CHAN) != 0) {
+		if ((ahc->flags & AHC_SCB_BTT) != 0) {
+			/*
+			 * On the 7895 the data FIFO will
+			 * get corrupted if you try to dump
+			 * data from external SCB memory into
+			 * the FIFO while it is enabled.  So,
+			 * fill the fifo and then enable SCSI
+			 * transfers.
+			 */
+			mvi	DFCNTRL, (DIRECTION|FIFORESET);
+		} else {
+			mvi	DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFORESET);
+		}
+		bmov	DFDAT, SCB_CDB_STORE, 12; 
+		if ((ahc->flags & AHC_SCB_BTT) != 0) {
+			mvi	DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFOFLUSH);
+		} else {
+			or	DFCNTRL, FIFOFLUSH;
+		}
+	} else {
+		mvi	DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFORESET);
+		call	copy_to_fifo_6;
+		call	copy_to_fifo_6;
+		or	DFCNTRL, FIFOFLUSH;
+	}
+p_command_xfer:
+	and	SEQ_FLAGS, ~NO_CDB_SENT;
+	if ((ahc->features & AHC_DT) == 0) {
+		test	SSTAT0, SDONE jnz . + 2;
+		test    SSTAT1, PHASEMIS jz . - 1;
+		/*
+		 * Wait for our ACK to go away on its own
+		 * instead of being killed by SCSIEN getting cleared.
+		 */
+		test	SCSISIGI, ACKI jnz .;
+	} else {
+		test	DFCNTRL, SCSIEN jnz .;
+	}
+	test	SSTAT0, SDONE jnz p_command_successful;
+	/*
+	 * Don't allow a data phase if the command
+	 * was not fully transferred.
+	 */
+	or	SEQ_FLAGS, NO_CDB_SENT;
+p_command_successful:
+	and	DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN);
+	test	DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz .;
+	jmp	ITloop;
+
+/*
+ * Status phase.  Wait for the data byte to appear, then read it
+ * and store it into the SCB.
+ */
+p_status:
+	test	SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation;
+p_status_okay:
+	mov	SCB_SCSI_STATUS, SCSIDATL;
+	or	SCB_CONTROL, STATUS_RCVD;
+	jmp	ITloop;
+
+/*
+ * Message out phase.  If MSG_OUT is MSG_IDENTIFYFLAG, build a full
+ * identify message sequence and send it to the target.  The host may
+ * override this behavior by setting the MK_MESSAGE bit in the SCB
+ * control byte.  This will cause us to interrupt the host and allow
+ * it to handle the message phase completely on its own.  If the bit
+ * associated with this target is set, we will also interrupt the host,
+ * thereby allowing it to send a message on the next selection regardless
+ * of the transaction being sent.
+ * 
+ * If MSG_OUT == HOST_MSG, also interrupt the host and take a message.
+ * This is done to allow the host to send messages outside of an identify
+ * sequence while protecting the sequencer from testing the MK_MESSAGE bit
+ * on an SCB that might not be for the current nexus. (For example, a
+ * BDR message in response to a bad reselection would leave us pointed to
+ * an SCB that doesn't have anything to do with the current target).
+ *
+ * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag,
+ * bus device reset).
+ *
+ * When there are no messages to send, MSG_OUT should be set to MSG_NOOP,
+ * in case the target decides to put us in this phase for some strange
+ * reason.
+ */
+p_mesgout_retry:
+	/* Turn on ATN for the retry */
+	if ((ahc->features & AHC_DT) == 0) {
+		or	SCSISIGO, ATNO, LASTPHASE;
+	} else {
+		mvi	SCSISIGO, ATNO;
+	}
+p_mesgout:
+	mov	SINDEX, MSG_OUT;
+	cmp	SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host;
+	test	SCB_CONTROL,MK_MESSAGE	jnz host_message_loop;
+p_mesgout_identify:
+	or	SINDEX, MSG_IDENTIFYFLAG|DISCENB, SAVED_LUN;
+	test	SCB_CONTROL, DISCENB jnz . + 2;
+	and	SINDEX, ~DISCENB;
+/*
+ * Send a tag message if TAG_ENB is set in the SCB control block.
+ * Use SCB_TAG (the position in the kernel's SCB array) as the tag value.
+ */
+p_mesgout_tag:
+	test	SCB_CONTROL,TAG_ENB jz  p_mesgout_onebyte;
+	mov	SCSIDATL, SINDEX;	/* Send the identify message */
+	call	phase_lock;
+	cmp	LASTPHASE, P_MESGOUT	jne p_mesgout_done;
+	and	SCSIDATL,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL;
+	call	phase_lock;
+	cmp	LASTPHASE, P_MESGOUT	jne p_mesgout_done;
+	mov	SCB_TAG	jmp p_mesgout_onebyte;
+/*
+ * Interrupt the driver, and allow it to handle this message
+ * phase and any required retries.
+ */
+p_mesgout_from_host:
+	cmp	SINDEX, HOST_MSG	jne p_mesgout_onebyte;
+	jmp	host_message_loop;
+
+p_mesgout_onebyte:
+	mvi	CLRSINT1, CLRATNO;
+	mov	SCSIDATL, SINDEX;
+
+/*
+ * If the next bus phase after ATN drops is message out, it means
+ * that the target is requesting that the last message(s) be resent.
+ */
+	call	phase_lock;
+	cmp	LASTPHASE, P_MESGOUT	je p_mesgout_retry;
+
+p_mesgout_done:
+	mvi	CLRSINT1,CLRATNO;	/* Be sure to turn ATNO off */
+	mov	LAST_MSG, MSG_OUT;
+	mvi	MSG_OUT, MSG_NOOP;	/* No message left */
+	jmp	ITloop;
+
+/*
+ * Message in phase.  Bytes are read using Automatic PIO mode.
+ */
+p_mesgin:
+	mvi	ACCUM		call inb_first;	/* read the 1st message byte */
+
+	test	A,MSG_IDENTIFYFLAG	jnz mesgin_identify;
+	cmp	A,MSG_DISCONNECT	je mesgin_disconnect;
+	cmp	A,MSG_SAVEDATAPOINTER	je mesgin_sdptrs;
+	cmp	ALLZEROS,A		je mesgin_complete;
+	cmp	A,MSG_RESTOREPOINTERS	je mesgin_rdptrs;
+	cmp	A,MSG_IGN_WIDE_RESIDUE	je mesgin_ign_wide_residue;
+	cmp	A,MSG_NOOP		je mesgin_done;
+
+/*
+ * Pushed message loop to allow the kernel to
+ * run its own message state engine.  To avoid an
+ * extra nop instruction after signaling the kernel,
+ * we perform the phase_lock before checking to see
+ * if we should exit the loop and skip the phase_lock
+ * in the ITloop.  Performing back to back phase_locks
+ * shouldn't hurt, but why do it twice...
+ */
+host_message_loop:
+	mvi	HOST_MSG_LOOP call set_seqint;
+	call	phase_lock;
+	cmp	RETURN_1, EXIT_MSG_LOOP	je ITloop + 1;
+	jmp	host_message_loop;
+
+mesgin_ign_wide_residue:
+if ((ahc->features & AHC_WIDE) != 0) {
+	test	SCSIRATE, WIDEXFER jz mesgin_reject;
+	/* Pull the residue byte */
+	mvi	ARG_1	call inb_next;
+	cmp	ARG_1, 0x01 jne mesgin_reject;
+	test	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2;
+	test	SCB_LUN, SCB_XFERLEN_ODD jnz mesgin_done;
+	mvi	IGN_WIDE_RES call set_seqint;
+	jmp	mesgin_done;
+}
+
+mesgin_proto_violation:
+	mvi	PROTO_VIOLATION call set_seqint;
+	jmp	mesgin_done;
+mesgin_reject:
+	mvi	MSG_MESSAGE_REJECT	call mk_mesg;
+mesgin_done:
+	mov	NONE,SCSIDATL;		/*dummy read from latch to ACK*/
+	jmp	ITloop;
+
+/*
+ * We received a "command complete" message.  Put the SCB_TAG into the QOUTFIFO,
+ * and trigger a completion interrupt.  Before doing so, check to see if there
+ * is a residual or the status byte is something other than STATUS_GOOD (0).
+ * In either of these conditions, we upload the SCB back to the host so it can
+ * process this information.  In the case of a non-zero status byte, we
+ * additionally interrupt the kernel driver synchronously, allowing it to
+ * decide if sense should be retrieved.  If the kernel driver wishes to request
+ * sense, it will fill the kernel SCB with a request sense command, requeue
+ * it to the QINFIFO and tell us not to post to the QOUTFIFO by setting 
+ * RETURN_1 to SEND_SENSE.
+ */
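+/*
+ * Host side view (illustrative sketch only, names are the kernel driver's):
+ * on a CMDCMPLT interrupt the driver drains the QOUTFIFO, roughly
+ *
+ *	while (qoutfifo[qoutfifonext] != SCB_LIST_NULL) {
+ *		scb_index = qoutfifo[qoutfifonext];
+ *		qoutfifo[qoutfifonext++] = SCB_LIST_NULL;
+ *		ahc_done(ahc, ahc_lookup_scb(ahc, scb_index));
+ *	}
+ */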
+mesgin_complete:
+
+	/*
+	 * If ATN is raised, we still want to give the target a message.
+	 * Perhaps there was a parity error on this last message byte.
+	 * Either way, the target should take us to message out phase
+	 * and then attempt to complete the command again.  We should use a
+	 * critical section here to guard against a timeout triggering
+	 * for this command and setting ATN while we are still processing
+	 * the completion.
+	test	SCSISIGI, ATNI jnz mesgin_done;
+	 */
+
+	/*
+	 * If we are identified and have successfully sent the CDB,
+	 * any status will do.  Optimize this fast path.
+	 */
+	test	SCB_CONTROL, STATUS_RCVD jz mesgin_proto_violation;
+	test	SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz complete_accepted; 
+
+	/*
+	 * If the target never sent an identify message but instead went
+	 * to mesgin to give an invalid message, let the host abort us.
+	 */
+	test	SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation;
+
+	/*
+	 * If we received good status but never successfully sent the
+	 * cdb, abort the command.
+	 */
+	test	SCB_SCSI_STATUS,0xff	jnz complete_accepted;
+	test	SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation;
+
+complete_accepted:
+	/*
+	 * See if we attempted to deliver a message but the target ignored us.
+	 */
+	test	SCB_CONTROL, MK_MESSAGE jz . + 2;
+	mvi	MKMSG_FAILED call set_seqint;
+
+	/*
+	 * Check for residuals
+	 */
+	test	SCB_SGPTR, SG_LIST_NULL jnz check_status;/* No xfer */
+	test	SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */
+	test	SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb;
+check_status:
+	test	SCB_SCSI_STATUS,0xff	jz complete;	/* Good Status? */
+upload_scb:
+	or	SCB_SGPTR, SG_RESID_VALID;
+	mvi	DMAPARAMS, FIFORESET;
+	mov	SCB_TAG		call dma_scb;
+	test	SCB_SCSI_STATUS, 0xff	jz complete;	/* Just a residual? */
+	mvi	BAD_STATUS call set_seqint;		/* let driver know */
+	cmp	RETURN_1, SEND_SENSE	jne complete;
+	call	add_scb_to_free_list;
+	jmp	await_busfree;
+complete:
+	mov	SCB_TAG call complete_post;
+	jmp	await_busfree;
+}
+
+complete_post:
+	/* Post the SCBID in SINDEX and issue an interrupt */
+	call	add_scb_to_free_list;
+	mov	ARG_1, SINDEX;
+	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+		mov	A, SDSCB_QOFF;
+	} else {
+		mov	A, QOUTPOS;
+	}
+	mvi	QOUTFIFO_OFFSET call post_byte_setup;
+	mov	ARG_1 call post_byte;
+	if ((ahc->features & AHC_QUEUE_REGS) == 0) {
+		inc 	QOUTPOS;
+	}
+	mvi	INTSTAT,CMDCMPLT ret;
+
+if ((ahc->flags & AHC_INITIATORROLE) != 0) {
+/*
+ * Is it a disconnect message?  Set a flag in the SCB to remind us
+ * and await the bus going free.  If this is an untagged transaction,
+ * store the SCB id for it in our untagged target table for lookup on
+ * a reselection.
+ */
+mesgin_disconnect:
+	/*
+	 * If ATN is raised, we still want to give the target a message.
+	 * Perhaps there was a parity error on this last message byte
+	 * or we want to abort this command.  Either way, the target
+	 * should take us to message out phase and then attempt to
+	 * disconnect again.
+	 * XXX - Wait for more testing.
+	test	SCSISIGI, ATNI jnz mesgin_done;
+	 */
+	test	SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT
+		jnz mesgin_proto_violation;
+	or	SCB_CONTROL,DISCONNECTED;
+	if ((ahc->flags & AHC_PAGESCBS) != 0) {
+		call	add_scb_to_disc_list;
+	}
+	test	SCB_CONTROL, TAG_ENB jnz await_busfree;
+	mov	ARG_1, SCB_TAG;
+	and	SAVED_LUN, LID, SCB_LUN;
+	mov	SCB_SCSIID	call set_busy_target;
+	jmp	await_busfree;
+
+/*
+ * Save data pointers message:
+ * Copy the hardware data pointer values back into the SCB, but only if
+ * we've actually been through a data phase that changed them.  This
+ * protects against bogus data in scratch ram and the residual counts
+ * since they are only initialized when we go into data_in or data_out.
+ * Ack the message as soon as possible.  For chips without S/G pipelining,
+ * we can only ack the message after SHADDR has been saved.  On these
+ * chips, SHADDR increments with every bus transaction, even PIO.
+ */
+mesgin_sdptrs:
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		mov	NONE,SCSIDATL;		/*dummy read from latch to ACK*/
+		test	SEQ_FLAGS, DPHASE	jz ITloop;
+	} else {
+		test	SEQ_FLAGS, DPHASE	jz mesgin_done;
+	}
+
+	/*
+	 * If we are asked to save our position at the end of the
+	 * transfer, just mark us at the end rather than perform a
+	 * full save.
+	 */
+	test	SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz mesgin_sdptrs_full;
+	or	SCB_SGPTR, SG_LIST_NULL;
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		jmp	ITloop;
+	} else {
+		jmp	mesgin_done;
+	}
+
+mesgin_sdptrs_full:
+
+	/*
+	 * The SCB_SGPTR becomes the next one we'll download,
+	 * and the SCB_DATAPTR becomes the current SHADDR.
+	 * Use the residual number since STCNT is corrupted by
+	 * any message transfer.
+	 */
+	if ((ahc->features & AHC_CMD_CHAN) != 0) {
+		bmov	SCB_DATAPTR, SHADDR, 4;
+		if ((ahc->features & AHC_ULTRA2) == 0) {
+			mov	NONE,SCSIDATL;	/*dummy read from latch to ACK*/
+		}
+		bmov	SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8;
+	} else {
+		mvi	DINDEX, SCB_DATAPTR;
+		mvi	SHADDR call bcopy_4;
+		mov	NONE,SCSIDATL;	/*dummy read from latch to ACK*/
+		mvi	SCB_RESIDUAL_DATACNT call bcopy_8;
+	}
+	jmp	ITloop;
+
+/*
+ * Restore pointers message?  Data pointers are recopied from the
+ * SCB anytime we enter a data phase for the first time, so all
+ * we need to do is clear the DPHASE flag and let the data phase
+ * code do the rest.  We also reset/reallocate the FIFO to make
+ * sure we have a clean start for the next data or command phase.
+ */
+mesgin_rdptrs:
+	and	SEQ_FLAGS, ~DPHASE;		/*
+						 * We'll reload them
+						 * the next time through
+						 * the dataphase.
+						 */
+	or	SXFRCTL0, CLRSTCNT|CLRCHN;
+	jmp	mesgin_done;
+
+/*
+ * Index into our Busy Target table.  SINDEX and DINDEX are modified
+ * upon return.  SCBPTR may be modified by this action.
+ */
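+/*
+ * Illustrative example: on a chip without per-lun (BTT) storage, an
+ * untagged command to target 3 gives DINDEX = (SCSIID >> 4) +
+ * BUSY_TARGETS = BUSY_TARGETS + 3, and ARG_1 (the SCB number, or
+ * SCB_LIST_NULL to clear the entry) is written there.
+ */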
+set_busy_target:
+	shr	DINDEX, 4, SINDEX;
+	if ((ahc->flags & AHC_SCB_BTT) != 0) {
+		mov	SCBPTR, SAVED_LUN;
+		add	DINDEX, SCB_64_BTT;
+	} else {
+		add	DINDEX, BUSY_TARGETS;
+	}
+	mov	DINDIR, ARG_1 ret;
+
+/*
+ * Identify message?  For a reconnecting target, this tells us the lun
+ * that the reconnection is for - find the correct SCB and switch to it,
+ * clearing the "disconnected" bit so we don't "find" it by accident later.
+ */
+mesgin_identify:
+	/*
+	 * Determine whether a target is using tagged or non-tagged
+	 * transactions by first looking at the transaction stored in
+	 * the busy target array.  If there is no untagged transaction
+	 * for this target or the transaction is for a different lun, then
+	 * this must be a tagged transaction.
+	 */
+	shr	SINDEX, 4, SAVED_SCSIID;
+	and	SAVED_LUN, MSG_IDENTIFY_LUNMASK, A;
+	if ((ahc->flags & AHC_SCB_BTT) != 0) {
+		add	SINDEX, SCB_64_BTT;
+		mov	SCBPTR, SAVED_LUN;
+		if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+			add	NONE, -SCB_64_BTT, SINDEX;
+			jc	. + 2;
+			mvi	INTSTAT, OUT_OF_RANGE;
+			nop;
+			add	NONE, -(SCB_64_BTT + 16), SINDEX;
+			jnc	. + 2;
+			mvi	INTSTAT, OUT_OF_RANGE;
+			nop;
+		}
+	} else {
+		add	SINDEX, BUSY_TARGETS;
+		if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+			add	NONE, -BUSY_TARGETS, SINDEX;
+			jc	. + 2;
+			mvi	INTSTAT, OUT_OF_RANGE;
+			nop;
+			add	NONE, -(BUSY_TARGETS + 16), SINDEX;
+			jnc	. + 2;
+			mvi	INTSTAT, OUT_OF_RANGE;
+			nop;
+		}
+	}
+	mov	ARG_1, SINDIR;
+	cmp	ARG_1, SCB_LIST_NULL	je snoop_tag;
+	if ((ahc->flags & AHC_PAGESCBS) != 0) {
+		mov	ARG_1 call findSCB;
+	} else {
+		mov	SCBPTR, ARG_1;
+	}
+	if ((ahc->flags & AHC_SCB_BTT) != 0) {
+		jmp setup_SCB_id_lun_okay;
+	} else {
+		/*
+		 * We only allow one untagged command per-target
+		 * at a time.  So, if the lun doesn't match, look
+		 * for a tag message.
+		 */
+		and	A, LID, SCB_LUN;
+		cmp	SAVED_LUN, A	je setup_SCB_id_lun_okay;
+		if ((ahc->flags & AHC_PAGESCBS) != 0) {
+			/*
+			 * findSCB removes the SCB from the
+			 * disconnected list, so we must replace
+			 * it there should this SCB be for another
+			 * lun.
+			 */
+			call	cleanup_scb;
+		}
+	}
+
+/*
+ * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message.
+ * If we get one, we use the tag returned to find the proper
+ * SCB.  With SCB paging, we must search for non-tagged
+ * transactions since the SCB may exist in any slot.  If we're not
+ * using SCB paging, we can use the tag as the direct index to the
+ * SCB.
+ */
+snoop_tag:
+	if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+		or	SEQ_FLAGS, 0x80;
+	}
+	mov	NONE,SCSIDATL;		/* ACK Identify MSG */
+	call	phase_lock;
+	if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+		or	SEQ_FLAGS, 0x1;
+	}
+	cmp	LASTPHASE, P_MESGIN	jne not_found;
+	if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+		or	SEQ_FLAGS, 0x2;
+	}
+	cmp	SCSIBUSL,MSG_SIMPLE_Q_TAG jne not_found;
+get_tag:
+	if ((ahc->flags & AHC_PAGESCBS) != 0) {
+		mvi	ARG_1	call inb_next;	/* tag value */
+		mov	ARG_1	call findSCB;
+	} else {
+		mvi	ARG_1	call inb_next;	/* tag value */
+		mov	SCBPTR, ARG_1;
+	}
+
+/*
+ * Ensure that the SCB the tag points to is for
+ * an SCB transaction to the reconnecting target.
+ */
+setup_SCB:
+	if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+		or	SEQ_FLAGS, 0x4;
+	}
+	mov	A, SCB_SCSIID;
+	cmp	SAVED_SCSIID, A	jne not_found_cleanup_scb;
+	if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+		or	SEQ_FLAGS, 0x8;
+	}
+setup_SCB_id_okay:
+	and	A, LID, SCB_LUN;
+	cmp	SAVED_LUN, A	jne not_found_cleanup_scb;
+setup_SCB_id_lun_okay:
+	if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) {
+		or	SEQ_FLAGS, 0x10;
+	}
+	test	SCB_CONTROL,DISCONNECTED jz not_found_cleanup_scb;
+	and	SCB_CONTROL,~DISCONNECTED;
+	test	SCB_CONTROL, TAG_ENB	jnz setup_SCB_tagged;
+	if ((ahc->flags & AHC_SCB_BTT) != 0) {
+		mov	A, SCBPTR;
+	}
+	mvi	ARG_1, SCB_LIST_NULL;
+	mov	SAVED_SCSIID	call	set_busy_target;
+	if ((ahc->flags & AHC_SCB_BTT) != 0) {
+		mov	SCBPTR, A;
+	}
+setup_SCB_tagged:
+	clr	SEQ_FLAGS;	/* make note of IDENTIFY */
+	call	set_transfer_settings;
+	/* See if the host wants to send a message upon reconnection */
+	test	SCB_CONTROL, MK_MESSAGE jz mesgin_done;
+	mvi	HOST_MSG	call mk_mesg;
+	jmp	mesgin_done;
+
+not_found_cleanup_scb:
+	if ((ahc->flags & AHC_PAGESCBS) != 0) {
+		call	cleanup_scb;
+	}
+not_found:
+	mvi	NO_MATCH call set_seqint;
+	jmp	mesgin_done;
+
+mk_mesg:
+	if ((ahc->features & AHC_DT) == 0) {
+		or	SCSISIGO, ATNO, LASTPHASE;
+	} else {
+		mvi	SCSISIGO, ATNO;
+	}
+	mov	MSG_OUT,SINDEX ret;
+
+/*
+ * Functions to read data in Automatic PIO mode.
+ *
+ * According to Adaptec's documentation, an ACK is not sent on input from
+ * the target until SCSIDATL is read from.  So we wait until SCSIDATL is
+ * latched (the usual way), then read the data byte directly off the bus
+ * using SCSIBUSL.  When we have pulled the ATN line, or we just want to
+ * acknowledge the byte, then we do a dummy read from SCSIDATL.  The SCSI
+ * spec guarantees that the target will hold the data byte on the bus until
+ * we send our ACK.
+ *
+ * The assumption here is that these are called in a particular sequence,
+ * and that REQ is already set when inb_first is called.  inb_{first,next}
+ * use the same calling convention as inb.
+ */
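+/*
+ * Illustrative sequence (a sketch of the convention above, not a new entry
+ * point): a multi-byte message-in is consumed by calling inb_first for the
+ * byte already latched, inb_next for each subsequent byte (which ACKs the
+ * previous byte and waits for the next REQ), and inb_last to ACK the final
+ * byte without fetching another.
+ */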
+inb_next_wait_perr:
+	mvi	PERR_DETECTED call set_seqint;
+	jmp	inb_next_wait;
+inb_next:
+	mov	NONE,SCSIDATL;		/*dummy read from latch to ACK*/
+inb_next_wait:
+	/*
+	 * If there is a parity error, wait for the kernel to
+	 * see the interrupt and prepare our message response
+	 * before continuing.
+	 */
+	test	SSTAT1, REQINIT	jz inb_next_wait;
+	test	SSTAT1, SCSIPERR jnz inb_next_wait_perr;
+inb_next_check_phase:
+	and	LASTPHASE, PHASE_MASK, SCSISIGI;
+	cmp	LASTPHASE, P_MESGIN jne mesgin_phasemis;
+inb_first:
+	mov	DINDEX,SINDEX;
+	mov	DINDIR,SCSIBUSL	ret;		/*read byte directly from bus*/
+inb_last:
+	mov	NONE,SCSIDATL ret;		/*dummy read from latch to ACK*/
+}
+
+if ((ahc->flags & AHC_TARGETROLE) != 0) {
+/*
+ * Change to a new phase.  If we are changing the state of the I/O signal,
+ * from out to in, wait an additional data release delay before continuing.
+ */
+change_phase:
+	/* Wait for preceding I/O session to complete. */
+	test	SCSISIGI, ACKI jnz .;
+
+	/* Change the phase */
+	and	DINDEX, IOI, SCSISIGI;
+	mov	SCSISIGO, SINDEX;
+	and	A, IOI, SINDEX;
+
+	/*
+	 * If the data direction has changed, from
+	 * out (initiator driving) to in (target driving),
+	 * we must wait at least a data release delay plus
+	 * the normal bus settle delay. [SCSI III SPI 10.11.0]
+	 */
+	cmp 	DINDEX, A je change_phase_wait;
+	test	SINDEX, IOI jz change_phase_wait;
+	call	change_phase_wait;
+change_phase_wait:
+	nop;
+	nop;
+	nop;
+	nop ret;
+
+/*
+ * Send a byte to an initiator in Automatic PIO mode.
+ */
+target_outb:
+	or	SXFRCTL0, SPIOEN;
+	test	SSTAT0, SPIORDY	jz .;
+	mov	SCSIDATL, SINDEX;
+	test	SSTAT0, SPIORDY	jz .;
+	and	SXFRCTL0, ~SPIOEN ret;
+}
+	
+/*
+ * Locate a disconnected SCB by SCBID.  Upon return, SCBPTR and SINDEX will
+ * be set to the position of the SCB.  If the SCB cannot be found locally,
+ * it will be paged in from host memory.  RETURN_2 stores the address of the
+ * preceding SCB in the disconnected list which can be used to speed up
+ * removal of the found SCB from the disconnected list.
+ */
+if ((ahc->flags & AHC_PAGESCBS) != 0) {
+BEGIN_CRITICAL;
+findSCB:
+	mov	A, SINDEX;			/* Tag passed in SINDEX */
+	cmp	DISCONNECTED_SCBH, SCB_LIST_NULL je findSCB_notFound;
+	mov	SCBPTR, DISCONNECTED_SCBH;	/* Initialize SCBPTR */
+	mvi	ARG_2, SCB_LIST_NULL;		/* Head of list */
+	jmp	findSCB_loop;
+findSCB_next:
+	cmp	SCB_NEXT, SCB_LIST_NULL je findSCB_notFound;
+	mov	ARG_2, SCBPTR;
+	mov	SCBPTR,SCB_NEXT;
+findSCB_loop:
+	cmp	SCB_TAG, A	jne findSCB_next;
+rem_scb_from_disc_list:
+	cmp	ARG_2, SCB_LIST_NULL	je rHead;
+	mov	DINDEX, SCB_NEXT;
+	mov	SINDEX, SCBPTR;
+	mov	SCBPTR, ARG_2;
+	mov	SCB_NEXT, DINDEX;
+	mov	SCBPTR, SINDEX ret;
+rHead:
+	mov	DISCONNECTED_SCBH,SCB_NEXT ret;
+END_CRITICAL;
+findSCB_notFound:
+	/*
+	 * We didn't find it.  Page in the SCB.
+	 */
+	mov	ARG_1, A; /* Save tag */
+	mov	ALLZEROS call get_free_or_disc_scb;
+	mvi	DMAPARAMS, HDMAEN|DIRECTION|FIFORESET;
+	mov	ARG_1	jmp dma_scb;
+}
+
+/*
+ * Prepare the hardware to post a byte to host memory given an
+ * index of (A + (256 * SINDEX)) and a base address of SHARED_DATA_ADDR.
+ */
+post_byte_setup:
+	mov	ARG_2, SINDEX;
+	if ((ahc->features & AHC_CMD_CHAN) != 0) {
+		mvi	DINDEX, CCHADDR;
+		mvi	SHARED_DATA_ADDR call	set_1byte_addr;
+		mvi	CCHCNT, 1;
+		mvi	CCSCBCTL, CCSCBRESET ret;
+	} else {
+		mvi	DINDEX, HADDR;
+		mvi	SHARED_DATA_ADDR call	set_1byte_addr;
+		mvi	1	call set_hcnt;
+		mvi	DFCNTRL, FIFORESET ret;
+	}
+
+post_byte:
+	if ((ahc->features & AHC_CMD_CHAN) != 0) {
+		bmov	CCSCBRAM, SINDEX, 1;
+		or	CCSCBCTL, CCSCBEN|CCSCBRESET;
+		test	CCSCBCTL, CCSCBDONE jz .;
+		clr	CCSCBCTL ret;
+	} else {
+		mov	DFDAT, SINDEX;
+		or	DFCNTRL, HDMAEN|FIFOFLUSH;
+		jmp	dma_finish;
+	}
+
+phase_lock_perr:
+	mvi	PERR_DETECTED call set_seqint;
+phase_lock:     
+	/*
+	 * If there is a parity error, wait for the kernel to
+	 * see the interrupt and prepare our message response
+	 * before continuing.
+	 */
+	test	SSTAT1, REQINIT jz phase_lock;
+	test	SSTAT1, SCSIPERR jnz phase_lock_perr;
+phase_lock_latch_phase:
+	if ((ahc->features & AHC_DT) == 0) {
+		and	SCSISIGO, PHASE_MASK, SCSISIGI;
+	}
+	and	LASTPHASE, PHASE_MASK, SCSISIGI ret;
+
+if ((ahc->features & AHC_CMD_CHAN) == 0) {
+set_hcnt:
+	mov	HCNT[0], SINDEX;
+clear_hcnt:
+	clr	HCNT[1];
+	clr	HCNT[2] ret;
+
+set_stcnt_from_hcnt:
+	mov	STCNT[0], HCNT[0];
+	mov	STCNT[1], HCNT[1];
+	mov	STCNT[2], HCNT[2] ret;
+
+bcopy_8:
+	mov	DINDIR, SINDIR;
+bcopy_7:
+	mov	DINDIR, SINDIR;
+	mov	DINDIR, SINDIR;
+bcopy_5:
+	mov	DINDIR, SINDIR;
+bcopy_4:
+	mov	DINDIR, SINDIR;
+bcopy_3:
+	mov	DINDIR, SINDIR;
+	mov	DINDIR, SINDIR;
+	mov	DINDIR, SINDIR ret;
+}
+
+if ((ahc->flags & AHC_TARGETROLE) != 0) {
+/*
+ * Setup addr assuming that A is an index into
+ * an array of 32byte objects, SINDEX contains
+ * the base address of that array, and DINDEX
+ * contains the base address of the location
+ * to store the indexed address.
+ */
+set_32byte_addr:
+	shr	ARG_2, 3, A;
+	shl	A, 5;
+	jmp	set_1byte_addr;
+}
+
+/*
+ * Setup addr assuming that A is an index into
+ * an array of 64byte objects, SINDEX contains
+ * the base address of that array, and DINDEX
+ * contains the base address of the location
+ * to store the indexed address.
+ */
+set_64byte_addr:
+	shr	ARG_2, 2, A;
+	shl	A, 6;
+
+/*
+ * Setup addr assuming that A + (ARG_2 * 256) is an
+ * index into an array of 1byte objects, SINDEX contains
+ * the base address of that array, and DINDEX contains
+ * the base address of the location to store the computed
+ * address.
+ */
+set_1byte_addr:
+	add     DINDIR, A, SINDIR;
+	mov     A, ARG_2;
+	adc	DINDIR, A, SINDIR;
+	clr	A;
+	adc	DINDIR, A, SINDIR;
+	adc	DINDIR, A, SINDIR ret;
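+
+/*
+ * Worked example (editorial sketch, not executed): for set_64byte_addr with
+ * A = 0x05, ARG_2 becomes A >> 2 = 1 and A becomes (A << 6) & 0xFF = 0x40,
+ * so set_1byte_addr adds 0x40 + (1 * 256) = 0x140 = 5 * 64 to the 32-bit
+ * base pointed to by SINDEX, propagating the carry byte by byte.
+ */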
+
+/*
+ * Either post or fetch an SCB from host memory based on the
+ * DIRECTION bit in DMAPARAMS. The host SCB index is in SINDEX.
+ */
+dma_scb:
+	mov	A, SINDEX;
+	if ((ahc->features & AHC_CMD_CHAN) != 0) {
+		mvi	DINDEX, CCHADDR;
+		mvi	HSCB_ADDR call set_64byte_addr;
+		mov	CCSCBPTR, SCBPTR;
+		test	DMAPARAMS, DIRECTION jz dma_scb_tohost;
+		if ((ahc->flags & AHC_SCB_BTT) != 0) {
+			mvi	CCHCNT, SCB_DOWNLOAD_SIZE_64;
+		} else {
+			mvi	CCHCNT, SCB_DOWNLOAD_SIZE;
+		}
+		mvi	CCSCBCTL, CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET;
+		cmp	CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN|CCSCBDIR jne .;
+		jmp	dma_scb_finish;
+dma_scb_tohost:
+		mvi	CCHCNT, SCB_UPLOAD_SIZE;
+		if ((ahc->features & AHC_ULTRA2) == 0) {
+			mvi	CCSCBCTL, CCSCBRESET;
+			bmov	CCSCBRAM, SCB_BASE, SCB_UPLOAD_SIZE;
+			or	CCSCBCTL, CCSCBEN|CCSCBRESET;
+			test	CCSCBCTL, CCSCBDONE jz .;
+		} else if ((ahc->bugs & AHC_SCBCHAN_UPLOAD_BUG) != 0) {
+			mvi	CCSCBCTL, CCARREN|CCSCBRESET;
+			cmp	CCSCBCTL, ARRDONE|CCARREN jne .;
+			mvi	CCHCNT, SCB_UPLOAD_SIZE;
+			mvi	CCSCBCTL, CCSCBEN|CCSCBRESET;
+			cmp	CCSCBCTL, CCSCBDONE|CCSCBEN jne .;
+		} else {
+			mvi	CCSCBCTL, CCARREN|CCSCBEN|CCSCBRESET;
+			cmp	CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN jne .;
+		}
+dma_scb_finish:
+		clr	CCSCBCTL;
+		test	CCSCBCTL, CCARREN|CCSCBEN jnz .;
+		ret;
+	} else {
+		mvi	DINDEX, HADDR;
+		mvi	HSCB_ADDR call set_64byte_addr;
+		mvi	SCB_DOWNLOAD_SIZE call set_hcnt;
+		mov	DFCNTRL, DMAPARAMS;
+		test	DMAPARAMS, DIRECTION	jnz dma_scb_fromhost;
+		/* Fill it with the SCB data */
+copy_scb_tofifo:
+		mvi	SINDEX, SCB_BASE;
+		add	A, SCB_DOWNLOAD_SIZE, SINDEX;
+copy_scb_tofifo_loop:
+		call	copy_to_fifo_8;
+		cmp	SINDEX, A jne copy_scb_tofifo_loop;
+		or	DFCNTRL, HDMAEN|FIFOFLUSH;
+		jmp	dma_finish;
+dma_scb_fromhost:
+		mvi	DINDEX, SCB_BASE;
+		if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0) {
+			/*
+			 * The PCI module will only issue a PCI
+			 * retry if the data FIFO is empty.  If the
+			 * host disconnects in the middle of a
+			 * transfer, we must empty the fifo of all
+			 * available data to force the chip to
+			 * continue the transfer.  This does not
+			 * happen for SCSI transfers as the SCSI module
+			 * will drain the FIFO as data are made available.
+			 * When the hang occurs, we know that a multiple
+			 * of 8 bytes is in the FIFO because the PCI
+			 * module has an 8 byte input latch that only
+			 * dumps to the FIFO when HCNT == 0 or the
+			 * latch is full.
+			 */
+			clr	A;
+			/* Wait for at least 8 bytes of data to arrive. */
+dma_scb_hang_fifo:
+			test	DFSTATUS, FIFOQWDEMP jnz dma_scb_hang_fifo;
+dma_scb_hang_wait:
+			test	DFSTATUS, MREQPEND jnz dma_scb_hang_wait;
+			test	DFSTATUS, HDONE	jnz dma_scb_hang_dma_done;
+			test	DFSTATUS, HDONE	jnz dma_scb_hang_dma_done;
+			test	DFSTATUS, HDONE	jnz dma_scb_hang_dma_done;
+			/*
+			 * The PCI module no longer intends to perform
+			 * a PCI transaction.  Drain the fifo.
+			 */
+dma_scb_hang_dma_drain_fifo:
+			not	A, HCNT;
+			add	A, SCB_DOWNLOAD_SIZE+SCB_BASE+1;
+			and	A, ~0x7;
+			mov	DINDIR,DFDAT;
+			cmp	DINDEX, A jne . - 1;
+			cmp	DINDEX, SCB_DOWNLOAD_SIZE+SCB_BASE
+				je	dma_finish_nowait;
+			/* Restore A as the lines left to transfer. */
+			add	A, -SCB_BASE, DINDEX;
+			shr	A, 3;
+			jmp	dma_scb_hang_fifo;
+dma_scb_hang_dma_done:
+			and	DFCNTRL, ~HDMAEN;
+			test	DFCNTRL, HDMAEN jnz .;
+			add	SEQADDR0, A;
+		} else {
+			call	dma_finish;
+		}
+		call	dfdat_in_8;
+		call	dfdat_in_8;
+		call	dfdat_in_8;
+dfdat_in_8:
+		mov	DINDIR,DFDAT;
+dfdat_in_7:
+		mov	DINDIR,DFDAT;
+		mov	DINDIR,DFDAT;
+		mov	DINDIR,DFDAT;
+		mov	DINDIR,DFDAT;
+		mov	DINDIR,DFDAT;
+dfdat_in_2:
+		mov	DINDIR,DFDAT;
+		mov	DINDIR,DFDAT ret;
+	}
+
+copy_to_fifo_8:
+	mov	DFDAT,SINDIR;
+	mov	DFDAT,SINDIR;
+copy_to_fifo_6:
+	mov	DFDAT,SINDIR;
+copy_to_fifo_5:
+	mov	DFDAT,SINDIR;
+copy_to_fifo_4:
+	mov	DFDAT,SINDIR;
+	mov	DFDAT,SINDIR;
+	mov	DFDAT,SINDIR;
+	mov	DFDAT,SINDIR ret;
+
+/*
+ * Wait for DMA from host memory to data FIFO to complete, then disable
+ * DMA and wait for it to acknowledge that it's off.
+ */
+dma_finish:
+	test	DFSTATUS,HDONE	jz dma_finish;
+dma_finish_nowait:
+	/* Turn off DMA */
+	and	DFCNTRL, ~HDMAEN;
+	test	DFCNTRL, HDMAEN jnz .;
+	ret;
+
+/*
+ * Restore an SCB that failed to match an incoming reselection
+ * to the correct/safe state.  If the SCB is for a disconnected
+ * transaction, it must be returned to the disconnected list.
+ * If it is not in the disconnected state, it must be free.
+ */
+cleanup_scb:
+	if ((ahc->flags & AHC_PAGESCBS) != 0) {
+		test	SCB_CONTROL,DISCONNECTED jnz add_scb_to_disc_list;
+	}
+add_scb_to_free_list:
+	if ((ahc->flags & AHC_PAGESCBS) != 0) {
+BEGIN_CRITICAL;
+		mov	SCB_NEXT, FREE_SCBH;
+		mvi	SCB_TAG, SCB_LIST_NULL;
+		mov	FREE_SCBH, SCBPTR ret;
+END_CRITICAL;
+	} else {
+		mvi	SCB_TAG, SCB_LIST_NULL ret;
+	}
+
+if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+set_hhaddr:
+	or	DSCOMMAND1, HADDLDSEL0;
+	and	HADDR, SG_HIGH_ADDR_BITS, SINDEX;
+	and	DSCOMMAND1, ~HADDLDSEL0 ret;
+}
+
+if ((ahc->flags & AHC_PAGESCBS) != 0) {
+get_free_or_disc_scb:
+BEGIN_CRITICAL;
+	cmp	FREE_SCBH, SCB_LIST_NULL jne dequeue_free_scb;
+	cmp	DISCONNECTED_SCBH, SCB_LIST_NULL jne dequeue_disc_scb;
+return_error:
+	mvi	NO_FREE_SCB call set_seqint;
+	mvi	SINDEX, SCB_LIST_NULL	ret;
+dequeue_disc_scb:
+	mov	SCBPTR, DISCONNECTED_SCBH;
+	mov	DISCONNECTED_SCBH, SCB_NEXT;
+END_CRITICAL;
+	mvi	DMAPARAMS, FIFORESET;
+	mov	SCB_TAG	jmp dma_scb;
+BEGIN_CRITICAL;
+dequeue_free_scb:
+	mov	SCBPTR, FREE_SCBH;
+	mov	FREE_SCBH, SCB_NEXT ret;
+END_CRITICAL;
+
+add_scb_to_disc_list:
+/*
+ * Link this SCB into the DISCONNECTED list.  This list holds the
+ * candidates for paging out an SCB if one is needed for a new command.
+ * Modifying the disconnected list is a critical (pause disabled) section.
+ */
+BEGIN_CRITICAL;
+	mov	SCB_NEXT, DISCONNECTED_SCBH;
+	mov	DISCONNECTED_SCBH, SCBPTR ret;
+END_CRITICAL;
+}
+set_seqint:
+	mov	INTSTAT, SINDEX;
+	nop;
+return:
+	ret;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
new file mode 100644
index 0000000..468d612
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -0,0 +1,306 @@
+/*
+ * Interface for the 93C66/56/46/26/06 serial eeprom parts.
+ *
+ * Copyright (c) 1995, 1996 Daniel M. Eischen
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.c#17 $
+ *
+ * $FreeBSD$
+ */
+
+/*
+ *   The instruction set of the 93C66/56/46/26/06 chips is as follows:
+ *
+ *               Start  OP	    *
+ *     Function   Bit  Code  Address**  Data     Description
+ *     -------------------------------------------------------------------
+ *     READ        1    10   A5 - A0             Reads data stored in memory,
+ *                                               starting at specified address
+ *     EWEN        1    00   11XXXX              Write enable must precede
+ *                                               all programming modes
+ *     ERASE       1    11   A5 - A0             Erase register A5A4A3A2A1A0
+ *     WRITE       1    01   A5 - A0   D15 - D0  Writes register
+ *     ERAL        1    00   10XXXX              Erase all registers
+ *     WRAL        1    00   01XXXX    D15 - D0  Writes to all registers
+ *     EWDS        1    00   00XXXX              Disables all programming
+ *                                               instructions
+ *     *Note: A value of X for address is a don't care condition.
+ *    **Note: There are 8 address bits for the 93C56/66 chips unlike
+ *	      the 93C46/26/06 chips which have 6 address bits.
+ *
+ *   The 93C46 has a four wire interface: clock, chip select, data in, and
+ *   data out.  In order to perform one of the above functions, you need
+ *   to enable the chip select for a clock period (typically a minimum of
+ *   1 usec, with the clock high and low a minimum of 750 and 250 nsec
+ *   respectively).  While the chip select remains high, you can clock in
+ *   the instructions (above) starting with the start bit, followed by the
+ *   OP code, Address, and Data (if needed).  For the READ instruction, the
+ *   requested 16-bit register contents are read from the data out line,
+ *   preceded by an initial zero (leading 0, followed by 16-bits, MSB
+ *   first).  The clock cycling from low to high initiates the next data
+ *   bit to be sent from the chip.
+ *
+ */
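+
+/*
+ * Worked example (illustrative only): reading register 0x12 of a 93C46
+ * means clocking out the start bit (1), the READ op code (10) and the six
+ * address bits (010010), then clocking in 17 bits from the data out line:
+ * a leading zero followed by the 16 data bits, MSB first.
+ */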
+
+#ifdef __linux__
+#include "aic7xxx_osm.h"
+#include "aic7xxx_inline.h"
+#include "aic7xxx_93cx6.h"
+#else
+#include <dev/aic7xxx/aic7xxx_osm.h>
+#include <dev/aic7xxx/aic7xxx_inline.h>
+#include <dev/aic7xxx/aic7xxx_93cx6.h>
+#endif
+
+/*
+ * Right now, we only have to read the SEEPROM, but this command table makes
+ * it easy to add other 93Cx6 functions.
+ */
+static struct seeprom_cmd {
+  	uint8_t len;
+ 	uint8_t bits[9];
+} seeprom_read = {3, {1, 1, 0}};
+
+static struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
+static struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
+static struct seeprom_cmd seeprom_write = {3, {1, 0, 1}};
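+
+/*
+ * Note (editorial): seeprom_read and seeprom_write hold only the start bit
+ * and op code ({1,1,0} and {1,0,1}); the 6 or 8 address bits are clocked
+ * out separately by ahc_read_seeprom()/ahc_write_seeprom() below.  EWEN and
+ * EWDS carry their full 9-bit patterns since their address field is fixed.
+ */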
+
+/*
+ * Wait for the SEERDY to go high; about 800 ns.
+ */
+#define CLOCK_PULSE(sd, rdy)				\
+	while ((SEEPROM_STATUS_INB(sd) & rdy) == 0) {	\
+		;  /* Do nothing */			\
+	}						\
+	(void)SEEPROM_INB(sd);	/* Clear clock */
+
+/*
+ * Send a START condition and the given command
+ */
+static void
+send_seeprom_cmd(struct seeprom_descriptor *sd, struct seeprom_cmd *cmd)
+{
+	uint8_t temp;
+	int i = 0;
+
+	/* Send chip select for one clock cycle. */
+	temp = sd->sd_MS ^ sd->sd_CS;
+	SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+	CLOCK_PULSE(sd, sd->sd_RDY);
+
+	for (i = 0; i < cmd->len; i++) {
+		if (cmd->bits[i] != 0)
+			temp ^= sd->sd_DO;
+		SEEPROM_OUTB(sd, temp);
+		CLOCK_PULSE(sd, sd->sd_RDY);
+		SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+		CLOCK_PULSE(sd, sd->sd_RDY);
+		if (cmd->bits[i] != 0)
+			temp ^= sd->sd_DO;
+	}
+}
+
+/*
+ * Clear CS to put the chip in the reset state, where it can wait for new commands.
+ */
+static void
+reset_seeprom(struct seeprom_descriptor *sd)
+{
+	uint8_t temp;
+
+	temp = sd->sd_MS;
+	SEEPROM_OUTB(sd, temp);
+	CLOCK_PULSE(sd, sd->sd_RDY);
+	SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+	CLOCK_PULSE(sd, sd->sd_RDY);
+	SEEPROM_OUTB(sd, temp);
+	CLOCK_PULSE(sd, sd->sd_RDY);
+}
+
+/*
+ * Read the serial EEPROM.  Returns 1 if successful and 0 if
+ * not successful.
+ */
+int
+ahc_read_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
+		 u_int start_addr, u_int count)
+{
+	int i = 0;
+	u_int k = 0;
+	uint16_t v;
+	uint8_t temp;
+
+	/*
+	 * Read the requested registers of the seeprom.  The loop
+	 * will range from 0 to count-1.
+	 */
+	for (k = start_addr; k < count + start_addr; k++) {
+		/*
+		 * Now we're ready to send the read command followed by the
+		 * address of the 16-bit register we want to read.
+		 */
+		send_seeprom_cmd(sd, &seeprom_read);
+
+		/* Send the 6 or 8 bit address (MSB first, LSB last). */
+		temp = sd->sd_MS ^ sd->sd_CS;
+		for (i = (sd->sd_chip - 1); i >= 0; i--) {
+			if ((k & (1 << i)) != 0)
+				temp ^= sd->sd_DO;
+			SEEPROM_OUTB(sd, temp);
+			CLOCK_PULSE(sd, sd->sd_RDY);
+			SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+			CLOCK_PULSE(sd, sd->sd_RDY);
+			if ((k & (1 << i)) != 0)
+				temp ^= sd->sd_DO;
+		}
+
+		/*
+		 * Now read the 16 bit register.  An initial 0 precedes the
+		 * register contents which begins with bit 15 (MSB) and ends
+		 * with bit 0 (LSB).  The initial 0 will be shifted off the
+		 * top of our word as we let the loop run from 0 to 16.
+		 */
+		v = 0;
+		for (i = 16; i >= 0; i--) {
+			SEEPROM_OUTB(sd, temp);
+			CLOCK_PULSE(sd, sd->sd_RDY);
+			v <<= 1;
+			if (SEEPROM_DATA_INB(sd) & sd->sd_DI)
+				v |= 1;
+			SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+			CLOCK_PULSE(sd, sd->sd_RDY);
+		}
+
+		buf[k - start_addr] = v;
+
+		/* Reset the chip select for the next command cycle. */
+		reset_seeprom(sd);
+	}
+#ifdef AHC_DUMP_EEPROM
+	printf("\nSerial EEPROM:\n\t");
+	for (k = 0; k < count; k = k + 1) {
+		if (((k % 8) == 0) && (k != 0)) {
+			printf ("\n\t");
+		}
+		printf (" 0x%x", buf[k]);
+	}
+	printf ("\n");
+#endif
+	return (1);
+}
+
+/*
+ * Write the serial EEPROM and return 1 if successful and 0 if
+ * not successful.
+ */
+int
+ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
+		  u_int start_addr, u_int count)
+{
+	uint16_t v;
+	uint8_t temp;
+	int i, k;
+
+	/* Place the chip into write-enable mode */
+	send_seeprom_cmd(sd, &seeprom_ewen);
+	reset_seeprom(sd);
+
+	/* Write all requested data out to the seeprom. */
+	temp = sd->sd_MS ^ sd->sd_CS;
+	for (k = start_addr; k < count + start_addr; k++) {
+		/* Send the write command */
+		send_seeprom_cmd(sd, &seeprom_write);
+
+		/* Send the 6 or 8 bit address (MSB first). */
+		for (i = (sd->sd_chip - 1); i >= 0; i--) {
+			if ((k & (1 << i)) != 0)
+				temp ^= sd->sd_DO;
+			SEEPROM_OUTB(sd, temp);
+			CLOCK_PULSE(sd, sd->sd_RDY);
+			SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+			CLOCK_PULSE(sd, sd->sd_RDY);
+			if ((k & (1 << i)) != 0)
+				temp ^= sd->sd_DO;
+		}
+
+		/* Write the 16 bit value, MSB first */
+		v = buf[k - start_addr];
+		for (i = 15; i >= 0; i--) {
+			if ((v & (1 << i)) != 0)
+				temp ^= sd->sd_DO;
+			SEEPROM_OUTB(sd, temp);
+			CLOCK_PULSE(sd, sd->sd_RDY);
+			SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+			CLOCK_PULSE(sd, sd->sd_RDY);
+			if ((v & (1 << i)) != 0)
+				temp ^= sd->sd_DO;
+		}
+
+		/* Wait for the chip to complete the write */
+		temp = sd->sd_MS;
+		SEEPROM_OUTB(sd, temp);
+		CLOCK_PULSE(sd, sd->sd_RDY);
+		temp = sd->sd_MS ^ sd->sd_CS;
+		do {
+			SEEPROM_OUTB(sd, temp);
+			CLOCK_PULSE(sd, sd->sd_RDY);
+			SEEPROM_OUTB(sd, temp ^ sd->sd_CK);
+			CLOCK_PULSE(sd, sd->sd_RDY);
+		} while ((SEEPROM_DATA_INB(sd) & sd->sd_DI) == 0);
+
+		reset_seeprom(sd);
+	}
+
+	/* Put the chip back into write-protect mode */
+	send_seeprom_cmd(sd, &seeprom_ewds);
+	reset_seeprom(sd);
+
+	return (1);
+}
+
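+/*
+ * Verify the SEEPROM checksum (editorial summary of the check below, which
+ * assumes the checksum field is the final 16-bit word of seeprom_config):
+ * the low 16 bits of the sum of every preceding word must equal
+ * sc->checksum, and an all-zero sum is rejected as an unprogrammed part.
+ */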
+int
+ahc_verify_cksum(struct seeprom_config *sc)
+{
+	int i;
+	int maxaddr;
+	uint32_t checksum;
+	uint16_t *scarray;
+
+	maxaddr = (sizeof(*sc)/2) - 1;
+	checksum = 0;
+	scarray = (uint16_t *)sc;
+
+	for (i = 0; i < maxaddr; i++)
+		checksum = checksum + scarray[i];
+	if (checksum == 0
+	 || (checksum & 0xFFFF) != sc->checksum) {
+		return (0);
+	} else {
+		return(1);
+	}
+}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.h b/drivers/scsi/aic7xxx/aic7xxx_93cx6.h
new file mode 100644
index 0000000..859c43c
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.h
@@ -0,0 +1,102 @@
+/*
+ * Interface to the 93C46/56 serial EEPROM that is used to store BIOS
+ * settings for the aic7xxx based adaptec SCSI controllers.  It can
+ * also be used for 93C26 and 93C06 serial EEPROMS.
+ *
+ * Copyright (c) 1994, 1995, 2000 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.h#12 $
+ *
+ * $FreeBSD$
+ */
+#ifndef _AIC7XXX_93CX6_H_
+#define _AIC7XXX_93CX6_H_
+
+typedef enum {
+	C46 = 6,
+	C56_66 = 8
+} seeprom_chip_t;
+
+struct seeprom_descriptor {
+	struct ahc_softc *sd_ahc;
+	u_int sd_control_offset;
+	u_int sd_status_offset;
+	u_int sd_dataout_offset;
+	seeprom_chip_t sd_chip;
+	uint16_t sd_MS;
+	uint16_t sd_RDY;
+	uint16_t sd_CS;
+	uint16_t sd_CK;
+	uint16_t sd_DO;
+	uint16_t sd_DI;
+};
+
+/*
+ * This function will read count 16-bit words from the serial EEPROM and
+ * return their value in buf.  The port address of the aic7xxx serial EEPROM
+ * control register is passed in as offset.  The following parameters are
+ * also passed in:
+ *
+ *   CS  - Chip select
+ *   CK  - Clock
+ *   DO  - Data out
+ *   DI  - Data in
+ *   RDY - SEEPROM ready
+ *   MS  - Memory port mode select
+ *
+ *  A failed read attempt returns 0, and a successful read returns 1.
+ */
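+
+/*
+ * Caller sketch (field values are placeholders, for illustration only):
+ *
+ *	struct seeprom_descriptor sd;
+ *	uint16_t buf[32];
+ *
+ *	sd.sd_ahc = ahc;
+ *	sd.sd_control_offset = <SEEPROM control register offset>;
+ *	sd.sd_status_offset = <SEEPROM status register offset>;
+ *	sd.sd_dataout_offset = <SEEPROM data-out register offset>;
+ *	sd.sd_chip = C46;
+ *	sd.sd_MS = <mode select bit>;	sd.sd_RDY = <ready bit>;
+ *	sd.sd_CS = <chip select bit>;	sd.sd_CK = <clock bit>;
+ *	sd.sd_DO = <data out bit>;	sd.sd_DI = <data in bit>;
+ *
+ *	success = ahc_read_seeprom(&sd, buf, 0, 32);
+ */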
+
+#define	SEEPROM_INB(sd) \
+	ahc_inb(sd->sd_ahc, sd->sd_control_offset)
+#define	SEEPROM_OUTB(sd, value)					\
+do {								\
+	ahc_outb(sd->sd_ahc, sd->sd_control_offset, value);	\
+	ahc_flush_device_writes(sd->sd_ahc);			\
+} while(0)
+
+#define	SEEPROM_STATUS_INB(sd) \
+	ahc_inb(sd->sd_ahc, sd->sd_status_offset)
+#define	SEEPROM_DATA_INB(sd) \
+	ahc_inb(sd->sd_ahc, sd->sd_dataout_offset)
+
+int ahc_read_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
+		     u_int start_addr, u_int count);
+int ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
+		      u_int start_addr, u_int count);
+int ahc_verify_cksum(struct seeprom_config *sc);
+
+#endif /* _AIC7XXX_93CX6_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
new file mode 100644
index 0000000..9a6b4a5
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -0,0 +1,7451 @@
+/*
+ * Core routines and tables shareable across OS platforms.
+ *
+ * Copyright (c) 1994-2002 Justin T. Gibbs.
+ * Copyright (c) 2000-2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#134 $
+ *
+ * $FreeBSD$
+ */
+
+#ifdef __linux__
+#include "aic7xxx_osm.h"
+#include "aic7xxx_inline.h"
+#include "aicasm/aicasm_insformat.h"
+#else
+#include <dev/aic7xxx/aic7xxx_osm.h>
+#include <dev/aic7xxx/aic7xxx_inline.h>
+#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
+#endif
+
+/****************************** Softc Data ************************************/
+struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);
+
+/***************************** Lookup Tables **********************************/
+char *ahc_chip_names[] =
+{
+	"NONE",
+	"aic7770",
+	"aic7850",
+	"aic7855",
+	"aic7859",
+	"aic7860",
+	"aic7870",
+	"aic7880",
+	"aic7895",
+	"aic7895C",
+	"aic7890/91",
+	"aic7896/97",
+	"aic7892",
+	"aic7899"
+};
+static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);
+
+/*
+ * Hardware error codes.
+ */
+struct ahc_hard_error_entry {
+        uint8_t errno;
+	char *errmesg;
+};
+
+static struct ahc_hard_error_entry ahc_hard_errors[] = {
+	{ ILLHADDR,	"Illegal Host Access" },
+	{ ILLSADDR,	"Illegal Sequencer Address referenced" },
+	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
+	{ SQPARERR,	"Sequencer Parity Error" },
+	{ DPARERR,	"Data-path Parity Error" },
+	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
+	{ PCIERRSTAT,	"PCI Error detected" },
+	{ CIOPARERR,	"CIOBUS Parity Error" },
+};
+static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors);
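+
+/*
+ * Note (editorial): the table above is ordered by ERROR register bit
+ * position, lowest bit first; ahc_handle_brkadrint() shifts the ERROR value
+ * right until it equals 1 and uses the shift count to index this table when
+ * printing the message.
+ */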
+
+static struct ahc_phase_table_entry ahc_phase_table[] =
+{
+	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
+	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
+	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
+	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
+	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
+	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
+	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
+	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
+	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
+	{ 0,		MSG_NOOP,		"in unknown phase"	}
+};
+
+/*
+ * In most cases we only wish to iterate over real phases, so
+ * exclude the last element from the count.
+ */
+static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1;
+
+/*
+ * Valid SCSIRATE values.  (p. 3-17)
+ * Provides a mapping of transfer periods in ns to the proper value to
+ * stick in the scsixfer reg.
+ */
+static struct ahc_syncrate ahc_syncrates[] =
+{
+      /* ultra2    fast/ultra  period     rate */
+	{ 0x42,      0x000,      9,      "80.0" },
+	{ 0x03,      0x000,     10,      "40.0" },
+	{ 0x04,      0x000,     11,      "33.0" },
+	{ 0x05,      0x100,     12,      "20.0" },
+	{ 0x06,      0x110,     15,      "16.0" },
+	{ 0x07,      0x120,     18,      "13.4" },
+	{ 0x08,      0x000,     25,      "10.0" },
+	{ 0x19,      0x010,     31,      "8.0"  },
+	{ 0x1a,      0x020,     37,      "6.67" },
+	{ 0x1b,      0x030,     43,      "5.7"  },
+	{ 0x1c,      0x040,     50,      "5.0"  },
+	{ 0x00,      0x050,     56,      "4.4"  },
+	{ 0x00,      0x060,     62,      "4.0"  },
+	{ 0x00,      0x070,     68,      "3.6"  },
+	{ 0x00,      0x000,      0,      NULL   }
+};
+
+/* Our Sequencer Program */
+#include "aic7xxx_seq.h"
+
+/**************************** Function Declarations ***************************/
+static void		ahc_force_renegotiation(struct ahc_softc *ahc,
+						struct ahc_devinfo *devinfo);
+static struct ahc_tmode_tstate*
+			ahc_alloc_tstate(struct ahc_softc *ahc,
+					 u_int scsi_id, char channel);
+#ifdef AHC_TARGET_MODE
+static void		ahc_free_tstate(struct ahc_softc *ahc,
+					u_int scsi_id, char channel, int force);
+#endif
+static struct ahc_syncrate*
+			ahc_devlimited_syncrate(struct ahc_softc *ahc,
+					        struct ahc_initiator_tinfo *,
+						u_int *period,
+						u_int *ppr_options,
+						role_t role);
+static void		ahc_update_pending_scbs(struct ahc_softc *ahc);
+static void		ahc_fetch_devinfo(struct ahc_softc *ahc,
+					  struct ahc_devinfo *devinfo);
+static void		ahc_scb_devinfo(struct ahc_softc *ahc,
+					struct ahc_devinfo *devinfo,
+					struct scb *scb);
+static void		ahc_assert_atn(struct ahc_softc *ahc);
+static void		ahc_setup_initiator_msgout(struct ahc_softc *ahc,
+						   struct ahc_devinfo *devinfo,
+						   struct scb *scb);
+static void		ahc_build_transfer_msg(struct ahc_softc *ahc,
+					       struct ahc_devinfo *devinfo);
+static void		ahc_construct_sdtr(struct ahc_softc *ahc,
+					   struct ahc_devinfo *devinfo,
+					   u_int period, u_int offset);
+static void		ahc_construct_wdtr(struct ahc_softc *ahc,
+					   struct ahc_devinfo *devinfo,
+					   u_int bus_width);
+static void		ahc_construct_ppr(struct ahc_softc *ahc,
+					  struct ahc_devinfo *devinfo,
+					  u_int period, u_int offset,
+					  u_int bus_width, u_int ppr_options);
+static void		ahc_clear_msg_state(struct ahc_softc *ahc);
+static void		ahc_handle_proto_violation(struct ahc_softc *ahc);
+static void		ahc_handle_message_phase(struct ahc_softc *ahc);
+typedef enum {
+	AHCMSG_1B,
+	AHCMSG_2B,
+	AHCMSG_EXT
+} ahc_msgtype;
+static int		ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type,
+				     u_int msgval, int full);
+static int		ahc_parse_msg(struct ahc_softc *ahc,
+				      struct ahc_devinfo *devinfo);
+static int		ahc_handle_msg_reject(struct ahc_softc *ahc,
+					      struct ahc_devinfo *devinfo);
+static void		ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
+						struct ahc_devinfo *devinfo);
+static void		ahc_reinitialize_dataptrs(struct ahc_softc *ahc);
+static void		ahc_handle_devreset(struct ahc_softc *ahc,
+					    struct ahc_devinfo *devinfo,
+					    cam_status status, char *message,
+					    int verbose_level);
+#ifdef AHC_TARGET_MODE
+static void		ahc_setup_target_msgin(struct ahc_softc *ahc,
+					       struct ahc_devinfo *devinfo,
+					       struct scb *scb);
+#endif
+
+static bus_dmamap_callback_t	ahc_dmamap_cb; 
+static void			ahc_build_free_scb_list(struct ahc_softc *ahc);
+static int			ahc_init_scbdata(struct ahc_softc *ahc);
+static void			ahc_fini_scbdata(struct ahc_softc *ahc);
+static void		ahc_qinfifo_requeue(struct ahc_softc *ahc,
+					    struct scb *prev_scb,
+					    struct scb *scb);
+static int		ahc_qinfifo_count(struct ahc_softc *ahc);
+static u_int		ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
+						   u_int prev, u_int scbptr);
+static void		ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
+static u_int		ahc_rem_wscb(struct ahc_softc *ahc,
+				     u_int scbpos, u_int prev);
+static void		ahc_reset_current_bus(struct ahc_softc *ahc);
+#ifdef AHC_DUMP_SEQ
+static void		ahc_dumpseq(struct ahc_softc *ahc);
+#endif
+static int		ahc_loadseq(struct ahc_softc *ahc);
+static int		ahc_check_patch(struct ahc_softc *ahc,
+					struct patch **start_patch,
+					u_int start_instr, u_int *skip_addr);
+static void		ahc_download_instr(struct ahc_softc *ahc,
+					   u_int instrptr, uint8_t *dconsts);
+#ifdef AHC_TARGET_MODE
+static void		ahc_queue_lstate_event(struct ahc_softc *ahc,
+					       struct ahc_tmode_lstate *lstate,
+					       u_int initiator_id,
+					       u_int event_type,
+					       u_int event_arg);
+static void		ahc_update_scsiid(struct ahc_softc *ahc,
+					  u_int targid_mask);
+static int		ahc_handle_target_cmd(struct ahc_softc *ahc,
+					      struct target_cmd *cmd);
+#endif
+/************************* Sequencer Execution Control ************************/
+/*
+ * Restart the sequencer program from address zero
+ */
+void
+ahc_restart(struct ahc_softc *ahc)
+{
+
+	ahc_pause(ahc);
+
+	/* No more pending messages. */
+	ahc_clear_msg_state(ahc);
+
+	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
+	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
+	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
+	ahc_outb(ahc, LASTPHASE, P_BUSFREE);
+	ahc_outb(ahc, SAVED_SCSIID, 0xFF);
+	ahc_outb(ahc, SAVED_LUN, 0xFF);
+
+	/*
+	 * Ensure that the sequencer's idea of TQINPOS
+	 * matches our own.  The sequencer only increments TQINPOS
+	 * after it sees a DMA complete, so a reset could occur
+	 * before the increment, leaving the kernel believing a
+	 * command arrived that the sequencer does not know about.
+	 */
+	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
+
+	/* Always allow reselection */
+	ahc_outb(ahc, SCSISEQ,
+		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
+	if ((ahc->features & AHC_CMD_CHAN) != 0) {
+		/* Ensure that no DMA operations are in progress */
+		ahc_outb(ahc, CCSCBCNT, 0);
+		ahc_outb(ahc, CCSGCTL, 0);
+		ahc_outb(ahc, CCSCBCTL, 0);
+	}
+	/*
+	 * If we were in the process of DMA'ing SCB data into
+	 * an SCB, replace that SCB on the free list.  This prevents
+	 * an SCB leak.
+	 */
+	if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
+		ahc_add_curscb_to_free_list(ahc);
+		ahc_outb(ahc, SEQ_FLAGS2,
+			 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
+	}
+	ahc_outb(ahc, MWI_RESIDUAL, 0);
+	ahc_outb(ahc, SEQCTL, ahc->seqctl);
+	ahc_outb(ahc, SEQADDR0, 0);
+	ahc_outb(ahc, SEQADDR1, 0);
+	ahc_unpause(ahc);
+}
+
+/************************* Input/Output Queues ********************************/
+void
+ahc_run_qoutfifo(struct ahc_softc *ahc)
+{
+	struct scb *scb;
+	u_int  scb_index;
+
+	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
+	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
+
+		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
+		if ((ahc->qoutfifonext & 0x03) == 0x03) {
+			u_int modnext;
+
+			/*
+			 * Clear 32bits of QOUTFIFO at a time
+			 * so that we don't clobber an incoming
+			 * byte DMA to the array on architectures
+			 * that only support 32bit load and store
+			 * operations.
+			 */
+			modnext = ahc->qoutfifonext & ~0x3;
+			*((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
+			ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+					ahc->shared_data_dmamap,
+					/*offset*/modnext, /*len*/4,
+					BUS_DMASYNC_PREREAD);
+		}
+		ahc->qoutfifonext++;
+
+		scb = ahc_lookup_scb(ahc, scb_index);
+		if (scb == NULL) {
+			printf("%s: WARNING no command for scb %d "
+			       "(cmdcmplt)\nQOUTPOS = %d\n",
+			       ahc_name(ahc), scb_index,
+			       (ahc->qoutfifonext - 1) & 0xFF);
+			continue;
+		}
+
+		/*
+		 * Save off the residual
+		 * if there is one.
+		 */
+		ahc_update_residual(ahc, scb);
+		ahc_done(ahc, scb);
+	}
+}
+
+void
+ahc_run_untagged_queues(struct ahc_softc *ahc)
+{
+	int i;
+
+	for (i = 0; i < 16; i++)
+		ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
+}
+
+void
+ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
+{
+	struct scb *scb;
+
+	if (ahc->untagged_queue_lock != 0)
+		return;
+
+	if ((scb = TAILQ_FIRST(queue)) != NULL
+	 && (scb->flags & SCB_ACTIVE) == 0) {
+		scb->flags |= SCB_ACTIVE;
+		ahc_queue_scb(ahc, scb);
+	}
+}
+
+/************************* Interrupt Handling *********************************/
+void
+ahc_handle_brkadrint(struct ahc_softc *ahc)
+{
+	/*
+	 * We upset the sequencer :-(
+	 * Lookup the error message
+	 */
+	int i;
+	int error;
+
+	error = ahc_inb(ahc, ERROR);
+	for (i = 0; error != 1 && i < num_errors; i++)
+		error >>= 1;
+	printf("%s: brkadrint, %s at seqaddr = 0x%x\n",
+	       ahc_name(ahc), ahc_hard_errors[i].errmesg,
+	       ahc_inb(ahc, SEQADDR0) |
+	       (ahc_inb(ahc, SEQADDR1) << 8));
+
+	ahc_dump_card_state(ahc);
+
+	/* Tell everyone that this HBA is no longer available */
+	ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
+		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
+		       CAM_NO_HBA);
+
+	/* Disable all interrupt sources by resetting the controller */
+	ahc_shutdown(ahc);
+}
+
+void
+ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
+{
+	struct scb *scb;
+	struct ahc_devinfo devinfo;
+	
+	ahc_fetch_devinfo(ahc, &devinfo);
+
+	/*
+	 * Clear the upper byte that holds SEQINT status
+	 * codes and clear the SEQINT bit. We will unpause
+	 * the sequencer, if appropriate, after servicing
+	 * the request.
+	 */
+	ahc_outb(ahc, CLRINT, CLRSEQINT);
+	switch (intstat & SEQINT_MASK) {
+	case BAD_STATUS:
+	{
+		u_int  scb_index;
+		struct hardware_scb *hscb;
+
+		/*
+		 * Set the default return value to 0 (don't
+		 * send sense).  The sense code will change
+		 * this if needed.
+		 */
+		ahc_outb(ahc, RETURN_1, 0);
+
+		/*
+		 * The sequencer will notify us when a command
+		 * has an error that would be of interest to
+		 * the kernel.  This allows us to leave the sequencer
+		 * running in the common case of command completes
+		 * without error.  The sequencer will already have
+		 * dma'd the SCB back up to us, so we can reference
+		 * the in kernel copy directly.
+		 */
+		scb_index = ahc_inb(ahc, SCB_TAG);
+		scb = ahc_lookup_scb(ahc, scb_index);
+		if (scb == NULL) {
+			ahc_print_devinfo(ahc, &devinfo);
+			printf("ahc_intr - referenced scb "
+			       "not valid during seqint 0x%x scb(%d)\n",
+			       intstat, scb_index);
+			ahc_dump_card_state(ahc);
+			panic("for safety");
+			goto unpause;
+		}
+
+		hscb = scb->hscb; 
+
+		/* Don't want to clobber the original sense code */
+		if ((scb->flags & SCB_SENSE) != 0) {
+			/*
+			 * Clear the SCB_SENSE Flag and have
+			 * the sequencer do a normal command
+			 * complete.
+			 */
+			scb->flags &= ~SCB_SENSE;
+			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
+			break;
+		}
+		ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
+		/* Freeze the queue until the client sees the error. */
+		ahc_freeze_devq(ahc, scb);
+		ahc_freeze_scb(scb);
+		ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
+		switch (hscb->shared_data.status.scsi_status) {
+		case SCSI_STATUS_OK:
+			printf("%s: Interrupted for status of 0???\n",
+			       ahc_name(ahc));
+			break;
+		case SCSI_STATUS_CMD_TERMINATED:
+		case SCSI_STATUS_CHECK_COND:
+		{
+			struct ahc_dma_seg *sg;
+			struct scsi_sense *sc;
+			struct ahc_initiator_tinfo *targ_info;
+			struct ahc_tmode_tstate *tstate;
+			struct ahc_transinfo *tinfo;
+#ifdef AHC_DEBUG
+			if (ahc_debug & AHC_SHOW_SENSE) {
+				ahc_print_path(ahc, scb);
+				printf("SCB %d: requests Check Status\n",
+				       scb->hscb->tag);
+			}
+#endif
+
+			if (ahc_perform_autosense(scb) == 0)
+				break;
+
+			targ_info = ahc_fetch_transinfo(ahc,
+							devinfo.channel,
+							devinfo.our_scsiid,
+							devinfo.target,
+							&tstate);
+			tinfo = &targ_info->curr;
+			sg = scb->sg_list;
+			sc = (struct scsi_sense *)(&hscb->shared_data.cdb); 
+			/*
+			 * Save off the residual if there is one.
+			 */
+			ahc_update_residual(ahc, scb);
+#ifdef AHC_DEBUG
+			if (ahc_debug & AHC_SHOW_SENSE) {
+				ahc_print_path(ahc, scb);
+				printf("Sending Sense\n");
+			}
+#endif
+			sg->addr = ahc_get_sense_bufaddr(ahc, scb);
+			sg->len = ahc_get_sense_bufsize(ahc, scb);
+			sg->len |= AHC_DMA_LAST_SEG;
+
+			/* Fixup byte order */
+			sg->addr = ahc_htole32(sg->addr);
+			sg->len = ahc_htole32(sg->len);
+
+			sc->opcode = REQUEST_SENSE;
+			sc->byte2 = 0;
+			if (tinfo->protocol_version <= SCSI_REV_2
+			 && SCB_GET_LUN(scb) < 8)
+				sc->byte2 = SCB_GET_LUN(scb) << 5;
+			sc->unused[0] = 0;
+			sc->unused[1] = 0;
+			sc->length = sg->len;
+			sc->control = 0;
+
+			/*
+			 * We can't allow the target to disconnect.
+			 * This will be an untagged transaction and
+			 * having the target disconnect will make this
+			 * transaction indistinguishable from outstanding
+			 * tagged transactions.
+			 */
+			hscb->control = 0;
+
+			/*
+			 * This request sense could be because the
+			 * device lost power or in some other
+			 * way has lost our transfer negotiations.
+			 * Renegotiate if appropriate.  Unit attention
+			 * errors will be reported before any data
+			 * phases occur.
+			 */
+			if (ahc_get_residual(scb) 
+			 == ahc_get_transfer_length(scb)) {
+				ahc_update_neg_request(ahc, &devinfo,
+						       tstate, targ_info,
+						       AHC_NEG_IF_NON_ASYNC);
+			}
+			if (tstate->auto_negotiate & devinfo.target_mask) {
+				hscb->control |= MK_MESSAGE;
+				scb->flags &= ~SCB_NEGOTIATE;
+				scb->flags |= SCB_AUTO_NEGOTIATE;
+			}
+			hscb->cdb_len = sizeof(*sc);
+			hscb->dataptr = sg->addr; 
+			hscb->datacnt = sg->len;
+			hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
+			hscb->sgptr = ahc_htole32(hscb->sgptr);
+			scb->sg_count = 1;
+			scb->flags |= SCB_SENSE;
+			ahc_qinfifo_requeue_tail(ahc, scb);
+			ahc_outb(ahc, RETURN_1, SEND_SENSE);
+			/*
+			 * Ensure we have enough time to actually
+			 * retrieve the sense.
+			 */
+			ahc_scb_timer_reset(scb, 5 * 1000000);
+			break;
+		}
+		default:
+			break;
+		}
+		break;
+	}
+	case NO_MATCH:
+	{
+		/* Ensure we don't leave the selection hardware on */
+		ahc_outb(ahc, SCSISEQ,
+			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
+
+		printf("%s:%c:%d: no active SCB for reconnecting "
+		       "target - issuing BUS DEVICE RESET\n",
+		       ahc_name(ahc), devinfo.channel, devinfo.target);
+		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
+		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
+		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
+		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
+		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
+		       "SINDEX == 0x%x\n",
+		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
+		       ahc_index_busy_tcl(ahc,
+			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
+				      ahc_inb(ahc, SAVED_LUN))),
+		       ahc_inb(ahc, SINDEX));
+		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
+		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
+		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
+		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
+		       ahc_inb(ahc, SCB_CONTROL));
+		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
+		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
+		printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
+		printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
+		ahc_dump_card_state(ahc);
+		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
+		ahc->msgout_len = 1;
+		ahc->msgout_index = 0;
+		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+		ahc_outb(ahc, MSG_OUT, HOST_MSG);
+		ahc_assert_atn(ahc);
+		break;
+	}
+	case SEND_REJECT: 
+	{
+		u_int rejbyte = ahc_inb(ahc, ACCUM);
+		printf("%s:%c:%d: Warning - unknown message received from "
+		       "target (0x%x).  Rejecting\n", 
+		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
+		break; 
+	}
+	case PROTO_VIOLATION:
+	{
+		ahc_handle_proto_violation(ahc);
+		break;
+	}
+	case IGN_WIDE_RES:
+		ahc_handle_ign_wide_residue(ahc, &devinfo);
+		break;
+	case PDATA_REINIT:
+		ahc_reinitialize_dataptrs(ahc);
+		break;
+	case BAD_PHASE:
+	{
+		u_int lastphase;
+
+		lastphase = ahc_inb(ahc, LASTPHASE);
+		printf("%s:%c:%d: unknown scsi bus phase %x, "
+		       "lastphase = 0x%x.  Attempting to continue\n",
+		       ahc_name(ahc), devinfo.channel, devinfo.target,
+		       lastphase, ahc_inb(ahc, SCSISIGI));
+		break;
+	}
+	case MISSED_BUSFREE:
+	{
+		u_int lastphase;
+
+		lastphase = ahc_inb(ahc, LASTPHASE);
+		printf("%s:%c:%d: Missed busfree. "
+		       "Lastphase = 0x%x, Curphase = 0x%x\n",
+		       ahc_name(ahc), devinfo.channel, devinfo.target,
+		       lastphase, ahc_inb(ahc, SCSISIGI));
+		ahc_restart(ahc);
+		return;
+	}
+	case HOST_MSG_LOOP:
+	{
+		/*
+		 * The sequencer has encountered a message phase
+		 * that requires host assistance for completion.
+		 * While handling the message phase(s), we will be
+		 * notified by the sequencer after each byte is
+		 * transferred so we can track bus phase changes.
+		 *
+		 * If this is the first time we've seen a HOST_MSG_LOOP
+		 * interrupt, initialize the state of the host message
+		 * loop.
+		 */
+		if (ahc->msg_type == MSG_TYPE_NONE) {
+			struct scb *scb;
+			u_int scb_index;
+			u_int bus_phase;
+
+			bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
+			if (bus_phase != P_MESGIN
+			 && bus_phase != P_MESGOUT) {
+				printf("ahc_intr: HOST_MSG_LOOP bad "
+				       "phase 0x%x\n",
+				      bus_phase);
+				/*
+				 * Probably transitioned to bus free before
+				 * we got here.  Just punt the message.
+				 */
+				ahc_clear_intstat(ahc);
+				ahc_restart(ahc);
+				return;
+			}
+
+			scb_index = ahc_inb(ahc, SCB_TAG);
+			scb = ahc_lookup_scb(ahc, scb_index);
+			if (devinfo.role == ROLE_INITIATOR) {
+				if (scb == NULL)
+					panic("HOST_MSG_LOOP with "
+					      "invalid SCB %x\n", scb_index);
+
+				if (bus_phase == P_MESGOUT)
+					ahc_setup_initiator_msgout(ahc,
+								   &devinfo,
+								   scb);
+				else {
+					ahc->msg_type =
+					    MSG_TYPE_INITIATOR_MSGIN;
+					ahc->msgin_index = 0;
+				}
+			}
+#ifdef AHC_TARGET_MODE
+			else {
+				if (bus_phase == P_MESGOUT) {
+					ahc->msg_type =
+					    MSG_TYPE_TARGET_MSGOUT;
+					ahc->msgin_index = 0;
+				}
+				else 
+					ahc_setup_target_msgin(ahc,
+							       &devinfo,
+							       scb);
+			}
+#endif
+		}
+
+		ahc_handle_message_phase(ahc);
+		break;
+	}
+	case PERR_DETECTED:
+	{
+		/*
+		 * If we've cleared the parity error interrupt
+		 * but the sequencer still believes that SCSIPERR
+		 * is true, it must be that the parity error is
+		 * for the currently presented byte on the bus,
+		 * and we are not in a phase (data-in) where we will
+		 * eventually ack this byte.  Ack the byte and
+		 * throw it away in the hope that the target will
+		 * take us to message out to deliver the appropriate
+		 * error message.
+		 */
+		if ((intstat & SCSIINT) == 0
+		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
+
+			if ((ahc->features & AHC_DT) == 0) {
+				u_int curphase;
+
+				/*
+				 * The hardware will only let you ack bytes
+				 * if the expected phase in SCSISIGO matches
+				 * the current phase.  Make sure this is
+				 * currently the case.
+				 */
+				curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
+				ahc_outb(ahc, LASTPHASE, curphase);
+				ahc_outb(ahc, SCSISIGO, curphase);
+			}
+			if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) {
+				int wait;
+
+				/*
+				 * In a data phase.  Faster to bitbucket
+				 * the data than to individually ack each
+				 * byte.  This is also the only strategy
+				 * that will work with AUTOACK enabled.
+				 */
+				ahc_outb(ahc, SXFRCTL1,
+					 ahc_inb(ahc, SXFRCTL1) | BITBUCKET);
+				wait = 5000;
+				while (--wait != 0) {
+					if ((ahc_inb(ahc, SCSISIGI)
+					  & (CDI|MSGI)) != 0)
+						break;
+					ahc_delay(100);
+				}
+				ahc_outb(ahc, SXFRCTL1,
+					 ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
+				if (wait == 0) {
+					struct	scb *scb;
+					u_int	scb_index;
+
+					ahc_print_devinfo(ahc, &devinfo);
+					printf("Unable to clear parity error.  "
+					       "Resetting bus.\n");
+					scb_index = ahc_inb(ahc, SCB_TAG);
+					scb = ahc_lookup_scb(ahc, scb_index);
+					if (scb != NULL)
+						ahc_set_transaction_status(scb,
+						    CAM_UNCOR_PARITY);
+					ahc_reset_channel(ahc, devinfo.channel, 
+							  /*init reset*/TRUE);
+				}
+			} else {
+				ahc_inb(ahc, SCSIDATL);
+			}
+		}
+		break;
+	}
+	case DATA_OVERRUN:
+	{
+		/*
+		 * When the sequencer detects an overrun, it
+		 * places the controller in "BITBUCKET" mode
+		 * and allows the target to complete its transfer.
+		 * Unfortunately, none of the counters get updated
+		 * when the controller is in this mode, so we have
+		 * no way of knowing how large the overrun was.
+		 */
+		u_int scbindex = ahc_inb(ahc, SCB_TAG);
+		u_int lastphase = ahc_inb(ahc, LASTPHASE);
+		u_int i;
+
+		scb = ahc_lookup_scb(ahc, scbindex);
+		for (i = 0; i < num_phases; i++) {
+			if (lastphase == ahc_phase_table[i].phase)
+				break;
+		}
+		ahc_print_path(ahc, scb);
+		printf("data overrun detected %s."
+		       "  Tag == 0x%x.\n",
+		       ahc_phase_table[i].phasemsg,
+  		       scb->hscb->tag);
+		ahc_print_path(ahc, scb);
+		printf("%s seen Data Phase.  Length = %ld.  NumSGs = %d.\n",
+		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
+		       ahc_get_transfer_length(scb), scb->sg_count);
+		if (scb->sg_count > 0) {
+			for (i = 0; i < scb->sg_count; i++) {
+
+				printf("sg[%d] - Addr 0x%x%x : Length %d\n",
+				       i,
+				       (ahc_le32toh(scb->sg_list[i].len) >> 24
+				        & SG_HIGH_ADDR_BITS),
+				       ahc_le32toh(scb->sg_list[i].addr),
+				       ahc_le32toh(scb->sg_list[i].len)
+				       & AHC_SG_LEN_MASK);
+			}
+		}
+		/*
+		 * Set this and it will take effect when the
+		 * target does a command complete.
+		 */
+		ahc_freeze_devq(ahc, scb);
+		if ((scb->flags & SCB_SENSE) == 0) {
+			ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
+		} else {
+			scb->flags &= ~SCB_SENSE;
+			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
+		}
+		ahc_freeze_scb(scb);
+
+		if ((ahc->features & AHC_ULTRA2) != 0) {
+			/*
+			 * Clear the channel in case we return
+			 * to data phase later.
+			 */
+			ahc_outb(ahc, SXFRCTL0,
+				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
+			ahc_outb(ahc, SXFRCTL0,
+				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
+		}
+		if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+			u_int dscommand1;
+
+			/* Ensure HHADDR is 0 for future DMA operations. */
+			dscommand1 = ahc_inb(ahc, DSCOMMAND1);
+			ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
+			ahc_outb(ahc, HADDR, 0);
+			ahc_outb(ahc, DSCOMMAND1, dscommand1);
+		}
+		break;
+	}
+	case MKMSG_FAILED:
+	{
+		u_int scbindex;
+
+		printf("%s:%c:%d:%d: Attempt to issue message failed\n",
+		       ahc_name(ahc), devinfo.channel, devinfo.target,
+		       devinfo.lun);
+		scbindex = ahc_inb(ahc, SCB_TAG);
+		scb = ahc_lookup_scb(ahc, scbindex);
+		if (scb != NULL
+		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
+			/*
+			 * Ensure that we didn't put a second instance of this
+			 * SCB into the QINFIFO.
+			 */
+			ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
+					   SCB_GET_CHANNEL(ahc, scb),
+					   SCB_GET_LUN(scb), scb->hscb->tag,
+					   ROLE_INITIATOR, /*status*/0,
+					   SEARCH_REMOVE);
+		break;
+	}
+	case NO_FREE_SCB:
+	{
+		printf("%s: No free or disconnected SCBs\n", ahc_name(ahc));
+		ahc_dump_card_state(ahc);
+		panic("for safety");
+		break;
+	}
+	case SCB_MISMATCH:
+	{
+		u_int scbptr;
+
+		scbptr = ahc_inb(ahc, SCBPTR);
+		printf("Bogus TAG after DMA.  SCBPTR %d, tag %d, our tag %d\n",
+		       scbptr, ahc_inb(ahc, ARG_1),
+		       ahc->scb_data->hscbs[scbptr].tag);
+		ahc_dump_card_state(ahc);
+		panic("for safety");
+		break;
+	}
+	case OUT_OF_RANGE:
+	{
+		printf("%s: BTT calculation out of range\n", ahc_name(ahc));
+		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
+		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
+		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
+		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
+		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
+		       "SINDEX == 0x%x, A == 0x%x\n",
+		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
+		       ahc_index_busy_tcl(ahc,
+			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
+				      ahc_inb(ahc, SAVED_LUN))),
+		       ahc_inb(ahc, SINDEX),
+		       ahc_inb(ahc, ACCUM));
+		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
+		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
+		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
+		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
+		       ahc_inb(ahc, SCB_CONTROL));
+		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
+		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
+		ahc_dump_card_state(ahc);
+		panic("for safety");
+		break;
+	}
+	default:
+		printf("ahc_intr: seqint, "
+		       "intstat == 0x%x, scsisigi = 0x%x\n",
+		       intstat, ahc_inb(ahc, SCSISIGI));
+		break;
+	}
+unpause:
+	/*
+	 *  The sequencer is paused immediately on
+	 *  a SEQINT, so we should restart it when
+	 *  we're done.
+	 */
+	ahc_unpause(ahc);
+}
+
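+/*
+ * Service SCSI bus related interrupt conditions: transceiver mode
+ * changes, bus resets, parity errors, selection timeouts, and
+ * unexpected busfrees.
+ */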
+void
+ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
+{
+	u_int	scb_index;
+	u_int	status0;
+	u_int	status;
+	struct	scb *scb;
+	char	cur_channel;
+	char	intr_channel;
+
+	if ((ahc->features & AHC_TWIN) != 0
+	 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
+		cur_channel = 'B';
+	else
+		cur_channel = 'A';
+	intr_channel = cur_channel;
+
+	if ((ahc->features & AHC_ULTRA2) != 0)
+		status0 = ahc_inb(ahc, SSTAT0) & IOERR;
+	else
+		status0 = 0;
+	status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
+	if (status == 0 && status0 == 0) {
+		if ((ahc->features & AHC_TWIN) != 0) {
+			/* Try the other channel */
+		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
+			status = ahc_inb(ahc, SSTAT1)
+			       & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
+			intr_channel = (cur_channel == 'A') ? 'B' : 'A';
+		}
+		if (status == 0) {
+			printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
+			ahc_outb(ahc, CLRINT, CLRSCSIINT);
+			ahc_unpause(ahc);
+			return;
+		}
+	}
+
+	/* Make sure the sequencer is in a safe location. */
+	ahc_clear_critical_section(ahc);
+
+	scb_index = ahc_inb(ahc, SCB_TAG);
+	scb = ahc_lookup_scb(ahc, scb_index);
+	if (scb != NULL
+	 && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
+		scb = NULL;
+
+	if ((ahc->features & AHC_ULTRA2) != 0
+	 && (status0 & IOERR) != 0) {
+		int now_lvd;
+
+		now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
+		printf("%s: Transceiver State Has Changed to %s mode\n",
+		       ahc_name(ahc), now_lvd ? "LVD" : "SE");
+		ahc_outb(ahc, CLRSINT0, CLRIOERR);
+		/*
+		 * When transitioning to SE mode, the reset line
+		 * glitches, triggering an arbitration bug in some
+		 * Ultra2 controllers.  This bug is cleared when we
+		 * assert the reset line.  Since a reset glitch has
+		 * already occurred with this transition and a
+		 * transceiver state change is handled just like
+		 * a bus reset anyway, asserting the reset line
+		 * ourselves is safe.
+		 */
+		ahc_reset_channel(ahc, intr_channel,
+				 /*Initiate Reset*/now_lvd == 0);
+	} else if ((status & SCSIRSTI) != 0) {
+		printf("%s: Someone reset channel %c\n",
+			ahc_name(ahc), intr_channel);
+		if (intr_channel != cur_channel)
+		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
+		ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
+	} else if ((status & SCSIPERR) != 0) {
+		/*
+		 * Determine the bus phase and queue an appropriate message.
+		 * SCSIPERR is latched true as soon as a parity error
+		 * occurs.  If the sequencer acked the transfer that
+		 * caused the parity error and the currently presented
+		 * transfer on the bus has correct parity, SCSIPERR will
+		 * be cleared by CLRSCSIPERR.  Use this to determine if
+		 * we should look at the last phase the sequencer recorded,
+		 * or the current phase presented on the bus.
+		 */
+		struct	ahc_devinfo devinfo;
+		u_int	mesg_out;
+		u_int	curphase;
+		u_int	errorphase;
+		u_int	lastphase;
+		u_int	scsirate;
+		u_int	i;
+		u_int	sstat2;
+		int	silent;
+
+		lastphase = ahc_inb(ahc, LASTPHASE);
+		curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
+		sstat2 = ahc_inb(ahc, SSTAT2);
+		ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
+		/*
+		 * For all phases save DATA, the sequencer won't
+		 * automatically ack a byte that has a parity error
+		 * in it.  So the only way that the current phase
+		 * could be 'data-in' is if the parity error is for
+		 * an already acked byte in the data phase.  During
+		 * synchronous data-in transfers, we may actually
+		 * ack bytes before latching the current phase in
+		 * LASTPHASE, leading to the discrepancy between
+		 * curphase and lastphase.
+		 */
+		if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
+		 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
+			errorphase = curphase;
+		else
+			errorphase = lastphase;
+
+		for (i = 0; i < num_phases; i++) {
+			if (errorphase == ahc_phase_table[i].phase)
+				break;
+		}
+		mesg_out = ahc_phase_table[i].mesg_out;
+		silent = FALSE;
+		if (scb != NULL) {
+			if (SCB_IS_SILENT(scb))
+				silent = TRUE;
+			else
+				ahc_print_path(ahc, scb);
+			scb->flags |= SCB_TRANSMISSION_ERROR;
+		} else
+			printf("%s:%c:%d: ", ahc_name(ahc), intr_channel,
+			       SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
+		scsirate = ahc_inb(ahc, SCSIRATE);
+		if (silent == FALSE) {
+			printf("parity error detected %s. "
+			       "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
+			       ahc_phase_table[i].phasemsg,
+			       ahc_inw(ahc, SEQADDR0),
+			       scsirate);
+			if ((ahc->features & AHC_DT) != 0) {
+				if ((sstat2 & CRCVALERR) != 0)
+					printf("\tCRC Value Mismatch\n");
+				if ((sstat2 & CRCENDERR) != 0)
+					printf("\tNo terminal CRC packet "
+					       "received\n");
+				if ((sstat2 & CRCREQERR) != 0)
+					printf("\tIllegal CRC packet "
+					       "request\n");
+				if ((sstat2 & DUAL_EDGE_ERR) != 0)
+					printf("\tUnexpected %sDT Data Phase\n",
+					       (scsirate & SINGLE_EDGE)
+					     ? "" : "non-");
+			}
+		}
+
+		if ((ahc->features & AHC_DT) != 0
+		 && (sstat2 & DUAL_EDGE_ERR) != 0) {
+			/*
+			 * This error applies regardless of
+			 * data direction, so ignore the value
+			 * in the phase table.
+			 */
+			mesg_out = MSG_INITIATOR_DET_ERR;
+		}
+
+		/*
+		 * We've set the hardware to assert ATN if we   
+		 * get a parity error on "in" phases, so all we  
+		 * need to do is stuff the message buffer with
+		 * the appropriate message.  "In" phases have set
+		 * mesg_out to something other than MSG_NOOP.
+		 */
+		if (mesg_out != MSG_NOOP) {
+			if (ahc->msg_type != MSG_TYPE_NONE)
+				ahc->send_msg_perror = TRUE;
+			else
+				ahc_outb(ahc, MSG_OUT, mesg_out);
+		}
+		/*
+		 * Force a renegotiation with this target just in
+		 * case we are out of sync for some external reason
+		 * unknown (or unreported) by the target.
+		 */
+		ahc_fetch_devinfo(ahc, &devinfo);
+		ahc_force_renegotiation(ahc, &devinfo);
+
+		ahc_outb(ahc, CLRINT, CLRSCSIINT);
+		ahc_unpause(ahc);
+	} else if ((status & SELTO) != 0) {
+		u_int	scbptr;
+
+		/* Stop the selection */
+		ahc_outb(ahc, SCSISEQ, 0);
+
+		/* No more pending messages */
+		ahc_clear_msg_state(ahc);
+
+		/* Clear interrupt state */
+		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
+		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
+
+		/*
+		 * Although the driver does not care about the
+		 * 'Selection in Progress' status bit, the busy
+		 * LED does.  SELINGO is only cleared by a successful
+		 * selection, so we must manually clear it to ensure
+		 * the LED turns off just in case no future successful
+		 * selections occur (e.g. no devices on the bus).
+		 */
+		ahc_outb(ahc, CLRSINT0, CLRSELINGO);
+
+		scbptr = ahc_inb(ahc, WAITING_SCBH);
+		ahc_outb(ahc, SCBPTR, scbptr);
+		scb_index = ahc_inb(ahc, SCB_TAG);
+
+		scb = ahc_lookup_scb(ahc, scb_index);
+		if (scb == NULL) {
+			printf("%s: ahc_intr - referenced scb not "
+			       "valid during SELTO scb(%d, %d)\n",
+			       ahc_name(ahc), scbptr, scb_index);
+			ahc_dump_card_state(ahc);
+		} else {
+			struct ahc_devinfo devinfo;
+#ifdef AHC_DEBUG
+			if ((ahc_debug & AHC_SHOW_SELTO) != 0) {
+				ahc_print_path(ahc, scb);
+				printf("Saw Selection Timeout for SCB 0x%x\n",
+				       scb_index);
+			}
+#endif
+			/*
+			 * Force a renegotiation with this target just in
+			 * case the cable was pulled and will later be
+			 * re-attached.  The target may forget its negotiation
+			 * settings with us should it attempt to reselect
+			 * during the interruption.  The target will not issue
+			 * a unit attention in this case, so we must always
+			 * renegotiate.
+			 */
+			ahc_scb_devinfo(ahc, &devinfo, scb);
+			ahc_force_renegotiation(ahc, &devinfo);
+			ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
+			ahc_freeze_devq(ahc, scb);
+		}
+		ahc_outb(ahc, CLRINT, CLRSCSIINT);
+		ahc_restart(ahc);
+	} else if ((status & BUSFREE) != 0
+		&& (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
+		struct	ahc_devinfo devinfo;
+		u_int	lastphase;
+		u_int	saved_scsiid;
+		u_int	saved_lun;
+		u_int	target;
+		u_int	initiator_role_id;
+		char	channel;
+		int	printerror;
+
+		/*
+		 * Clear our selection hardware as soon as possible.
+		 * We may have an entry in the waiting Q for this target
+		 * that is affected by this busfree, and we don't want to
+		 * go about selecting the target while we handle the event.
+		 */
+		ahc_outb(ahc, SCSISEQ,
+			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
+
+		/*
+		 * Disable busfree interrupts and clear the busfree
+		 * interrupt status.  We do this here so that several
+		 * bus transactions occur prior to clearing the SCSIINT
+		 * latch.  It can take a bit for the clearing to take effect.
+		 */
+		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
+		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
+
+		/*
+		 * Look at what phase we were last in.
+		 * If it's message out, chances are pretty good
+		 * that the busfree was in response to one of
+		 * our abort requests.
+		 */
+		lastphase = ahc_inb(ahc, LASTPHASE);
+		saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
+		saved_lun = ahc_inb(ahc, SAVED_LUN);
+		target = SCSIID_TARGET(ahc, saved_scsiid);
+		initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
+		channel = SCSIID_CHANNEL(ahc, saved_scsiid);
+		ahc_compile_devinfo(&devinfo, initiator_role_id,
+				    target, saved_lun, channel, ROLE_INITIATOR);
+		printerror = 1;
+
+		if (lastphase == P_MESGOUT) {
+			u_int tag;
+
+			tag = SCB_LIST_NULL;
+			if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
+			 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
+				if (ahc->msgout_buf[ahc->msgout_index - 1]
+				 == MSG_ABORT_TAG)
+					tag = scb->hscb->tag;
+				ahc_print_path(ahc, scb);
+				printf("SCB %d - Abort%s Completed.\n",
+				       scb->hscb->tag, tag == SCB_LIST_NULL ?
+				       "" : " Tag");
+				ahc_abort_scbs(ahc, target, channel,
+					       saved_lun, tag,
+					       ROLE_INITIATOR,
+					       CAM_REQ_ABORTED);
+				printerror = 0;
+			} else if (ahc_sent_msg(ahc, AHCMSG_1B,
+						MSG_BUS_DEV_RESET, TRUE)) {
+#ifdef __FreeBSD__
+				/*
+				 * Don't mark the user's request for this BDR
+				 * as completing with CAM_BDR_SENT.  CAM3
+				 * specifies CAM_REQ_CMP.
+				 */
+				if (scb != NULL
+				 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
+				 && ahc_match_scb(ahc, scb, target, channel,
+						  CAM_LUN_WILDCARD,
+						  SCB_LIST_NULL,
+						  ROLE_INITIATOR)) {
+					ahc_set_transaction_status(scb, CAM_REQ_CMP);
+				}
+#endif
+				ahc_compile_devinfo(&devinfo,
+						    initiator_role_id,
+						    target,
+						    CAM_LUN_WILDCARD,
+						    channel,
+						    ROLE_INITIATOR);
+				ahc_handle_devreset(ahc, &devinfo,
+						    CAM_BDR_SENT,
+						    "Bus Device Reset",
+						    /*verbose_level*/0);
+				printerror = 0;
+			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
+						MSG_EXT_PPR, FALSE)) {
+				struct ahc_initiator_tinfo *tinfo;
+				struct ahc_tmode_tstate *tstate;
+
+				/*
+				 * PPR Rejected.  Try non-ppr negotiation
+				 * and retry command.
+				 */
+				tinfo = ahc_fetch_transinfo(ahc,
+							    devinfo.channel,
+							    devinfo.our_scsiid,
+							    devinfo.target,
+							    &tstate);
+				tinfo->curr.transport_version = 2;
+				tinfo->goal.transport_version = 2;
+				tinfo->goal.ppr_options = 0;
+				ahc_qinfifo_requeue_tail(ahc, scb);
+				printerror = 0;
+			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
+						MSG_EXT_WDTR, FALSE)) {
+				/*
+				 * Negotiation Rejected.  Go-narrow and
+				 * retry command.
+				 */
+				ahc_set_width(ahc, &devinfo,
+					      MSG_EXT_WDTR_BUS_8_BIT,
+					      AHC_TRANS_CUR|AHC_TRANS_GOAL,
+					      /*paused*/TRUE);
+				ahc_qinfifo_requeue_tail(ahc, scb);
+				printerror = 0;
+			} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
+						MSG_EXT_SDTR, FALSE)) {
+				/*
+				 * Negotiation Rejected.  Go-async and
+				 * retry command.
+				 */
+				ahc_set_syncrate(ahc, &devinfo,
+						/*syncrate*/NULL,
+						/*period*/0, /*offset*/0,
+						/*ppr_options*/0,
+						AHC_TRANS_CUR|AHC_TRANS_GOAL,
+						/*paused*/TRUE);
+				ahc_qinfifo_requeue_tail(ahc, scb);
+				printerror = 0;
+			}
+		}
+		if (printerror != 0) {
+			u_int i;
+
+			if (scb != NULL) {
+				u_int tag;
+
+				if ((scb->hscb->control & TAG_ENB) != 0)
+					tag = scb->hscb->tag;
+				else
+					tag = SCB_LIST_NULL;
+				ahc_print_path(ahc, scb);
+				ahc_abort_scbs(ahc, target, channel,
+					       SCB_GET_LUN(scb), tag,
+					       ROLE_INITIATOR,
+					       CAM_UNEXP_BUSFREE);
+			} else {
+				/*
+				 * We had not fully identified this connection,
+				 * so we cannot abort anything.
+				 */
+				printf("%s: ", ahc_name(ahc));
+			}
+			for (i = 0; i < num_phases; i++) {
+				if (lastphase == ahc_phase_table[i].phase)
+					break;
+			}
+			if (lastphase != P_BUSFREE) {
+				/*
+				 * Renegotiate with this device at the
+			 * next opportunity just in case this busfree
+				 * is due to a negotiation mismatch with the
+				 * device.
+				 */
+				ahc_force_renegotiation(ahc, &devinfo);
+			}
+			printf("Unexpected busfree %s\n"
+			       "SEQADDR == 0x%x\n",
+			       ahc_phase_table[i].phasemsg,
+			       ahc_inb(ahc, SEQADDR0)
+				| (ahc_inb(ahc, SEQADDR1) << 8));
+		}
+		ahc_outb(ahc, CLRINT, CLRSCSIINT);
+		ahc_restart(ahc);
+	} else {
+		printf("%s: Missing case in ahc_handle_scsiint. status = %x\n",
+		       ahc_name(ahc), status);
+		ahc_outb(ahc, CLRINT, CLRSCSIINT);
+	}
+}
+
+/*
+ * Force renegotiation to occur the next time we initiate
+ * a command to the current device.
+ */
+static void
+ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+	struct	ahc_initiator_tinfo *targ_info;
+	struct	ahc_tmode_tstate *tstate;
+
+	targ_info = ahc_fetch_transinfo(ahc,
+					devinfo->channel,
+					devinfo->our_scsiid,
+					devinfo->target,
+					&tstate);
+	ahc_update_neg_request(ahc, devinfo, tstate,
+			       targ_info, AHC_NEG_IF_NON_ASYNC);
+}
+
+#define AHC_MAX_STEPS 2000
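+/*
+ * If the sequencer is paused inside one of the firmware's critical
+ * sections, single step it (with pausing interrupt sources masked)
+ * until its program counter is outside of all critical sections.
+ * Give up and panic after AHC_MAX_STEPS steps.
+ */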
+void
+ahc_clear_critical_section(struct ahc_softc *ahc)
+{
+	int	stepping;
+	int	steps;
+	u_int	simode0;
+	u_int	simode1;
+
+	if (ahc->num_critical_sections == 0)
+		return;
+
+	stepping = FALSE;
+	steps = 0;
+	simode0 = 0;
+	simode1 = 0;
+	for (;;) {
+		struct	cs *cs;
+		u_int	seqaddr;
+		u_int	i;
+
+		seqaddr = ahc_inb(ahc, SEQADDR0)
+			| (ahc_inb(ahc, SEQADDR1) << 8);
+
+		/*
+		 * Seqaddr represents the next instruction to execute, 
+		 * so we are really executing the instruction just
+		 * before it.
+		 */
+		if (seqaddr != 0)
+			seqaddr -= 1;
+		cs = ahc->critical_sections;
+		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
+			
+			if (cs->begin < seqaddr && cs->end >= seqaddr)
+				break;
+		}
+
+		if (i == ahc->num_critical_sections)
+			break;
+
+		if (steps > AHC_MAX_STEPS) {
+			printf("%s: Infinite loop in critical section\n",
+			       ahc_name(ahc));
+			ahc_dump_card_state(ahc);
+			panic("critical section loop");
+		}
+
+		steps++;
+		if (stepping == FALSE) {
+
+			/*
+			 * Disable all interrupt sources so that the
+			 * sequencer will not be stuck by a pausing
+			 * interrupt condition while we attempt to
+			 * leave a critical section.
+			 */
+			simode0 = ahc_inb(ahc, SIMODE0);
+			ahc_outb(ahc, SIMODE0, 0);
+			simode1 = ahc_inb(ahc, SIMODE1);
+			if ((ahc->features & AHC_DT) != 0)
+				/*
+				 * On DT class controllers, we
+				 * use the enhanced busfree logic.
+				 * Unfortunately we cannot re-enable
+				 * busfree detection within the
+				 * current connection, so we must
+				 * leave it on while single stepping.
+				 */
+				ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE);
+			else
+				ahc_outb(ahc, SIMODE1, 0);
+			ahc_outb(ahc, CLRINT, CLRSCSIINT);
+			ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP);
+			stepping = TRUE;
+		}
+		if ((ahc->features & AHC_DT) != 0) {
+			ahc_outb(ahc, CLRSINT1, CLRBUSFREE);
+			ahc_outb(ahc, CLRINT, CLRSCSIINT);
+		}
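+		/*
+		 * Release the sequencer for one instruction and wait
+		 * for it to pause again (STEP is set in SEQCTL above).
+		 */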
+		ahc_outb(ahc, HCNTRL, ahc->unpause);
+		while (!ahc_is_paused(ahc))
+			ahc_delay(200);
+	}
+	if (stepping) {
+		ahc_outb(ahc, SIMODE0, simode0);
+		ahc_outb(ahc, SIMODE1, simode1);
+		ahc_outb(ahc, SEQCTL, ahc->seqctl);
+	}
+}
+
+/*
+ * Clear any pending interrupt status.
+ */
+void
+ahc_clear_intstat(struct ahc_softc *ahc)
+{
+	/* Clear any interrupt conditions this may have caused */
+	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
+				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
+				CLRREQINIT);
+	ahc_flush_device_writes(ahc);
+	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
+ 	ahc_flush_device_writes(ahc);
+	ahc_outb(ahc, CLRINT, CLRSCSIINT);
+	ahc_flush_device_writes(ahc);
+}
+
+/**************************** Debugging Routines ******************************/
+#ifdef AHC_DEBUG
+uint32_t ahc_debug = AHC_DEBUG_OPTS;
+#endif
+
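+/*
+ * Dump the software and hardware SCB contents, including the
+ * scatter/gather list, to the console.
+ */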
+void
+ahc_print_scb(struct scb *scb)
+{
+	int i;
+
+	struct hardware_scb *hscb = scb->hscb;
+
+	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
+	       (void *)scb,
+	       hscb->control,
+	       hscb->scsiid,
+	       hscb->lun,
+	       hscb->cdb_len);
+	printf("Shared Data: ");
+	for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
+		printf("%#02x", hscb->shared_data.cdb[i]);
+	printf("        dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
+		ahc_le32toh(hscb->dataptr),
+		ahc_le32toh(hscb->datacnt),
+		ahc_le32toh(hscb->sgptr),
+		hscb->tag);
+	if (scb->sg_count > 0) {
+		for (i = 0; i < scb->sg_count; i++) {
+			printf("sg[%d] - Addr 0x%x%x : Length %d\n",
+			       i,
+			       (ahc_le32toh(scb->sg_list[i].len) >> 24
+			        & SG_HIGH_ADDR_BITS),
+			       ahc_le32toh(scb->sg_list[i].addr),
+			       ahc_le32toh(scb->sg_list[i].len));
+		}
+	}
+}
+
+/************************* Transfer Negotiation *******************************/
+/*
+ * Allocate per target mode instance (ID we respond to as a target)
+ * transfer negotiation data structures.
+ */
+static struct ahc_tmode_tstate *
+ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
+{
+	struct ahc_tmode_tstate *master_tstate;
+	struct ahc_tmode_tstate *tstate;
+	int i;
+
+	master_tstate = ahc->enabled_targets[ahc->our_id];
+	if (channel == 'B') {
+		scsi_id += 8;
+		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
+	}
+	if (ahc->enabled_targets[scsi_id] != NULL
+	 && ahc->enabled_targets[scsi_id] != master_tstate)
+		panic("%s: ahc_alloc_tstate - Target already allocated",
+		      ahc_name(ahc));
+	tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate),
+						   M_DEVBUF, M_NOWAIT);
+	if (tstate == NULL)
+		return (NULL);
+
+	/*
+	 * If we have allocated a master tstate, copy user settings from
+	 * the master tstate (taken from SRAM or the EEPROM) for this
+	 * channel, but reset our current and goal settings to async/narrow
+	 * until an initiator talks to us.
+	 */
+	if (master_tstate != NULL) {
+		memcpy(tstate, master_tstate, sizeof(*tstate));
+		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
+		tstate->ultraenb = 0;
+		for (i = 0; i < AHC_NUM_TARGETS; i++) {
+			memset(&tstate->transinfo[i].curr, 0,
+			      sizeof(tstate->transinfo[i].curr));
+			memset(&tstate->transinfo[i].goal, 0,
+			      sizeof(tstate->transinfo[i].goal));
+		}
+	} else
+		memset(tstate, 0, sizeof(*tstate));
+	ahc->enabled_targets[scsi_id] = tstate;
+	return (tstate);
+}
+
+#ifdef AHC_TARGET_MODE
+/*
+ * Free per target mode instance (ID we respond to as a target)
+ * transfer negotiation data structures.
+ */
+static void
+ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
+{
+	struct ahc_tmode_tstate *tstate;
+
+	/*
+	 * Don't clean up our "master" tstate.
+	 * It has our default user settings.
+	 */
+	if (((channel == 'B' && scsi_id == ahc->our_id_b)
+	  || (channel == 'A' && scsi_id == ahc->our_id))
+	 && force == FALSE)
+		return;
+
+	if (channel == 'B')
+		scsi_id += 8;
+	tstate = ahc->enabled_targets[scsi_id];
+	if (tstate != NULL)
+		free(tstate, M_DEVBUF);
+	ahc->enabled_targets[scsi_id] = NULL;
+}
+#endif
+
+/*
+ * Called when we have an active connection to a target on the bus,
+ * this function finds the nearest syncrate to the input period, limited
+ * by the capabilities of the adapter, the current bus connectivity
+ * (SE vs. LVD), and the sync settings for the target.
+ */
+struct ahc_syncrate *
+ahc_devlimited_syncrate(struct ahc_softc *ahc,
+			struct ahc_initiator_tinfo *tinfo,
+			u_int *period, u_int *ppr_options, role_t role)
+{
+	struct	ahc_transinfo *transinfo;
+	u_int	maxsync;
+
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
+		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
+			maxsync = AHC_SYNCRATE_DT;
+		} else {
+			maxsync = AHC_SYNCRATE_ULTRA;
+			/* Can't do DT on an SE bus */
+			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+		}
+	} else if ((ahc->features & AHC_ULTRA) != 0) {
+		maxsync = AHC_SYNCRATE_ULTRA;
+	} else {
+		maxsync = AHC_SYNCRATE_FAST;
+	}
+	/*
+	 * Never allow a value higher than our current goal
+	 * period; otherwise we may allow a target initiated
+	 * negotiation to go above the limit as set by the
+	 * user.  In the case of an initiator initiated
+	 * sync negotiation, we limit based on the user
+	 * setting.  This allows the system to still accept
+	 * incoming negotiations even if target initiated
+	 * negotiation is not performed.
+	 */
+	if (role == ROLE_TARGET)
+		transinfo = &tinfo->user;
+	else 
+		transinfo = &tinfo->goal;
+	*ppr_options &= transinfo->ppr_options;
+	if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
+		maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2);
+		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+	}
+	if (transinfo->period == 0) {
+		*period = 0;
+		*ppr_options = 0;
+		return (NULL);
+	}
+	*period = MAX(*period, transinfo->period);
+	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
+}
+
+/*
+ * Look up the valid period to SCSIRATE conversion in our table.
+ * Return the period and offset that should be sent to the target
+ * if this was the beginning of an SDTR.
+ */
+struct ahc_syncrate *
+ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
+		  u_int *ppr_options, u_int maxsync)
+{
+	struct ahc_syncrate *syncrate;
+
+	if ((ahc->features & AHC_DT) == 0)
+		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+
+	/* Skip all DT only entries if DT is not available */
+	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
+	 && maxsync < AHC_SYNCRATE_ULTRA2)
+		maxsync = AHC_SYNCRATE_ULTRA2;
+	
+	for (syncrate = &ahc_syncrates[maxsync];
+	     syncrate->rate != NULL;
+	     syncrate++) {
+
+		/*
+		 * The Ultra2 table doesn't go as low
+		 * as for the Fast/Ultra cards.
+		 */
+		if ((ahc->features & AHC_ULTRA2) != 0
+		 && (syncrate->sxfr_u2 == 0))
+			break;
+
+		if (*period <= syncrate->period) {
+			/*
+			 * When responding to a target that requests
+			 * sync, the requested rate may fall between
+			 * two rates that we can output, but still be
+			 * a rate that we can receive.  Because of this,
+			 * we want to respond to the target with
+			 * the same rate that it sent to us even
+			 * if the period we use to send data to it
+			 * is lower.  Only lower the response period
+			 * if we must.
+			 */
+			if (syncrate == &ahc_syncrates[maxsync])
+				*period = syncrate->period;
+
+			/*
+			 * At some speeds, we only support
+			 * ST transfers.
+			 */
+		 	if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
+				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+			break;
+		}
+	}
+
+	if ((*period == 0)
+	 || (syncrate->rate == NULL)
+	 || ((ahc->features & AHC_ULTRA2) != 0
+	  && (syncrate->sxfr_u2 == 0))) {
+		/* Use asynchronous transfers. */
+		*period = 0;
+		syncrate = NULL;
+		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+	}
+	return (syncrate);
+}
+
+/*
+ * Convert from an entry in our syncrate table to the SCSI equivalent
+ * sync "period" factor.
+ */
+u_int
+ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
+{
+	struct ahc_syncrate *syncrate;
+
+	if ((ahc->features & AHC_ULTRA2) != 0)
+		scsirate &= SXFR_ULTRA2;
+	else
+		scsirate &= SXFR;
+
+	syncrate = &ahc_syncrates[maxsync];
+	while (syncrate->rate != NULL) {
+
+		if ((ahc->features & AHC_ULTRA2) != 0) {
+			if (syncrate->sxfr_u2 == 0)
+				break;
+			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
+				return (syncrate->period);
+		} else if (scsirate == (syncrate->sxfr & SXFR)) {
+				return (syncrate->period);
+		}
+		syncrate++;
+	}
+	return (0); /* async */
+}
+
+/*
+ * Truncate the given synchronous offset to a value the
+ * current adapter type and syncrate are capable of.
+ */
+void
+ahc_validate_offset(struct ahc_softc *ahc,
+		    struct ahc_initiator_tinfo *tinfo,
+		    struct ahc_syncrate *syncrate,
+		    u_int *offset, int wide, role_t role)
+{
+	u_int maxoffset;
+
+	/* Limit offset to what we can do */
+	if (syncrate == NULL) {
+		maxoffset = 0;
+	} else if ((ahc->features & AHC_ULTRA2) != 0) {
+		maxoffset = MAX_OFFSET_ULTRA2;
+	} else {
+		if (wide)
+			maxoffset = MAX_OFFSET_16BIT;
+		else
+			maxoffset = MAX_OFFSET_8BIT;
+	}
+	*offset = MIN(*offset, maxoffset);
+	if (tinfo != NULL) {
+		if (role == ROLE_TARGET)
+			*offset = MIN(*offset, tinfo->user.offset);
+		else
+			*offset = MIN(*offset, tinfo->goal.offset);
+	}
+}
+
+/*
+ * Truncate the given transfer width parameter to a value the
+ * current adapter type is capable of.
+ */
+void
+ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
+		   u_int *bus_width, role_t role)
+{
+	switch (*bus_width) {
+	default:
+		if (ahc->features & AHC_WIDE) {
+			/* Respond Wide */
+			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
+			break;
+		}
+		/* FALLTHROUGH */
+	case MSG_EXT_WDTR_BUS_8_BIT:
+		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+		break;
+	}
+	if (tinfo != NULL) {
+		if (role == ROLE_TARGET)
+			*bus_width = MIN(tinfo->user.width, *bus_width);
+		else
+			*bus_width = MIN(tinfo->goal.width, *bus_width);
+	}
+}
+
+/*
+ * Update the bitmask of targets with which the controller should
+ * negotiate at the next convenient opportunity.  This currently
+ * means the next time we send the initial identify messages for
+ * a new transaction.
+ */
+int
+ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+		       struct ahc_tmode_tstate *tstate,
+		       struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type)
+{
+	u_int auto_negotiate_orig;
+
+	auto_negotiate_orig = tstate->auto_negotiate;
+	if (neg_type == AHC_NEG_ALWAYS) {
+		/*
+		 * Force our "current" settings to be
+		 * unknown so that unless a bus reset
+		 * occurs the need to renegotiate is
+		 * recorded persistently.
+		 */
+		if ((ahc->features & AHC_WIDE) != 0)
+			tinfo->curr.width = AHC_WIDTH_UNKNOWN;
+		tinfo->curr.period = AHC_PERIOD_UNKNOWN;
+		tinfo->curr.offset = AHC_OFFSET_UNKNOWN;
+	}
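+	/*
+	 * Request a new negotiation whenever our current and goal
+	 * settings differ, or, for AHC_NEG_IF_NON_ASYNC, whenever the
+	 * goal is anything other than async/narrow with no PPR options.
+	 */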
+	if (tinfo->curr.period != tinfo->goal.period
+	 || tinfo->curr.width != tinfo->goal.width
+	 || tinfo->curr.offset != tinfo->goal.offset
+	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
+	 || (neg_type == AHC_NEG_IF_NON_ASYNC
+	  && (tinfo->goal.offset != 0
+	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
+	   || tinfo->goal.ppr_options != 0)))
+		tstate->auto_negotiate |= devinfo->target_mask;
+	else
+		tstate->auto_negotiate &= ~devinfo->target_mask;
+
+	return (auto_negotiate_orig != tstate->auto_negotiate);
+}
+
+/*
+ * Update the user/goal/curr tables of synchronous negotiation
+ * parameters as well as, in the case of a current or active update,
+ * any data structures on the host controller.  In the case of an
+ * active update, the specified target is currently talking to us on
+ * the bus, so the transfer parameter update must take effect
+ * immediately.
+ */
+void
+ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+		 struct ahc_syncrate *syncrate, u_int period,
+		 u_int offset, u_int ppr_options, u_int type, int paused)
+{
+	struct	ahc_initiator_tinfo *tinfo;
+	struct	ahc_tmode_tstate *tstate;
+	u_int	old_period;
+	u_int	old_offset;
+	u_int	old_ppr;
+	int	active;
+	int	update_needed;
+
+	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
+	update_needed = 0;
+
+	if (syncrate == NULL) {
+		period = 0;
+		offset = 0;
+	}
+
+	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+
+	if ((type & AHC_TRANS_USER) != 0) {
+		tinfo->user.period = period;
+		tinfo->user.offset = offset;
+		tinfo->user.ppr_options = ppr_options;
+	}
+
+	if ((type & AHC_TRANS_GOAL) != 0) {
+		tinfo->goal.period = period;
+		tinfo->goal.offset = offset;
+		tinfo->goal.ppr_options = ppr_options;
+	}
+
+	old_period = tinfo->curr.period;
+	old_offset = tinfo->curr.offset;
+	old_ppr	   = tinfo->curr.ppr_options;
+
+	if ((type & AHC_TRANS_CUR) != 0
+	 && (old_period != period
+	  || old_offset != offset
+	  || old_ppr != ppr_options)) {
+		u_int	scsirate;
+
+		update_needed++;
+		scsirate = tinfo->scsirate;
+		if ((ahc->features & AHC_ULTRA2) != 0) {
+
+			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
+			if (syncrate != NULL) {
+				scsirate |= syncrate->sxfr_u2;
+				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
+					scsirate |= ENABLE_CRC;
+				else
+					scsirate |= SINGLE_EDGE;
+			}
+		} else {
+
+			scsirate &= ~(SXFR|SOFS);
+			/*
+			 * Ensure Ultra mode is set properly for
+			 * this target.
+			 */
+			tstate->ultraenb &= ~devinfo->target_mask;
+			if (syncrate != NULL) {
+				if (syncrate->sxfr & ULTRA_SXFR) {
+					tstate->ultraenb |=
+						devinfo->target_mask;
+				}
+				scsirate |= syncrate->sxfr & SXFR;
+				scsirate |= offset & SOFS;
+			}
+			if (active) {
+				u_int sxfrctl0;
+
+				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
+				sxfrctl0 &= ~FAST20;
+				if (tstate->ultraenb & devinfo->target_mask)
+					sxfrctl0 |= FAST20;
+				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
+			}
+		}
+		if (active) {
+			ahc_outb(ahc, SCSIRATE, scsirate);
+			if ((ahc->features & AHC_ULTRA2) != 0)
+				ahc_outb(ahc, SCSIOFFSET, offset);
+		}
+
+		tinfo->scsirate = scsirate;
+		tinfo->curr.period = period;
+		tinfo->curr.offset = offset;
+		tinfo->curr.ppr_options = ppr_options;
+
+		ahc_send_async(ahc, devinfo->channel, devinfo->target,
+			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
+		if (bootverbose) {
+			if (offset != 0) {
+				printf("%s: target %d synchronous at %sMHz%s, "
+				       "offset = 0x%x\n", ahc_name(ahc),
+				       devinfo->target, syncrate->rate,
+				       (ppr_options & MSG_EXT_PPR_DT_REQ)
+				       ? " DT" : "", offset);
+			} else {
+				printf("%s: target %d using "
+				       "asynchronous transfers\n",
+				       ahc_name(ahc), devinfo->target);
+			}
+		}
+	}
+
+	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
+						tinfo, AHC_NEG_TO_GOAL);
+
+	if (update_needed)
+		ahc_update_pending_scbs(ahc);
+}
+
+/*
+ * Update the user/goal/curr tables of wide negotiation
+ * parameters as well as, in the case of a current or active update,
+ * any data structures on the host controller.  In the case of an
+ * active update, the specified target is currently talking to us on
+ * the bus, so the transfer parameter update must take effect
+ * immediately.
+ */
+void
+ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+	      u_int width, u_int type, int paused)
+{
+	struct	ahc_initiator_tinfo *tinfo;
+	struct	ahc_tmode_tstate *tstate;
+	u_int	oldwidth;
+	int	active;
+	int	update_needed;
+
+	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
+	update_needed = 0;
+	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+
+	if ((type & AHC_TRANS_USER) != 0)
+		tinfo->user.width = width;
+
+	if ((type & AHC_TRANS_GOAL) != 0)
+		tinfo->goal.width = width;
+
+	oldwidth = tinfo->curr.width;
+	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
+		u_int	scsirate;
+
+		update_needed++;
+		scsirate =  tinfo->scsirate;
+		scsirate &= ~WIDEXFER;
+		if (width == MSG_EXT_WDTR_BUS_16_BIT)
+			scsirate |= WIDEXFER;
+
+		tinfo->scsirate = scsirate;
+
+		if (active)
+			ahc_outb(ahc, SCSIRATE, scsirate);
+
+		tinfo->curr.width = width;
+
+		ahc_send_async(ahc, devinfo->channel, devinfo->target,
+			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
+		if (bootverbose) {
+			printf("%s: target %d using %dbit transfers\n",
+			       ahc_name(ahc), devinfo->target,
+			       8 * (0x01 << width));
+		}
+	}
+
+	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
+						tinfo, AHC_NEG_TO_GOAL);
+	if (update_needed)
+		ahc_update_pending_scbs(ahc);
+}
+
+/*
+ * Update the current state of tagged queuing for a given target.
+ */
+void
+ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+	     ahc_queue_alg alg)
+{
+ 	ahc_platform_set_tags(ahc, devinfo, alg);
+ 	ahc_send_async(ahc, devinfo->channel, devinfo->target,
+ 		       devinfo->lun, AC_TRANSFER_NEG, &alg);
+}
+
+/*
+ * When the transfer settings for a connection change, update any
+ * in-transit SCBs to contain the new data so the hardware will
+ * be set correctly during future (re)selections.
+ */
+static void
+ahc_update_pending_scbs(struct ahc_softc *ahc)
+{
+	struct	scb *pending_scb;
+	int	pending_scb_count;
+	int	i;
+	int	paused;
+	u_int	saved_scbptr;
+
+	/*
+	 * Traverse the pending SCB list and ensure that all of the
+	 * SCBs there have the proper settings.
+	 */
+	pending_scb_count = 0;
+	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
+		struct ahc_devinfo devinfo;
+		struct hardware_scb *pending_hscb;
+		struct ahc_initiator_tinfo *tinfo;
+		struct ahc_tmode_tstate *tstate;
+
+		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
+		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
+					    devinfo.our_scsiid,
+					    devinfo.target, &tstate);
+		pending_hscb = pending_scb->hscb;
+		pending_hscb->control &= ~ULTRAENB;
+		if ((tstate->ultraenb & devinfo.target_mask) != 0)
+			pending_hscb->control |= ULTRAENB;
+		pending_hscb->scsirate = tinfo->scsirate;
+		pending_hscb->scsioffset = tinfo->curr.offset;
+		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
+		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
+			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
+			pending_hscb->control &= ~MK_MESSAGE;
+		}
+		ahc_sync_scb(ahc, pending_scb,
+			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+		pending_scb_count++;
+	}
+
+	if (pending_scb_count == 0)
+		return;
+
+	if (ahc_is_paused(ahc)) {
+		paused = 1;
+	} else {
+		paused = 0;
+		ahc_pause(ahc);
+	}
+
+	saved_scbptr = ahc_inb(ahc, SCBPTR);
+	/* Ensure that the hscbs down on the card match the new information */
+	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
+		struct	hardware_scb *pending_hscb;
+		u_int	control;
+		u_int	scb_tag;
+
+		ahc_outb(ahc, SCBPTR, i);
+		scb_tag = ahc_inb(ahc, SCB_TAG);
+		pending_scb = ahc_lookup_scb(ahc, scb_tag);
+		if (pending_scb == NULL)
+			continue;
+
+		pending_hscb = pending_scb->hscb;
+		control = ahc_inb(ahc, SCB_CONTROL);
+		control &= ~(ULTRAENB|MK_MESSAGE);
+		control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
+		ahc_outb(ahc, SCB_CONTROL, control);
+		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
+		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
+	}
+	ahc_outb(ahc, SCBPTR, saved_scbptr);
+
+	if (paused == 0)
+		ahc_unpause(ahc);
+}
+
+/**************************** Pathing Information *****************************/
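+/*
+ * Build a devinfo structure describing the connection currently
+ * active on the bus, using our role, our SCSI ID, and the target/lun
+ * information latched by the sequencer in SAVED_SCSIID and SAVED_LUN.
+ */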
+static void
+ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+	u_int	saved_scsiid;
+	role_t	role;
+	int	our_id;
+
+	if (ahc_inb(ahc, SSTAT0) & TARGET)
+		role = ROLE_TARGET;
+	else
+		role = ROLE_INITIATOR;
+
+	if (role == ROLE_TARGET
+	 && (ahc->features & AHC_MULTI_TID) != 0
+	 && (ahc_inb(ahc, SEQ_FLAGS)
+ 	   & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
+		/* We were selected, so pull our id from TARGIDIN */
+		our_id = ahc_inb(ahc, TARGIDIN) & OID;
+	} else if ((ahc->features & AHC_ULTRA2) != 0)
+		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
+	else
+		our_id = ahc_inb(ahc, SCSIID) & OID;
+
+	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
+	ahc_compile_devinfo(devinfo,
+			    our_id,
+			    SCSIID_TARGET(ahc, saved_scsiid),
+			    ahc_inb(ahc, SAVED_LUN),
+			    SCSIID_CHANNEL(ahc, saved_scsiid),
+			    role);
+}
+
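+/*
+ * Return the phase table entry matching the given bus phase, or the
+ * table's default entry if the phase is not recognized.
+ */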
+struct ahc_phase_table_entry*
+ahc_lookup_phase_entry(int phase)
+{
+	struct ahc_phase_table_entry *entry;
+	struct ahc_phase_table_entry *last_entry;
+
+	/*
+	 * num_phases doesn't include the default entry which
+	 * will be returned if the phase doesn't match.
+	 */
+	last_entry = &ahc_phase_table[num_phases];
+	for (entry = ahc_phase_table; entry < last_entry; entry++) {
+		if (phase == entry->phase)
+			break;
+	}
+	return (entry);
+}
+
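+/*
+ * Fill in a devinfo structure from its component parts, computing the
+ * target offset and target mask used to index per-target state.
+ */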
+void
+ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
+		    u_int lun, char channel, role_t role)
+{
+	devinfo->our_scsiid = our_id;
+	devinfo->target = target;
+	devinfo->lun = lun;
+	devinfo->target_offset = target;
+	devinfo->channel = channel;
+	devinfo->role = role;
+	if (channel == 'B')
+		devinfo->target_offset += 8;
+	devinfo->target_mask = (0x01 << devinfo->target_offset);
+}
+
+void
+ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+	printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel,
+	       devinfo->target, devinfo->lun);
+}
+
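+/*
+ * Build a devinfo structure from the addressing information stored
+ * in an SCB.
+ */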
+static void
+ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+		struct scb *scb)
+{
+	role_t	role;
+	int	our_id;
+
+	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
+	role = ROLE_INITIATOR;
+	if ((scb->flags & SCB_TARGET_SCB) != 0)
+		role = ROLE_TARGET;
+	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
+			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
+}
+
+
+/************************ Message Phase Processing ****************************/
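+/*
+ * Assert ATN.  On non-DT controllers the current bus signals from
+ * SCSISIGI are mirrored into SCSISIGO along with ATNO; DT controllers
+ * only need ATNO written.
+ */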
+static void
+ahc_assert_atn(struct ahc_softc *ahc)
+{
+	u_int scsisigo;
+
+	scsisigo = ATNO;
+	if ((ahc->features & AHC_DT) == 0)
+		scsisigo |= ahc_inb(ahc, SCSISIGI);
+	ahc_outb(ahc, SCSISIGO, scsisigo);
+}
+
+/*
+ * When an initiator transaction with the MK_MESSAGE flag either reconnects
+ * or enters the initial message out phase, we are interrupted.  Fill our
+ * outgoing message buffer with the appropriate message and begin handling
+ * the message phase(s) manually.
+ */
+static void
+ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+			   struct scb *scb)
+{
+	/*
+	 * To facilitate adding multiple messages together,
+	 * each routine should increment the index and len
+	 * variables instead of setting them explicitly.
+	 */
+	ahc->msgout_index = 0;
+	ahc->msgout_len = 0;
+
+	if ((scb->flags & SCB_DEVICE_RESET) == 0
+	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
+		u_int identify_msg;
+
+		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
+		if ((scb->hscb->control & DISCENB) != 0)
+			identify_msg |= MSG_IDENTIFY_DISCFLAG;
+		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
+		ahc->msgout_len++;
+
+		if ((scb->hscb->control & TAG_ENB) != 0) {
+			ahc->msgout_buf[ahc->msgout_index++] =
+			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
+			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
+			ahc->msgout_len += 2;
+		}
+	}
+
+	if (scb->flags & SCB_DEVICE_RESET) {
+		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
+		ahc->msgout_len++;
+		ahc_print_path(ahc, scb);
+		printf("Bus Device Reset Message Sent\n");
+		/*
+		 * Clear our selection hardware in advance of
+		 * the busfree.  We may have an entry in the waiting
+		 * Q for this target, and we don't want to go about
+		 * selecting while we handle the busfree and blow it
+		 * away.
+		 */
+		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
+	} else if ((scb->flags & SCB_ABORT) != 0) {
+		if ((scb->hscb->control & TAG_ENB) != 0)
+			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
+		else
+			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
+		ahc->msgout_len++;
+		ahc_print_path(ahc, scb);
+		printf("Abort%s Message Sent\n",
+		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
+		/*
+		 * Clear our selection hardware in advance of
+		 * the busfree.  We may have an entry in the waiting
+		 * Q for this target, and we don't want to go about
+		 * selecting while we handle the busfree and blow it
+		 * away.
+		 */
+		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
+	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
+		ahc_build_transfer_msg(ahc, devinfo);
+	} else {
+		printf("ahc_intr: AWAITING_MSG for an SCB that "
+		       "does not have a waiting message\n");
+		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
+		       devinfo->target_mask);
+		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
+		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
+		      ahc_inb(ahc, MSG_OUT), scb->flags);
+	}
+
+	/*
+	 * Clear the MK_MESSAGE flag from the SCB so we aren't
+	 * asked to send this message again.
+	 */
+	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
+	scb->hscb->control &= ~MK_MESSAGE;
+	ahc->msgout_index = 0;
+	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+}
+
+/*
+ * Build an appropriate transfer negotiation message for the
+ * currently active target.
+ */
+static void
+ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+	/*
+	 * We need to initiate transfer negotiations.
+	 * If our current and goal settings are identical,
+	 * we want to renegotiate due to a check condition.
+	 */
+	struct	ahc_initiator_tinfo *tinfo;
+	struct	ahc_tmode_tstate *tstate;
+	struct	ahc_syncrate *rate;
+	int	dowide;
+	int	dosync;
+	int	doppr;
+	u_int	period;
+	u_int	ppr_options;
+	u_int	offset;
+
+	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+	/*
+	 * Filter our period based on the current connection.
+	 * If we can't perform DT transfers on this segment (not in LVD
+	 * mode for instance), then our decision to issue a PPR message
+	 * may change.
+	 */
+	period = tinfo->goal.period;
+	offset = tinfo->goal.offset;
+	ppr_options = tinfo->goal.ppr_options;
+	/* Target initiated PPR is not allowed in the SCSI spec */
+	if (devinfo->role == ROLE_TARGET)
+		ppr_options = 0;
+	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
+				       &ppr_options, devinfo->role);
+	dowide = tinfo->curr.width != tinfo->goal.width;
+	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
+	/*
+	 * Only use PPR if we have options that need it, even if the device
+	 * claims to support it.  There might be an expander in the way
+	 * that doesn't.
+	 */
+	doppr = ppr_options != 0;
+
+	if (!dowide && !dosync && !doppr) {
+		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
+		dosync = tinfo->goal.offset != 0;
+	}
+
+	if (!dowide && !dosync && !doppr) {
+		/*
+		 * Force async with a WDTR message if we have a wide bus,
+		 * or just issue an SDTR with a 0 offset.
+		 */
+		if ((ahc->features & AHC_WIDE) != 0)
+			dowide = 1;
+		else
+			dosync = 1;
+
+		if (bootverbose) {
+			ahc_print_devinfo(ahc, devinfo);
+			printf("Ensuring async\n");
+		}
+	}
+
+	/* Target initiated PPR is not allowed in the SCSI spec */
+	if (devinfo->role == ROLE_TARGET)
+		doppr = 0;
+
+	/*
+	 * Both the PPR message and SDTR message require the
+	 * goal syncrate to be limited to what the target device
+	 * is capable of handling (based on whether an LVD->SE
+	 * expander is on the bus), so combine these two cases.
+	 * Regardless, guarantee that if we are using WDTR and SDTR
+	 * messages that WDTR comes first.
+	 */
+	if (doppr || (dosync && !dowide)) {
+
+		offset = tinfo->goal.offset;
+		ahc_validate_offset(ahc, tinfo, rate, &offset,
+				    doppr ? tinfo->goal.width
+					  : tinfo->curr.width,
+				    devinfo->role);
+		if (doppr) {
+			ahc_construct_ppr(ahc, devinfo, period, offset,
+					  tinfo->goal.width, ppr_options);
+		} else {
+			ahc_construct_sdtr(ahc, devinfo, period, offset);
+		}
+	} else {
+		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
+	}
+}
+
+/*
+ * Build a synchronous negotiation message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+		   u_int period, u_int offset)
+{
+	if (offset == 0)
+		period = AHC_ASYNC_XFER_PERIOD;
+	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
+	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
+	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
+	ahc->msgout_buf[ahc->msgout_index++] = period;
+	ahc->msgout_buf[ahc->msgout_index++] = offset;
+	ahc->msgout_len += 5;
+	if (bootverbose) {
+		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
+		       ahc_name(ahc), devinfo->channel, devinfo->target,
+		       devinfo->lun, period, offset);
+	}
+}
+
+/*
+ * Build a wide negotiation message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+		   u_int bus_width)
+{
+	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
+	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
+	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
+	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
+	ahc->msgout_len += 4;
+	if (bootverbose) {
+		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
+		       ahc_name(ahc), devinfo->channel, devinfo->target,
+		       devinfo->lun, bus_width);
+	}
+}
+
+/*
+ * Build a parallel protocol request message in our message
+ * buffer based on the input parameters.
+ */
+static void
+ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+		  u_int period, u_int offset, u_int bus_width,
+		  u_int ppr_options)
+{
+	if (offset == 0)
+		period = AHC_ASYNC_XFER_PERIOD;
+	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
+	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
+	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
+	ahc->msgout_buf[ahc->msgout_index++] = period;
+	ahc->msgout_buf[ahc->msgout_index++] = 0;
+	ahc->msgout_buf[ahc->msgout_index++] = offset;
+	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
+	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
+	ahc->msgout_len += 8;
+	if (bootverbose) {
+		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
+		       "offset %x, ppr_options %x\n", ahc_name(ahc),
+		       devinfo->channel, devinfo->target, devinfo->lun,
+		       bus_width, period, offset, ppr_options);
+	}
+}
+
+/*
+ * Clear any active message state.
+ */
+static void
+ahc_clear_msg_state(struct ahc_softc *ahc)
+{
+	ahc->msgout_len = 0;
+	ahc->msgin_index = 0;
+	ahc->msg_type = MSG_TYPE_NONE;
+	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
+		/*
+		 * The target didn't care to respond to our
+		 * message request, so clear ATN.
+		 */
+		ahc_outb(ahc, CLRSINT1, CLRATNO);
+	}
+	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
+	ahc_outb(ahc, SEQ_FLAGS2,
+		 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
+}
+
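+/*
+ * Recover from a target that has violated the SCSI protocol.
+ * Depending on the last bus phase seen, either attempt to abort the
+ * offending transaction with an abort message or reset the bus.
+ */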
+static void
+ahc_handle_proto_violation(struct ahc_softc *ahc)
+{
+	struct	ahc_devinfo devinfo;
+	struct	scb *scb;
+	u_int	scbid;
+	u_int	seq_flags;
+	u_int	curphase;
+	u_int	lastphase;
+	int	found;
+
+	ahc_fetch_devinfo(ahc, &devinfo);
+	scbid = ahc_inb(ahc, SCB_TAG);
+	scb = ahc_lookup_scb(ahc, scbid);
+	seq_flags = ahc_inb(ahc, SEQ_FLAGS);
+	curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
+	lastphase = ahc_inb(ahc, LASTPHASE);
+	if ((seq_flags & NOT_IDENTIFIED) != 0) {
+
+		/*
+		 * The reconnecting target either did not send an
+		 * identify message, or did, but we didn't find an SCB
+		 * to match.
+		 */
+		ahc_print_devinfo(ahc, &devinfo);
+		printf("Target did not send an IDENTIFY message. "
+		       "LASTPHASE = 0x%x.\n", lastphase);
+		scb = NULL;
+	} else if (scb == NULL) {
+		/*
+		 * We don't seem to have an SCB active for this
+		 * transaction.  Print an error and reset the bus.
+		 */
+		ahc_print_devinfo(ahc, &devinfo);
+		printf("No SCB found during protocol violation\n");
+		goto proto_violation_reset;
+	} else {
+		ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
+		if ((seq_flags & NO_CDB_SENT) != 0) {
+			ahc_print_path(ahc, scb);
+			printf("No or incomplete CDB sent to device.\n");
+		} else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) {
+			/*
+			 * The target never bothered to provide status to
+			 * us prior to completing the command.  Since we don't
+			 * know the disposition of this command, we must attempt
+			 * to abort it.  Assert ATN and prepare to send an abort
+			 * message.
+			 */
+			ahc_print_path(ahc, scb);
+			printf("Completed command without status.\n");
+		} else {
+			ahc_print_path(ahc, scb);
+			printf("Unknown protocol violation.\n");
+			ahc_dump_card_state(ahc);
+		}
+	}
+	if ((lastphase & ~P_DATAIN_DT) == 0
+	 || lastphase == P_COMMAND) {
+proto_violation_reset:
+		/*
+		 * Target either went directly to data/command
+		 * phase or didn't respond to our ATN.
+		 * The only safe thing to do is to blow
+		 * it away with a bus reset.
+		 */
+		found = ahc_reset_channel(ahc, 'A', TRUE);
+		printf("%s: Issued Channel %c Bus Reset. "
+		       "%d SCBs aborted\n", ahc_name(ahc), 'A', found);
+	} else {
+		/*
+		 * Leave the selection hardware off in case
+		 * this abort attempt will affect yet to
+		 * be sent commands.
+		 */
+		ahc_outb(ahc, SCSISEQ,
+			 ahc_inb(ahc, SCSISEQ) & ~ENSELO);
+		ahc_assert_atn(ahc);
+		ahc_outb(ahc, MSG_OUT, HOST_MSG);
+		if (scb == NULL) {
+			ahc_print_devinfo(ahc, &devinfo);
+			ahc->msgout_buf[0] = MSG_ABORT_TASK;
+			ahc->msgout_len = 1;
+			ahc->msgout_index = 0;
+			ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+		} else {
+			ahc_print_path(ahc, scb);
+			scb->flags |= SCB_ABORT;
+		}
+		printf("Protocol violation %s.  Attempting to abort.\n",
+		       ahc_lookup_phase_entry(curphase)->phasemsg);
+	}
+}
+
+/*
+ * Manual message loop handler.
+ */
+static void
+ahc_handle_message_phase(struct ahc_softc *ahc)
+{ 
+	struct	ahc_devinfo devinfo;
+	u_int	bus_phase;
+	int	end_session;
+
+	ahc_fetch_devinfo(ahc, &devinfo);
+	end_session = FALSE;
+	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
+
+reswitch:
+	switch (ahc->msg_type) {
+	case MSG_TYPE_INITIATOR_MSGOUT:
+	{
+		int lastbyte;
+		int phasemis;
+		int msgdone;
+
+		if (ahc->msgout_len == 0)
+			panic("HOST_MSG_LOOP interrupt with no active message");
+
+#ifdef AHC_DEBUG
+		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
+			ahc_print_devinfo(ahc, &devinfo);
+			printf("INITIATOR_MSG_OUT");
+		}
+#endif
+		phasemis = bus_phase != P_MESGOUT;
+		if (phasemis) {
+#ifdef AHC_DEBUG
+			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
+				printf(" PHASEMIS %s\n",
+				       ahc_lookup_phase_entry(bus_phase)
+							     ->phasemsg);
+			}
+#endif
+			if (bus_phase == P_MESGIN) {
+				/*
+				 * Change gears and see if
+				 * this message is of interest to
+				 * us or should be passed back to
+				 * the sequencer.
+				 */
+				ahc_outb(ahc, CLRSINT1, CLRATNO);
+				ahc->send_msg_perror = FALSE;
+				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
+				ahc->msgin_index = 0;
+				goto reswitch;
+			}
+			end_session = TRUE;
+			break;
+		}
+
+		if (ahc->send_msg_perror) {
+			ahc_outb(ahc, CLRSINT1, CLRATNO);
+			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
+#ifdef AHC_DEBUG
+			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
+				printf(" byte 0x%x\n", ahc->send_msg_perror);
+#endif
+			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
+			break;
+		}
+
+		msgdone	= ahc->msgout_index == ahc->msgout_len;
+		if (msgdone) {
+			/*
+			 * The target has requested a retry.
+			 * Re-assert ATN, reset our message index to
+			 * 0, and try again.
+			 */
+			ahc->msgout_index = 0;
+			ahc_assert_atn(ahc);
+		}
+
+		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
+		if (lastbyte) {
+			/* Last byte is signified by dropping ATN */
+			ahc_outb(ahc, CLRSINT1, CLRATNO);
+		}
+
+		/*
+		 * Clear our interrupt status and present
+		 * the next byte on the bus.
+		 */
+		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
+#ifdef AHC_DEBUG
+		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
+			printf(" byte 0x%x\n",
+			       ahc->msgout_buf[ahc->msgout_index]);
+#endif
+		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
+		break;
+	}
+	case MSG_TYPE_INITIATOR_MSGIN:
+	{
+		int phasemis;
+		int message_done;
+
+#ifdef AHC_DEBUG
+		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
+			ahc_print_devinfo(ahc, &devinfo);
+			printf("INITIATOR_MSG_IN");
+		}
+#endif
+		phasemis = bus_phase != P_MESGIN;
+		if (phasemis) {
+#ifdef AHC_DEBUG
+			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
+				printf(" PHASEMIS %s\n",
+				       ahc_lookup_phase_entry(bus_phase)
+							     ->phasemsg);
+			}
+#endif
+			ahc->msgin_index = 0;
+			if (bus_phase == P_MESGOUT
+			 && (ahc->send_msg_perror == TRUE
+			  || (ahc->msgout_len != 0
+			   && ahc->msgout_index == 0))) {
+				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+				goto reswitch;
+			}
+			end_session = TRUE;
+			break;
+		}
+
+		/* Pull the byte in without acking it */
+		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
+#ifdef AHC_DEBUG
+		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
+			printf(" byte 0x%x\n",
+			       ahc->msgin_buf[ahc->msgin_index]);
+#endif
+
+		message_done = ahc_parse_msg(ahc, &devinfo);
+
+		if (message_done) {
+			/*
+			 * Clear our incoming message buffer in case there
+			 * is another message following this one.
+			 */
+			ahc->msgin_index = 0;
+
+			/*
+			 * If this message elicited a response,
+			 * assert ATN so the target takes us to the
+			 * message out phase.
+			 */
+			if (ahc->msgout_len != 0) {
+#ifdef AHC_DEBUG
+				if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
+					ahc_print_devinfo(ahc, &devinfo);
+					printf("Asserting ATN for response\n");
+				}
+#endif
+				ahc_assert_atn(ahc);
+			}
+		} else 
+			ahc->msgin_index++;
+
+		if (message_done == MSGLOOP_TERMINATED) {
+			end_session = TRUE;
+		} else {
+			/* Ack the byte */
+			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
+			ahc_inb(ahc, SCSIDATL);
+		}
+		break;
+	}
+	case MSG_TYPE_TARGET_MSGIN:
+	{
+		int msgdone;
+		int msgout_request;
+
+		if (ahc->msgout_len == 0)
+			panic("Target MSGIN with no active message");
+
+		/*
+		 * If we interrupted a mesgout session, the initiator
+		 * will not know this until our first REQ.  So, we
+		 * only honor mesgout requests after we've sent our
+		 * first byte.
+		 */
+		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
+		 && ahc->msgout_index > 0)
+			msgout_request = TRUE;
+		else
+			msgout_request = FALSE;
+
+		if (msgout_request) {
+
+			/*
+			 * Change gears and see if
+			 * this message is of interest to
+			 * us or should be passed back to
+			 * the sequencer.
+			 */
+			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
+			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
+			ahc->msgin_index = 0;
+			/* Dummy read to REQ for first byte */
+			ahc_inb(ahc, SCSIDATL);
+			ahc_outb(ahc, SXFRCTL0,
+				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
+			break;
+		}
+
+		msgdone = ahc->msgout_index == ahc->msgout_len;
+		if (msgdone) {
+			ahc_outb(ahc, SXFRCTL0,
+				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
+			end_session = TRUE;
+			break;
+		}
+
+		/*
+		 * Present the next byte on the bus.
+		 */
+		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
+		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
+		break;
+	}
+	case MSG_TYPE_TARGET_MSGOUT:
+	{
+		int lastbyte;
+		int msgdone;
+
+		/*
+		 * The initiator signals that this is
+		 * the last byte by dropping ATN.
+		 */
+		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
+
+		/*
+		 * Read the latched byte, but turn off SPIOEN first
+		 * so that we don't inadvertently cause a REQ for the
+		 * next byte.
+		 */
+		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
+		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
+		msgdone = ahc_parse_msg(ahc, &devinfo);
+		if (msgdone == MSGLOOP_TERMINATED) {
+			/*
+			 * The message is *really* done in that it caused
+			 * us to go to bus free.  The sequencer has already
+			 * been reset at this point, so pull the ejection
+			 * handle.
+			 */
+			return;
+		}
+		
+		ahc->msgin_index++;
+
+		/*
+		 * XXX Read spec about initiator dropping ATN too soon
+		 *     and use msgdone to detect it.
+		 */
+		if (msgdone == MSGLOOP_MSGCOMPLETE) {
+			ahc->msgin_index = 0;
+
+			/*
+			 * If this message elicited a response, transition
+			 * to the Message in phase and send it.
+			 */
+			if (ahc->msgout_len != 0) {
+				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
+				ahc_outb(ahc, SXFRCTL0,
+					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
+				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
+				ahc->msgin_index = 0;
+				break;
+			}
+		}
+
+		if (lastbyte)
+			end_session = TRUE;
+		else {
+			/* Ask for the next byte. */
+			ahc_outb(ahc, SXFRCTL0,
+				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
+		}
+
+		break;
+	}
+	default:
+		panic("Unknown REQINIT message type");
+	}
+
+	if (end_session) {
+		ahc_clear_msg_state(ahc);
+		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
+	} else
+		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
+}
+
+/*
+ * See if we sent a particular message to the target.
+ * If "full" is true, return true only if the target saw the full
+ * message.  If "full" is false, return true if the target saw at
+ * least the first byte of the message.
+ */
+static int
+ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
+{
+	int found;
+	u_int index;
+
+	found = FALSE;
+	index = 0;
+
+	while (index < ahc->msgout_len) {
+		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
+			u_int end_index;
+
+			end_index = index + 1 + ahc->msgout_buf[index + 1];
+			if (ahc->msgout_buf[index+2] == msgval
+			 && type == AHCMSG_EXT) {
+
+				if (full) {
+					if (ahc->msgout_index > end_index)
+						found = TRUE;
+				} else if (ahc->msgout_index > index)
+					found = TRUE;
+			}
+			index = end_index;
+		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
+			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
+
+			/* Skip tag type and tag id or residue param */
+			index += 2;
+		} else {
+			/* Single byte message */
+			if (type == AHCMSG_1B
+			 && ahc->msgout_buf[index] == msgval
+			 && ahc->msgout_index > index)
+				found = TRUE;
+			index++;
+		}
+
+		if (found)
+			break;
+	}
+	return (found);
+}
+
+/*
+ * Wait for a complete incoming message, parse it, and respond accordingly.
+ */
+static int
+ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+	struct	ahc_initiator_tinfo *tinfo;
+	struct	ahc_tmode_tstate *tstate;
+	int	reject;
+	int	done;
+	int	response;
+	u_int	targ_scsirate;
+
+	done = MSGLOOP_IN_PROG;
+	response = FALSE;
+	reject = FALSE;
+	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+	targ_scsirate = tinfo->scsirate;
+
+	/*
+	 * Parse as much of the message as is available,
+	 * rejecting it if we don't support it.  When
+	 * the entire message is available and has been
+	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
+	 * that we have parsed an entire message.
+	 *
+	 * In the case of extended messages, we accept the length
+	 * byte outright and perform more checking once we know the
+	 * extended message type.
+	 */
+	switch (ahc->msgin_buf[0]) {
+	case MSG_DISCONNECT:
+	case MSG_SAVEDATAPOINTER:
+	case MSG_CMDCOMPLETE:
+	case MSG_RESTOREPOINTERS:
+	case MSG_IGN_WIDE_RESIDUE:
+		/*
+		 * End our message loop as these are messages
+		 * the sequencer handles on its own.
+		 */
+		done = MSGLOOP_TERMINATED;
+		break;
+	case MSG_MESSAGE_REJECT:
+		response = ahc_handle_msg_reject(ahc, devinfo);
+		/* FALLTHROUGH */
+	case MSG_NOOP:
+		done = MSGLOOP_MSGCOMPLETE;
+		break;
+	case MSG_EXTENDED:
+	{
+		/* Wait for enough of the message to begin validation */
+		if (ahc->msgin_index < 2)
+			break;
+		switch (ahc->msgin_buf[2]) {
+		case MSG_EXT_SDTR:
+		{
+			struct	 ahc_syncrate *syncrate;
+			u_int	 period;
+			u_int	 ppr_options;
+			u_int	 offset;
+			u_int	 saved_offset;
+			
+			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
+				reject = TRUE;
+				break;
+			}
+
+			/*
+			 * Wait until we have both args before validating
+			 * and acting on this message.
+			 *
+			 * Add one to MSG_EXT_SDTR_LEN to account for
+			 * the extended message preamble.
+			 */
+			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
+				break;
+
+			period = ahc->msgin_buf[3];
+			ppr_options = 0;
+			saved_offset = offset = ahc->msgin_buf[4];
+			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
+							   &ppr_options,
+							   devinfo->role);
+			ahc_validate_offset(ahc, tinfo, syncrate, &offset,
+					    targ_scsirate & WIDEXFER,
+					    devinfo->role);
+			if (bootverbose) {
+				printf("(%s:%c:%d:%d): Received "
+				       "SDTR period %x, offset %x\n\t"
+				       "Filtered to period %x, offset %x\n",
+				       ahc_name(ahc), devinfo->channel,
+				       devinfo->target, devinfo->lun,
+				       ahc->msgin_buf[3], saved_offset,
+				       period, offset);
+			}
+			ahc_set_syncrate(ahc, devinfo, 
+					 syncrate, period,
+					 offset, ppr_options,
+					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
+					 /*paused*/TRUE);
+
+			/*
+			 * See if we initiated Sync Negotiation
+			 * and didn't have to fall down to async
+			 * transfers.
+			 */
+			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
+				/* We started it */
+				if (saved_offset != offset) {
+					/* Went too low - force async */
+					reject = TRUE;
+				}
+			} else {
+				/*
+				 * Send our own SDTR in reply
+				 */
+				if (bootverbose
+				 && devinfo->role == ROLE_INITIATOR) {
+					printf("(%s:%c:%d:%d): Target "
+					       "Initiated SDTR\n",
+					       ahc_name(ahc), devinfo->channel,
+					       devinfo->target, devinfo->lun);
+				}
+				ahc->msgout_index = 0;
+				ahc->msgout_len = 0;
+				ahc_construct_sdtr(ahc, devinfo,
+						   period, offset);
+				ahc->msgout_index = 0;
+				response = TRUE;
+			}
+			done = MSGLOOP_MSGCOMPLETE;
+			break;
+		}
+		case MSG_EXT_WDTR:
+		{
+			u_int bus_width;
+			u_int saved_width;
+			u_int sending_reply;
+
+			sending_reply = FALSE;
+			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
+				reject = TRUE;
+				break;
+			}
+
+			/*
+			 * Wait until we have our arg before validating
+			 * and acting on this message.
+			 *
+			 * Add one to MSG_EXT_WDTR_LEN to account for
+			 * the extended message preamble.
+			 */
+			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
+				break;
+
+			bus_width = ahc->msgin_buf[3];
+			saved_width = bus_width;
+			ahc_validate_width(ahc, tinfo, &bus_width,
+					   devinfo->role);
+			if (bootverbose) {
+				printf("(%s:%c:%d:%d): Received WDTR "
+				       "%x filtered to %x\n",
+				       ahc_name(ahc), devinfo->channel,
+				       devinfo->target, devinfo->lun,
+				       saved_width, bus_width);
+			}
+
+			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
+				/*
+				 * Don't send a WDTR back to the
+				 * target, since we asked first.
+				 * If the width went higher than our
+				 * request, reject it.
+				 */
+				if (saved_width > bus_width) {
+					reject = TRUE;
+					printf("(%s:%c:%d:%d): requested %dBit "
+					       "transfers.  Rejecting...\n",
+					       ahc_name(ahc), devinfo->channel,
+					       devinfo->target, devinfo->lun,
+					       8 * (0x01 << bus_width));
+					bus_width = 0;
+				}
+			} else {
+				/*
+				 * Send our own WDTR in reply
+				 */
+				if (bootverbose
+				 && devinfo->role == ROLE_INITIATOR) {
+					printf("(%s:%c:%d:%d): Target "
+					       "Initiated WDTR\n",
+					       ahc_name(ahc), devinfo->channel,
+					       devinfo->target, devinfo->lun);
+				}
+				ahc->msgout_index = 0;
+				ahc->msgout_len = 0;
+				ahc_construct_wdtr(ahc, devinfo, bus_width);
+				ahc->msgout_index = 0;
+				response = TRUE;
+				sending_reply = TRUE;
+			}
+			/*
+			 * After a wide message, we are async, but
+			 * some devices don't seem to honor this portion
+			 * of the spec.  Force a renegotiation of the
+			 * sync component of our transfer agreement even
+			 * if our goal is async.  By updating our width
+			 * after forcing the negotiation, we avoid
+			 * renegotiating for width.
+			 */
+			ahc_update_neg_request(ahc, devinfo, tstate,
+					       tinfo, AHC_NEG_ALWAYS);
+			ahc_set_width(ahc, devinfo, bus_width,
+				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
+				      /*paused*/TRUE);
+			if (sending_reply == FALSE && reject == FALSE) {
+
+				/*
+				 * We will always have an SDTR to send.
+				 */
+				ahc->msgout_index = 0;
+				ahc->msgout_len = 0;
+				ahc_build_transfer_msg(ahc, devinfo);
+				ahc->msgout_index = 0;
+				response = TRUE;
+			}
+			done = MSGLOOP_MSGCOMPLETE;
+			break;
+		}
+		case MSG_EXT_PPR:
+		{
+			struct	ahc_syncrate *syncrate;
+			u_int	period;
+			u_int	offset;
+			u_int	bus_width;
+			u_int	ppr_options;
+			u_int	saved_width;
+			u_int	saved_offset;
+			u_int	saved_ppr_options;
+
+			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
+				reject = TRUE;
+				break;
+			}
+
+			/*
+			 * Wait until we have all args before validating
+			 * and acting on this message.
+			 *
+			 * Add one to MSG_EXT_PPR_LEN to account for
+			 * the extended message preamble.
+			 */
+			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
+				break;
+
+			period = ahc->msgin_buf[3];
+			offset = ahc->msgin_buf[5];
+			bus_width = ahc->msgin_buf[6];
+			saved_width = bus_width;
+			ppr_options = ahc->msgin_buf[7];
+			/*
+			 * According to the spec, a DT only
+			 * period factor with no DT option
+			 * set implies async.
+			 */
+			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
+			 && period == 9)
+				offset = 0;
+			saved_ppr_options = ppr_options;
+			saved_offset = offset;
+
+			/*
+			 * Mask out any options we don't support
+			 * on any controller.  Transfer options are
+			 * only available if we are negotiating wide.
+			 */
+			ppr_options &= MSG_EXT_PPR_DT_REQ;
+			if (bus_width == 0)
+				ppr_options = 0;
+
+			ahc_validate_width(ahc, tinfo, &bus_width,
+					   devinfo->role);
+			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
+							   &ppr_options,
+							   devinfo->role);
+			ahc_validate_offset(ahc, tinfo, syncrate,
+					    &offset, bus_width,
+					    devinfo->role);
+
+			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
+				/*
+				 * If we are unable to do any of the
+				 * requested options (we went too low),
+				 * then we'll have to reject the message.
+				 */
+				if (saved_width > bus_width
+				 || saved_offset != offset
+				 || saved_ppr_options != ppr_options) {
+					reject = TRUE;
+					period = 0;
+					offset = 0;
+					bus_width = 0;
+					ppr_options = 0;
+					syncrate = NULL;
+				}
+			} else {
+				if (devinfo->role != ROLE_TARGET)
+					printf("(%s:%c:%d:%d): Target "
+					       "Initiated PPR\n",
+					       ahc_name(ahc), devinfo->channel,
+					       devinfo->target, devinfo->lun);
+				else
+					printf("(%s:%c:%d:%d): Initiator "
+					       "Initiated PPR\n",
+					       ahc_name(ahc), devinfo->channel,
+					       devinfo->target, devinfo->lun);
+				ahc->msgout_index = 0;
+				ahc->msgout_len = 0;
+				ahc_construct_ppr(ahc, devinfo, period, offset,
+						  bus_width, ppr_options);
+				ahc->msgout_index = 0;
+				response = TRUE;
+			}
+			if (bootverbose) {
+				printf("(%s:%c:%d:%d): Received PPR width %x, "
+				       "period %x, offset %x, options %x\n"
+				       "\tFiltered to width %x, period %x, "
+				       "offset %x, options %x\n",
+				       ahc_name(ahc), devinfo->channel,
+				       devinfo->target, devinfo->lun,
+				       saved_width, ahc->msgin_buf[3],
+				       saved_offset, saved_ppr_options,
+				       bus_width, period, offset, ppr_options);
+			}
+			ahc_set_width(ahc, devinfo, bus_width,
+				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
+				      /*paused*/TRUE);
+			ahc_set_syncrate(ahc, devinfo,
+					 syncrate, period,
+					 offset, ppr_options,
+					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
+					 /*paused*/TRUE);
+			done = MSGLOOP_MSGCOMPLETE;
+			break;
+		}
+		default:
+			/* Unknown extended message.  Reject it. */
+			reject = TRUE;
+			break;
+		}
+		break;
+	}
+#ifdef AHC_TARGET_MODE
+	case MSG_BUS_DEV_RESET:
+		ahc_handle_devreset(ahc, devinfo,
+				    CAM_BDR_SENT,
+				    "Bus Device Reset Received",
+				    /*verbose_level*/0);
+		ahc_restart(ahc);
+		done = MSGLOOP_TERMINATED;
+		break;
+	case MSG_ABORT_TAG:
+	case MSG_ABORT:
+	case MSG_CLEAR_QUEUE:
+	{
+		int tag;
+
+		/* Target mode messages */
+		if (devinfo->role != ROLE_TARGET) {
+			reject = TRUE;
+			break;
+		}
+		tag = SCB_LIST_NULL;
+		if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
+			tag = ahc_inb(ahc, INITIATOR_TAG);
+		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
+			       devinfo->lun, tag, ROLE_TARGET,
+			       CAM_REQ_ABORTED);
+
+		tstate = ahc->enabled_targets[devinfo->our_scsiid];
+		if (tstate != NULL) {
+			struct ahc_tmode_lstate* lstate;
+
+			lstate = tstate->enabled_luns[devinfo->lun];
+			if (lstate != NULL) {
+				ahc_queue_lstate_event(ahc, lstate,
+						       devinfo->our_scsiid,
+						       ahc->msgin_buf[0],
+						       /*arg*/tag);
+				ahc_send_lstate_events(ahc, lstate);
+			}
+		}
+		ahc_restart(ahc);
+		done = MSGLOOP_TERMINATED;
+		break;
+	}
+#endif
+	case MSG_TERM_IO_PROC:
+	default:
+		reject = TRUE;
+		break;
+	}
+
+	if (reject) {
+		/*
+		 * Setup to reject the message.
+		 */
+		ahc->msgout_index = 0;
+		ahc->msgout_len = 1;
+		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
+		done = MSGLOOP_MSGCOMPLETE;
+		response = TRUE;
+	}
+
+	if (done != MSGLOOP_IN_PROG && !response)
+		/* Clear the outgoing message buffer */
+		ahc->msgout_len = 0;
+
+	return (done);
+}
+
+/*
+ * Process a message reject message.
+ */
+static int
+ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+	/*
+	 * What we care about here is if we had an
+	 * outstanding SDTR or WDTR message for this
+	 * target.  If we did, this is a signal that
+	 * the target is refusing negotiation.
+	 */
+	struct scb *scb;
+	struct ahc_initiator_tinfo *tinfo;
+	struct ahc_tmode_tstate *tstate;
+	u_int scb_index;
+	u_int last_msg;
+	int   response = 0;
+
+	scb_index = ahc_inb(ahc, SCB_TAG);
+	scb = ahc_lookup_scb(ahc, scb_index);
+	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
+				    devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+	/* Might be necessary */
+	last_msg = ahc_inb(ahc, LAST_MSG);
+
+	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
+		/*
+		 * Target does not support the PPR message.
+		 * Attempt to negotiate SPI-2 style.
+		 */
+		if (bootverbose) {
+			printf("(%s:%c:%d:%d): PPR Rejected. "
+			       "Trying WDTR/SDTR\n",
+			       ahc_name(ahc), devinfo->channel,
+			       devinfo->target, devinfo->lun);
+		}
+		tinfo->goal.ppr_options = 0;
+		tinfo->curr.transport_version = 2;
+		tinfo->goal.transport_version = 2;
+		ahc->msgout_index = 0;
+		ahc->msgout_len = 0;
+		ahc_build_transfer_msg(ahc, devinfo);
+		ahc->msgout_index = 0;
+		response = 1;
+	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
+
+		/* note 8bit xfers */
+		printf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
+		       "8bit transfers\n", ahc_name(ahc),
+		       devinfo->channel, devinfo->target, devinfo->lun);
+		ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
+			      /*paused*/TRUE);
+		/*
+		 * No need to clear the sync rate.  If the target
+		 * did not accept the command, our syncrate is
+		 * unaffected.  If the target started the negotiation,
+		 * but rejected our response, we already cleared the
+		 * sync rate before sending our WDTR.
+		 */
+		if (tinfo->goal.offset != tinfo->curr.offset) {
+
+			/* Start the sync negotiation */
+			ahc->msgout_index = 0;
+			ahc->msgout_len = 0;
+			ahc_build_transfer_msg(ahc, devinfo);
+			ahc->msgout_index = 0;
+			response = 1;
+		}
+	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
+		/* note asynch xfers and clear flag */
+		ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
+				 /*offset*/0, /*ppr_options*/0,
+				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
+				 /*paused*/TRUE);
+		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
+		       "Using asynchronous transfers\n",
+		       ahc_name(ahc), devinfo->channel,
+		       devinfo->target, devinfo->lun);
+	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
+		int tag_type;
+		int mask;
+
+		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
+
+		if (tag_type == MSG_SIMPLE_TASK) {
+			printf("(%s:%c:%d:%d): refuses tagged commands.  "
+			       "Performing non-tagged I/O\n", ahc_name(ahc),
+			       devinfo->channel, devinfo->target, devinfo->lun);
+			ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE);
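+			/* Strip both the tag enable and the tag type bits. */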
+			mask = ~0x23;
+		} else {
+			printf("(%s:%c:%d:%d): refuses %s tagged commands.  "
+			       "Performing simple queue tagged I/O only\n",
+			       ahc_name(ahc), devinfo->channel, devinfo->target,
+			       devinfo->lun, tag_type == MSG_ORDERED_TASK
+			       ? "ordered" : "head of queue");
+			ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC);
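+			/* Strip only the tag type bits. */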
+			mask = ~0x03;
+		}
+
+		/*
+		 * Resend the identify for this CCB as the target
+		 * may believe that the selection is invalid otherwise.
+		 */
+		ahc_outb(ahc, SCB_CONTROL,
+			 ahc_inb(ahc, SCB_CONTROL) & mask);
+	 	scb->hscb->control &= mask;
+		ahc_set_transaction_tag(scb, /*enabled*/FALSE,
+					/*type*/MSG_SIMPLE_TASK);
+		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
+		ahc_assert_atn(ahc);
+
+		/*
+		 * This transaction is now at the head of
+		 * the untagged queue for this target.
+		 */
+		if ((ahc->flags & AHC_SCB_BTT) == 0) {
+			struct scb_tailq *untagged_q;
+
+			untagged_q =
+			    &(ahc->untagged_queues[devinfo->target_offset]);
+			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
+			scb->flags |= SCB_UNTAGGEDQ;
+		}
+		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
+			     scb->hscb->tag);
+
+		/*
+		 * Requeue all tagged commands for this target
+		 * currently in our possession so they can be
+		 * converted to untagged commands.
+		 */
+		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
+				   SCB_GET_CHANNEL(ahc, scb),
+				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
+				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
+				   SEARCH_COMPLETE);
+	} else {
+		/*
+		 * Otherwise, we ignore it.
+		 */
+		printf("%s:%c:%d: Message reject for %x -- ignored\n",
+		       ahc_name(ahc), devinfo->channel, devinfo->target,
+		       last_msg);
+	}
+	return (response);
+}
+
+/*
+ * Process an ignore wide residue message.
+ */
+static void
+ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+	u_int scb_index;
+	struct scb *scb;
+
+	scb_index = ahc_inb(ahc, SCB_TAG);
+	scb = ahc_lookup_scb(ahc, scb_index);
+	/*
+	 * XXX Actually check data direction in the sequencer?
+	 * Perhaps add datadir to some spare bits in the hscb?
+	 */
+	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
+	 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
+		/*
+		 * Ignore the message if we haven't
+		 * seen an appropriate data phase yet.
+		 */
+	} else {
+		/*
+		 * If the residual occurred on the last
+		 * transfer and the transfer request was
+		 * expected to end on an odd count, do
+		 * nothing.  Otherwise, subtract a byte
+		 * and update the residual count accordingly.
+		 */
+		uint32_t sgptr;
+
+		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
+		if ((sgptr & SG_LIST_NULL) != 0
+		 && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) {
+			/*
+			 * If the residual occurred on the last
+			 * transfer and the transfer request was
+			 * expected to end on an odd count, do
+			 * nothing.
+			 */
+		} else {
+			struct ahc_dma_seg *sg;
+			uint32_t data_cnt;
+			uint32_t data_addr;
+			uint32_t sglen;
+
+			/* Pull in all of the sgptr */
+			sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR);
+			data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT);
+
+			if ((sgptr & SG_LIST_NULL) != 0) {
+				/*
+				 * The residual data count is not updated
+				 * for the command run to completion case.
+				 * Explicitly zero the count.
+				 */
+				data_cnt &= ~AHC_SG_LEN_MASK;
+			}
+
+			data_addr = ahc_inl(ahc, SHADDR);
+
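+			/*
+			 * Restore the ignored byte: count one more byte of
+			 * residual and back the host address up by one.
+			 */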
+			data_cnt += 1;
+			data_addr -= 1;
+			sgptr &= SG_PTR_MASK;
+
+			sg = ahc_sg_bus_to_virt(scb, sgptr);
+
+			/*
+			 * The residual sg ptr points to the next S/G
+			 * to load so we must go back one.
+			 */
+			sg--;
+			sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
+			if (sg != scb->sg_list
+			 && sglen < (data_cnt & AHC_SG_LEN_MASK)) {
+
+				sg--;
+				sglen = ahc_le32toh(sg->len);
+				/*
+				 * Preserve High Address and SG_LIST bits
+				 * while setting the count to 1.
+				 */
+				data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK));
+				data_addr = ahc_le32toh(sg->addr)
+					  + (sglen & AHC_SG_LEN_MASK) - 1;
+
+				/*
+				 * Increment sg so it points to the
+				 * "next" sg.
+				 */
+				sg++;
+				sgptr = ahc_sg_virt_to_bus(scb, sg);
+			}
+			ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr);
+			ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
+			/*
+			 * Toggle the "oddness" of the transfer length
+			 * to handle this mid-transfer ignore wide
+			 * residue.  This ensures that the oddness is
+			 * correct for subsequent data transfers.
+			 */
+			ahc_outb(ahc, SCB_LUN,
+				 ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD);
+		}
+	}
+}
+
+
+/*
+ * Reinitialize the data pointers for the active transfer
+ * based on its current residual.
+ */
+static void
+ahc_reinitialize_dataptrs(struct ahc_softc *ahc)
+{
+	struct	 scb *scb;
+	struct	 ahc_dma_seg *sg;
+	u_int	 scb_index;
+	uint32_t sgptr;
+	uint32_t resid;
+	uint32_t dataptr;
+
+	scb_index = ahc_inb(ahc, SCB_TAG);
+	scb = ahc_lookup_scb(ahc, scb_index);
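+	/* Reassemble the 32-bit residual S/G pointer from its byte registers. */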
+	sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
+	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
+	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
+	      |	ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
+
+	sgptr &= SG_PTR_MASK;
+	sg = ahc_sg_bus_to_virt(scb, sgptr);
+
+	/* The residual sg_ptr always points to the next sg */
+	sg--;
+
+	resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
+	      | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
+	      | ahc_inb(ahc, SCB_RESIDUAL_DATACNT);
+
+	dataptr = ahc_le32toh(sg->addr)
+		+ (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK)
+		- resid;
+	if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+		u_int dscommand1;
+
+		dscommand1 = ahc_inb(ahc, DSCOMMAND1);
+		ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
+		ahc_outb(ahc, HADDR,
+			 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS);
+		ahc_outb(ahc, DSCOMMAND1, dscommand1);
+	}
+	ahc_outb(ahc, HADDR + 3, dataptr >> 24);
+	ahc_outb(ahc, HADDR + 2, dataptr >> 16);
+	ahc_outb(ahc, HADDR + 1, dataptr >> 8);
+	ahc_outb(ahc, HADDR, dataptr);
+	ahc_outb(ahc, HCNT + 2, resid >> 16);
+	ahc_outb(ahc, HCNT + 1, resid >> 8);
+	ahc_outb(ahc, HCNT, resid);
+	if ((ahc->features & AHC_ULTRA2) == 0) {
+		ahc_outb(ahc, STCNT + 2, resid >> 16);
+		ahc_outb(ahc, STCNT + 1, resid >> 8);
+		ahc_outb(ahc, STCNT, resid);
+	}
+}
+
+/*
+ * Handle the effects of issuing a bus device reset message.
+ */
+static void
+ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+		    cam_status status, char *message, int verbose_level)
+{
+#ifdef AHC_TARGET_MODE
+	struct ahc_tmode_tstate* tstate;
+	u_int lun;
+#endif
+	int found;
+
+	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
+			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
+			       status);
+
+#ifdef AHC_TARGET_MODE
+	/*
+	 * Send an immediate notify ccb to all target mode peripheral
+	 * drivers affected by this action.
+	 */
+	tstate = ahc->enabled_targets[devinfo->our_scsiid];
+	if (tstate != NULL) {
+		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
+			struct ahc_tmode_lstate* lstate;
+
+			lstate = tstate->enabled_luns[lun];
+			if (lstate == NULL)
+				continue;
+
+			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
+					       MSG_BUS_DEV_RESET, /*arg*/0);
+			ahc_send_lstate_events(ahc, lstate);
+		}
+	}
+#endif
+
+	/*
+	 * Go back to async/narrow transfers and renegotiate.
+	 */
+	ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+		      AHC_TRANS_CUR, /*paused*/TRUE);
+	ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
+			 /*period*/0, /*offset*/0, /*ppr_options*/0,
+			 AHC_TRANS_CUR, /*paused*/TRUE);
+	
+	ahc_send_async(ahc, devinfo->channel, devinfo->target,
+		       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
+
+	if (message != NULL
+	 && (verbose_level <= bootverbose))
+		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
+		       message, devinfo->channel, devinfo->target, found);
+}
+
+#ifdef AHC_TARGET_MODE
+static void
+ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+		       struct scb *scb)
+{
+
+	/*              
+	 * To facilitate adding multiple messages together,
+	 * each routine should increment the index and len
+	 * variables instead of setting them explicitly.
+	 */             
+	ahc->msgout_index = 0;
+	ahc->msgout_len = 0;
+
+	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
+		ahc_build_transfer_msg(ahc, devinfo);
+	else
+		panic("ahc_intr: AWAITING target message with no message");
+
+	ahc->msgout_index = 0;
+	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
+}
+#endif
+/**************************** Initialization **********************************/
+/*
+ * Allocate a controller structure for a new device
+ * and perform its initial setup.
+ */
+struct ahc_softc *
+ahc_alloc(void *platform_arg, char *name)
+{
+	struct  ahc_softc *ahc;
+	int	i;
+
+#ifndef	__FreeBSD__
+	ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT);
+	if (!ahc) {
+		printf("aic7xxx: cannot malloc softc!\n");
+		free(name, M_DEVBUF);
+		return NULL;
+	}
+#else
+	ahc = device_get_softc((device_t)platform_arg);
+#endif
+	memset(ahc, 0, sizeof(*ahc));
+	ahc->seep_config = malloc(sizeof(*ahc->seep_config),
+				  M_DEVBUF, M_NOWAIT);
+	if (ahc->seep_config == NULL) {
+#ifndef	__FreeBSD__
+		free(ahc, M_DEVBUF);
+#endif
+		free(name, M_DEVBUF);
+		return (NULL);
+	}
+	LIST_INIT(&ahc->pending_scbs);
+	/* We don't know our unit number until the OSM sets it */
+	ahc->name = name;
+	ahc->unit = -1;
+	ahc->description = NULL;
+	ahc->channel = 'A';
+	ahc->channel_b = 'B';
+	ahc->chip = AHC_NONE;
+	ahc->features = AHC_FENONE;
+	ahc->bugs = AHC_BUGNONE;
+	ahc->flags = AHC_FNONE;
+	/*
+	 * Default to all error reporting enabled with the
+	 * sequencer operating at its fastest speed.
+	 * The bus attach code may modify this.
+	 */
+	ahc->seqctl = FASTMODE;
+
+	for (i = 0; i < AHC_NUM_TARGETS; i++)
+		TAILQ_INIT(&ahc->untagged_queues[i]);
+	if (ahc_platform_alloc(ahc, platform_arg) != 0) {
+		ahc_free(ahc);
+		ahc = NULL;
+	}
+	return (ahc);
+}
+
+int
+ahc_softc_init(struct ahc_softc *ahc)
+{
+
+	/* The IRQMS bit is only valid on VL and EISA chips */
+	if ((ahc->chip & AHC_PCI) == 0)
+		ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
+	else
+		ahc->unpause = 0;
+	ahc->pause = ahc->unpause | PAUSE; 
+	/* XXX The shared scb data stuff should be deprecated */
+	if (ahc->scb_data == NULL) {
+		ahc->scb_data = malloc(sizeof(*ahc->scb_data),
+				       M_DEVBUF, M_NOWAIT);
+		if (ahc->scb_data == NULL)
+			return (ENOMEM);
+		memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
+	}
+
+	return (0);
+}
+
+void
+ahc_softc_insert(struct ahc_softc *ahc)
+{
+	struct ahc_softc *list_ahc;
+
+#if AHC_PCI_CONFIG > 0
+	/*
+	 * Second Function PCI devices need to inherit some
+	 * settings from function 0.
+	 */
+	if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI
+	 && (ahc->features & AHC_MULTI_FUNC) != 0) {
+		TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
+			ahc_dev_softc_t list_pci;
+			ahc_dev_softc_t pci;
+
+			list_pci = list_ahc->dev_softc;
+			pci = ahc->dev_softc;
+			if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci)
+			 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) {
+				struct ahc_softc *master;
+				struct ahc_softc *slave;
+
+				if (ahc_get_pci_function(list_pci) == 0) {
+					master = list_ahc;
+					slave = ahc;
+				} else {
+					master = ahc;
+					slave = list_ahc;
+				}
+				slave->flags &= ~AHC_BIOS_ENABLED; 
+				slave->flags |=
+				    master->flags & AHC_BIOS_ENABLED;
+				slave->flags &= ~AHC_PRIMARY_CHANNEL; 
+				slave->flags |=
+				    master->flags & AHC_PRIMARY_CHANNEL;
+				break;
+			}
+		}
+	}
+#endif
+
+	/*
+	 * Insertion sort into our list of softcs.
+	 */
+	list_ahc = TAILQ_FIRST(&ahc_tailq);
+	while (list_ahc != NULL
+	    && ahc_softc_comp(ahc, list_ahc) <= 0)
+		list_ahc = TAILQ_NEXT(list_ahc, links);
+	if (list_ahc != NULL)
+		TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
+	else
+		TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links);
+	ahc->init_level++;
+}
+
+/*
+ * Verify that the passed in softc pointer is for a
+ * controller that is still configured.
+ */
+struct ahc_softc *
+ahc_find_softc(struct ahc_softc *ahc)
+{
+	struct ahc_softc *list_ahc;
+
+	TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
+		if (list_ahc == ahc)
+			return (ahc);
+	}
+	return (NULL);
+}
+
+void
+ahc_set_unit(struct ahc_softc *ahc, int unit)
+{
+	ahc->unit = unit;
+}
+
+void
+ahc_set_name(struct ahc_softc *ahc, char *name)
+{
+	if (ahc->name != NULL)
+		free(ahc->name, M_DEVBUF);
+	ahc->name = name;
+}
+
+void
+ahc_free(struct ahc_softc *ahc)
+{
+	int i;
+
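+	/* Tear down resources in the reverse order of their allocation. */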
+	switch (ahc->init_level) {
+	default:
+	case 5:
+		ahc_shutdown(ahc);
+		/* FALLTHROUGH */
+	case 4:
+		ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
+				  ahc->shared_data_dmamap);
+		/* FALLTHROUGH */
+	case 3:
+		ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
+				ahc->shared_data_dmamap);
+		ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
+				   ahc->shared_data_dmamap);
+		/* FALLTHROUGH */
+	case 2:
+		ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
+	case 1:
+#ifndef __linux__
+		ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
+#endif
+		break;
+	case 0:
+		break;
+	}
+
+#ifndef __linux__
+	ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
+#endif
+	ahc_platform_free(ahc);
+	ahc_fini_scbdata(ahc);
+	for (i = 0; i < AHC_NUM_TARGETS; i++) {
+		struct ahc_tmode_tstate *tstate;
+
+		tstate = ahc->enabled_targets[i];
+		if (tstate != NULL) {
+#ifdef AHC_TARGET_MODE
+			int j;
+
+			for (j = 0; j < AHC_NUM_LUNS; j++) {
+				struct ahc_tmode_lstate *lstate;
+
+				lstate = tstate->enabled_luns[j];
+				if (lstate != NULL) {
+					xpt_free_path(lstate->path);
+					free(lstate, M_DEVBUF);
+				}
+			}
+#endif
+			free(tstate, M_DEVBUF);
+		}
+	}
+#ifdef AHC_TARGET_MODE
+	if (ahc->black_hole != NULL) {
+		xpt_free_path(ahc->black_hole->path);
+		free(ahc->black_hole, M_DEVBUF);
+	}
+#endif
+	if (ahc->name != NULL)
+		free(ahc->name, M_DEVBUF);
+	if (ahc->seep_config != NULL)
+		free(ahc->seep_config, M_DEVBUF);
+#ifndef __FreeBSD__
+	free(ahc, M_DEVBUF);
+#endif
+	return;
+}
+
+void
+ahc_shutdown(void *arg)
+{
+	struct	ahc_softc *ahc;
+	int	i;
+
+	ahc = (struct ahc_softc *)arg;
+
+	/* This will reset most registers to 0, but not all */
+	ahc_reset(ahc, /*reinit*/FALSE);
+	ahc_outb(ahc, SCSISEQ, 0);
+	ahc_outb(ahc, SXFRCTL0, 0);
+	ahc_outb(ahc, DSPCISTATUS, 0);
+
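+	/* Clear the scratch RAM transfer settings (TARG_SCSIRATE up to SCSICONF). */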
+	for (i = TARG_SCSIRATE; i < SCSICONF; i++)
+		ahc_outb(ahc, i, 0);
+}
+
+/*
+ * Reset the controller and record some information about it
+ * that is only available just after a reset.  If "reinit" is
+ * non-zero, this reset occurred after initial configuration
+ * and the caller requests that the chip be fully reinitialized
+ * to a runnable state.  Chip interrupts are *not* enabled after
+ * a reinitialization.  The caller must enable interrupts via
+ * ahc_intr_enable().
+ */
+int
+ahc_reset(struct ahc_softc *ahc, int reinit)
+{
+	u_int	sblkctl;
+	u_int	sxfrctl1_a, sxfrctl1_b;
+	int	error;
+	int	wait;
+	
+	/*
+	 * Preserve the value of the SXFRCTL1 register for all channels.
+	 * It contains settings that affect termination and we don't want
+	 * to disturb the integrity of the bus.
+	 */
+	ahc_pause(ahc);
+	if ((ahc_inb(ahc, HCNTRL) & CHIPRST) != 0) {
+		/*
+		 * The chip has not been initialized since
+		 * PCI/EISA/VLB bus reset.  Don't trust
+		 * "left over BIOS data".
+		 */
+		ahc->flags |= AHC_NO_BIOS_INIT;
+	}
+	sxfrctl1_b = 0;
+	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
+		u_int sblkctl;
+
+		/*
+		 * Save channel B's settings in case this chip
+		 * is set up for TWIN channel operation.
+		 */
+		sblkctl = ahc_inb(ahc, SBLKCTL);
+		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
+		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
+		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
+	}
+	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);
+
+	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
+
+	/*
+	 * Ensure that the reset has finished.  We delay 1000us
+	 * prior to reading the register to make sure the chip
+	 * has sufficiently completed its reset to handle register
+	 * accesses.
+	 */
+	wait = 1000;
+	do {
+		ahc_delay(1000);
+	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
+
+	if (wait == 0) {
+		printf("%s: WARNING - Failed chip reset!  "
+		       "Trying to initialize anyway.\n", ahc_name(ahc));
+	}
+	ahc_outb(ahc, HCNTRL, ahc->pause);
+
+	/* Determine channel configuration */
+	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
+	/* No Twin Channel PCI cards */
+	if ((ahc->chip & AHC_PCI) != 0)
+		sblkctl &= ~SELBUSB;
+	switch (sblkctl) {
+	case 0:
+		/* Single Narrow Channel */
+		break;
+	case 2:
+		/* Wide Channel */
+		ahc->features |= AHC_WIDE;
+		break;
+	case 8:
+		/* Twin Channel */
+		ahc->features |= AHC_TWIN;
+		break;
+	default:
+		printf(" Unsupported adapter type.  Ignoring\n");
+		return (-1);
+	}
+
+	/*
+	 * Reload sxfrctl1.
+	 *
+	 * We must always initialize STPWEN to 1 before we
+	 * restore the saved values.  STPWEN is initialized
+	 * to a tri-state condition which can only be cleared
+	 * by turning it on.
+	 */
+	if ((ahc->features & AHC_TWIN) != 0) {
+		u_int sblkctl;
+
+		sblkctl = ahc_inb(ahc, SBLKCTL);
+		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
+		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
+		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
+	}
+	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);
+
+	error = 0;
+	if (reinit != 0)
+		/*
+		 * If a recovery action has forced a chip reset,
+		 * re-initialize the chip to our liking.
+		 */
+		error = ahc->bus_chip_init(ahc);
+#ifdef AHC_DUMP_SEQ
+	else 
+		ahc_dumpseq(ahc);
+#endif
+
+	return (error);
+}
+
+/*
+ * Determine the number of SCBs available on the controller
+ */
+int
+ahc_probe_scbs(struct ahc_softc *ahc)
+{
+	int i;
+
+	for (i = 0; i < AHC_SCB_MAX; i++) {
+
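+		/*
+		 * Write the index into this SCB slot and verify it reads
+		 * back; also verify that slot 0 still holds zero, which
+		 * catches address wrap-around past the end of SCB RAM.
+		 */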
+		ahc_outb(ahc, SCBPTR, i);
+		ahc_outb(ahc, SCB_BASE, i);
+		if (ahc_inb(ahc, SCB_BASE) != i)
+			break;
+		ahc_outb(ahc, SCBPTR, 0);
+		if (ahc_inb(ahc, SCB_BASE) != 0)
+			break;
+	}
+	return (i);
+}
+
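+/*
+ * Callback invoked when a static DMA mapping is loaded; it simply
+ * records the bus address of the mapping's single segment.
+ */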
+static void
+ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 
+{
+	dma_addr_t *baddr;
+
+	baddr = (dma_addr_t *)arg;
+	*baddr = segs->ds_addr;
+}
+
+static void
+ahc_build_free_scb_list(struct ahc_softc *ahc)
+{
+	int scbsize;
+	int i;
+
+	scbsize = 32;
+	if ((ahc->flags & AHC_LSCBS_ENABLED) != 0)
+		scbsize = 64;
+
+	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
+		int j;
+
+		ahc_outb(ahc, SCBPTR, i);
+
+		/*
+		 * Touch all SCB bytes to avoid parity errors
+		 * should one of our debugging routines read
+		 * an otherwise uninitialized byte.
+		 */
+		for (j = 0; j < scbsize; j++)
+			ahc_outb(ahc, SCB_BASE+j, 0xFF);
+
+		/* Clear the control byte. */
+		ahc_outb(ahc, SCB_CONTROL, 0);
+
+		/* Set the next pointer */
+		if ((ahc->flags & AHC_PAGESCBS) != 0)
+			ahc_outb(ahc, SCB_NEXT, i+1);
+		else 
+			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
+
+		/* Make the tag number, SCSIID, and lun invalid */
+		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
+		ahc_outb(ahc, SCB_SCSIID, 0xFF);
+		ahc_outb(ahc, SCB_LUN, 0xFF);
+	}
+
+	if ((ahc->flags & AHC_PAGESCBS) != 0) {
+		/* SCB 0 heads the free list. */
+		ahc_outb(ahc, FREE_SCBH, 0);
+	} else {
+		/* No free list. */
+		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
+	}
+
+	/* Make sure that the last SCB terminates the free list */
+	ahc_outb(ahc, SCBPTR, i-1);
+	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
+}
+
+static int
+ahc_init_scbdata(struct ahc_softc *ahc)
+{
+	struct scb_data *scb_data;
+
+	scb_data = ahc->scb_data;
+	SLIST_INIT(&scb_data->free_scbs);
+	SLIST_INIT(&scb_data->sg_maps);
+
+	/* Allocate SCB resources */
+	scb_data->scbarray =
+	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
+				 M_DEVBUF, M_NOWAIT);
+	if (scb_data->scbarray == NULL)
+		return (ENOMEM);
+	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);
+
+	/* Determine the number of hardware SCBs and initialize them */
+
+	scb_data->maxhscbs = ahc_probe_scbs(ahc);
+	if (ahc->scb_data->maxhscbs == 0) {
+		printf("%s: No SCB space found\n", ahc_name(ahc));
+		return (ENXIO);
+	}
+
+	/*
+	 * Create our DMA tags.  These tags define the kinds of device
+	 * accessible memory allocations and memory mappings we will
+	 * need to perform during normal operation.
+	 *
+	 * Unless we need to further restrict the allocation, we rely
+	 * on the restrictions of the parent dmat, hence the common
+	 * use of MAXADDR and MAXSIZE.
+	 */
+
+	/* DMA tag for our hardware scb structures */
+	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
+			       /*nsegments*/1,
+			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
+		goto error_exit;
+	}
+
+	scb_data->init_level++;
+
+	/* Allocation for our hscbs */
+	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
+			     (void **)&scb_data->hscbs,
+			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
+		goto error_exit;
+	}
+
+	scb_data->init_level++;
+
+	/* And permanently map them */
+	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
+			scb_data->hscbs,
+			AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
+			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);
+
+	scb_data->init_level++;
+
+	/* DMA tag for our sense buffers */
+	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
+			       /*nsegments*/1,
+			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+			       /*flags*/0, &scb_data->sense_dmat) != 0) {
+		goto error_exit;
+	}
+
+	scb_data->init_level++;
+
+	/* Allocate them */
+	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
+			     (void **)&scb_data->sense,
+			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
+		goto error_exit;
+	}
+
+	scb_data->init_level++;
+
+	/* And permanently map them */
+	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
+			scb_data->sense,
+			AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
+			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);
+
+	scb_data->init_level++;
+
+	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
+	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       PAGE_SIZE, /*nsegments*/1,
+			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+			       /*flags*/0, &scb_data->sg_dmat) != 0) {
+		goto error_exit;
+	}
+
+	scb_data->init_level++;
+
+	/* Perform initial CCB allocation */
+	memset(scb_data->hscbs, 0,
+	       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb));
+	ahc_alloc_scbs(ahc);
+
+	if (scb_data->numscbs == 0) {
+		printf("%s: ahc_init_scbdata - "
+		       "Unable to allocate initial scbs\n",
+		       ahc_name(ahc));
+		goto error_exit;
+	}
+
+	/*
+	 * Reserve the next queued SCB.
+	 */
+	ahc->next_queued_scb = ahc_get_scb(ahc);
+
+	/*
+	 * Note that we were successful.
+	 */
+	return (0); 
+
+error_exit:
+
+	return (ENOMEM);
+}
+
+static void
+ahc_fini_scbdata(struct ahc_softc *ahc)
+{
+	struct scb_data *scb_data;
+
+	scb_data = ahc->scb_data;
+	if (scb_data == NULL)
+		return;
+
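+	/* Release SCB resources in the reverse order of their allocation. */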
+	switch (scb_data->init_level) {
+	default:
+	case 7:
+	{
+		struct sg_map_node *sg_map;
+
+		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
+			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
+			ahc_dmamap_unload(ahc, scb_data->sg_dmat,
+					  sg_map->sg_dmamap);
+			ahc_dmamem_free(ahc, scb_data->sg_dmat,
+					sg_map->sg_vaddr,
+					sg_map->sg_dmamap);
+			free(sg_map, M_DEVBUF);
+		}
+		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
+	}
+	case 6:
+		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
+				  scb_data->sense_dmamap);
+	case 5:
+		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
+				scb_data->sense_dmamap);
+		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
+				   scb_data->sense_dmamap);
+	case 4:
+		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
+	case 3:
+		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
+				  scb_data->hscb_dmamap);
+	case 2:
+		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
+				scb_data->hscb_dmamap);
+		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
+				   scb_data->hscb_dmamap);
+	case 1:
+		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
+		break;
+	case 0:
+		break;
+	}
+	if (scb_data->scbarray != NULL)
+		free(scb_data->scbarray, M_DEVBUF);
+}
+
+void
+ahc_alloc_scbs(struct ahc_softc *ahc)
+{
+	struct scb_data *scb_data;
+	struct scb *next_scb;
+	struct sg_map_node *sg_map;
+	dma_addr_t physaddr;
+	struct ahc_dma_seg *segs;
+	int newcount;
+	int i;
+
+	scb_data = ahc->scb_data;
+	if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
+		/* Can't allocate any more */
+		return;
+
+	next_scb = &scb_data->scbarray[scb_data->numscbs];
+
+	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
+
+	if (sg_map == NULL)
+		return;
+
+	/* Allocate S/G space for the next batch of SCBS */
+	if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
+			     (void **)&sg_map->sg_vaddr,
+			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
+		free(sg_map, M_DEVBUF);
+		return;
+	}
+
+	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
+
+	ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
+			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
+			&sg_map->sg_physaddr, /*flags*/0);
+
+	segs = sg_map->sg_vaddr;
+	physaddr = sg_map->sg_physaddr;
+
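+	/*
+	 * Carve the new page of S/G space into per-SCB lists of AHC_NSEG
+	 * entries, capped by the number of SCBs we may still allocate.
+	 */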
+	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
+	newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
+	for (i = 0; i < newcount; i++) {
+		struct scb_platform_data *pdata;
+#ifndef __linux__
+		int error;
+#endif
+		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
+							   M_DEVBUF, M_NOWAIT);
+		if (pdata == NULL)
+			break;
+		next_scb->platform_data = pdata;
+		next_scb->sg_map = sg_map;
+		next_scb->sg_list = segs;
+		/*
+		 * The sequencer always starts with the second entry.
+		 * The first entry is embedded in the scb.
+		 */
+		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
+		next_scb->ahc_softc = ahc;
+		next_scb->flags = SCB_FREE;
+#ifndef __linux__
+		error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
+					  &next_scb->dmamap);
+		if (error != 0)
+			break;
+#endif
+		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
+		next_scb->hscb->tag = ahc->scb_data->numscbs;
+		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
+				  next_scb, links.sle);
+		segs += AHC_NSEG;
+		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
+		next_scb++;
+		ahc->scb_data->numscbs++;
+	}
+}
+
+void
+ahc_controller_info(struct ahc_softc *ahc, char *buf)
+{
+	int len;
+
+	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
+	buf += len;
+	if ((ahc->features & AHC_TWIN) != 0)
+ 		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
+			      "B SCSI Id=%d, primary %c, ",
+			      ahc->our_id, ahc->our_id_b,
+			      (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
+	else {
+		const char *speed;
+		const char *type;
+
+		speed = "";
+		if ((ahc->features & AHC_ULTRA) != 0) {
+			speed = "Ultra ";
+		} else if ((ahc->features & AHC_DT) != 0) {
+			speed = "Ultra160 ";
+		} else if ((ahc->features & AHC_ULTRA2) != 0) {
+			speed = "Ultra2 ";
+		}
+		if ((ahc->features & AHC_WIDE) != 0) {
+			type = "Wide";
+		} else {
+			type = "Single";
+		}
+		len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
+			      speed, type, ahc->channel, ahc->our_id);
+	}
+	buf += len;
+
+	if ((ahc->flags & AHC_PAGESCBS) != 0)
+		sprintf(buf, "%d/%d SCBs",
+			ahc->scb_data->maxhscbs, AHC_MAX_QUEUE);
+	else
+		sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
+}
+
+int
+ahc_chip_init(struct ahc_softc *ahc)
+{
+	int	 term;
+	int	 error;
+	u_int	 i;
+	u_int	 scsi_conf;
+	u_int	 scsiseq_template;
+	uint32_t physaddr;
+
+	ahc_outb(ahc, SEQ_FLAGS, 0);
+	ahc_outb(ahc, SEQ_FLAGS2, 0);
+
+	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
+	if (ahc->features & AHC_TWIN) {
+
+		/*
+		 * Setup Channel B first.
+		 */
+		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
+		term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
+		ahc_outb(ahc, SCSIID, ahc->our_id_b);
+		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
+		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
+					|term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
+		if ((ahc->features & AHC_ULTRA2) != 0)
+			ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
+		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
+		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
+
+		/* Select Channel A */
+		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
+	}
+	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
+	if ((ahc->features & AHC_ULTRA2) != 0)
+		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
+	else
+		ahc_outb(ahc, SCSIID, ahc->our_id);
+	scsi_conf = ahc_inb(ahc, SCSICONF);
+	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
+				|term|ahc->seltime
+				|ENSTIMER|ACTNEGEN);
+	if ((ahc->features & AHC_ULTRA2) != 0)
+		ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
+	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
+	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
+
+	/* There are no untagged SCBs active yet. */
+	for (i = 0; i < 16; i++) {
+		ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
+		if ((ahc->flags & AHC_SCB_BTT) != 0) {
+			int lun;
+
+			/*
+			 * The SCB based BTT allows an entry per
+			 * target and lun pair.
+			 */
+			for (lun = 1; lun < AHC_NUM_LUNS; lun++)
+				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
+		}
+	}
+
+	/* All of our queues are empty */
+	for (i = 0; i < 256; i++)
+		ahc->qoutfifo[i] = SCB_LIST_NULL;
+	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD);
+
+	for (i = 0; i < 256; i++)
+		ahc->qinfifo[i] = SCB_LIST_NULL;
+
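+	/* Start out responding to selection as no target ID. */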
+	if ((ahc->features & AHC_MULTI_TID) != 0) {
+		ahc_outb(ahc, TARGID, 0);
+		ahc_outb(ahc, TARGID + 1, 0);
+	}
+
+	/*
+	 * Tell the sequencer where it can find our arrays in memory.
+	 */
+	physaddr = ahc->scb_data->hscb_busaddr;
+	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
+	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
+	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
+	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
+
+	physaddr = ahc->shared_data_busaddr;
+	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
+	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
+	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
+	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
+
+	/*
+	 * Initialize the group code to command length table.
+	 * This overrides the values in TARG_SCSIRATE, so only
+	 * set up the table after we have processed that information.
+	 */
+	ahc_outb(ahc, CMDSIZE_TABLE, 5);
+	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
+	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
+	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
+	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
+	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
+	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
+	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
+		
+	if ((ahc->features & AHC_HS_MAILBOX) != 0)
+		ahc_outb(ahc, HS_MAILBOX, 0);
+
+	/* Tell the sequencer of our initial queue positions */
+	if ((ahc->features & AHC_TARGETMODE) != 0) {
+		ahc->tqinfifonext = 1;
+		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
+		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
+	}
+	ahc->qinfifonext = 0;
+	ahc->qoutfifonext = 0;
+	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
+		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
+		ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext);
+		ahc_outb(ahc, SDSCB_QOFF, 0);
+	} else {
+		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
+		ahc_outb(ahc, QINPOS, ahc->qinfifonext);
+		ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext);
+	}
+
+	/* We don't have any waiting selections */
+	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
+
+	/* Our disconnection list is empty too */
+	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
+
+	/* Message out buffer starts empty */
+	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
+
+	/*
+	 * Set up the allowed SCSI sequences based on operational mode.
+	 * If we are a target, we'll enable select-in operations once
+	 * we've had a lun enabled.
+	 */
+	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
+	if ((ahc->flags & AHC_INITIATORROLE) != 0)
+		scsiseq_template |= ENRSELI;
+	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
+
+	/* Initialize our list of free SCBs. */
+	ahc_build_free_scb_list(ahc);
+
+	/*
+	 * Tell the sequencer which SCB will be the next one it receives.
+	 */
+	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
+
+	/*
+	 * Load the Sequencer program and Enable the adapter
+	 * in "fast" mode.
+	 */
+	if (bootverbose)
+		printf("%s: Downloading Sequencer Program...",
+		       ahc_name(ahc));
+
+	error = ahc_loadseq(ahc);
+	if (error != 0)
+		return (error);
+
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		int wait;
+
+		/*
+		 * Wait for up to 500ms for our transceivers
+		 * to settle.  If the adapter does not have
+		 * a cable attached, the transceivers may
+		 * never settle, so don't complain if we
+		 * fail here.
+		 */
+		for (wait = 5000;
+		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
+		     wait--)
+			ahc_delay(100);
+	}
+	ahc_restart(ahc);
+	return (0);
+}
+
+/*
+ * Start the board, ready for normal operation
+ */
+int
+ahc_init(struct ahc_softc *ahc)
+{
+	int	 max_targ;
+	u_int	 i;
+	u_int	 scsi_conf;
+	u_int	 ultraenb;
+	u_int	 discenable;
+	u_int	 tagenable;
+	size_t	 driver_data_size;
+
+#ifdef AHC_DEBUG
+	if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0)
+		ahc->flags |= AHC_SEQUENCER_DEBUG;
+#endif
+
+#ifdef AHC_PRINT_SRAM
+	printf("Scratch Ram:");
+	for (i = 0x20; i < 0x5f; i++) {
+		if (((i % 8) == 0) && (i != 0)) {
+			printf ("\n              ");
+		}
+		printf (" 0x%x", ahc_inb(ahc, i));
+	}
+	if ((ahc->features & AHC_MORE_SRAM) != 0) {
+		for (i = 0x70; i < 0x7f; i++) {
+			if (((i % 8) == 0) && (i != 0)) {
+				printf ("\n              ");
+			}
+			printf (" 0x%x", ahc_inb(ahc, i));
+		}
+	}
+	printf ("\n");
+	/*
+	 * Reading uninitialized scratch ram may
+	 * generate parity errors.
+	 */
+	ahc_outb(ahc, CLRINT, CLRPARERR);
+	ahc_outb(ahc, CLRINT, CLRBRKADRINT);
+#endif
+	max_targ = 15;
+
+	/*
+	 * Assume we have a board at this stage and it has been reset.
+	 */
+	if ((ahc->flags & AHC_USEDEFAULTS) != 0)
+		ahc->our_id = ahc->our_id_b = 7;
+	
+	/*
+	 * Default to allowing initiator operations.
+	 */
+	ahc->flags |= AHC_INITIATORROLE;
+
+	/*
+	 * Only allow target mode features if this unit has them enabled.
+	 */
+	if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
+		ahc->features &= ~AHC_TARGETMODE;
+
+#ifndef __linux__
+	/* DMA tag for mapping buffers into device visible space. */
+	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
+					? (dma_addr_t)0x7FFFFFFFFFULL
+					: BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
+			       /*nsegments*/AHC_NSEG,
+			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
+			       /*flags*/BUS_DMA_ALLOCNOW,
+			       &ahc->buffer_dmat) != 0) {
+		return (ENOMEM);
+	}
+#endif
+
+	ahc->init_level++;
+
+	/*
+	 * DMA tag for our command fifos and other data in system memory
+	 * the card's sequencer must be able to access.  For initiator
+	 * roles, we need to allocate space for the qinfifo and qoutfifo.
+	 * The qinfifo and qoutfifo are composed of 256 1-byte elements.
+	 * When providing for the target mode role, we must additionally
+	 * provide space for the incoming target command fifo and an extra
+	 * byte to deal with a dma bug in some chip versions.
+	 */
+	driver_data_size = 2 * 256 * sizeof(uint8_t);
+	if ((ahc->features & AHC_TARGETMODE) != 0)
+		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
+				 + /*DMA WideOdd Bug Buffer*/1;
+	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
+			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
+			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+			       /*highaddr*/BUS_SPACE_MAXADDR,
+			       /*filter*/NULL, /*filterarg*/NULL,
+			       driver_data_size,
+			       /*nsegments*/1,
+			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
+			       /*flags*/0, &ahc->shared_data_dmat) != 0) {
+		return (ENOMEM);
+	}
+
+	ahc->init_level++;
+
+	/* Allocation of driver data */
+	if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
+			     (void **)&ahc->qoutfifo,
+			     BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
+		return (ENOMEM);
+	}
+
+	ahc->init_level++;
+
+	/* And permanently map it in */
+	ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
+			ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
+			&ahc->shared_data_busaddr, /*flags*/0);
+
+	if ((ahc->features & AHC_TARGETMODE) != 0) {
+		ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
+		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
+		ahc->dma_bug_buf = ahc->shared_data_busaddr
+				 + driver_data_size - 1;
+		/* All target command blocks start out invalid. */
+		for (i = 0; i < AHC_TMODE_CMDS; i++)
+			ahc->targetcmds[i].cmd_valid = 0;
+		ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD);
+		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
+	}
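+	/* The qinfifo occupies the 256 bytes immediately after the qoutfifo. */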
+	ahc->qinfifo = &ahc->qoutfifo[256];
+
+	ahc->init_level++;
+
+	/* Allocate SCB data now that buffer_dmat is initialized */
+	if (ahc->scb_data->maxhscbs == 0)
+		if (ahc_init_scbdata(ahc) != 0)
+			return (ENOMEM);
+
+	/*
+	 * Allocate a tstate to house information for our
+	 * initiator presence on the bus as well as the user
+	 * data for any target mode initiator.
+	 */
+	if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
+		printf("%s: unable to allocate ahc_tmode_tstate.  "
+		       "Failing attach\n", ahc_name(ahc));
+		return (ENOMEM);
+	}
+
+	if ((ahc->features & AHC_TWIN) != 0) {
+		if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
+			printf("%s: unable to allocate ahc_tmode_tstate.  "
+			       "Failing attach\n", ahc_name(ahc));
+			return (ENOMEM);
+		}
+	}
+
+	if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) {
+		ahc->flags |= AHC_PAGESCBS;
+	} else {
+		ahc->flags &= ~AHC_PAGESCBS;
+	}
+
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_MISC) {
+		printf("%s: hardware scb %u bytes; kernel scb %u bytes; "
+		       "ahc_dma %u bytes\n",
+			ahc_name(ahc),
+			(u_int)sizeof(struct hardware_scb),
+			(u_int)sizeof(struct scb),
+			(u_int)sizeof(struct ahc_dma_seg));
+	}
+#endif /* AHC_DEBUG */
+
+	/*
+	 * Look at the information that board initialization or
+	 * the board bios has left us.
+	 */
+	if (ahc->features & AHC_TWIN) {
+		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
+		if ((scsi_conf & RESET_SCSI) != 0
+		 && (ahc->flags & AHC_INITIATORROLE) != 0)
+			ahc->flags |= AHC_RESET_BUS_B;
+	}
+
+	scsi_conf = ahc_inb(ahc, SCSICONF);
+	if ((scsi_conf & RESET_SCSI) != 0
+	 && (ahc->flags & AHC_INITIATORROLE) != 0)
+		ahc->flags |= AHC_RESET_BUS_A;
+
+	ultraenb = 0;	
+	tagenable = ALL_TARGETS_MASK;
+
+	/* Grab the disconnection disable table and invert it for our needs */
+	if ((ahc->flags & AHC_USEDEFAULTS) != 0) {
+		printf("%s: Host Adapter Bios disabled.  Using default SCSI "
+			"device parameters\n", ahc_name(ahc));
+		ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
+			      AHC_TERM_ENB_A|AHC_TERM_ENB_B;
+		discenable = ALL_TARGETS_MASK;
+		if ((ahc->features & AHC_ULTRA) != 0)
+			ultraenb = ALL_TARGETS_MASK;
+	} else {
+		discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
+			   | ahc_inb(ahc, DISC_DSB));
+		if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
+			ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
+				      | ahc_inb(ahc, ULTRA_ENB);
+	}
+
+	if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
+		max_targ = 7;
+
+	for (i = 0; i <= max_targ; i++) {
+		struct ahc_initiator_tinfo *tinfo;
+		struct ahc_tmode_tstate *tstate;
+		u_int our_id;
+		u_int target_id;
+		char channel;
+
+		channel = 'A';
+		our_id = ahc->our_id;
+		target_id = i;
+		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
+			channel = 'B';
+			our_id = ahc->our_id_b;
+			target_id = i % 8;
+		}
+		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
+					    target_id, &tstate);
+		/* Default to async narrow across the board */
+		memset(tinfo, 0, sizeof(*tinfo));
+		if (ahc->flags & AHC_USEDEFAULTS) {
+			if ((ahc->features & AHC_WIDE) != 0)
+				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
+
+			/*
+			 * These will be truncated when we determine the
+			 * connection type we have with the target.
+			 */
+			tinfo->user.period = ahc_syncrates->period;
+			tinfo->user.offset = MAX_OFFSET;
+		} else {
+			u_int scsirate;
+			uint16_t mask;
+
+			/* Take the settings leftover in scratch RAM. */
+			scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
+			mask = (0x01 << i);
+			if ((ahc->features & AHC_ULTRA2) != 0) {
+				u_int offset;
+				u_int maxsync;
+
+				if ((scsirate & SOFS) == 0x0F) {
+					/*
+					 * Haven't negotiated yet,
+					 * so the format is different.
+					 */
+					scsirate = ((scsirate & SXFR) >> 4)
+						 | ((ultraenb & mask)
+						  ? 0x08 : 0x0)
+						 | (scsirate & WIDEXFER);
+					offset = MAX_OFFSET_ULTRA2;
+				} else
+					offset = ahc_inb(ahc, TARG_OFFSET + i);
+				if ((scsirate & ~WIDEXFER) == 0 && offset != 0)
+					/* Set to the lowest sync rate, 5MHz */
+					scsirate |= 0x1c;
+				maxsync = AHC_SYNCRATE_ULTRA2;
+				if ((ahc->features & AHC_DT) != 0)
+					maxsync = AHC_SYNCRATE_DT;
+				tinfo->user.period =
+				    ahc_find_period(ahc, scsirate, maxsync);
+				if (offset == 0)
+					tinfo->user.period = 0;
+				else
+					tinfo->user.offset = MAX_OFFSET;
+				if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
+				 && (ahc->features & AHC_DT) != 0)
+					tinfo->user.ppr_options =
+					    MSG_EXT_PPR_DT_REQ;
+			} else if ((scsirate & SOFS) != 0) {
+				if ((scsirate & SXFR) == 0x40
+				 && (ultraenb & mask) != 0) {
+					/* Treat 10MHz as a non-ultra speed */
+					scsirate &= ~SXFR;
+				 	ultraenb &= ~mask;
+				}
+				tinfo->user.period = 
+				    ahc_find_period(ahc, scsirate,
+						    (ultraenb & mask)
+						   ? AHC_SYNCRATE_ULTRA
+						   : AHC_SYNCRATE_FAST);
+				if (tinfo->user.period != 0)
+					tinfo->user.offset = MAX_OFFSET;
+			}
+			if (tinfo->user.period == 0)
+				tinfo->user.offset = 0;
+			if ((scsirate & WIDEXFER) != 0
+			 && (ahc->features & AHC_WIDE) != 0)
+				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
+			tinfo->user.protocol_version = 4;
+			if ((ahc->features & AHC_DT) != 0)
+				tinfo->user.transport_version = 3;
+			else
+				tinfo->user.transport_version = 2;
+			tinfo->goal.protocol_version = 2;
+			tinfo->goal.transport_version = 2;
+			tinfo->curr.protocol_version = 2;
+			tinfo->curr.transport_version = 2;
+		}
+		tstate->ultraenb = 0;
+	}
+	ahc->user_discenable = discenable;
+	ahc->user_tagenable = tagenable;
+
+	return (ahc->bus_chip_init(ahc));
+}
+
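+/*
+ * Enable or disable interrupt generation on the controller and keep
+ * the cached pause/unpause values of HCNTRL in sync so that later
+ * pause and unpause operations preserve the interrupt setting.
+ */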
+void
+ahc_intr_enable(struct ahc_softc *ahc, int enable)
+{
+	u_int hcntrl;
+
+	hcntrl = ahc_inb(ahc, HCNTRL);
+	hcntrl &= ~INTEN;
+	ahc->pause &= ~INTEN;
+	ahc->unpause &= ~INTEN;
+	if (enable) {
+		hcntrl |= INTEN;
+		ahc->pause |= INTEN;
+		ahc->unpause |= INTEN;
+	}
+	ahc_outb(ahc, HCNTRL, hcntrl);
+}
+
+/*
+ * Ensure that the card is paused in a location
+ * outside of all critical sections and that all
+ * pending work is completed prior to returning.
+ * This routine should only be called from outside
+ * an interrupt context.
+ */
+void
+ahc_pause_and_flushwork(struct ahc_softc *ahc)
+{
+	int intstat;
+	int maxloops;
+	int paused;
+
+	maxloops = 1000;
+	ahc->flags |= AHC_ALL_INTERRUPTS;
+	paused = FALSE;
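+	/*
+	 * Alternately service and pause the controller until no
+	 * interrupts are pending and no selection is in progress.
+	 * An INTSTAT value of 0xFF on a removable controller
+	 * presumably means the card has been pulled, so give up
+	 * in that case rather than loop on bogus data.
+	 */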
+	do {
+		if (paused)
+			ahc_unpause(ahc);
+		ahc_intr(ahc);
+		ahc_pause(ahc);
+		paused = TRUE;
+		ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
+		ahc_clear_critical_section(ahc);
+		intstat = ahc_inb(ahc, INTSTAT);
+	} while (--maxloops
+	      && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0)
+	      && ((intstat & INT_PEND) != 0
+	       || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0));
+	if (maxloops == 0) {
+		printf("Infinite interrupt loop, INTSTAT = %x\n",
+		       ahc_inb(ahc, INTSTAT));
+	}
+	ahc_platform_flushwork(ahc);
+	ahc->flags &= ~AHC_ALL_INTERRUPTS;
+}
+
+int
+ahc_suspend(struct ahc_softc *ahc)
+{
+
+	ahc_pause_and_flushwork(ahc);
+
+	if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
+		ahc_unpause(ahc);
+		return (EBUSY);
+	}
+
+#ifdef AHC_TARGET_MODE
+	/*
+	 * XXX What about ATIOs that have not yet been serviced?
+	 * Perhaps we should just refuse to be suspended if we
+	 * are acting in a target role.
+	 */
+	if (ahc->pending_device != NULL) {
+		ahc_unpause(ahc);
+		return (EBUSY);
+	}
+#endif
+	ahc_shutdown(ahc);
+	return (0);
+}
+
+int
+ahc_resume(struct ahc_softc *ahc)
+{
+
+	ahc_reset(ahc, /*reinit*/TRUE);
+	ahc_intr_enable(ahc, TRUE); 
+	ahc_restart(ahc);
+	return (0);
+}
+
+/************************** Busy Target Table *********************************/
+/*
+ * Return the untagged transaction id for a given target/channel lun.
+ */
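+/*
+ * Controllers with SCB based busy target tables (AHC_SCB_BTT) keep one
+ * entry per target in the spare space of the SCB selected by the lun,
+ * giving per target/lun tracking.  All other controllers use a single
+ * scratch ram array indexed by target offset.
+ */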
+u_int
+ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
+{
+	u_int scbid;
+	u_int target_offset;
+
+	if ((ahc->flags & AHC_SCB_BTT) != 0) {
+		u_int saved_scbptr;
+		
+		saved_scbptr = ahc_inb(ahc, SCBPTR);
+		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
+		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
+		ahc_outb(ahc, SCBPTR, saved_scbptr);
+	} else {
+		target_offset = TCL_TARGET_OFFSET(tcl);
+		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
+	}
+
+	return (scbid);
+}
+
+void
+ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
+{
+	u_int target_offset;
+
+	if ((ahc->flags & AHC_SCB_BTT) != 0) {
+		u_int saved_scbptr;
+		
+		saved_scbptr = ahc_inb(ahc, SCBPTR);
+		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
+		ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
+		ahc_outb(ahc, SCBPTR, saved_scbptr);
+	} else {
+		target_offset = TCL_TARGET_OFFSET(tcl);
+		ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
+	}
+}
+
+void
+ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
+{
+	u_int target_offset;
+
+	if ((ahc->flags & AHC_SCB_BTT) != 0) {
+		u_int saved_scbptr;
+		
+		saved_scbptr = ahc_inb(ahc, SCBPTR);
+		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
+		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
+		ahc_outb(ahc, SCBPTR, saved_scbptr);
+	} else {
+		target_offset = TCL_TARGET_OFFSET(tcl);
+		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
+	}
+}
+
+/************************** SCB and SCB queue management **********************/
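+/*
+ * Return non-zero if the given SCB matches the target/channel/lun/tag
+ * description for the specified role.  ALL_CHANNELS, CAM_TARGET_WILDCARD,
+ * CAM_LUN_WILDCARD, and a tag of SCB_LIST_NULL act as wildcards.
+ */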
+int
+ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
+	      char channel, int lun, u_int tag, role_t role)
+{
+	int targ = SCB_GET_TARGET(ahc, scb);
+	char chan = SCB_GET_CHANNEL(ahc, scb);
+	int slun = SCB_GET_LUN(scb);
+	int match;
+
+	match = ((chan == channel) || (channel == ALL_CHANNELS));
+	if (match != 0)
+		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
+	if (match != 0)
+		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
+	if (match != 0) {
+#ifdef AHC_TARGET_MODE
+		int group;
+
+		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
+		if (role == ROLE_INITIATOR) {
+			match = (group != XPT_FC_GROUP_TMODE)
+			      && ((tag == scb->hscb->tag)
+			       || (tag == SCB_LIST_NULL));
+		} else if (role == ROLE_TARGET) {
+			match = (group == XPT_FC_GROUP_TMODE)
+			      && ((tag == scb->io_ctx->csio.tag_id)
+			       || (tag == SCB_LIST_NULL));
+		}
+#else /* !AHC_TARGET_MODE */
+		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
+#endif /* AHC_TARGET_MODE */
+	}
+
+	return match;
+}
+
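+/*
+ * Complete, with CAM_REQUEUE_REQ status, any qinfifo transactions queued
+ * to the device addressed by the given SCB, then ask the platform code
+ * to freeze that device's queue.
+ */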
+void
+ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
+{
+	int	target;
+	char	channel;
+	int	lun;
+
+	target = SCB_GET_TARGET(ahc, scb);
+	lun = SCB_GET_LUN(scb);
+	channel = SCB_GET_CHANNEL(ahc, scb);
+	
+	ahc_search_qinfifo(ahc, target, channel, lun,
+			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
+			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
+
+	ahc_platform_freeze_devq(ahc, scb);
+}
+
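+/*
+ * Place an SCB at the tail of the qinfifo, linking it behind the
+ * current last entry, and inform the sequencer of the new queue
+ * position.
+ */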
+void
+ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
+{
+	struct scb *prev_scb;
+
+	prev_scb = NULL;
+	if (ahc_qinfifo_count(ahc) != 0) {
+		u_int prev_tag;
+		uint8_t prev_pos;
+
+		prev_pos = ahc->qinfifonext - 1;
+		prev_tag = ahc->qinfifo[prev_pos];
+		prev_scb = ahc_lookup_scb(ahc, prev_tag);
+	}
+	ahc_qinfifo_requeue(ahc, prev_scb, scb);
+	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
+	} else {
+		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
+	}
+}
+
+static void
+ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
+		    struct scb *scb)
+{
+	if (prev_scb == NULL) {
+		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
+	} else {
+		prev_scb->hscb->next = scb->hscb->tag;
+		ahc_sync_scb(ahc, prev_scb, 
+			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+	}
+	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
+	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
+	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+}
+
+static int
+ahc_qinfifo_count(struct ahc_softc *ahc)
+{
+	uint8_t qinpos;
+	uint8_t diff;
+
+	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+		qinpos = ahc_inb(ahc, SNSCB_QOFF);
+		ahc_outb(ahc, SNSCB_QOFF, qinpos);
+	} else
+		qinpos = ahc_inb(ahc, QINPOS);
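+	/*
+	 * Both indices are uint8_t and the queue holds 256 entries,
+	 * so this subtraction yields the correct count even after
+	 * the indices have wrapped.
+	 */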
+	diff = ahc->qinfifonext - qinpos;
+	return (diff);
+}
+
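+/*
+ * Scan the qinfifo, the waiting for selection list, and the untagged
+ * queues for SCBs that match the given description and apply the
+ * requested action: complete them with the supplied status, remove
+ * them, or simply count them.  Returns the number of matches found.
+ */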
+int
+ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
+		   int lun, u_int tag, role_t role, uint32_t status,
+		   ahc_search_action action)
+{
+	struct	scb *scb;
+	struct	scb *prev_scb;
+	uint8_t qinstart;
+	uint8_t qinpos;
+	uint8_t qintail;
+	uint8_t next;
+	uint8_t prev;
+	uint8_t curscbptr;
+	int	found;
+	int	have_qregs;
+
+	qintail = ahc->qinfifonext;
+	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
+	if (have_qregs) {
+		qinstart = ahc_inb(ahc, SNSCB_QOFF);
+		ahc_outb(ahc, SNSCB_QOFF, qinstart);
+	} else
+		qinstart = ahc_inb(ahc, QINPOS);
+	qinpos = qinstart;
+	found = 0;
+	prev_scb = NULL;
+
+	if (action == SEARCH_COMPLETE) {
+		/*
+		 * Don't attempt to run any queued untagged transactions
+		 * until we are done with the abort process.
+		 */
+		ahc_freeze_untagged_queues(ahc);
+	}
+
+	/*
+	 * Start with an empty queue.  Entries that are not chosen
+	 * for removal will be re-added to the queue as we go.
+	 */
+	ahc->qinfifonext = qinpos;
+	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
+
+	while (qinpos != qintail) {
+		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
+		if (scb == NULL) {
+			printf("qinpos = %d, SCB index = %d\n",
+				qinpos, ahc->qinfifo[qinpos]);
+			panic("Loop 1\n");
+		}
+
+		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
+			/*
+			 * We found an scb that needs to be acted on.
+			 */
+			found++;
+			switch (action) {
+			case SEARCH_COMPLETE:
+			{
+				cam_status ostat;
+				cam_status cstat;
+
+				ostat = ahc_get_transaction_status(scb);
+				if (ostat == CAM_REQ_INPROG)
+					ahc_set_transaction_status(scb, status);
+				cstat = ahc_get_transaction_status(scb);
+				if (cstat != CAM_REQ_CMP)
+					ahc_freeze_scb(scb);
+				if ((scb->flags & SCB_ACTIVE) == 0)
+					printf("Inactive SCB in qinfifo\n");
+				ahc_done(ahc, scb);
+
+				/* FALLTHROUGH */
+			}
+			case SEARCH_REMOVE:
+				break;
+			case SEARCH_COUNT:
+				ahc_qinfifo_requeue(ahc, prev_scb, scb);
+				prev_scb = scb;
+				break;
+			}
+		} else {
+			ahc_qinfifo_requeue(ahc, prev_scb, scb);
+			prev_scb = scb;
+		}
+		qinpos++;
+	}
+
+	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
+	} else {
+		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
+	}
+
+	if (action != SEARCH_COUNT
+	 && (found != 0)
+	 && (qinstart != ahc->qinfifonext)) {
+		/*
+		 * The sequencer may be in the process of dmaing
+		 * down the SCB at the beginning of the queue.
+		 * This could be problematic if either the first,
+		 * or the second SCB is removed from the queue
+		 * (the first SCB includes a pointer to the "next"
+		 * SCB to dma). If we have removed any entries, swap
+		 * the first element in the queue with the next HSCB
+		 * so the sequencer will notice that NEXT_QUEUED_SCB
+		 * has changed during its dma attempt and will retry
+		 * the DMA.
+		 */
+		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);
+
+		if (scb == NULL) {
+			printf("found = %d, qinstart = %d, qinfifonext = %d\n",
+				found, qinstart, ahc->qinfifonext);
+			panic("First/Second Qinfifo fixup\n");
+		}
+		/*
+		 * ahc_swap_with_next_hscb forces our next pointer to
+		 * point to the reserved SCB for future commands.  Save
+		 * and restore our original next pointer to maintain
+		 * queue integrity.
+		 */
+		next = scb->hscb->next;
+		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
+		ahc_swap_with_next_hscb(ahc, scb);
+		scb->hscb->next = next;
+		ahc->qinfifo[qinstart] = scb->hscb->tag;
+
+		/* Tell the card about the new head of the qinfifo. */
+		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
+
+		/* Fixup the tail "next" pointer. */
+		qintail = ahc->qinfifonext - 1;
+		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
+		scb->hscb->next = ahc->next_queued_scb->hscb->tag;
+	}
+
+	/*
+	 * Search waiting for selection list.
+	 */
+	curscbptr = ahc_inb(ahc, SCBPTR);
+	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
+	prev = SCB_LIST_NULL;
+
+	while (next != SCB_LIST_NULL) {
+		uint8_t scb_index;
+
+		ahc_outb(ahc, SCBPTR, next);
+		scb_index = ahc_inb(ahc, SCB_TAG);
+		if (scb_index >= ahc->scb_data->numscbs) {
+			printf("Waiting List inconsistency. "
+			       "SCB index == %d, yet numscbs == %d.",
+			       scb_index, ahc->scb_data->numscbs);
+			ahc_dump_card_state(ahc);
+			panic("for safety");
+		}
+		scb = ahc_lookup_scb(ahc, scb_index);
+		if (scb == NULL) {
+			printf("scb_index = %d, next = %d\n",
+				scb_index, next);
+			panic("Waiting List traversal\n");
+		}
+		if (ahc_match_scb(ahc, scb, target, channel,
+				  lun, SCB_LIST_NULL, role)) {
+			/*
+			 * We found an scb that needs to be acted on.
+			 */
+			found++;
+			switch (action) {
+			case SEARCH_COMPLETE:
+			{
+				cam_status ostat;
+				cam_status cstat;
+
+				ostat = ahc_get_transaction_status(scb);
+				if (ostat == CAM_REQ_INPROG)
+					ahc_set_transaction_status(scb,
+								   status);
+				cstat = ahc_get_transaction_status(scb);
+				if (cstat != CAM_REQ_CMP)
+					ahc_freeze_scb(scb);
+				if ((scb->flags & SCB_ACTIVE) == 0)
+					printf("Inactive SCB in Waiting List\n");
+				ahc_done(ahc, scb);
+				/* FALLTHROUGH */
+			}
+			case SEARCH_REMOVE:
+				next = ahc_rem_wscb(ahc, next, prev);
+				break;
+			case SEARCH_COUNT:
+				prev = next;
+				next = ahc_inb(ahc, SCB_NEXT);
+				break;
+			}
+		} else {
+			
+			prev = next;
+			next = ahc_inb(ahc, SCB_NEXT);
+		}
+	}
+	ahc_outb(ahc, SCBPTR, curscbptr);
+
+	found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target,
+					    channel, lun, status, action);
+
+	if (action == SEARCH_COMPLETE)
+		ahc_release_untagged_queues(ahc);
+	return (found);
+}
+
+int
+ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx,
+			   int target, char channel, int lun, uint32_t status,
+			   ahc_search_action action)
+{
+	struct	scb *scb;
+	int	maxtarget;
+	int	found;
+	int	i;
+
+	if (action == SEARCH_COMPLETE) {
+		/*
+		 * Don't attempt to run any queued untagged transactions
+		 * until we are done with the abort process.
+		 */
+		ahc_freeze_untagged_queues(ahc);
+	}
+
+	found = 0;
+	i = 0;
+	if ((ahc->flags & AHC_SCB_BTT) == 0) {
+
+		maxtarget = 16;
+		if (target != CAM_TARGET_WILDCARD) {
+
+			i = target;
+			if (channel == 'B')
+				i += 8;
+			maxtarget = i + 1;
+		}
+	} else {
+		maxtarget = 0;
+	}
+
+	for (; i < maxtarget; i++) {
+		struct scb_tailq *untagged_q;
+		struct scb *next_scb;
+
+		untagged_q = &(ahc->untagged_queues[i]);
+		next_scb = TAILQ_FIRST(untagged_q);
+		while (next_scb != NULL) {
+
+			scb = next_scb;
+			next_scb = TAILQ_NEXT(scb, links.tqe);
+
+			/*
+			 * The head of the list may be the currently
+			 * active untagged command for a device.
+			 * We're only searching for commands that
+			 * have not been started.  A transaction
+			 * marked active but still in the qinfifo
+			 * is removed by the qinfifo scanning code
+			 * above.
+			 */
+			if ((scb->flags & SCB_ACTIVE) != 0)
+				continue;
+
+			if (ahc_match_scb(ahc, scb, target, channel, lun,
+					  SCB_LIST_NULL, ROLE_INITIATOR) == 0
+			 || (ctx != NULL && ctx != scb->io_ctx))
+				continue;
+
+			/*
+			 * We found an scb that needs to be acted on.
+			 */
+			found++;
+			switch (action) {
+			case SEARCH_COMPLETE:
+			{
+				cam_status ostat;
+				cam_status cstat;
+
+				ostat = ahc_get_transaction_status(scb);
+				if (ostat == CAM_REQ_INPROG)
+					ahc_set_transaction_status(scb, status);
+				cstat = ahc_get_transaction_status(scb);
+				if (cstat != CAM_REQ_CMP)
+					ahc_freeze_scb(scb);
+				if ((scb->flags & SCB_ACTIVE) == 0)
+					printf("Inactive SCB in untaggedQ\n");
+				ahc_done(ahc, scb);
+				break;
+			}
+			case SEARCH_REMOVE:
+				scb->flags &= ~SCB_UNTAGGEDQ;
+				TAILQ_REMOVE(untagged_q, scb, links.tqe);
+				break;
+			case SEARCH_COUNT:
+				break;
+			}
+		}
+	}
+
+	if (action == SEARCH_COMPLETE)
+		ahc_release_untagged_queues(ahc);
+	return (found);
+}
+
+int
+ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
+		     int lun, u_int tag, int stop_on_first, int remove,
+		     int save_state)
+{
+	struct	scb *scbp;
+	u_int	next;
+	u_int	prev;
+	u_int	count;
+	u_int	active_scb;
+
+	count = 0;
+	next = ahc_inb(ahc, DISCONNECTED_SCBH);
+	prev = SCB_LIST_NULL;
+
+	if (save_state) {
+		/* restore this when we're done */
+		active_scb = ahc_inb(ahc, SCBPTR);
+	} else
+		/* Silence compiler */
+		active_scb = SCB_LIST_NULL;
+
+	while (next != SCB_LIST_NULL) {
+		u_int scb_index;
+
+		ahc_outb(ahc, SCBPTR, next);
+		scb_index = ahc_inb(ahc, SCB_TAG);
+		if (scb_index >= ahc->scb_data->numscbs) {
+			printf("Disconnected List inconsistency. "
+			       "SCB index == %d, yet numscbs == %d.",
+			       scb_index, ahc->scb_data->numscbs);
+			ahc_dump_card_state(ahc);
+			panic("for safety");
+		}
+
+		if (next == prev) {
+			panic("Disconnected List Loop. "
+			      "cur SCBPTR == %x, prev SCBPTR == %x.",
+			      next, prev);
+		}
+		scbp = ahc_lookup_scb(ahc, scb_index);
+		if (ahc_match_scb(ahc, scbp, target, channel, lun,
+				  tag, ROLE_INITIATOR)) {
+			count++;
+			if (remove) {
+				next =
+				    ahc_rem_scb_from_disc_list(ahc, prev, next);
+			} else {
+				prev = next;
+				next = ahc_inb(ahc, SCB_NEXT);
+			}
+			if (stop_on_first)
+				break;
+		} else {
+			prev = next;
+			next = ahc_inb(ahc, SCB_NEXT);
+		}
+	}
+	if (save_state)
+		ahc_outb(ahc, SCBPTR, active_scb);
+	return (count);
+}
+
+/*
+ * Remove an SCB from the on chip list of disconnected transactions.
+ * This list is empty/unused if we are not performing SCB paging.
+ */
+static u_int
+ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
+{
+	u_int next;
+
+	ahc_outb(ahc, SCBPTR, scbptr);
+	next = ahc_inb(ahc, SCB_NEXT);
+
+	ahc_outb(ahc, SCB_CONTROL, 0);
+
+	ahc_add_curscb_to_free_list(ahc);
+
+	if (prev != SCB_LIST_NULL) {
+		ahc_outb(ahc, SCBPTR, prev);
+		ahc_outb(ahc, SCB_NEXT, next);
+	} else
+		ahc_outb(ahc, DISCONNECTED_SCBH, next);
+
+	return (next);
+}
+
+/*
+ * Add the SCB as selected by SCBPTR onto the on chip list of
+ * free hardware SCBs.  This list is empty/unused if we are not
+ * performing SCB paging.
+ */
+static void
+ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
+{
+	/*
+	 * Invalidate the tag so that our abort
+	 * routines don't think it's active.
+	 */
+	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
+
+	if ((ahc->flags & AHC_PAGESCBS) != 0) {
+		ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
+		ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
+	}
+}
+
+/*
+ * Manipulate the waiting for selection list and return the
+ * scb that follows the one that we remove.
+ */
+static u_int
+ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
+{       
+	u_int curscb, next;
+
+	/*
+	 * Select the SCB we want to abort and
+	 * pull the next pointer out of it.
+	 */
+	curscb = ahc_inb(ahc, SCBPTR);
+	ahc_outb(ahc, SCBPTR, scbpos);
+	next = ahc_inb(ahc, SCB_NEXT);
+
+	/* Clear the necessary fields */
+	ahc_outb(ahc, SCB_CONTROL, 0);
+
+	ahc_add_curscb_to_free_list(ahc);
+
+	/* update the waiting list */
+	if (prev == SCB_LIST_NULL) {
+		/* First in the list */
+		ahc_outb(ahc, WAITING_SCBH, next); 
+
+		/*
+		 * Ensure we aren't attempting to perform
+		 * selection for this entry.
+		 */
+		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
+	} else {
+		/*
+		 * Select the scb that pointed to us 
+		 * and update its next pointer.
+		 */
+		ahc_outb(ahc, SCBPTR, prev);
+		ahc_outb(ahc, SCB_NEXT, next);
+	}
+
+	/*
+	 * Point us back at the original scb position.
+	 */
+	ahc_outb(ahc, SCBPTR, curscb);
+	return next;
+}
+
+/******************************** Error Handling ******************************/
+/*
+ * Abort all SCBs that match the given description (target/channel/lun/tag),
+ * setting their status to the passed in status if the status has not already
+ * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
+ * is paused before it is called.
+ */
+int
+ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
+	       int lun, u_int tag, role_t role, uint32_t status)
+{
+	struct	scb *scbp;
+	struct	scb *scbp_next;
+	u_int	active_scb;
+	int	i, j;
+	int	maxtarget;
+	int	minlun;
+	int	maxlun;
+
+	int	found;
+
+	/*
+	 * Don't attempt to run any queued untagged transactions
+	 * until we are done with the abort process.
+	 */
+	ahc_freeze_untagged_queues(ahc);
+
+	/* restore this when we're done */
+	active_scb = ahc_inb(ahc, SCBPTR);
+
+	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
+				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
+
+	/*
+	 * Clean out the busy target table for any untagged commands.
+	 */
+	i = 0;
+	maxtarget = 16;
+	if (target != CAM_TARGET_WILDCARD) {
+		i = target;
+		if (channel == 'B')
+			i += 8;
+		maxtarget = i + 1;
+	}
+
+	if (lun == CAM_LUN_WILDCARD) {
+
+		/*
+		 * Unless we are using an SCB based
+		 * busy targets table, there is only
+		 * one table entry for all luns of
+		 * a target.
+		 */
+		minlun = 0;
+		maxlun = 1;
+		if ((ahc->flags & AHC_SCB_BTT) != 0)
+			maxlun = AHC_NUM_LUNS;
+	} else {
+		minlun = lun;
+		maxlun = lun + 1;
+	}
+
+	if (role != ROLE_TARGET) {
+		for (; i < maxtarget; i++) {
+			for (j = minlun; j < maxlun; j++) {
+				u_int scbid;
+				u_int tcl;
+
+				tcl = BUILD_TCL(i << 4, j);
+				scbid = ahc_index_busy_tcl(ahc, tcl);
+				scbp = ahc_lookup_scb(ahc, scbid);
+				if (scbp == NULL
+				 || ahc_match_scb(ahc, scbp, target, channel,
+						  lun, tag, role) == 0)
+					continue;
+				ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
+			}
+		}
+
+		/*
+		 * Go through the disconnected list and remove any entries we
+		 * have queued for completion, 0'ing their control byte too.
+		 * We save the active SCB and restore it ourselves, so there
+		 * is no reason for this search to restore it too.
+		 */
+		ahc_search_disc_list(ahc, target, channel, lun, tag,
+				     /*stop_on_first*/FALSE, /*remove*/TRUE,
+				     /*save_state*/FALSE);
+	}
+
+	/*
+	 * Go through the hardware SCB array looking for commands that
+	 * were active but not on any list.  In some cases, these remnants
+	 * might not still have mappings in the scbindex array (e.g. unexpected
+	 * bus free with the same scb queued for an abort).  Don't hold this
+	 * against them.
+	 */
+	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
+		u_int scbid;
+
+		ahc_outb(ahc, SCBPTR, i);
+		scbid = ahc_inb(ahc, SCB_TAG);
+		scbp = ahc_lookup_scb(ahc, scbid);
+		if ((scbp == NULL && scbid != SCB_LIST_NULL)
+		 || (scbp != NULL
+		  && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
+			ahc_add_curscb_to_free_list(ahc);
+	}
+
+	/*
+	 * Go through the pending CCB list and look for
+	 * commands for this target that are still active.
+	 * These are other tagged commands that were
+	 * disconnected when the reset occurred.
+	 */
+	scbp_next = LIST_FIRST(&ahc->pending_scbs);
+	while (scbp_next != NULL) {
+		scbp = scbp_next;
+		scbp_next = LIST_NEXT(scbp, pending_links);
+		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
+			cam_status ostat;
+
+			ostat = ahc_get_transaction_status(scbp);
+			if (ostat == CAM_REQ_INPROG)
+				ahc_set_transaction_status(scbp, status);
+			if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
+				ahc_freeze_scb(scbp);
+			if ((scbp->flags & SCB_ACTIVE) == 0)
+				printf("Inactive SCB on pending list\n");
+			ahc_done(ahc, scbp);
+			found++;
+		}
+	}
+	ahc_outb(ahc, SCBPTR, active_scb);
+	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
+	ahc_release_untagged_queues(ahc);
+	return found;
+}
+
+static void
+ahc_reset_current_bus(struct ahc_softc *ahc)
+{
+	uint8_t scsiseq;
+
+	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
+	scsiseq = ahc_inb(ahc, SCSISEQ);
+	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
+	ahc_flush_device_writes(ahc);
+	ahc_delay(AHC_BUSRESET_DELAY);
+	/* Turn off the bus reset */
+	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
+
+	ahc_clear_intstat(ahc);
+
+	/* Re-enable reset interrupts */
+	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
+}
+
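+/*
+ * Reset the named channel: drain the completion fifos, optionally
+ * assert SCSI reset on the bus, abort all pending transactions with
+ * CAM_SCSI_BUS_RESET status, notify the XPT of the reset, and revert
+ * all transfer negotiations to async/narrow.  Returns the number of
+ * transactions aborted.
+ */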
+int
+ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
+{
+	struct	ahc_devinfo devinfo;
+	u_int	initiator, target, max_scsiid;
+	u_int	sblkctl;
+	u_int	scsiseq;
+	u_int	simode1;
+	int	found;
+	int	restart_needed;
+	char	cur_channel;
+
+	ahc->pending_device = NULL;
+
+	ahc_compile_devinfo(&devinfo,
+			    CAM_TARGET_WILDCARD,
+			    CAM_TARGET_WILDCARD,
+			    CAM_LUN_WILDCARD,
+			    channel, ROLE_UNKNOWN);
+	ahc_pause(ahc);
+
+	/* Make sure the sequencer is in a safe location. */
+	ahc_clear_critical_section(ahc);
+
+	/*
+	 * Run our command complete fifos to ensure that we perform
+	 * completion processing on any commands that 'completed'
+	 * before the reset occurred.
+	 */
+	ahc_run_qoutfifo(ahc);
+#ifdef AHC_TARGET_MODE
+	/*
+	 * XXX - In Twin mode, the tqinfifo may have commands
+	 *	 for an unaffected channel in it.  However, if
+	 *	 we have run out of ATIO resources to drain that
+	 *	 queue, we may not get them all out here.  Further,
+	 *	 the blocked transactions for the reset channel
+	 *	 should just be killed off, irrespective of whether
+	 *	 we are blocked on ATIO resources.  Write a routine
+	 *	 to compact the tqinfifo appropriately.
+	 */
+	if ((ahc->flags & AHC_TARGETROLE) != 0) {
+		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
+	}
+#endif
+
+	/*
+	 * Reset the bus if we are initiating this reset
+	 */
+	sblkctl = ahc_inb(ahc, SBLKCTL);
+	cur_channel = 'A';
+	if ((ahc->features & AHC_TWIN) != 0
+	 && ((sblkctl & SELBUSB) != 0))
+	    cur_channel = 'B';
+	scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
+	if (cur_channel != channel) {
+		/* Case 1: Command for another bus is active
+		 * Stealthily reset the other bus without
+		 * upsetting the current bus.
+		 */
+		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
+		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
+#ifdef AHC_TARGET_MODE
+		/*
+		 * Bus resets clear ENSELI, so we cannot
+		 * defer re-enabling bus reset interrupts
+		 * if we are in target mode.
+		 */
+		if ((ahc->flags & AHC_TARGETROLE) != 0)
+			simode1 |= ENSCSIRST;
+#endif
+		ahc_outb(ahc, SIMODE1, simode1);
+		if (initiate_reset)
+			ahc_reset_current_bus(ahc);
+		ahc_clear_intstat(ahc);
+		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
+		ahc_outb(ahc, SBLKCTL, sblkctl);
+		restart_needed = FALSE;
+	} else {
+		/* Case 2: A command from this bus is active or we're idle */
+		simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
+#ifdef AHC_TARGET_MODE
+		/*
+		 * Bus resets clear ENSELI, so we cannot
+		 * defer re-enabling bus reset interrupts
+		 * if we are in target mode.
+		 */
+		if ((ahc->flags & AHC_TARGETROLE) != 0)
+			simode1 |= ENSCSIRST;
+#endif
+		ahc_outb(ahc, SIMODE1, simode1);
+		if (initiate_reset)
+			ahc_reset_current_bus(ahc);
+		ahc_clear_intstat(ahc);
+		ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
+		restart_needed = TRUE;
+	}
+
+	/*
+	 * Clean up all the state information for the
+	 * pending transactions on this bus.
+	 */
+	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
+			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
+			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
+
+	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
+
+#ifdef AHC_TARGET_MODE
+	/*
+ * Send an immediate notify ccb to all target mode peripheral
+	 * drivers affected by this action.
+	 */
+	for (target = 0; target <= max_scsiid; target++) {
+		struct ahc_tmode_tstate* tstate;
+		u_int lun;
+
+		tstate = ahc->enabled_targets[target];
+		if (tstate == NULL)
+			continue;
+		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
+			struct ahc_tmode_lstate* lstate;
+
+			lstate = tstate->enabled_luns[lun];
+			if (lstate == NULL)
+				continue;
+
+			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
+					       EVENT_TYPE_BUS_RESET, /*arg*/0);
+			ahc_send_lstate_events(ahc, lstate);
+		}
+	}
+#endif
+	/* Notify the XPT that a bus reset occurred */
+	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
+		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
+
+	/*
+	 * Revert to async/narrow transfers until we renegotiate.
+	 */
+	for (target = 0; target <= max_scsiid; target++) {
+
+		if (ahc->enabled_targets[target] == NULL)
+			continue;
+		for (initiator = 0; initiator <= max_scsiid; initiator++) {
+			struct ahc_devinfo devinfo;
+
+			ahc_compile_devinfo(&devinfo, target, initiator,
+					    CAM_LUN_WILDCARD,
+					    channel, ROLE_UNKNOWN);
+			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+				      AHC_TRANS_CUR, /*paused*/TRUE);
+			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
+					 /*period*/0, /*offset*/0,
+					 /*ppr_options*/0, AHC_TRANS_CUR,
+					 /*paused*/TRUE);
+		}
+	}
+
+	if (restart_needed)
+		ahc_restart(ahc);
+	else
+		ahc_unpause(ahc);
+	return found;
+}
+
+
+/***************************** Residual Processing ****************************/
+/*
+ * Calculate the residual for a just completed SCB.
+ */
+void
+ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
+{
+	struct hardware_scb *hscb;
+	struct status_pkt *spkt;
+	uint32_t sgptr;
+	uint32_t resid_sgptr;
+	uint32_t resid;
+
+	/*
+	 * 5 cases.
+	 * 1) No residual.
+	 *    SG_RESID_VALID clear in sgptr.
+	 * 2) Transferless command
+	 * 3) Never performed any transfers.
+	 *    sgptr has SG_FULL_RESID set.
+	 * 4) No residual but target did not
+	 *    save data pointers after the
+	 *    last transfer, so sgptr was
+	 *    never updated.
+	 * 5) We have a partial residual.
+	 *    Use residual_sgptr to determine
+	 *    where we are.
+	 */
+
+	hscb = scb->hscb;
+	sgptr = ahc_le32toh(hscb->sgptr);
+	if ((sgptr & SG_RESID_VALID) == 0)
+		/* Case 1 */
+		return;
+	sgptr &= ~SG_RESID_VALID;
+
+	if ((sgptr & SG_LIST_NULL) != 0)
+		/* Case 2 */
+		return;
+
+	spkt = &hscb->shared_data.status;
+	resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
+	if ((sgptr & SG_FULL_RESID) != 0) {
+		/* Case 3 */
+		resid = ahc_get_transfer_length(scb);
+	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
+		/* Case 4 */
+		return;
+	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
+		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
+	} else {
+		struct ahc_dma_seg *sg;
+
+		/*
+		 * Remainder of the SG where the transfer
+		 * stopped.  
+		 */
+		resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
+		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
+
+		/* The residual sg_ptr always points to the next sg */
+		sg--;
+
+		/*
+		 * Add up the contents of all residual
+		 * SG segments that are after the SG where
+		 * the transfer stopped.
+		 */
+		while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
+			sg++;
+			resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
+		}
+	}
+	if ((scb->flags & SCB_SENSE) == 0)
+		ahc_set_residual(scb, resid);
+	else
+		ahc_set_sense_residual(scb, resid);
+
+#ifdef AHC_DEBUG
+	if ((ahc_debug & AHC_SHOW_MISC) != 0) {
+		ahc_print_path(ahc, scb);
+		printf("Handled %sResidual of %d bytes\n",
+		       (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
+	}
+#endif
+}
+
+/******************************* Target Mode **********************************/
+#ifdef AHC_TARGET_MODE
+/*
+ * Add a target mode event to this lun's queue
+ */
+static void
+ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
+		       u_int initiator_id, u_int event_type, u_int event_arg)
+{
+	struct ahc_tmode_event *event;
+	int pending;
+
+	xpt_freeze_devq(lstate->path, /*count*/1);
+	if (lstate->event_w_idx >= lstate->event_r_idx)
+		pending = lstate->event_w_idx - lstate->event_r_idx;
+	else
+		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
+			- (lstate->event_r_idx - lstate->event_w_idx);
+
+	if (event_type == EVENT_TYPE_BUS_RESET
+	 || event_type == MSG_BUS_DEV_RESET) {
+		/*
+		 * Any earlier events are irrelevant, so reset our buffer.
+		 * This has the effect of allowing us to deal with reset
+		 * floods (an external device holding down the reset line)
+		 * without losing the event that is really interesting.
+		 */
+		lstate->event_r_idx = 0;
+		lstate->event_w_idx = 0;
+		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
+	}
+
+	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
+		xpt_print_path(lstate->path);
+		printf("immediate event %x:%x lost\n",
+		       lstate->event_buffer[lstate->event_r_idx].event_type,
+		       lstate->event_buffer[lstate->event_r_idx].event_arg);
+		lstate->event_r_idx++;
+		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
+			lstate->event_r_idx = 0;
+		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
+	}
+
+	event = &lstate->event_buffer[lstate->event_w_idx];
+	event->initiator_id = initiator_id;
+	event->event_type = event_type;
+	event->event_arg = event_arg;
+	lstate->event_w_idx++;
+	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
+		lstate->event_w_idx = 0;
+}
+
+/*
+ * Send any target mode events queued up waiting
+ * for immediate notify resources.
+ */
+void
+ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
+{
+	struct ccb_hdr *ccbh;
+	struct ccb_immed_notify *inot;
+
+	while (lstate->event_r_idx != lstate->event_w_idx
+	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
+		struct ahc_tmode_event *event;
+
+		event = &lstate->event_buffer[lstate->event_r_idx];
+		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
+		inot = (struct ccb_immed_notify *)ccbh;
+		switch (event->event_type) {
+		case EVENT_TYPE_BUS_RESET:
+			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
+			break;
+		default:
+			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
+			inot->message_args[0] = event->event_type;
+			inot->message_args[1] = event->event_arg;
+			break;
+		}
+		inot->initiator_id = event->initiator_id;
+		inot->sense_len = 0;
+		xpt_done((union ccb *)inot);
+		lstate->event_r_idx++;
+		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
+			lstate->event_r_idx = 0;
+	}
+}
+#endif
+
+/******************** Sequencer Program Patching/Download *********************/
+
+#ifdef AHC_DUMP_SEQ
+void
+ahc_dumpseq(struct ahc_softc* ahc)
+{
+	int i;
+
+	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
+	ahc_outb(ahc, SEQADDR0, 0);
+	ahc_outb(ahc, SEQADDR1, 0);
+	for (i = 0; i < ahc->instruction_ram_size; i++) {
+		uint8_t ins_bytes[4];
+
+		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
+		printf("0x%08x\n", ins_bytes[0] << 24
+				 | ins_bytes[1] << 16
+				 | ins_bytes[2] << 8
+				 | ins_bytes[3]);
+	}
+}
+#endif
+
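+/*
+ * Download the sequencer program, skipping any conditional patches
+ * that do not apply to this controller and recording the critical
+ * section boundaries of the downloaded image.  Returns 0 on success
+ * or ENOMEM if the program does not fit in instruction ram.
+ */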
+static int
+ahc_loadseq(struct ahc_softc *ahc)
+{
+	struct	cs cs_table[num_critical_sections];
+	u_int	begin_set[num_critical_sections];
+	u_int	end_set[num_critical_sections];
+	struct	patch *cur_patch;
+	u_int	cs_count;
+	u_int	cur_cs;
+	u_int	i;
+	u_int	skip_addr;
+	u_int	sg_prefetch_cnt;
+	int	downloaded;
+	uint8_t	download_consts[7];
+
+	/*
+	 * Start out with 0 critical sections
+	 * that apply to this firmware load.
+	 */
+	cs_count = 0;
+	cur_cs = 0;
+	memset(begin_set, 0, sizeof(begin_set));
+	memset(end_set, 0, sizeof(end_set));
+
+	/* Setup downloadable constant table */
+	download_consts[QOUTFIFO_OFFSET] = 0;
+	if (ahc->targetcmds != NULL)
+		download_consts[QOUTFIFO_OFFSET] += 32;
+	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
+	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
+	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
+	sg_prefetch_cnt = ahc->pci_cachesize;
+	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
+		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
+	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
+	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
+	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);
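+	/*
+	 * The mask calculations above assume sg_prefetch_cnt is a
+	 * power of two, which holds for typical PCI cache line sizes
+	 * and for the 2 * sizeof(struct ahc_dma_seg) minimum enforced
+	 * above.
+	 */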
+
+	cur_patch = patches;
+	downloaded = 0;
+	skip_addr = 0;
+	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
+	ahc_outb(ahc, SEQADDR0, 0);
+	ahc_outb(ahc, SEQADDR1, 0);
+
+	for (i = 0; i < sizeof(seqprog)/4; i++) {
+		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
+			/*
+			 * Don't download this instruction as it
+			 * is in a patch that was removed.
+			 */
+			continue;
+		}
+
+		if (downloaded == ahc->instruction_ram_size) {
+			/*
+			 * We're about to exceed the instruction
+			 * storage capacity for this chip.  Fail
+			 * the load.
+			 */
+			printf("\n%s: Program too large for instruction memory "
+			       "size of %d!\n", ahc_name(ahc),
+			       ahc->instruction_ram_size);
+			return (ENOMEM);
+		}
+
+		/*
+		 * Move through the CS table until we find a CS
+		 * that might apply to this instruction.
+		 */
+		for (; cur_cs < num_critical_sections; cur_cs++) {
+			if (critical_sections[cur_cs].end <= i) {
+				if (begin_set[cs_count] == TRUE
+				 && end_set[cs_count] == FALSE) {
+					cs_table[cs_count].end = downloaded;
+				 	end_set[cs_count] = TRUE;
+					cs_count++;
+				}
+				continue;
+			}
+			if (critical_sections[cur_cs].begin <= i
+			 && begin_set[cs_count] == FALSE) {
+				cs_table[cs_count].begin = downloaded;
+				begin_set[cs_count] = TRUE;
+			}
+			break;
+		}
+		ahc_download_instr(ahc, i, download_consts);
+		downloaded++;
+	}
+
+	ahc->num_critical_sections = cs_count;
+	if (cs_count != 0) {
+
+		cs_count *= sizeof(struct cs);
+		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
+		if (ahc->critical_sections == NULL)
+			panic("ahc_loadseq: Could not malloc");
+		memcpy(ahc->critical_sections, cs_table, cs_count);
+	}
+	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
+
+	if (bootverbose) {
+		printf(" %d instructions downloaded\n", downloaded);
+		printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
+		       ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
+	}
+	return (0);
+}
+
+static int
+ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
+		u_int start_instr, u_int *skip_addr)
+{
+	struct	patch *cur_patch;
+	struct	patch *last_patch;
+	u_int	num_patches;
+
+	num_patches = sizeof(patches)/sizeof(struct patch);
+	last_patch = &patches[num_patches];
+	cur_patch = *start_patch;
+
+	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
+
+		if (cur_patch->patch_func(ahc) == 0) {
+
+			/* Start rejecting code */
+			*skip_addr = start_instr + cur_patch->skip_instr;
+			cur_patch += cur_patch->skip_patch;
+		} else {
+			/* Accepted this patch.  Advance to the next
+			 * one and wait for our instruction pointer to
+			 * hit this point.
+			 */
+			cur_patch++;
+		}
+	}
+
+	*start_patch = cur_patch;
+	if (start_instr < *skip_addr)
+		/* Still skipping */
+		return (0);
+
+	return (1);
+}
+
+static void
+ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
+{
+	union	ins_formats instr;
+	struct	ins_format1 *fmt1_ins;
+	struct	ins_format3 *fmt3_ins;
+	u_int	opcode;
+
+	/*
+	 * The firmware is always compiled into a little endian format.
+	 */
+	instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);
+
+	fmt1_ins = &instr.format1;
+	fmt3_ins = NULL;
+
+	/* Pull the opcode */
+	opcode = instr.format1.opcode;
+	switch (opcode) {
+	case AIC_OP_JMP:
+	case AIC_OP_JC:
+	case AIC_OP_JNC:
+	case AIC_OP_CALL:
+	case AIC_OP_JNE:
+	case AIC_OP_JNZ:
+	case AIC_OP_JE:
+	case AIC_OP_JZ:
+	{
+		struct patch *cur_patch;
+		int address_offset;
+		u_int address;
+		u_int skip_addr;
+		u_int i;
+
+		fmt3_ins = &instr.format3;
+		address_offset = 0;
+		address = fmt3_ins->address;
+		cur_patch = patches;
+		skip_addr = 0;
+
+		for (i = 0; i < address;) {
+
+			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);
+
+			if (skip_addr > i) {
+				int end_addr;
+
+				end_addr = MIN(address, skip_addr);
+				address_offset += end_addr - i;
+				i = skip_addr;
+			} else {
+				i++;
+			}
+		}
+		address -= address_offset;
+		fmt3_ins->address = address;
+		/* FALLTHROUGH */
+	}
+	case AIC_OP_OR:
+	case AIC_OP_AND:
+	case AIC_OP_XOR:
+	case AIC_OP_ADD:
+	case AIC_OP_ADC:
+	case AIC_OP_BMOV:
+		if (fmt1_ins->parity != 0) {
+			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
+		}
+		fmt1_ins->parity = 0;
+		if ((ahc->features & AHC_CMD_CHAN) == 0
+		 && opcode == AIC_OP_BMOV) {
+			/*
+			 * Block move was added at the same time
+			 * as the command channel.  Verify that
+			 * this is only a move of a single element
+			 * and convert the BMOV to a MOV
+			 * (AND with an immediate of FF).
+			 */
+			if (fmt1_ins->immediate != 1)
+				panic("%s: BMOV not supported\n",
+				      ahc_name(ahc));
+			fmt1_ins->opcode = AIC_OP_AND;
+			fmt1_ins->immediate = 0xff;
+		}
+		/* FALLTHROUGH */
+	case AIC_OP_ROL:
+		if ((ahc->features & AHC_ULTRA2) != 0) {
+			int i, count;
+
+			/* Calculate odd parity for the instruction */
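+			/*
+			 * Only bits 0-30 are examined; the top bit is
+			 * the parity field itself, set below so that
+			 * the full 32 bit word has odd parity.
+			 */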
+			for (i = 0, count = 0; i < 31; i++) {
+				uint32_t mask;
+
+				mask = 0x01 << i;
+				if ((instr.integer & mask) != 0)
+					count++;
+			}
+			if ((count & 0x01) == 0)
+				instr.format1.parity = 1;
+		} else {
+			/* Compress the instruction for older sequencers */
+			if (fmt3_ins != NULL) {
+				instr.integer =
+					fmt3_ins->immediate
+				      | (fmt3_ins->source << 8)
+				      | (fmt3_ins->address << 16)
+				      |	(fmt3_ins->opcode << 25);
+			} else {
+				instr.integer =
+					fmt1_ins->immediate
+				      | (fmt1_ins->source << 8)
+				      | (fmt1_ins->destination << 16)
+				      |	(fmt1_ins->ret << 24)
+				      |	(fmt1_ins->opcode << 25);
+			}
+		}
+		/* The sequencer is a little endian cpu */
+		instr.integer = ahc_htole32(instr.integer);
+		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
+		break;
+	default:
+		panic("Unknown opcode encountered in seq program");
+		break;
+	}
+}
+
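+/*
+ * Pretty print a register value, decoding any bits named in the parse
+ * table and starting a new line once cur_column has reached wrap_point.
+ * Returns the number of characters printed.
+ */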
+int
+ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries,
+		   const char *name, u_int address, u_int value,
+		   u_int *cur_column, u_int wrap_point)
+{
+	int	printed;
+	u_int	printed_mask;
+
+	if (cur_column != NULL && *cur_column >= wrap_point) {
+		printf("\n");
+		*cur_column = 0;
+	}
+	printed = printf("%s[0x%x]", name, value);
+	if (table == NULL) {
+		printed += printf(" ");
+		*cur_column += printed;
+		return (printed);
+	}
+	printed_mask = 0;
+	while (printed_mask != 0xFF) {
+		int entry;
+
+		for (entry = 0; entry < num_entries; entry++) {
+			if (((value & table[entry].mask)
+			  != table[entry].value)
+			 || ((printed_mask & table[entry].mask)
+			  == table[entry].mask))
+				continue;
+
+			printed += printf("%s%s",
+					  printed_mask == 0 ? ":(" : "|",
+					  table[entry].name);
+			printed_mask |= table[entry].mask;
+			
+			break;
+		}
+		if (entry >= num_entries)
+			break;
+	}
+	if (printed_mask != 0)
+		printed += printf(") ");
+	else
+		printed += printf(" ");
+	if (cur_column != NULL)
+		*cur_column += printed;
+	return (printed);
+}
+
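+/*
+ * Dump controller register, sequencer, and queue state to the console
+ * for debugging.  The controller is paused while the state is sampled
+ * (if it was not already paused) and unpaused again on exit.
+ */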
+void
+ahc_dump_card_state(struct ahc_softc *ahc)
+{
+	struct	scb *scb;
+	struct	scb_tailq *untagged_q;
+	u_int	cur_col;
+	int	paused;
+	int	target;
+	int	maxtarget;
+	int	i;
+	uint8_t last_phase;
+	uint8_t qinpos;
+	uint8_t qintail;
+	uint8_t qoutpos;
+	uint8_t scb_index;
+	uint8_t saved_scbptr;
+
+	if (ahc_is_paused(ahc)) {
+		paused = 1;
+	} else {
+		paused = 0;
+		ahc_pause(ahc);
+	}
+
+	saved_scbptr = ahc_inb(ahc, SCBPTR);
+	last_phase = ahc_inb(ahc, LASTPHASE);
+	printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
+	       "%s: Dumping Card State %s, at SEQADDR 0x%x\n",
+	       ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
+	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
+	if (paused)
+		printf("Card was paused\n");
+	printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
+	       ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
+	       ahc_inb(ahc, ARG_2));
+	printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
+	       ahc_inb(ahc, SCBPTR));
+	cur_col = 0;
+	if ((ahc->features & AHC_DT) != 0)
+		ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50);
+	ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50);
+	ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50);
+	ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50);
+	ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50);
+	ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50);
+	ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50);
+	ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50);
+	ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50);
+	ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50);
+	ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50);
+	ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50);
+	ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50);
+	ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50);
+	ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50);
+	ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50);
+	ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50);
+	ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50);
+	ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50);
+	if (cur_col != 0)
+		printf("\n");
+	printf("STACK:");
+	for (i = 0; i < STACK_SIZE; i++)
+	       printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8));
+	printf("\nSCB count = %d\n", ahc->scb_data->numscbs);
+	printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
+	printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
+	/* QINFIFO */
+	printf("QINFIFO entries: ");
+	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+		qinpos = ahc_inb(ahc, SNSCB_QOFF);
+		ahc_outb(ahc, SNSCB_QOFF, qinpos);
+	} else
+		qinpos = ahc_inb(ahc, QINPOS);
+	qintail = ahc->qinfifonext;
+	while (qinpos != qintail) {
+		printf("%d ", ahc->qinfifo[qinpos]);
+		qinpos++;
+	}
+	printf("\n");
+
+	printf("Waiting Queue entries: ");
+	scb_index = ahc_inb(ahc, WAITING_SCBH);
+	i = 0;
+	while (scb_index != SCB_LIST_NULL && i++ < 256) {
+		ahc_outb(ahc, SCBPTR, scb_index);
+		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
+		scb_index = ahc_inb(ahc, SCB_NEXT);
+	}
+	printf("\n");
+
+	printf("Disconnected Queue entries: ");
+	scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
+	i = 0;
+	while (scb_index != SCB_LIST_NULL && i++ < 256) {
+		ahc_outb(ahc, SCBPTR, scb_index);
+		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
+		scb_index = ahc_inb(ahc, SCB_NEXT);
+	}
+	printf("\n");
+		
+	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
+	printf("QOUTFIFO entries: ");
+	qoutpos = ahc->qoutfifonext;
+	i = 0;
+	while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
+		printf("%d ", ahc->qoutfifo[qoutpos]);
+		qoutpos++;
+	}
+	printf("\n");
+
+	printf("Sequencer Free SCB List: ");
+	scb_index = ahc_inb(ahc, FREE_SCBH);
+	i = 0;
+	while (scb_index != SCB_LIST_NULL && i++ < 256) {
+		ahc_outb(ahc, SCBPTR, scb_index);
+		printf("%d ", scb_index);
+		scb_index = ahc_inb(ahc, SCB_NEXT);
+	}
+	printf("\n");
+
+	printf("Sequencer SCB Info: ");
+	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
+		ahc_outb(ahc, SCBPTR, i);
+		cur_col = printf("\n%3d ", i);
+
+		ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60);
+		ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60);
+		ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60);
+		ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
+	}
+	printf("\n");
+
+	printf("Pending list: ");
+	i = 0;
+	LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
+		if (i++ > 256)
+			break;
+		cur_col = printf("\n%3d ", scb->hscb->tag);
+		ahc_scb_control_print(scb->hscb->control, &cur_col, 60);
+		ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60);
+		ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60);
+		if ((ahc->flags & AHC_PAGESCBS) == 0) {
+			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
+			printf("(");
+			ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL),
+					      &cur_col, 60);
+			ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
+			printf(")");
+		}
+	}
+	printf("\n");
+
+	printf("Kernel Free SCB list: ");
+	i = 0;
+	SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
+		if (i++ > 256)
+			break;
+		printf("%d ", scb->hscb->tag);
+	}
+	printf("\n");
+
+	maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
+	for (target = 0; target <= maxtarget; target++) {
+		untagged_q = &ahc->untagged_queues[target];
+		if (TAILQ_FIRST(untagged_q) == NULL)
+			continue;
+		printf("Untagged Q(%d): ", target);
+		i = 0;
+		TAILQ_FOREACH(scb, untagged_q, links.tqe) {
+			if (i++ > 256)
+				break;
+			printf("%d ", scb->hscb->tag);
+		}
+		printf("\n");
+	}
+
+	ahc_platform_dump_card_state(ahc);
+	printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
+	ahc_outb(ahc, SCBPTR, saved_scbptr);
+	if (paused == 0)
+		ahc_unpause(ahc);
+}
+
+/************************* Target Mode ****************************************/
+#ifdef AHC_TARGET_MODE
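+/*
+ * Translate a CCB's target and lun into the driver's tstate/lstate
+ * bookkeeping structures, mapping the wildcard target/lun pair onto
+ * the "black hole" lstate.  Returns CAM_REQ_CMP on success or an
+ * appropriate CAM status on failure.
+ */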
+cam_status
+ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
+		    struct ahc_tmode_tstate **tstate,
+		    struct ahc_tmode_lstate **lstate,
+		    int notfound_failure)
+{
+
+	if ((ahc->features & AHC_TARGETMODE) == 0)
+		return (CAM_REQ_INVALID);
+
+	/*
+	 * Handle the 'black hole' device that sucks up
+	 * requests to unattached luns on enabled targets.
+	 */
+	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
+	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
+		*tstate = NULL;
+		*lstate = ahc->black_hole;
+	} else {
+		u_int max_id;
+
+		max_id = (ahc->features & AHC_WIDE) ? 15 : 7;
+		if (ccb->ccb_h.target_id > max_id)
+			return (CAM_TID_INVALID);
+
+		if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
+			return (CAM_LUN_INVALID);
+
+		*tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
+		*lstate = NULL;
+		if (*tstate != NULL)
+			*lstate =
+			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
+	}
+
+	if (notfound_failure != 0 && *lstate == NULL)
+		return (CAM_PATH_INVALID);
+
+	return (CAM_REQ_CMP);
+}
+
+void
+ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
+{
+	struct	   ahc_tmode_tstate *tstate;
+	struct	   ahc_tmode_lstate *lstate;
+	struct	   ccb_en_lun *cel;
+	cam_status status;
+	u_long	   s;
+	u_int	   target;
+	u_int	   lun;
+	u_int	   target_mask;
+	u_int	   our_id;
+	int	   error;
+	char	   channel;
+
+	status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
+				     /*notfound_failure*/FALSE);
+
+	if (status != CAM_REQ_CMP) {
+		ccb->ccb_h.status = status;
+		return;
+	}
+
+	if (cam_sim_bus(sim) == 0)
+		our_id = ahc->our_id;
+	else
+		our_id = ahc->our_id_b;
+
+	if (ccb->ccb_h.target_id != our_id) {
+		/*
+		 * our_id represents our initiator ID, or
+		 * the ID of the first target to have an
+		 * enabled lun in target mode.  There are
+		 * two cases that may preclude enabling a
+		 * target id other than our_id.
+		 *
+		 *   o our_id is for an active initiator role.
+		 *     Since the hardware does not support
+		 *     reselections to the initiator role at
+		 *     anything other than our_id, and our_id
+		 *     is used by the hardware to indicate the
+		 *     ID to use for both select-out and
+		 *     reselect-out operations, the only target
+		 *     ID we can support in this mode is our_id.
+		 *
+		 *   o The multiple target ID (MULTI_TID) feature is
+		 *     not available and a previous target mode ID has
+		 *     been enabled.
+		 */
+		if ((ahc->features & AHC_MULTIROLE) != 0) {
+
+			if ((ahc->features & AHC_MULTI_TID) != 0
+		   	 && (ahc->flags & AHC_INITIATORROLE) != 0) {
+				/*
+				 * Only allow additional targets if
+				 * the initiator role is disabled.
+				 * The hardware cannot handle a re-select-in
+				 * on the initiator id during a re-select-out
+				 * on a different target id.
+				 */
+				status = CAM_TID_INVALID;
+			} else if ((ahc->flags & AHC_INITIATORROLE) != 0
+				|| ahc->enabled_luns > 0) {
+				/*
+				 * Only allow our target id to change
+				 * if the initiator role is not configured
+				 * and there are no enabled luns which
+				 * are attached to the currently registered
+				 * scsi id.
+				 */
+				status = CAM_TID_INVALID;
+			}
+		} else if ((ahc->features & AHC_MULTI_TID) == 0
+			&& ahc->enabled_luns > 0) {
+
+			status = CAM_TID_INVALID;
+		}
+	}
+
+	if (status != CAM_REQ_CMP) {
+		ccb->ccb_h.status = status;
+		return;
+	}
+
+	/*
+	 * We now have an id that is valid.
+	 * If we aren't in target mode, switch modes.
+	 */
+	if ((ahc->flags & AHC_TARGETROLE) == 0
+	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
+		u_long	 s;
+		ahc_flag saved_flags;
+
+		printf("Configuring Target Mode\n");
+		ahc_lock(ahc, &s);
+		if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
+			ccb->ccb_h.status = CAM_BUSY;
+			ahc_unlock(ahc, &s);
+			return;
+		}
+		saved_flags = ahc->flags;
+		ahc->flags |= AHC_TARGETROLE;
+		if ((ahc->features & AHC_MULTIROLE) == 0)
+			ahc->flags &= ~AHC_INITIATORROLE;
+		ahc_pause(ahc);
+		error = ahc_loadseq(ahc);
+		if (error != 0) {
+			/*
+			 * Restore original configuration and notify
+			 * the caller that we cannot support target mode.
+			 * Since the adapter started out in this
+			 * configuration, the firmware load will succeed,
+			 * so there is no point in checking ahc_loadseq's
+			 * return value.
+			 */
+			ahc->flags = saved_flags;
+			(void)ahc_loadseq(ahc);
+			ahc_restart(ahc);
+			ahc_unlock(ahc, &s);
+			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+			return;
+		}
+		ahc_restart(ahc);
+		ahc_unlock(ahc, &s);
+	}
+	cel = &ccb->cel;
+	target = ccb->ccb_h.target_id;
+	lun = ccb->ccb_h.target_lun;
+	channel = SIM_CHANNEL(ahc, sim);
+	target_mask = 0x01 << target;
+	if (channel == 'B')
+		target_mask <<= 8;
+
+	if (cel->enable != 0) {
+		u_int scsiseq;
+
+		/* Are we already enabled? */
+		if (lstate != NULL) {
+			xpt_print_path(ccb->ccb_h.path);
+			printf("Lun already enabled\n");
+			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
+			return;
+		}
+
+		if (cel->grp6_len != 0
+		 || cel->grp7_len != 0) {
+			/*
+			 * Don't (yet?) support vendor
+			 * specific commands.
+			 */
+			ccb->ccb_h.status = CAM_REQ_INVALID;
+			printf("Non-zero Group Codes\n");
+			return;
+		}
+
+		/*
+		 * Seems to be okay.
+		 * Setup our data structures.
+		 */
+		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
+			tstate = ahc_alloc_tstate(ahc, target, channel);
+			if (tstate == NULL) {
+				xpt_print_path(ccb->ccb_h.path);
+				printf("Couldn't allocate tstate\n");
+				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+				return;
+			}
+		}
+		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
+		if (lstate == NULL) {
+			xpt_print_path(ccb->ccb_h.path);
+			printf("Couldn't allocate lstate\n");
+			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+			return;
+		}
+		memset(lstate, 0, sizeof(*lstate));
+		status = xpt_create_path(&lstate->path, /*periph*/NULL,
+					 xpt_path_path_id(ccb->ccb_h.path),
+					 xpt_path_target_id(ccb->ccb_h.path),
+					 xpt_path_lun_id(ccb->ccb_h.path));
+		if (status != CAM_REQ_CMP) {
+			free(lstate, M_DEVBUF);
+			xpt_print_path(ccb->ccb_h.path);
+			printf("Couldn't allocate path\n");
+			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+			return;
+		}
+		SLIST_INIT(&lstate->accept_tios);
+		SLIST_INIT(&lstate->immed_notifies);
+		ahc_lock(ahc, &s);
+		ahc_pause(ahc);
+		if (target != CAM_TARGET_WILDCARD) {
+			tstate->enabled_luns[lun] = lstate;
+			ahc->enabled_luns++;
+
+			if ((ahc->features & AHC_MULTI_TID) != 0) {
+				u_int targid_mask;
+
+				targid_mask = ahc_inb(ahc, TARGID)
+					    | (ahc_inb(ahc, TARGID + 1) << 8);
+
+				targid_mask |= target_mask;
+				ahc_outb(ahc, TARGID, targid_mask);
+				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
+				
+				ahc_update_scsiid(ahc, targid_mask);
+			} else {
+				u_int our_id;
+				char  channel;
+
+				channel = SIM_CHANNEL(ahc, sim);
+				our_id = SIM_SCSI_ID(ahc, sim);
+
+				/*
+				 * This can only happen if selections
+				 * are not enabled
+				 */
+				if (target != our_id) {
+					u_int sblkctl;
+					char  cur_channel;
+					int   swap;
+
+					sblkctl = ahc_inb(ahc, SBLKCTL);
+					cur_channel = (sblkctl & SELBUSB)
+						    ? 'B' : 'A';
+					if ((ahc->features & AHC_TWIN) == 0)
+						cur_channel = 'A';
+					swap = cur_channel != channel;
+					if (channel == 'A')
+						ahc->our_id = target;
+					else
+						ahc->our_id_b = target;
+
+					if (swap)
+						ahc_outb(ahc, SBLKCTL,
+							 sblkctl ^ SELBUSB);
+
+					ahc_outb(ahc, SCSIID, target);
+
+					if (swap)
+						ahc_outb(ahc, SBLKCTL, sblkctl);
+				}
+			}
+		} else
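+		/*
+		 * A wildcard lun enable registers the "black hole"
+		 * lstate used to accept commands for any lun that has
+		 * not been explicitly enabled.
+		 */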
+			ahc->black_hole = lstate;
+		/* Allow select-in operations */
+		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
+			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
+			scsiseq |= ENSELI;
+			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
+			scsiseq = ahc_inb(ahc, SCSISEQ);
+			scsiseq |= ENSELI;
+			ahc_outb(ahc, SCSISEQ, scsiseq);
+		}
+		ahc_unpause(ahc);
+		ahc_unlock(ahc, &s);
+		ccb->ccb_h.status = CAM_REQ_CMP;
+		xpt_print_path(ccb->ccb_h.path);
+		printf("Lun now enabled for target mode\n");
+	} else {
+		struct scb *scb;
+		int i, empty;
+
+		if (lstate == NULL) {
+			ccb->ccb_h.status = CAM_LUN_INVALID;
+			return;
+		}
+
+		ahc_lock(ahc, &s);
+		
+		ccb->ccb_h.status = CAM_REQ_CMP;
+		LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
+			struct ccb_hdr *ccbh;
+
+			ccbh = &scb->io_ctx->ccb_h;
+			if (ccbh->func_code == XPT_CONT_TARGET_IO
+			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
+				printf("CTIO pending\n");
+				ccb->ccb_h.status = CAM_REQ_INVALID;
+				ahc_unlock(ahc, &s);
+				return;
+			}
+		}
+
+		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
+			printf("ATIOs pending\n");
+			ccb->ccb_h.status = CAM_REQ_INVALID;
+		}
+
+		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
+			printf("INOTs pending\n");
+			ccb->ccb_h.status = CAM_REQ_INVALID;
+		}
+
+		if (ccb->ccb_h.status != CAM_REQ_CMP) {
+			ahc_unlock(ahc, &s);
+			return;
+		}
+
+		xpt_print_path(ccb->ccb_h.path);
+		printf("Target mode disabled\n");
+		xpt_free_path(lstate->path);
+		free(lstate, M_DEVBUF);
+
+		ahc_pause(ahc);
+		/* Can we clean up the target too? */
+		if (target != CAM_TARGET_WILDCARD) {
+			tstate->enabled_luns[lun] = NULL;
+			ahc->enabled_luns--;
+			for (empty = 1, i = 0; i < 8; i++)
+				if (tstate->enabled_luns[i] != NULL) {
+					empty = 0;
+					break;
+				}
+
+			if (empty) {
+				ahc_free_tstate(ahc, target, channel,
+						/*force*/FALSE);
+				if (ahc->features & AHC_MULTI_TID) {
+					u_int targid_mask;
+
+					targid_mask = ahc_inb(ahc, TARGID)
+						    | (ahc_inb(ahc, TARGID + 1)
+						       << 8);
+
+					targid_mask &= ~target_mask;
+					ahc_outb(ahc, TARGID, targid_mask);
+					ahc_outb(ahc, TARGID+1,
+					 	 (targid_mask >> 8));
+					ahc_update_scsiid(ahc, targid_mask);
+				}
+			}
+		} else {
+
+			ahc->black_hole = NULL;
+
+			/*
+			 * We can't allow selections without
+			 * our black hole device.
+			 */
+			empty = TRUE;
+		}
+		if (ahc->enabled_luns == 0) {
+			/* Disallow select-in */
+			u_int scsiseq;
+
+			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
+			scsiseq &= ~ENSELI;
+			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
+			scsiseq = ahc_inb(ahc, SCSISEQ);
+			scsiseq &= ~ENSELI;
+			ahc_outb(ahc, SCSISEQ, scsiseq);
+
+			if ((ahc->features & AHC_MULTIROLE) == 0) {
+				printf("Configuring Initiator Mode\n");
+				ahc->flags &= ~AHC_TARGETROLE;
+				ahc->flags |= AHC_INITIATORROLE;
+				/*
+				 * Returning to a configuration that
+				 * fit previously will always succeed.
+				 */
+				(void)ahc_loadseq(ahc);
+				ahc_restart(ahc);
+				/*
+				 * Unpaused.  The extra unpause
+				 * that follows is harmless.
+				 */
+			}
+		}
+		ahc_unpause(ahc);
+		ahc_unlock(ahc, &s);
+	}
+}
+
+static void
+ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
+{
+	u_int scsiid_mask;
+	u_int scsiid;
+
+	if ((ahc->features & AHC_MULTI_TID) == 0)
+		panic("ahc_update_scsiid called on non-multitid unit\n");
+
+	/*
+	 * Since we will rely on the TARGID mask
+	 * for selection enables, ensure that OID
+	 * in SCSIID is not set to some other ID
+	 * that we don't want to allow selections on.
+	 */
+	if ((ahc->features & AHC_ULTRA2) != 0)
+		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
+	else
+		scsiid = ahc_inb(ahc, SCSIID);
+	scsiid_mask = 0x1 << (scsiid & OID);
+	if ((targid_mask & scsiid_mask) == 0) {
+		u_int our_id;
+
+		/* ffs counts from 1 */
+		our_id = ffs(targid_mask);
+		if (our_id == 0)
+			our_id = ahc->our_id;
+		else
+			our_id--;
+		scsiid &= TID;
+		scsiid |= our_id;
+	}
+	if ((ahc->features & AHC_ULTRA2) != 0)
+		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
+	else
+		ahc_outb(ahc, SCSIID, scsiid);
+}
+
+void
+ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
+{
+	struct target_cmd *cmd;
+
+	/*
+	 * If the card supports auto-access pause,
+	 * we can access the card directly regardless
+	 * of whether it is paused or not.
+	 */
+	if ((ahc->features & AHC_AUTOPAUSE) != 0)
+		paused = TRUE;
+
+	ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD);
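+	/*
+	 * Consume valid entries from the circular incoming target
+	 * command fifo.
+	 */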
+	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
+
+		/*
+		 * Only advance through the queue if we
+		 * have the resources to process the command.
+		 */
+		if (ahc_handle_target_cmd(ahc, cmd) != 0)
+			break;
+
+		cmd->cmd_valid = 0;
+		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+				ahc->shared_data_dmamap,
+				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
+				sizeof(struct target_cmd),
+				BUS_DMASYNC_PREREAD);
+		ahc->tqinfifonext++;
+
+		/*
+		 * Lazily update our position in the target mode incoming
+		 * command queue as seen by the sequencer.
+		 */
+		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
+			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
+				u_int hs_mailbox;
+
+				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
+				hs_mailbox &= ~HOST_TQINPOS;
+				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
+				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
+			} else {
+				if (!paused)
+					ahc_pause(ahc);	
+				ahc_outb(ahc, KERNEL_TQINPOS,
+					 ahc->tqinfifonext & HOST_TQINPOS);
+				if (!paused)
+					ahc_unpause(ahc);
+			}
+		}
+	}
+}
+
+static int
+ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
+{
+	struct	  ahc_tmode_tstate *tstate;
+	struct	  ahc_tmode_lstate *lstate;
+	struct	  ccb_accept_tio *atio;
+	uint8_t *byte;
+	int	  initiator;
+	int	  target;
+	int	  lun;
+
+	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
+	target = SCSIID_OUR_ID(cmd->scsiid);
+	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);
+
+	byte = cmd->bytes;
+	tstate = ahc->enabled_targets[target];
+	lstate = NULL;
+	if (tstate != NULL)
+		lstate = tstate->enabled_luns[lun];
+
+	/*
+	 * Commands for disabled luns go to the black hole driver.
+	 */
+	if (lstate == NULL)
+		lstate = ahc->black_hole;
+
+	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
+	if (atio == NULL) {
+		ahc->flags |= AHC_TQINFIFO_BLOCKED;
+		/*
+		 * Wait for more ATIOs from the peripheral driver for this lun.
+		 */
+		if (bootverbose)
+			printf("%s: ATIOs exhausted\n", ahc_name(ahc));
+		return (1);
+	} else
+		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
+#if 0
+	printf("Incoming command from %d for %d:%d%s\n",
+	       initiator, target, lun,
+	       lstate == ahc->black_hole ? "(Black Holed)" : "");
+#endif
+	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
+
+	if (lstate == ahc->black_hole) {
+		/* Fill in the wildcards */
+		atio->ccb_h.target_id = target;
+		atio->ccb_h.target_lun = lun;
+	}
+
+	/*
+	 * Package it up and send it off to
+	 * whomever has this lun enabled.
+	 */
+	atio->sense_len = 0;
+	atio->init_id = initiator;
+	if (byte[0] != 0xFF) {
+		/* Tag was included */
+		atio->tag_action = *byte++;
+		atio->tag_id = *byte++;
+		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
+	} else {
+		atio->ccb_h.flags = 0;
+	}
+	byte++;
+
+	/* Okay.  Now determine the cdb size based on the command code */
+	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
+	case 0:
+		/* Group 0: 6-byte CDBs */
+		atio->cdb_len = 6;
+		break;
+	case 1:
+	case 2:
+		/* Groups 1 and 2: 10-byte CDBs */
+		atio->cdb_len = 10;
+		break;
+	case 4:
+		/* Group 4: 16-byte CDBs */
+		atio->cdb_len = 16;
+		break;
+	case 5:
+		/* Group 5: 12-byte CDBs */
+		atio->cdb_len = 12;
+		break;
+	case 3:
+	default:
+		/* Only copy the opcode. */
+		atio->cdb_len = 1;
+		printf("Reserved or VU command code type encountered\n");
+		break;
+	}
+	
+	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
+
+	atio->ccb_h.status |= CAM_CDB_RECVD;
+
+	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
+		/*
+		 * We weren't allowed to disconnect.
+		 * We're hanging on the bus until a
+		 * continue target I/O comes in response
+		 * to this accept tio.
+		 */
+#if 0
+		printf("Received Immediate Command %d:%d:%d - %p\n",
+		       initiator, target, lun, ahc->pending_device);
+#endif
+		ahc->pending_device = lstate;
+		ahc_freeze_ccb((union ccb *)atio);
+		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
+	}
+	xpt_done((union ccb*)atio);
+	return (0);
+}
+
+#endif
diff --git a/drivers/scsi/aic7xxx/aic7xxx_inline.h b/drivers/scsi/aic7xxx/aic7xxx_inline.h
new file mode 100644
index 0000000..2cc8a17
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_inline.h
@@ -0,0 +1,649 @@
+/*
+ * Inline routines shareable across OS platforms.
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_inline.h#43 $
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _AIC7XXX_INLINE_H_
+#define _AIC7XXX_INLINE_H_
+
+/************************* Sequencer Execution Control ************************/
+static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc);
+static __inline int  ahc_is_paused(struct ahc_softc *ahc);
+static __inline void ahc_pause(struct ahc_softc *ahc);
+static __inline void ahc_unpause(struct ahc_softc *ahc);
+
+/*
+ * Work around any chip bugs related to halting sequencer execution.
+ * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
+ * reading a register that will set this signal and deassert it.
+ * Without this workaround, if the chip is paused, by an interrupt or
+ * manual pause while accessing scb ram, accesses to certain registers
+ * will hang the system (infinite pci retries).
+ */
+static __inline void
+ahc_pause_bug_fix(struct ahc_softc *ahc)
+{
+	if ((ahc->features & AHC_ULTRA2) != 0)
+		(void)ahc_inb(ahc, CCSCBCTL);
+}
+
+/*
+ * Determine whether the sequencer has halted code execution.
+ * Returns non-zero status if the sequencer is stopped.
+ */
+static __inline int
+ahc_is_paused(struct ahc_softc *ahc)
+{
+	return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
+}
+
+/*
+ * Request that the sequencer stop and wait, indefinitely, for it
+ * to stop.  The sequencer will only acknowledge that it is paused
+ * once it has reached an instruction boundary and PAUSEDIS is
+ * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
+ * for critical sections.
+ */
+static __inline void
+ahc_pause(struct ahc_softc *ahc)
+{
+	ahc_outb(ahc, HCNTRL, ahc->pause);
+
+	/*
+	 * Since the sequencer can disable pausing in a critical section, we
+	 * must loop until it actually stops.
+	 */
+	while (ahc_is_paused(ahc) == 0)
+		;
+
+	ahc_pause_bug_fix(ahc);
+}
+
+/*
+ * Allow the sequencer to continue program execution.
+ * We check here to ensure that no additional interrupt
+ * sources that would cause the sequencer to halt have been
+ * asserted.  If, for example, a SCSI bus reset is detected
+ * while we are fielding a different, pausing, interrupt type,
+ * we don't want to release the sequencer before going back
+ * into our interrupt handler and dealing with this new
+ * condition.
+ */
+static __inline void
+ahc_unpause(struct ahc_softc *ahc)
+{
+	if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
+		ahc_outb(ahc, HCNTRL, ahc->unpause);
+}
+
+/*********************** Untagged Transaction Routines ************************/
+static __inline void	ahc_freeze_untagged_queues(struct ahc_softc *ahc);
+static __inline void	ahc_release_untagged_queues(struct ahc_softc *ahc);
+
+/*
+ * Block our completion routine from starting the next untagged
+ * transaction for this target or target lun.
+ */
+static __inline void
+ahc_freeze_untagged_queues(struct ahc_softc *ahc)
+{
+	if ((ahc->flags & AHC_SCB_BTT) == 0)
+		ahc->untagged_queue_lock++;
+}
+
+/*
+ * Allow the next untagged transaction for this target or target lun
+ * to be executed.  We use a counting semaphore to allow the lock
+ * to be acquired recursively.  Once the count drops to zero, the
+ * transaction queues will be run.
+ */
+static __inline void
+ahc_release_untagged_queues(struct ahc_softc *ahc)
+{
+	if ((ahc->flags & AHC_SCB_BTT) == 0) {
+		ahc->untagged_queue_lock--;
+		if (ahc->untagged_queue_lock == 0)
+			ahc_run_untagged_queues(ahc);
+	}
+}
+
+/************************** Memory mapping routines ***************************/
+static __inline struct ahc_dma_seg *
+			ahc_sg_bus_to_virt(struct scb *scb,
+					   uint32_t sg_busaddr);
+static __inline uint32_t
+			ahc_sg_virt_to_bus(struct scb *scb,
+					   struct ahc_dma_seg *sg);
+static __inline uint32_t
+			ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index);
+static __inline void	ahc_sync_scb(struct ahc_softc *ahc,
+				     struct scb *scb, int op);
+static __inline void	ahc_sync_sglist(struct ahc_softc *ahc,
+					struct scb *scb, int op);
+static __inline uint32_t
+			ahc_targetcmd_offset(struct ahc_softc *ahc,
+					     u_int index);
+
+static __inline struct ahc_dma_seg *
+ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
+{
+	int sg_index;
+
+	sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
+	/* sg_list_phys points to entry 1, not 0 */
+	sg_index++;
+
+	return (&scb->sg_list[sg_index]);
+}
+
+static __inline uint32_t
+ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
+{
+	int sg_index;
+
+	/* sg_list_phys points to entry 1, not 0 */
+	sg_index = sg - &scb->sg_list[1];
+
+	return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
+}
+
+static __inline uint32_t
+ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
+{
+	return (ahc->scb_data->hscb_busaddr
+		+ (sizeof(struct hardware_scb) * index));
+}
+
+static __inline void
+ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
+{
+	ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
+			ahc->scb_data->hscb_dmamap,
+			/*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
+			/*len*/sizeof(*scb->hscb), op);
+}
+
+static __inline void
+ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
+{
+	if (scb->sg_count == 0)
+		return;
+
+	ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
+			/*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
+				* sizeof(struct ahc_dma_seg),
+			/*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
+}
+
+static __inline uint32_t
+ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
+{
+	return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
+}
+
+/******************************** Debugging ***********************************/
+static __inline char *ahc_name(struct ahc_softc *ahc);
+
+static __inline char *
+ahc_name(struct ahc_softc *ahc)
+{
+	return (ahc->name);
+}
+
+/********************** Miscellaneous Support Functions ***********************/
+
+static __inline void	ahc_update_residual(struct ahc_softc *ahc,
+					    struct scb *scb);
+static __inline struct ahc_initiator_tinfo *
+			ahc_fetch_transinfo(struct ahc_softc *ahc,
+					    char channel, u_int our_id,
+					    u_int remote_id,
+					    struct ahc_tmode_tstate **tstate);
+static __inline uint16_t
+			ahc_inw(struct ahc_softc *ahc, u_int port);
+static __inline void	ahc_outw(struct ahc_softc *ahc, u_int port,
+				 u_int value);
+static __inline uint32_t
+			ahc_inl(struct ahc_softc *ahc, u_int port);
+static __inline void	ahc_outl(struct ahc_softc *ahc, u_int port,
+				 uint32_t value);
+static __inline uint64_t
+			ahc_inq(struct ahc_softc *ahc, u_int port);
+static __inline void	ahc_outq(struct ahc_softc *ahc, u_int port,
+				 uint64_t value);
+static __inline struct scb*
+			ahc_get_scb(struct ahc_softc *ahc);
+static __inline void	ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
+static __inline void	ahc_swap_with_next_hscb(struct ahc_softc *ahc,
+						struct scb *scb);
+static __inline void	ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
+static __inline struct scsi_sense_data *
+			ahc_get_sense_buf(struct ahc_softc *ahc,
+					  struct scb *scb);
+static __inline uint32_t
+			ahc_get_sense_bufaddr(struct ahc_softc *ahc,
+					      struct scb *scb);
+
+/*
+ * Determine whether the sequencer reported a residual
+ * for this SCB/transaction.
+ */
+static __inline void
+ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
+{
+	uint32_t sgptr;
+
+	sgptr = ahc_le32toh(scb->hscb->sgptr);
+	if ((sgptr & SG_RESID_VALID) != 0)
+		ahc_calc_residual(ahc, scb);
+}
+
+/*
+ * Return pointers to the transfer negotiation information
+ * for the specified our_id/remote_id pair.
+ */
+static __inline struct ahc_initiator_tinfo *
+ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
+		    u_int remote_id, struct ahc_tmode_tstate **tstate)
+{
+	/*
+	 * Transfer data structures are stored from the perspective
+	 * of the target role.  Since the parameters for a connection
+	 * in the initiator role to a given target are the same as
+	 * when the roles are reversed, we pretend we are the target.
+	 */
+	if (channel == 'B')
+		our_id += 8;
+	*tstate = ahc->enabled_targets[our_id];
+	return (&(*tstate)->transinfo[remote_id]);
+}
+
+static __inline uint16_t
+ahc_inw(struct ahc_softc *ahc, u_int port)
+{
+	return ((ahc_inb(ahc, port+1) << 8) | ahc_inb(ahc, port));
+}
+
+static __inline void
+ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
+{
+	ahc_outb(ahc, port, value & 0xFF);
+	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
+}
+
+static __inline uint32_t
+ahc_inl(struct ahc_softc *ahc, u_int port)
+{
+	return ((ahc_inb(ahc, port))
+	      | (ahc_inb(ahc, port+1) << 8)
+	      | (ahc_inb(ahc, port+2) << 16)
+	      | (ahc_inb(ahc, port+3) << 24));
+}
+
+static __inline void
+ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
+{
+	ahc_outb(ahc, port, (value) & 0xFF);
+	ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
+	ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
+	ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
+}
+
+static __inline uint64_t
+ahc_inq(struct ahc_softc *ahc, u_int port)
+{
+	return ((ahc_inb(ahc, port))
+	      | (ahc_inb(ahc, port+1) << 8)
+	      | (ahc_inb(ahc, port+2) << 16)
+	      | (ahc_inb(ahc, port+3) << 24)
+	      | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
+	      | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
+	      | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
+	      | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
+}
+
+static __inline void
+ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
+{
+	ahc_outb(ahc, port, value & 0xFF);
+	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
+	ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
+	ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
+	ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
+	ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
+	ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
+	ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
+}
+
+/*
+ * Get a free scb. If there are none, see if we can allocate a new SCB.
+ */
+static __inline struct scb *
+ahc_get_scb(struct ahc_softc *ahc)
+{
+	struct scb *scb;
+
+	if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
+		ahc_alloc_scbs(ahc);
+		scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
+		if (scb == NULL)
+			return (NULL);
+	}
+	SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
+	return (scb);
+}
+
+/*
+ * Return an SCB resource to the free list.
+ */
+static __inline void
+ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
+{       
+	struct hardware_scb *hscb;
+
+	hscb = scb->hscb;
+	/* Clean up for the next user */
+	ahc->scb_data->scbindex[hscb->tag] = NULL;
+	scb->flags = SCB_FREE;
+	hscb->control = 0;
+
+	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
+
+	/* Notify the OSM that a resource is now available. */
+	ahc_platform_scb_free(ahc, scb);
+}
+
+static __inline struct scb *
+ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
+{
+	struct scb* scb;
+
+	scb = ahc->scb_data->scbindex[tag];
+	if (scb != NULL)
+		ahc_sync_scb(ahc, scb,
+			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+	return (scb);
+}
+
+static __inline void
+ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
+{
+	struct hardware_scb *q_hscb;
+	u_int  saved_tag;
+
+	/*
+	 * Our queuing method is a bit tricky.  The card
+	 * knows in advance which HSCB to download, and we
+	 * can't disappoint it.  To achieve this, the next
+	 * SCB to download is saved off in ahc->next_queued_scb.
+	 * When we are called to queue "an arbitrary scb",
+	 * we copy the contents of the incoming HSCB to the one
+	 * the sequencer knows about, swap HSCB pointers and
+	 * finally assign the SCB to the tag indexed location
+	 * in the scb_array.  This makes sure that we can still
+	 * locate the correct SCB by SCB_TAG.
+	 */
+	q_hscb = ahc->next_queued_scb->hscb;
+	saved_tag = q_hscb->tag;
+	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
+	if ((scb->flags & SCB_CDB32_PTR) != 0) {
+		q_hscb->shared_data.cdb_ptr =
+		    ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
+			      + offsetof(struct hardware_scb, cdb32));
+	}
+	q_hscb->tag = saved_tag;
+	q_hscb->next = scb->hscb->tag;
+
+	/* Now swap HSCB pointers. */
+	ahc->next_queued_scb->hscb = scb->hscb;
+	scb->hscb = q_hscb;
+
+	/* Now define the mapping from tag to SCB in the scbindex */
+	ahc->scb_data->scbindex[scb->hscb->tag] = scb;
+}
+
+/*
+ * Tell the sequencer about a new transaction to execute.
+ */
+static __inline void
+ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
+{
+	ahc_swap_with_next_hscb(ahc, scb);
+
+	if (scb->hscb->tag == SCB_LIST_NULL
+	 || scb->hscb->next == SCB_LIST_NULL)
+		panic("Attempt to queue invalid SCB tag %x:%x\n",
+		      scb->hscb->tag, scb->hscb->next);
+
+	/*
+	 * Setup data "oddness".
+	 */
+	scb->hscb->lun &= LID;
+	if (ahc_get_transfer_length(scb) & 0x1)
+		scb->hscb->lun |= SCB_XFERLEN_ODD;
+
+	/*
+	 * Keep a history of SCBs we've downloaded in the qinfifo.
+	 */
+	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
+
+	/*
+	 * Make sure our data is consistent from the
+	 * perspective of the adapter.
+	 */
+	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+	/* Tell the adapter about the newly queued SCB */
+	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
+	} else {
+		if ((ahc->features & AHC_AUTOPAUSE) == 0)
+			ahc_pause(ahc);
+		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
+		if ((ahc->features & AHC_AUTOPAUSE) == 0)
+			ahc_unpause(ahc);
+	}
+}
+
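+/*
+ * Sense buffers are allocated in an array parallel to the SCB array,
+ * so both the buffer and its bus address are derived from the SCB's
+ * index into scbarray.
+ */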
+static __inline struct scsi_sense_data *
+ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
+{
+	int offset;
+
+	offset = scb - ahc->scb_data->scbarray;
+	return (&ahc->scb_data->sense[offset]);
+}
+
+static __inline uint32_t
+ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
+{
+	int offset;
+
+	offset = scb - ahc->scb_data->scbarray;
+	return (ahc->scb_data->sense_busaddr
+	      + (offset * sizeof(struct scsi_sense_data)));
+}
+
+/************************** Interrupt Processing ******************************/
+static __inline void	ahc_sync_qoutfifo(struct ahc_softc *ahc, int op);
+static __inline void	ahc_sync_tqinfifo(struct ahc_softc *ahc, int op);
+static __inline u_int	ahc_check_cmdcmpltqueues(struct ahc_softc *ahc);
+static __inline int	ahc_intr(struct ahc_softc *ahc);
+
+static __inline void
+ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
+{
+	ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
+			/*offset*/0, /*len*/256, op);
+}
+
+static __inline void
+ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
+{
+#ifdef AHC_TARGET_MODE
+	if ((ahc->flags & AHC_TARGETROLE) != 0) {
+		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+				ahc->shared_data_dmamap,
+				ahc_targetcmd_offset(ahc, 0),
+				sizeof(struct target_cmd) * AHC_TMODE_CMDS,
+				op);
+	}
+#endif
+}
+
+/*
+ * See if the firmware has posted any completed commands
+ * into our in-core command complete fifos.
+ */
+#define AHC_RUN_QOUTFIFO 0x1
+#define AHC_RUN_TQINFIFO 0x2
+static __inline u_int
+ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
+{
+	u_int retval;
+
+	retval = 0;
+	ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
+			/*offset*/ahc->qoutfifonext, /*len*/1,
+			BUS_DMASYNC_POSTREAD);
+	if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
+		retval |= AHC_RUN_QOUTFIFO;
+#ifdef AHC_TARGET_MODE
+	if ((ahc->flags & AHC_TARGETROLE) != 0
+	 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
+		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+				ahc->shared_data_dmamap,
+				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
+				/*len*/sizeof(struct target_cmd),
+				BUS_DMASYNC_POSTREAD);
+		if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
+			retval |= AHC_RUN_TQINFIFO;
+	}
+#endif
+	return (retval);
+}
+
+/*
+ * Catch an interrupt from the adapter
+ */
+static __inline int
+ahc_intr(struct ahc_softc *ahc)
+{
+	u_int	intstat;
+
+	if ((ahc->pause & INTEN) == 0) {
+		/*
+		 * Our interrupt is not enabled on the chip
+		 * and may be disabled for re-entrancy reasons,
+		 * so just return.  This is likely just a shared
+		 * interrupt.
+		 */
+		return (0);
+	}
+	/*
+	 * Instead of directly reading the interrupt status register,
+	 * infer the cause of the interrupt by checking our in-core
+	 * completion queues.  This avoids a costly PCI bus read in
+	 * most cases.
+	 */
+	if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
+	 && (ahc_check_cmdcmpltqueues(ahc) != 0))
+		intstat = CMDCMPLT;
+	else {
+		intstat = ahc_inb(ahc, INTSTAT);
+	}
+
+	if ((intstat & INT_PEND) == 0) {
+#if AHC_PCI_CONFIG > 0
+		if (ahc->unsolicited_ints > 500) {
+			ahc->unsolicited_ints = 0;
+			if ((ahc->chip & AHC_PCI) != 0
+			 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
+				ahc->bus_intr(ahc);
+		}
+#endif
+		ahc->unsolicited_ints++;
+		return (0);
+	}
+	ahc->unsolicited_ints = 0;
+
+	if (intstat & CMDCMPLT) {
+		ahc_outb(ahc, CLRINT, CLRCMDINT);
+
+		/*
+		 * Ensure that the chip sees that we've cleared
+		 * this interrupt before we walk the output fifo.
+		 * Otherwise, we may, due to posted bus writes,
+		 * clear the interrupt after we finish the scan,
+		 * and after the sequencer has added new entries
+		 * and asserted the interrupt again.
+		 */
+		ahc_flush_device_writes(ahc);
+		ahc_run_qoutfifo(ahc);
+#ifdef AHC_TARGET_MODE
+		if ((ahc->flags & AHC_TARGETROLE) != 0)
+			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
+#endif
+	}
+
+	/*
+	 * Handle statuses that may invalidate our cached
+	 * copy of INTSTAT separately.
+	 */
+	if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
+		/* Hot eject.  Do nothing */
+	} else if (intstat & BRKADRINT) {
+		ahc_handle_brkadrint(ahc);
+	} else if ((intstat & (SEQINT|SCSIINT)) != 0) {
+
+		ahc_pause_bug_fix(ahc);
+
+		if ((intstat & SEQINT) != 0)
+			ahc_handle_seqint(ahc, intstat);
+
+		if ((intstat & SCSIINT) != 0)
+			ahc_handle_scsiint(ahc, intstat);
+	}
+	return (1);
+}
+
+#endif  /* _AIC7XXX_INLINE_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
new file mode 100644
index 0000000..031c6aa
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -0,0 +1,5043 @@
+/*
+ * Adaptec AIC7xxx device driver for Linux.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.c#235 $
+ *
+ * Copyright (c) 1994 John Aycock
+ *   The University of Calgary Department of Computer Science.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F
+ * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA
+ * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide,
+ * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux,
+ * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file
+ * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual,
+ * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the
+ * ANSI SCSI-2 specification (draft 10c), ...
+ *
+ * --------------------------------------------------------------------------
+ *
+ *  Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org):
+ *
+ *  Substantially modified to include support for wide and twin bus
+ *  adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes,
+ *  SCB paging, and other rework of the code.
+ *
+ * --------------------------------------------------------------------------
+ * Copyright (c) 1994-2000 Justin T. Gibbs.
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ *---------------------------------------------------------------------------
+ *
+ *  Thanks also go to (in alphabetical order) the following:
+ *
+ *    Rory Bolt     - Sequencer bug fixes
+ *    Jay Estabrook - Initial DEC Alpha support
+ *    Doug Ledford  - Much needed abort/reset bug fixes
+ *    Kai Makisara  - DMAing of SCBs
+ *
+ *  A Boot time option was also added for not resetting the scsi bus.
+ *
+ *    Form:  aic7xxx=extended
+ *           aic7xxx=no_reset
+ *           aic7xxx=verbose
+ *
+ *  Daniel M. Eischen, deischen@iworks.InterWorks.org, 1/23/97
+ *
+ *  Id: aic7xxx.c,v 4.1 1997/06/12 08:23:42 deang Exp
+ */
+
+/*
+ * Further driver modifications made by Doug Ledford <dledford@redhat.com>
+ *
+ * Copyright (c) 1997-1999 Doug Ledford
+ *
+ * These changes are released under the same licensing terms as the FreeBSD
+ * driver written by Justin Gibbs.  Please see his Copyright notice above
+ * for the exact terms and conditions covering my changes as well as the
+ * warranty statement.
+ *
+ * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include
+ * but are not limited to:
+ *
+ *  1: Import of the latest FreeBSD sequencer code for this driver
+ *  2: Modification of kernel code to accommodate different sequencer semantics
+ *  3: Extensive changes throughout kernel portion of driver to improve
+ *     abort/reset processing and error handling
+ *  4: Other work contributed by various people on the Internet
+ *  5: Changes to printk information and verbosity selection code
+ *  6: General reliability related changes, especially in IRQ management
+ *  7: Modifications to the default probe/attach order for supported cards
+ *  8: SMP friendliness has been improved
+ *
+ */
+
+#include "aic7xxx_osm.h"
+#include "aic7xxx_inline.h"
+#include <scsi/scsicam.h>
+
+/*
+ * Include aiclib.c as part of our
+ * "module dependencies are hard" work around.
+ */
+#include "aiclib.c"
+
+#include <linux/init.h>		/* __setup */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+#include "sd.h"			/* For geometry detection */
+#endif
+
+#include <linux/mm.h>		/* For fetching system memory size */
+#include <linux/blkdev.h>		/* For block_size() */
+#include <linux/delay.h>	/* For ssleep/msleep */
+
+/*
+ * Lock protecting manipulation of the ahc softc list.
+ */
+spinlock_t ahc_list_spinlock;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+/* For dynamic sglist size calculation. */
+u_int ahc_linux_nseg;
+#endif
+
+/*
+ * Set this to the delay in seconds after SCSI bus reset.
+ * Note, we honor this only for the initial bus reset.
+ * The scsi error recovery code performs its own bus settle
+ * delay handling for error recovery actions.
+ */
+#ifdef CONFIG_AIC7XXX_RESET_DELAY_MS
+#define AIC7XXX_RESET_DELAY CONFIG_AIC7XXX_RESET_DELAY_MS
+#else
+#define AIC7XXX_RESET_DELAY 5000
+#endif
+
+/*
+ * Control collection of SCSI transfer statistics for the /proc filesystem.
+ *
+ * NOTE: Do NOT enable this when running on kernel versions 1.2.x and below.
+ * NOTE: This does affect performance since it has to maintain statistics.
+ */
+#ifdef CONFIG_AIC7XXX_PROC_STATS
+#define AIC7XXX_PROC_STATS
+#endif
+
+/*
+ * To change the default number of tagged transactions allowed per-device,
+ * add a line to the lilo.conf file like:
+ * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
+ * which will result in the first four devices on the first two
+ * controllers being set to a tagged queue depth of 32.
+ *
+ * The tag_commands is an array of 16 to allow for wide and twin adapters.
+ * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
+ * for channel 1.
+ */
+typedef struct {
+	uint8_t tag_commands[16];	/* Allow for wide/twin adapters. */
+} adapter_tag_info_t;
+
+/*
+ * Modify this as you see fit for your system.
+ *
+ * 0			tagged queuing disabled
+ * 1 <= n <= 253	n == max tags ever dispatched.
+ *
+ * The driver will throttle the number of commands dispatched to a
+ * device if it returns queue full.  For devices with a fixed maximum
+ * queue depth, the driver will eventually determine this depth and
+ * lock it in (a console message is printed to indicate that a lock
+ * has occurred).  On some devices, queue full is returned for a temporary
+ * resource shortage.  These devices will return queue full at varying
+ * depths.  The driver will throttle back when the queue fulls occur and
+ * attempt to slowly increase the depth over time as the device recovers
+ * from the resource shortage.
+ *
+ * In this example, the first line will disable tagged queueing for all
+ * the devices on the first probed aic7xxx adapter.
+ *
+ * The second line enables tagged queueing with 4 commands/LUN for IDs
+ * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
+ * driver to attempt to use up to 64 tags for ID 1.
+ *
+ * The third line is the same as the first line.
+ *
+ * The fourth line disables tagged queueing for devices 0 and 3.  It
+ * enables tagged queueing for the other IDs, with 16 commands/LUN
+ * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
+ * IDs 2, 5-7, and 9-15.
+ */
+
+/*
+ * NOTE: The below structure is for reference only, the actual structure
+ *       to modify in order to change things is just below this comment block.
+adapter_tag_info_t aic7xxx_tag_info[] =
+{
+	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+	{{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}},
+	{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+	{{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
+};
+*/
+
+#ifdef CONFIG_AIC7XXX_CMDS_PER_DEVICE
+#define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_CMDS_PER_DEVICE
+#else
+#define AIC7XXX_CMDS_PER_DEVICE AHC_MAX_QUEUE
+#endif
+
+#define AIC7XXX_CONFIGED_TAG_COMMANDS {					\
+	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
+	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
+	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
+	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
+	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
+	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
+	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE,		\
+	AIC7XXX_CMDS_PER_DEVICE, AIC7XXX_CMDS_PER_DEVICE		\
+}
+
+/*
+ * By default, use the number of commands specified by
+ * the user's kernel configuration.
+ */
+static adapter_tag_info_t aic7xxx_tag_info[] =
+{
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS},
+	{AIC7XXX_CONFIGED_TAG_COMMANDS}
+};
+
+/*
+ * DV option:
+ *
+ * positive value = DV Enabled
+ * zero		  = DV Disabled
+ * negative value = DV Default for adapter type/seeprom
+ */
+#ifdef CONFIG_AIC7XXX_DV_SETTING
+#define AIC7XXX_CONFIGED_DV CONFIG_AIC7XXX_DV_SETTING
+#else
+#define AIC7XXX_CONFIGED_DV -1
+#endif
+
+static int8_t aic7xxx_dv_settings[] =
+{
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV,
+	AIC7XXX_CONFIGED_DV
+};
+
+/*
+ * There should be a specific return value for this in scsi.h, but
+ * it seems that most drivers ignore it.
+ */
+#define DID_UNDERFLOW   DID_ERROR
+
+void
+ahc_print_path(struct ahc_softc *ahc, struct scb *scb)
+{
+	printk("(scsi%d:%c:%d:%d): ",
+	       ahc->platform_data->host->host_no,
+	       scb != NULL ? SCB_GET_CHANNEL(ahc, scb) : 'X',
+	       scb != NULL ? SCB_GET_TARGET(ahc, scb) : -1,
+	       scb != NULL ? SCB_GET_LUN(scb) : -1);
+}
+
+/*
+ * XXX - these options apply unilaterally to _all_ 274x/284x/294x
+ *       cards in the system.  This should be fixed.  Exceptions to this
+ *       rule are noted in the comments.
+ */
+
+/*
+ * Skip the scsi bus reset.  A non-zero value makes us skip the reset at startup.  This
+ * has no effect on any later resets that might occur due to things like
+ * SCSI bus timeouts.
+ */
+static uint32_t aic7xxx_no_reset;
+
+/*
+ * Certain PCI motherboards will scan PCI devices from highest to lowest,
+ * others scan from lowest to highest, and they tend to do all kinds of
+ * strange things when they come into contact with PCI bridge chips.  The
+ * net result of all this is that the PCI card that is actually used to boot
+ * the machine is very hard to detect.  Most motherboards go from lowest
+ * PCI slot number to highest, and the first SCSI controller found is the
+ * one you boot from.  The only exceptions to this are when a controller
+ * has its BIOS disabled.  So, we by default sort all of our SCSI controllers
+ * from lowest PCI slot number to highest PCI slot number.  We also force
+ * all controllers with their BIOS disabled to the end of the list.  This
+ * works on *almost* all computers.  Where it doesn't work, we have this
+ * option.  Setting this option to non-0 will reverse the order of the sort
+ * to highest first, then lowest, but will still leave cards with their BIOS
+ * disabled at the very end.  That should fix everyone up unless there are
+ * really strange circumstances.
+ */
+static uint32_t aic7xxx_reverse_scan;
+
+/*
+ * Should we force EXTENDED translation on a controller.
+ *     0 == Use whatever is in the SEEPROM or default to off
+ *     1 == Use whatever is in the SEEPROM or default to on
+ */
+static uint32_t aic7xxx_extended;
+
+/*
+ * PCI bus parity checking of the Adaptec controllers.  This is somewhat
+ * dubious at best.  To my knowledge, this option has never actually
+ * solved a PCI parity problem, but on certain machines with broken PCI
+ * chipset configurations where stray PCI transactions with bad parity are
+ * the norm rather than the exception, the error messages can be overwhelming.
+ * It's included in the driver for completeness.
+ *   0	   = Shut off PCI parity check
+ *   non-0 = reverse polarity pci parity checking
+ */
+static uint32_t aic7xxx_pci_parity = ~0;
+
+/*
+ * Certain newer motherboards have put new PCI based devices into the
+ * IO spaces that used to typically be occupied by VLB or EISA cards.
+ * This overlap can cause these newer motherboards to lock up when scanned
+ * for older EISA and VLB devices.  Setting this option to non-0 will
+ * cause the driver to skip scanning for any VLB or EISA controllers and
+ * only support the PCI controllers.  NOTE: this means that if the kernel
+ * is compiled with PCI support disabled, then setting this to non-0
+ * would result in never finding any devices :)
+ */
+#ifndef CONFIG_AIC7XXX_PROBE_EISA_VL
+uint32_t aic7xxx_probe_eisa_vl;
+#else
+uint32_t aic7xxx_probe_eisa_vl = ~0;
+#endif
+
+/*
+ * There are lots of broken chipsets in the world.  Some of them will
+ * violate the PCI spec when we issue byte sized memory writes to our
+ * controller.  I/O mapped register access, if allowed by the given
+ * platform, will work in almost all cases.
+ */
+uint32_t aic7xxx_allow_memio = ~0;
+
+/*
+ * aic7xxx_detect() has been run, so register all device arrivals
+ * immediately with the system rather than deferring to the sorted
+ * attachment performed by aic7xxx_detect().
+ */
+int aic7xxx_detect_complete;
+
+/*
+ * So that we can set how long each device is given as a selection timeout.
+ * The table of values goes like this:
+ *   0 - 256ms
+ *   1 - 128ms
+ *   2 - 64ms
+ *   3 - 32ms
+ * We default to 256ms because some older devices need a longer time
+ * to respond to initial selection.
+ */
+static uint32_t aic7xxx_seltime;
+
+/*
+ * Certain devices do not perform any aging on commands.  Should the
+ * device be saturated by commands in one portion of the disk, it is
+ * possible for transactions on far away sectors to never be serviced.
+ * To handle these devices, we can periodically send an ordered tag to
+ * force all outstanding transactions to be serviced prior to a new
+ * transaction.
+ */
+uint32_t aic7xxx_periodic_otag;
+
+/*
+ * Module information and settable options.
+ */
+static char *aic7xxx = NULL;
+
+MODULE_AUTHOR("Maintainer: Justin T. Gibbs <gibbs@scsiguy.com>");
+MODULE_DESCRIPTION("Adaptec Aic77XX/78XX SCSI Host Bus Adapter driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(AIC7XXX_DRIVER_VERSION);
+module_param(aic7xxx, charp, 0444);
+MODULE_PARM_DESC(aic7xxx,
+"period delimited, options string.\n"
+"	verbose			Enable verbose/diagnostic logging\n"
+"	allow_memio		Allow device registers to be memory mapped\n"
+"	debug			Bitmask of debug values to enable\n"
+"	no_probe		Toggle EISA/VLB controller probing\n"
+"	probe_eisa_vl		Toggle EISA/VLB controller probing\n"
+"	no_reset		Supress initial bus resets\n"
+"	extended		Enable extended geometry on all controllers\n"
+"	periodic_otag		Send an ordered tagged transaction\n"
+"				periodically to prevent tag starvation.\n"
+"				This may be required by some older disk\n"
+"				drives or RAID arrays.\n"
+"	reverse_scan		Sort PCI devices highest Bus/Slot to lowest\n"
+"	tag_info:<tag_str>	Set per-target tag depth\n"
+"	global_tag_depth:<int>	Global tag depth for every target\n"
+"				on every bus\n"
+"	dv:<dv_settings>	Set per-controller Domain Validation Setting.\n"
+"	seltime:<int>		Selection Timeout\n"
+"				(0/256ms,1/128ms,2/64ms,3/32ms)\n"
+"\n"
+"	Sample /etc/modprobe.conf line:\n"
+"		Toggle EISA/VLB probing\n"
+"		Set tag depth on Controller 1/Target 1 to 10 tags\n"
+"		Shorten the selection timeout to 128ms\n"
+"\n"
+"	options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n"
+);
+
+static void ahc_linux_handle_scsi_status(struct ahc_softc *,
+					 struct ahc_linux_device *,
+					 struct scb *);
+static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc,
+					 Scsi_Cmnd *cmd);
+static void ahc_linux_filter_inquiry(struct ahc_softc*, struct ahc_devinfo*);
+static void ahc_linux_sem_timeout(u_long arg);
+static void ahc_linux_freeze_simq(struct ahc_softc *ahc);
+static void ahc_linux_release_simq(u_long arg);
+static void ahc_linux_dev_timed_unfreeze(u_long arg);
+static int  ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag);
+static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);
+static void ahc_linux_size_nseg(void);
+static void ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc);
+static void ahc_linux_start_dv(struct ahc_softc *ahc);
+static void ahc_linux_dv_timeout(struct scsi_cmnd *cmd);
+static int  ahc_linux_dv_thread(void *data);
+static void ahc_linux_kill_dv_thread(struct ahc_softc *ahc);
+static void ahc_linux_dv_target(struct ahc_softc *ahc, u_int target);
+static void ahc_linux_dv_transition(struct ahc_softc *ahc,
+				    struct scsi_cmnd *cmd,
+				    struct ahc_devinfo *devinfo,
+				    struct ahc_linux_target *targ);
+static void ahc_linux_dv_fill_cmd(struct ahc_softc *ahc,
+				  struct scsi_cmnd *cmd,
+				  struct ahc_devinfo *devinfo);
+static void ahc_linux_dv_inq(struct ahc_softc *ahc,
+			     struct scsi_cmnd *cmd,
+			     struct ahc_devinfo *devinfo,
+			     struct ahc_linux_target *targ,
+			     u_int request_length);
+static void ahc_linux_dv_tur(struct ahc_softc *ahc,
+			     struct scsi_cmnd *cmd,
+			     struct ahc_devinfo *devinfo);
+static void ahc_linux_dv_rebd(struct ahc_softc *ahc,
+			      struct scsi_cmnd *cmd,
+			      struct ahc_devinfo *devinfo,
+			      struct ahc_linux_target *targ);
+static void ahc_linux_dv_web(struct ahc_softc *ahc,
+			     struct scsi_cmnd *cmd,
+			     struct ahc_devinfo *devinfo,
+			     struct ahc_linux_target *targ);
+static void ahc_linux_dv_reb(struct ahc_softc *ahc,
+			     struct scsi_cmnd *cmd,
+			     struct ahc_devinfo *devinfo,
+			     struct ahc_linux_target *targ);
+static void ahc_linux_dv_su(struct ahc_softc *ahc,
+			    struct scsi_cmnd *cmd,
+			    struct ahc_devinfo *devinfo,
+			    struct ahc_linux_target *targ);
+static int ahc_linux_fallback(struct ahc_softc *ahc,
+			      struct ahc_devinfo *devinfo);
+static void ahc_linux_dv_complete(Scsi_Cmnd *cmd);
+static void ahc_linux_generate_dv_pattern(struct ahc_linux_target *targ);
+static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc,
+				     struct ahc_devinfo *devinfo);
+static u_int ahc_linux_user_dv_setting(struct ahc_softc *ahc);
+static void ahc_linux_device_queue_depth(struct ahc_softc *ahc,
+					 struct ahc_linux_device *dev);
+static struct ahc_linux_target*	ahc_linux_alloc_target(struct ahc_softc*,
+						       u_int, u_int);
+static void			ahc_linux_free_target(struct ahc_softc*,
+						      struct ahc_linux_target*);
+static struct ahc_linux_device*	ahc_linux_alloc_device(struct ahc_softc*,
+						       struct ahc_linux_target*,
+						       u_int);
+static void			ahc_linux_free_device(struct ahc_softc*,
+						      struct ahc_linux_device*);
+static void ahc_linux_run_device_queue(struct ahc_softc*,
+				       struct ahc_linux_device*);
+static void ahc_linux_setup_tag_info_global(char *p);
+static aic_option_callback_t ahc_linux_setup_tag_info;
+static aic_option_callback_t ahc_linux_setup_dv;
+static int  aic7xxx_setup(char *s);
+static int  ahc_linux_next_unit(void);
+static void ahc_runq_tasklet(unsigned long data);
+static struct ahc_cmd *ahc_linux_run_complete_queue(struct ahc_softc *ahc);
+
+/********************************* Inlines ************************************/
+static __inline void ahc_schedule_runq(struct ahc_softc *ahc);
+static __inline struct ahc_linux_device*
+		     ahc_linux_get_device(struct ahc_softc *ahc, u_int channel,
+					  u_int target, u_int lun, int alloc);
+static __inline void ahc_schedule_completeq(struct ahc_softc *ahc);
+static __inline void ahc_linux_check_device_queue(struct ahc_softc *ahc,
+						  struct ahc_linux_device *dev);
+static __inline struct ahc_linux_device *
+		     ahc_linux_next_device_to_run(struct ahc_softc *ahc);
+static __inline void ahc_linux_run_device_queues(struct ahc_softc *ahc);
+static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
+
+static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
+		 		      struct ahc_dma_seg *sg,
+				      dma_addr_t addr, bus_size_t len);
+
+static __inline void
+ahc_schedule_completeq(struct ahc_softc *ahc)
+{
+	if ((ahc->platform_data->flags & AHC_RUN_CMPLT_Q_TIMER) == 0) {
+		ahc->platform_data->flags |= AHC_RUN_CMPLT_Q_TIMER;
+		ahc->platform_data->completeq_timer.expires = jiffies;
+		add_timer(&ahc->platform_data->completeq_timer);
+	}
+}
+
+/*
+ * Must be called with our lock held.
+ */
+static __inline void
+ahc_schedule_runq(struct ahc_softc *ahc)
+{
+	tasklet_schedule(&ahc->platform_data->runq_tasklet);
+}
+
+static __inline struct ahc_linux_device*
+ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target,
+		     u_int lun, int alloc)
+{
+	struct ahc_linux_target *targ;
+	struct ahc_linux_device *dev;
+	u_int target_offset;
+
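+	/*
+	 * Targets on the second channel of twin adapters occupy
+	 * indexes 8-15 of the target array.
+	 */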
+	target_offset = target;
+	if (channel != 0)
+		target_offset += 8;
+	targ = ahc->platform_data->targets[target_offset];
+	if (targ == NULL) {
+		if (alloc != 0) {
+			targ = ahc_linux_alloc_target(ahc, channel, target);
+			if (targ == NULL)
+				return (NULL);
+		} else
+			return (NULL);
+	}
+	dev = targ->devices[lun];
+	if (dev == NULL && alloc != 0)
+		dev = ahc_linux_alloc_device(ahc, targ, lun);
+	return (dev);
+}
+
+#define AHC_LINUX_MAX_RETURNED_ERRORS 4
+static struct ahc_cmd *
+ahc_linux_run_complete_queue(struct ahc_softc *ahc)
+{
+	struct	ahc_cmd *acmd;
+	u_long	done_flags;
+	int	with_errors;
+
+	with_errors = 0;
+	ahc_done_lock(ahc, &done_flags);
+	while ((acmd = TAILQ_FIRST(&ahc->platform_data->completeq)) != NULL) {
+		Scsi_Cmnd *cmd;
+
+		if (with_errors > AHC_LINUX_MAX_RETURNED_ERRORS) {
+			/*
+			 * Linux uses stack recursion to requeue
+			 * commands that need to be retried.  Avoid
+			 * blowing out the stack by "spoon feeding"
+			 * commands that completed with error back
+			 * the operating system in case they are going
+			 * to be retried. "ick"
+			 */
+			ahc_schedule_completeq(ahc);
+			break;
+		}
+		TAILQ_REMOVE(&ahc->platform_data->completeq,
+			     acmd, acmd_links.tqe);
+		cmd = &acmd_scsi_cmd(acmd);
+		cmd->host_scribble = NULL;
+		if (ahc_cmd_get_transaction_status(cmd) != DID_OK
+		 || (cmd->result & 0xFF) != SCSI_STATUS_OK)
+			with_errors++;
+
+		cmd->scsi_done(cmd);
+	}
+	ahc_done_unlock(ahc, &done_flags);
+	return (acmd);
+}
+
+static __inline void
+ahc_linux_check_device_queue(struct ahc_softc *ahc,
+			     struct ahc_linux_device *dev)
+{
+	if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) != 0
+	 && dev->active == 0) {
+		dev->flags &= ~AHC_DEV_FREEZE_TIL_EMPTY;
+		dev->qfrozen--;
+	}
+
+	if (TAILQ_FIRST(&dev->busyq) == NULL
+	 || dev->openings == 0 || dev->qfrozen != 0)
+		return;
+
+	ahc_linux_run_device_queue(ahc, dev);
+}
+
+static __inline struct ahc_linux_device *
+ahc_linux_next_device_to_run(struct ahc_softc *ahc)
+{
+	
+	if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0
+	 || (ahc->platform_data->qfrozen != 0
+	  && AHC_DV_SIMQ_FROZEN(ahc) == 0))
+		return (NULL);
+	return (TAILQ_FIRST(&ahc->platform_data->device_runq));
+}
+
+static __inline void
+ahc_linux_run_device_queues(struct ahc_softc *ahc)
+{
+	struct ahc_linux_device *dev;
+
+	while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) {
+		TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
+		dev->flags &= ~AHC_DEV_ON_RUN_LIST;
+		ahc_linux_check_device_queue(ahc, dev);
+	}
+}
+
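+/*
+ * Release the DMA mappings established for a command's data buffer.
+ */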
+static __inline void
+ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
+{
+	Scsi_Cmnd *cmd;
+
+	cmd = scb->io_ctx;
+	ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE);
+	if (cmd->use_sg != 0) {
+		struct scatterlist *sg;
+
+		sg = (struct scatterlist *)cmd->request_buffer;
+		pci_unmap_sg(ahc->dev_softc, sg, cmd->use_sg,
+			     scsi_to_pci_dma_dir(cmd->sc_data_direction));
+	} else if (cmd->request_bufflen != 0) {
+		pci_unmap_single(ahc->dev_softc,
+				 scb->platform_data->buf_busaddr,
+				 cmd->request_bufflen,
+				 scsi_to_pci_dma_dir(cmd->sc_data_direction));
+	}
+}
+
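+/*
+ * Fill in a single hardware S/G element, folding the upper address bits
+ * into the length field when 39-bit addressing is in use.  Returns the
+ * number of S/G elements consumed.
+ */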
+static __inline int
+ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
+		  struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len)
+{
+	int	 consumed;
+
+	if ((scb->sg_count + 1) > AHC_NSEG)
+		panic("Too few segs for dma mapping.  "
+		      "Increase AHC_NSEG\n");
+
+	consumed = 1;
+	sg->addr = ahc_htole32(addr & 0xFFFFFFFF);
+	scb->platform_data->xfer_len += len;
+
+	if (sizeof(dma_addr_t) > 4
+	 && (ahc->flags & AHC_39BIT_ADDRESSING) != 0)
+		len |= (addr >> 8) & AHC_SG_HIGH_ADDR_MASK;
+
+	sg->len = ahc_htole32(len);
+	return (consumed);
+}
+
+/************************  Host template entry points *************************/
+static int	   ahc_linux_detect(Scsi_Host_Template *);
+static int	   ahc_linux_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
+static const char *ahc_linux_info(struct Scsi_Host *);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+static int	   ahc_linux_slave_alloc(Scsi_Device *);
+static int	   ahc_linux_slave_configure(Scsi_Device *);
+static void	   ahc_linux_slave_destroy(Scsi_Device *);
+#if defined(__i386__)
+static int	   ahc_linux_biosparam(struct scsi_device*,
+				       struct block_device*,
+				       sector_t, int[]);
+#endif
+#else
+static int	   ahc_linux_release(struct Scsi_Host *);
+static void	   ahc_linux_select_queue_depth(struct Scsi_Host *host,
+						Scsi_Device *scsi_devs);
+#if defined(__i386__)
+static int	   ahc_linux_biosparam(Disk *, kdev_t, int[]);
+#endif
+#endif
+static int	   ahc_linux_bus_reset(Scsi_Cmnd *);
+static int	   ahc_linux_dev_reset(Scsi_Cmnd *);
+static int	   ahc_linux_abort(Scsi_Cmnd *);
+
+/*
+ * Calculate a safe value for AHC_NSEG (as expressed through ahc_linux_nseg).
+ *
+ * In pre-2.5.X...
+ * The midlayer allocates an S/G array dynamically when a command is issued
+ * using SCSI malloc.  This array, which is in an OS dependent format that
+ * must later be copied to our private S/G list, is sized to house just the
+ * number of segments needed for the current transfer.  Since the code that
+ * sizes the SCSI malloc pool does not take into consideration fragmentation
+ * of the pool, executing transactions numbering just a fraction of our
+ * concurrent transaction limit with list lengths approaching AHC_NSEG will
+ * quickly deplete the SCSI malloc pool of usable space.  Unfortunately, the
+ * mid-layer does not properly handle these scsi malloc failures for the S/G
+ * array and the result can be a lockup of the I/O subsystem.  We try to size
+ * our S/G list so that it satisfies our driver's allocation requirements in
+ * addition to avoiding fragmentation of the SCSI malloc pool.
+ */
+static void
+ahc_linux_size_nseg(void)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+	u_int cur_size;
+	u_int best_size;
+
+	/*
+	 * The SCSI allocator rounds to the nearest 512 bytes
+	 * and cannot allocate across a page boundary.  Our algorithm
+	 * is to start at 1K of scsi malloc space per-command and
+	 * step through the power-of-two sizes up to PAGE_SIZE, picking the best.
+	 */
+	best_size = 0;
+	for (cur_size = 1024; cur_size <= PAGE_SIZE; cur_size *= 2) {
+		u_int nseg;
+
+		nseg = cur_size / sizeof(struct scatterlist);
+		if (nseg < AHC_LINUX_MIN_NSEG)
+			continue;
+
+		if (best_size == 0) {
+			best_size = cur_size;
+			ahc_linux_nseg = nseg;
+		} else {
+			u_int best_rem;
+			u_int cur_rem;
+
+			/*
+			 * Compare the traits of the current "best_size"
+			 * with the current size to determine if the
+			 * current size is a better size.
+			 */
+			best_rem = best_size % sizeof(struct scatterlist);
+			cur_rem = cur_size % sizeof(struct scatterlist);
+			if (cur_rem < best_rem) {
+				best_size = cur_size;
+				ahc_linux_nseg = nseg;
+			}
+		}
+	}
+#endif
+}
+
+/*
+ * Try to detect an Adaptec 7XXX controller.
+ */
+static int
+ahc_linux_detect(Scsi_Host_Template *template)
+{
+	struct	ahc_softc *ahc;
+	int     found = 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+	/*
+	 * It is a bug that the upper layer takes
+	 * this lock just prior to calling us.
+	 */
+	spin_unlock_irq(&io_request_lock);
+#endif
+
+	/*
+	 * Sanity checking of Linux SCSI data structures so
+	 * that some of our hacks^H^H^H^H^Hassumptions aren't
+	 * violated.
+	 */
+	if (offsetof(struct ahc_cmd_internal, end)
+	  > offsetof(struct scsi_cmnd, host_scribble)) {
+		printf("ahc_linux_detect: SCSI data structures changed.\n");
+		printf("ahc_linux_detect: Unable to attach\n");
+		return (0);
+	}
+	ahc_linux_size_nseg();
+	/*
+	 * If we've been passed any parameters, process them now.
+	 */
+	if (aic7xxx)
+		aic7xxx_setup(aic7xxx);
+
+	template->proc_name = "aic7xxx";
+
+	/*
+	 * Initialize our softc list lock prior to
+	 * probing for any adapters.
+	 */
+	ahc_list_lockinit();
+
+	found = ahc_linux_pci_init();
+	if (!ahc_linux_eisa_init())
+		found++;
+	
+	/*
+	 * Register with the SCSI layer all
+	 * controllers we've found.
+	 */
+	TAILQ_FOREACH(ahc, &ahc_tailq, links) {
+
+		if (ahc_linux_register_host(ahc, template) == 0)
+			found++;
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+	spin_lock_irq(&io_request_lock);
+#endif
+	aic7xxx_detect_complete++;
+
+	return (found);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+/*
+ * Free the passed in Scsi_Host memory structures prior to unloading the
+ * module.
+ */
+int
+ahc_linux_release(struct Scsi_Host * host)
+{
+	struct ahc_softc *ahc;
+	u_long l;
+
+	ahc_list_lock(&l);
+	if (host != NULL) {
+
+		/*
+		 * We should be able to just perform
+		 * the free directly, but check our
+		 * list for extra sanity.
+		 */
+		ahc = ahc_find_softc(*(struct ahc_softc **)host->hostdata);
+		if (ahc != NULL) {
+			u_long s;
+
+			ahc_lock(ahc, &s);
+			ahc_intr_enable(ahc, FALSE);
+			ahc_unlock(ahc, &s);
+			ahc_free(ahc);
+		}
+	}
+	ahc_list_unlock(&l);
+	return (0);
+}
+#endif
+
+/*
+ * Return a string describing the driver.
+ */
+static const char *
+ahc_linux_info(struct Scsi_Host *host)
+{
+	static char buffer[512];
+	char	ahc_info[256];
+	char   *bp;
+	struct ahc_softc *ahc;
+
+	bp = &buffer[0];
+	ahc = *(struct ahc_softc **)host->hostdata;
+	memset(bp, 0, sizeof(buffer));
+	strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev ");
+	strcat(bp, AIC7XXX_DRIVER_VERSION);
+	strcat(bp, "\n");
+	strcat(bp, "        <");
+	strcat(bp, ahc->description);
+	strcat(bp, ">\n");
+	strcat(bp, "        ");
+	ahc_controller_info(ahc, ahc_info);
+	strcat(bp, ahc_info);
+	strcat(bp, "\n");
+
+	return (bp);
+}
+
+/*
+ * Queue an SCB to the controller.
+ */
+static int
+ahc_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
+{
+	struct	 ahc_softc *ahc;
+	struct	 ahc_linux_device *dev;
+	u_long	 flags;
+
+	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
+
+	/*
+	 * Save the callback on completion function.
+	 */
+	cmd->scsi_done = scsi_done;
+
+	ahc_midlayer_entrypoint_lock(ahc, &flags);
+
+	/*
+	 * Close the race of a command that was in the process of
+	 * being queued to us just as our simq was frozen.  Let
+	 * DV commands through so long as we are only frozen to
+	 * perform DV.
+	 */
+	if (ahc->platform_data->qfrozen != 0
+	 && AHC_DV_CMD(cmd) == 0) {
+
+		ahc_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ);
+		ahc_linux_queue_cmd_complete(ahc, cmd);
+		ahc_schedule_completeq(ahc);
+		ahc_midlayer_entrypoint_unlock(ahc, &flags);
+		return (0);
+	}
+	dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id,
+				   cmd->device->lun, /*alloc*/TRUE);
+	if (dev == NULL) {
+		ahc_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL);
+		ahc_linux_queue_cmd_complete(ahc, cmd);
+		ahc_schedule_completeq(ahc);
+		ahc_midlayer_entrypoint_unlock(ahc, &flags);
+		printf("%s: aic7xxx_linux_queue - Unable to allocate device!\n",
+		       ahc_name(ahc));
+		return (0);
+	}
+	cmd->result = CAM_REQ_INPROG << 16;
+	TAILQ_INSERT_TAIL(&dev->busyq, (struct ahc_cmd *)cmd, acmd_links.tqe);
+	if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) {
+		TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links);
+		dev->flags |= AHC_DEV_ON_RUN_LIST;
+		ahc_linux_run_device_queues(ahc);
+	}
+	ahc_midlayer_entrypoint_unlock(ahc, &flags);
+	return (0);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+static int
+ahc_linux_slave_alloc(Scsi_Device *device)
+{
+	struct	ahc_softc *ahc;
+
+	ahc = *((struct ahc_softc **)device->host->hostdata);
+	if (bootverbose)
+		printf("%s: Slave Alloc %d\n", ahc_name(ahc), device->id);
+	return (0);
+}
+
+static int
+ahc_linux_slave_configure(Scsi_Device *device)
+{
+	struct	ahc_softc *ahc;
+	struct	ahc_linux_device *dev;
+	u_long	flags;
+
+	ahc = *((struct ahc_softc **)device->host->hostdata);
+	if (bootverbose)
+		printf("%s: Slave Configure %d\n", ahc_name(ahc), device->id);
+	ahc_midlayer_entrypoint_lock(ahc, &flags);
+	/*
+	 * Since Linux has attached to the device, configure
+	 * it so we don't free and allocate the device
+	 * structure on every command.
+	 */
+	dev = ahc_linux_get_device(ahc, device->channel,
+				   device->id, device->lun,
+				   /*alloc*/TRUE);
+	if (dev != NULL) {
+		dev->flags &= ~AHC_DEV_UNCONFIGURED;
+		dev->scsi_device = device;
+		ahc_linux_device_queue_depth(ahc, dev);
+	}
+	ahc_midlayer_entrypoint_unlock(ahc, &flags);
+	return (0);
+}
+
+static void
+ahc_linux_slave_destroy(Scsi_Device *device)
+{
+	struct	ahc_softc *ahc;
+	struct	ahc_linux_device *dev;
+	u_long	flags;
+
+	ahc = *((struct ahc_softc **)device->host->hostdata);
+	if (bootverbose)
+		printf("%s: Slave Destroy %d\n", ahc_name(ahc), device->id);
+	ahc_midlayer_entrypoint_lock(ahc, &flags);
+	dev = ahc_linux_get_device(ahc, device->channel,
+				   device->id, device->lun,
+					   /*alloc*/FALSE);
+	/*
+	 * Filter out "silly" deletions of real devices by only
+	 * deleting devices that have had slave_configure()
+	 * called on them.  All other devices that have not
+	 * been configured will automatically be deleted by
+	 * the refcounting process.
+	 */
+	if (dev != NULL
+	 && (dev->flags & AHC_DEV_SLAVE_CONFIGURED) != 0) {
+		dev->flags |= AHC_DEV_UNCONFIGURED;
+		if (TAILQ_EMPTY(&dev->busyq)
+		 && dev->active == 0
+	 	 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0)
+			ahc_linux_free_device(ahc, dev);
+	}
+	ahc_midlayer_entrypoint_unlock(ahc, &flags);
+}
+#else
+/*
+ * Sets the queue depth for each SCSI device hanging
+ * off the input host adapter.
+ */
+static void
+ahc_linux_select_queue_depth(struct Scsi_Host *host, Scsi_Device *scsi_devs)
+{
+	Scsi_Device *device;
+	Scsi_Device *ldev;
+	struct	ahc_softc *ahc;
+	u_long	flags;
+
+	ahc = *((struct ahc_softc **)host->hostdata);
+	ahc_lock(ahc, &flags);
+	for (device = scsi_devs; device != NULL; device = device->next) {
+
+		/*
+		 * Watch out for duplicate devices.  This works around
+		 * some quirks in how the SCSI scanning code does its
+		 * device management.
+		 */
+		for (ldev = scsi_devs; ldev != device; ldev = ldev->next) {
+			if (ldev->host == device->host
+			 && ldev->channel == device->channel
+			 && ldev->id == device->id
+			 && ldev->lun == device->lun)
+				break;
+		}
+		/* Skip duplicate. */
+		if (ldev != device)
+			continue;
+
+		if (device->host == host) {
+			struct	 ahc_linux_device *dev;
+
+			/*
+			 * Since Linux has attached to the device, configure
+			 * it so we don't free and allocate the device
+			 * structure on every command.
+			 */
+			dev = ahc_linux_get_device(ahc, device->channel,
+						   device->id, device->lun,
+						   /*alloc*/TRUE);
+			if (dev != NULL) {
+				dev->flags &= ~AHC_DEV_UNCONFIGURED;
+				dev->scsi_device = device;
+				ahc_linux_device_queue_depth(ahc, dev);
+				device->queue_depth = dev->openings
+						    + dev->active;
+				if ((dev->flags & (AHC_DEV_Q_BASIC
+						| AHC_DEV_Q_TAGGED)) == 0) {
+					/*
+					 * We allow the OS to queue 2 untagged
+					 * transactions to us at any time even
+					 * though we can only execute them
+					 * serially on the controller/device.
+					 * This should remove some latency.
+					 */
+					device->queue_depth = 2;
+				}
+			}
+		}
+	}
+	ahc_unlock(ahc, &flags);
+}
+#endif
+
+#if defined(__i386__)
+/*
+ * Return the disk geometry for the given SCSI device.
+ */
+static int
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+		    sector_t capacity, int geom[])
+{
+	uint8_t *bh;
+#else
+ahc_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
+{
+	struct	scsi_device *sdev = disk->device;
+	u_long	capacity = disk->capacity;
+	struct	buffer_head *bh;
+#endif
+	int	 heads;
+	int	 sectors;
+	int	 cylinders;
+	int	 ret;
+	int	 extended;
+	struct	 ahc_softc *ahc;
+	u_int	 channel;
+
+	ahc = *((struct ahc_softc **)sdev->host->hostdata);
+	channel = sdev->channel;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	bh = scsi_bios_ptable(bdev);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,17)
+	bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, block_size(dev));
+#else
+	bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, 1024);
+#endif
+
+	if (bh) {
+		ret = scsi_partsize(bh, capacity,
+				    &geom[2], &geom[0], &geom[1]);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+		kfree(bh);
+#else
+		brelse(bh);
+#endif
+		if (ret != -1)
+			return (ret);
+	}
+	heads = 64;
+	sectors = 32;
+	cylinders = aic_sector_div(capacity, heads, sectors);
+
+	if (aic7xxx_extended != 0)
+		extended = 1;
+	else if (channel == 0)
+		extended = (ahc->flags & AHC_EXTENDED_TRANS_A) != 0;
+	else
+		extended = (ahc->flags & AHC_EXTENDED_TRANS_B) != 0;
+	if (extended && cylinders >= 1024) {
+		heads = 255;
+		sectors = 63;
+		cylinders = aic_sector_div(capacity, heads, sectors);
+	}
+	geom[0] = heads;
+	geom[1] = sectors;
+	geom[2] = cylinders;
+	return (0);
+}
+#endif
+
+/*
+ * Abort the current SCSI command(s).
+ */
+static int
+ahc_linux_abort(Scsi_Cmnd *cmd)
+{
+	int error;
+
+	error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);
+	if (error != 0)
+		printf("aic7xxx_abort returns 0x%x\n", error);
+	return (error);
+}
+
+/*
+ * Attempt to send a target reset message to the device that timed out.
+ */
+static int
+ahc_linux_dev_reset(Scsi_Cmnd *cmd)
+{
+	int error;
+
+	error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
+	if (error != 0)
+		printf("aic7xxx_dev_reset returns 0x%x\n", error);
+	return (error);
+}
+
+/*
+ * Reset the SCSI bus.
+ */
+static int
+ahc_linux_bus_reset(Scsi_Cmnd *cmd)
+{
+	struct ahc_softc *ahc;
+	u_long s;
+	int    found;
+
+	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
+	ahc_midlayer_entrypoint_lock(ahc, &s);
+	found = ahc_reset_channel(ahc, cmd->device->channel + 'A',
+				  /*initiate reset*/TRUE);
+	ahc_linux_run_complete_queue(ahc);
+	ahc_midlayer_entrypoint_unlock(ahc, &s);
+
+	if (bootverbose)
+		printf("%s: SCSI bus reset delivered. "
+		       "%d SCBs aborted.\n", ahc_name(ahc), found);
+
+	return SUCCESS;
+}
+
+Scsi_Host_Template aic7xxx_driver_template = {
+	.module			= THIS_MODULE,
+	.name			= "aic7xxx",
+	.proc_info		= ahc_linux_proc_info,
+	.info			= ahc_linux_info,
+	.queuecommand		= ahc_linux_queue,
+	.eh_abort_handler	= ahc_linux_abort,
+	.eh_device_reset_handler = ahc_linux_dev_reset,
+	.eh_bus_reset_handler	= ahc_linux_bus_reset,
+#if defined(__i386__)
+	.bios_param		= ahc_linux_biosparam,
+#endif
+	.can_queue		= AHC_MAX_QUEUE,
+	.this_id		= -1,
+	.cmd_per_lun		= 2,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.slave_alloc		= ahc_linux_slave_alloc,
+	.slave_configure	= ahc_linux_slave_configure,
+	.slave_destroy		= ahc_linux_slave_destroy,
+};
+
+/**************************** Tasklet Handler *********************************/
+
+/*
+ * In 2.4.X and above, this routine is called from a tasklet,
+ * so we must re-acquire our lock prior to executing this code.
+ * In all prior kernels, ahc_schedule_runq() calls this routine
+ * directly and ahc_schedule_runq() is called with our lock held.
+ */
+static void
+ahc_runq_tasklet(unsigned long data)
+{
+	struct ahc_softc* ahc;
+	struct ahc_linux_device *dev;
+	u_long flags;
+
+	ahc = (struct ahc_softc *)data;
+	ahc_lock(ahc, &flags);
+	while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) {
+	
+		TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
+		dev->flags &= ~AHC_DEV_ON_RUN_LIST;
+		ahc_linux_check_device_queue(ahc, dev);
+		/* Yield to our interrupt handler */
+		ahc_unlock(ahc, &flags);
+		ahc_lock(ahc, &flags);
+	}
+	ahc_unlock(ahc, &flags);
+}
+
+/******************************** Macros **************************************/
+#define BUILD_SCSIID(ahc, cmd)						    \
+	((((cmd)->device->id << TID_SHIFT) & TID)			    \
+	| (((cmd)->device->channel == 0) ? (ahc)->our_id : (ahc)->our_id_b) \
+	| (((cmd)->device->channel == 0) ? 0 : TWIN_CHNLB))
+
+/******************************** Bus DMA *************************************/
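+/*
+ * The routines below provide a minimal BSD-style bus dma tag/map
+ * abstraction for the core driver on top of the Linux PCI consistent
+ * memory API.
+ */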
+int
+ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent,
+		   bus_size_t alignment, bus_size_t boundary,
+		   dma_addr_t lowaddr, dma_addr_t highaddr,
+		   bus_dma_filter_t *filter, void *filterarg,
+		   bus_size_t maxsize, int nsegments,
+		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
+{
+	bus_dma_tag_t dmat;
+
+	dmat = malloc(sizeof(*dmat), M_DEVBUF, M_NOWAIT);
+	if (dmat == NULL)
+		return (ENOMEM);
+
+	/*
+	 * Linux is very simplistic about DMA memory.  For now don't
+	 * maintain all specification information.  Once Linux supplies
+	 * better facilities for doing these operations, or the
+	 * needs of this particular driver change, we might need to do
+	 * more here.
+	 */
+	dmat->alignment = alignment;
+	dmat->boundary = boundary;
+	dmat->maxsize = maxsize;
+	*ret_tag = dmat;
+	return (0);
+}
+
+void
+ahc_dma_tag_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat)
+{
+	free(dmat, M_DEVBUF);
+}
+
+int
+ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
+		 int flags, bus_dmamap_t *mapp)
+{
+	bus_dmamap_t map;
+
+	map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
+	if (map == NULL)
+		return (ENOMEM);
+	/*
+	 * Although we can dma data above 4GB, our
+	 * "consistent" memory is below 4GB for
+	 * space efficiency reasons (only need a 4byte
+	 * address).  For this reason, we have to reset
+	 * our dma mask when doing allocations.
+	 */
+	if (ahc->dev_softc != NULL)
+		if (pci_set_dma_mask(ahc->dev_softc, 0xFFFFFFFF)) {
+			printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
+			kfree(map);
+			return (ENODEV);
+		}
+	*vaddr = pci_alloc_consistent(ahc->dev_softc,
+				      dmat->maxsize, &map->bus_addr);
+	if (ahc->dev_softc != NULL)
+		if (pci_set_dma_mask(ahc->dev_softc,
+				     ahc->platform_data->hw_dma_mask)) {
+			printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
+			kfree(map);
+			return (ENODEV);
+		}
+	if (*vaddr == NULL)
+		return (ENOMEM);
+	*mapp = map;
+	return(0);
+}
+
+void
+ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
+		void* vaddr, bus_dmamap_t map)
+{
+	pci_free_consistent(ahc->dev_softc, dmat->maxsize,
+			    vaddr, map->bus_addr);
+}
+
+int
+ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map,
+		void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
+		void *cb_arg, int flags)
+{
+	/*
+	 * Assume for now that this will only be used during
+	 * initialization and not for per-transaction buffer mapping.
+	 */
+	bus_dma_segment_t stack_sg;
+
+	stack_sg.ds_addr = map->bus_addr;
+	stack_sg.ds_len = dmat->maxsize;
+	cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
+	return (0);
+}
+
+void
+ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+	/*
+	 * The map may be NULL in our < 2.3.X implementation.
+	 * Now it's 2.6.5, but just in case...
+	 */
+	BUG_ON(map == NULL);
+	free(map, M_DEVBUF);
+}
+
+int
+ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+	/* Nothing to do */
+	return (0);
+}
+
+/********************* Platform Dependent Functions ***************************/
+/*
+ * Compare "left hand" softc with "right hand" softc, returning:
+ * < 0 - lahc has a lower priority than rahc
+ *   0 - Softcs are equal
+ * > 0 - lahc has a higher priority than rahc
+ */
+int
+ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
+{
+	int	value;
+	int	rvalue;
+	int	lvalue;
+
+	/*
+	 * Under Linux, cards are ordered as follows:
+	 *	1) VLB/EISA BIOS enabled devices sorted by BIOS address.
+	 *	2) PCI devices with BIOS enabled sorted by bus/slot/func.
+	 *	3) All remaining VLB/EISA devices sorted by ioport.
+	 *	4) All remaining PCI devices sorted by bus/slot/func.
+	 */
+	value = (lahc->flags & AHC_BIOS_ENABLED)
+	      - (rahc->flags & AHC_BIOS_ENABLED);
+	if (value != 0)
+		/* Controllers with BIOS enabled have a *higher* priority */
+		return (value);
+
+	/*
+	 * Same BIOS setting, now sort based on bus type.
+	 * EISA and VL controllers sort together.  EISA/VL
+	 * have higher priority than PCI.
+	 */
+	rvalue = (rahc->chip & AHC_BUS_MASK);
+ 	if (rvalue == AHC_VL)
+		rvalue = AHC_EISA;
+	lvalue = (lahc->chip & AHC_BUS_MASK);
+ 	if (lvalue == AHC_VL)
+		lvalue = AHC_EISA;
+	value = rvalue - lvalue;
+	if (value != 0)
+		return (value);
+
+	/* Still equal.  Sort by BIOS address, ioport, or bus/slot/func. */
+	switch (rvalue) {
+#ifdef CONFIG_PCI
+	case AHC_PCI:
+	{
+		char primary_channel;
+
+		if (aic7xxx_reverse_scan != 0)
+			value = ahc_get_pci_bus(lahc->dev_softc)
+			      - ahc_get_pci_bus(rahc->dev_softc);
+		else
+			value = ahc_get_pci_bus(rahc->dev_softc)
+			      - ahc_get_pci_bus(lahc->dev_softc);
+		if (value != 0)
+			break;
+		if (aic7xxx_reverse_scan != 0)
+			value = ahc_get_pci_slot(lahc->dev_softc)
+			      - ahc_get_pci_slot(rahc->dev_softc);
+		else
+			value = ahc_get_pci_slot(rahc->dev_softc)
+			      - ahc_get_pci_slot(lahc->dev_softc);
+		if (value != 0)
+			break;
+		/*
+		 * On multi-function devices, the user can choose
+		 * to have function 1 probed before function 0.
+		 * Give whichever channel is the primary channel
+		 * the highest priority.
+		 */
+		primary_channel = (lahc->flags & AHC_PRIMARY_CHANNEL) + 'A';
+		value = -1;
+		if (lahc->channel == primary_channel)
+			value = 1;
+		break;
+	}
+#endif
+	case AHC_EISA:
+		if ((rahc->flags & AHC_BIOS_ENABLED) != 0) {
+			value = rahc->platform_data->bios_address
+			      - lahc->platform_data->bios_address; 
+		} else {
+			value = rahc->bsh.ioport
+			      - lahc->bsh.ioport; 
+		}
+		break;
+	default:
+		panic("ahc_softc_sort: invalid bus type");
+	}
+	return (value);
+}
+
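+/*
+ * Apply a single tag depth, taken from the "global_tag_depth" boot
+ * option, to every target of every controller instance.
+ */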
+static void
+ahc_linux_setup_tag_info_global(char *p)
+{
+	int tags, i, j;
+
+	tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
+	printf("Setting Global Tags= %d\n", tags);
+
+	for (i = 0; i < NUM_ELEMENTS(aic7xxx_tag_info); i++) {
+		for (j = 0; j < AHC_NUM_TARGETS; j++) {
+			aic7xxx_tag_info[i].tag_commands[j] = tags;
+		}
+	}
+}
+
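+/*
+ * Record a per-target tag depth parsed from the "tag_info" boot option.
+ */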
+static void
+ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
+{
+
+	if ((instance >= 0) && (targ >= 0)
+	 && (instance < NUM_ELEMENTS(aic7xxx_tag_info))
+	 && (targ < AHC_NUM_TARGETS)) {
+		aic7xxx_tag_info[instance].tag_commands[targ] = value & 0xff;
+		if (bootverbose)
+			printf("tag_info[%d:%d] = %d\n", instance, targ, value);
+	}
+}
+
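+/*
+ * Record a per-controller domain validation setting parsed from the
+ * "dv" boot option.
+ */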
+static void
+ahc_linux_setup_dv(u_long arg, int instance, int targ, int32_t value)
+{
+
+	if ((instance >= 0)
+	 && (instance < NUM_ELEMENTS(aic7xxx_dv_settings))) {
+		aic7xxx_dv_settings[instance] = value;
+		if (bootverbose)
+			printf("dv[%d] = %d\n", instance, value);
+	}
+}
+
+/*
+ * Handle Linux boot parameters. This routine allows for assigning a value
+ * to a parameter with a ':' between the parameter and the value.
+ * e.g. aic7xxx=stpwlev:1,extended
+ */
+static int
+aic7xxx_setup(char *s)
+{
+	int	i, n;
+	char   *p;
+	char   *end;
+
+	static struct {
+		const char *name;
+		uint32_t *flag;
+	} options[] = {
+		{ "extended", &aic7xxx_extended },
+		{ "no_reset", &aic7xxx_no_reset },
+		{ "verbose", &aic7xxx_verbose },
+		{ "allow_memio", &aic7xxx_allow_memio},
+#ifdef AHC_DEBUG
+		{ "debug", &ahc_debug },
+#endif
+		{ "reverse_scan", &aic7xxx_reverse_scan },
+		{ "no_probe", &aic7xxx_probe_eisa_vl },
+		{ "probe_eisa_vl", &aic7xxx_probe_eisa_vl },
+		{ "periodic_otag", &aic7xxx_periodic_otag },
+		{ "pci_parity", &aic7xxx_pci_parity },
+		{ "seltime", &aic7xxx_seltime },
+		{ "tag_info", NULL },
+		{ "global_tag_depth", NULL },
+		{ "dv", NULL }
+	};
+
+	end = strchr(s, '\0');
+
+	/*
+	 * XXX ia64 gcc isn't smart enough to know that NUM_ELEMENTS
+	 * will never be 0 in this case.
+	 */
+	n = 0;
+
+	while ((p = strsep(&s, ",.")) != NULL) {
+		if (*p == '\0')
+			continue;
+		for (i = 0; i < NUM_ELEMENTS(options); i++) {
+
+			n = strlen(options[i].name);
+			if (strncmp(options[i].name, p, n) == 0)
+				break;
+		}
+		if (i == NUM_ELEMENTS(options))
+			continue;
+
+		if (strncmp(p, "global_tag_depth", n) == 0) {
+			ahc_linux_setup_tag_info_global(p + n);
+		} else if (strncmp(p, "tag_info", n) == 0) {
+			s = aic_parse_brace_option("tag_info", p + n, end,
+			    2, ahc_linux_setup_tag_info, 0);
+		} else if (strncmp(p, "dv", n) == 0) {
+			s = aic_parse_brace_option("dv", p + n, end, 1,
+			    ahc_linux_setup_dv, 0);
+		} else if (p[n] == ':') {
+			*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
+		} else if (strncmp(p, "verbose", n) == 0) {
+			*(options[i].flag) = 1;
+		} else {
+			*(options[i].flag) ^= 0xFFFFFFFF;
+		}
+	}
+	return 1;
+}
+
+__setup("aic7xxx=", aic7xxx_setup);
+
+uint32_t aic7xxx_verbose;
+
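+/*
+ * Register a controller with the SCSI mid-layer: allocate its Scsi_Host,
+ * spawn the domain validation thread, pre-allocate target objects so DV
+ * can scan them, and enable interrupts.
+ */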
+int
+ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template)
+{
+	char	 buf[80];
+	struct	 Scsi_Host *host;
+	char	*new_name;
+	u_long	 s;
+	u_int	 targ_offset;
+
+	template->name = ahc->description;
+	host = scsi_host_alloc(template, sizeof(struct ahc_softc *));
+	if (host == NULL)
+		return (ENOMEM);
+
+	*((struct ahc_softc **)host->hostdata) = ahc;
+	ahc_lock(ahc, &s);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	scsi_assign_lock(host, &ahc->platform_data->spin_lock);
+#elif AHC_SCSI_HAS_HOST_LOCK != 0
+	host->lock = &ahc->platform_data->spin_lock;
+#endif
+	ahc->platform_data->host = host;
+	host->can_queue = AHC_MAX_QUEUE;
+	host->cmd_per_lun = 2;
+	/* XXX No way to communicate the ID for multiple channels */
+	host->this_id = ahc->our_id;
+	host->irq = ahc->platform_data->irq;
+	host->max_id = (ahc->features & AHC_WIDE) ? 16 : 8;
+	host->max_lun = AHC_NUM_LUNS;
+	host->max_channel = (ahc->features & AHC_TWIN) ? 1 : 0;
+	host->sg_tablesize = AHC_NSEG;
+	ahc_set_unit(ahc, ahc_linux_next_unit());
+	sprintf(buf, "scsi%d", host->host_no);
+	new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
+	if (new_name != NULL) {
+		strcpy(new_name, buf);
+		ahc_set_name(ahc, new_name);
+	}
+	host->unique_id = ahc->unit;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+	scsi_set_pci_device(host, ahc->dev_softc);
+#endif
+	ahc_linux_initialize_scsi_bus(ahc);
+	ahc_unlock(ahc, &s);
+	ahc->platform_data->dv_pid = kernel_thread(ahc_linux_dv_thread, ahc, 0);
+	ahc_lock(ahc, &s);
+	if (ahc->platform_data->dv_pid < 0) {
+		printf("%s: Failed to create DV thread, error= %d\n",
+		       ahc_name(ahc), ahc->platform_data->dv_pid);
+		return (-ahc->platform_data->dv_pid);
+	}
+	/*
+	 * Initially allocate *all* of our linux target objects
+	 * so that the DV thread will scan them all in parallel
+	 * just after driver initialization.  Any device that
+	 * does not exist will have its target object destroyed
+	 * by the selection timeout handler.  In the case of a
+	 * device that appears after the initial DV scan, async
+	 * negotiation will occur for the first command, and DV
+ * will commence should that first command be successful.
+	 */
+	for (targ_offset = 0;
+	     targ_offset < host->max_id * (host->max_channel + 1);
+	     targ_offset++) {
+		u_int channel;
+		u_int target;
+
+		channel = 0;
+		target = targ_offset;
+		if (target > 7
+		 && (ahc->features & AHC_TWIN) != 0) {
+			channel = 1;
+			target &= 0x7;
+		}
+		/*
+		 * Skip our own ID.  Some Compaq/HP storage devices
+		 * have enclosure management devices that respond to
+		 * single bit selection (i.e. selecting ourselves).
+		 * It is expected that either an external application
+		 * or a modified kernel will be used to probe this
+		 * ID if it is appropriate.  To accommodate these
+		 * installations, ahc_linux_alloc_target() will allocate
+		 * for our ID if asked to do so.
+		 */
+		if ((channel == 0 && target == ahc->our_id)
+		 || (channel == 1 && target == ahc->our_id_b))
+			continue;
+
+		ahc_linux_alloc_target(ahc, channel, target);
+	}
+	ahc_intr_enable(ahc, TRUE);
+	ahc_linux_start_dv(ahc);
+	ahc_unlock(ahc, &s);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	scsi_add_host(host, (ahc->dev_softc ? &ahc->dev_softc->dev : NULL)); /* XXX handle failure */
+	scsi_scan_host(host);
+#endif
+	return (0);
+}
+
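+/*
+ * Return the total amount of system memory in bytes.
+ */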
+uint64_t
+ahc_linux_get_memsize(void)
+{
+	struct sysinfo si;
+
+	si_meminfo(&si);
+	return ((uint64_t)si.totalram << PAGE_SHIFT);
+}
+
+/*
+ * Find the smallest available unit number to use
+ * for a new device.  We don't just use a static
+ * count to handle the "repeated hot-(un)plug"
+ * scenario.
+ */
+static int
+ahc_linux_next_unit(void)
+{
+	struct ahc_softc *ahc;
+	int unit;
+
+	unit = 0;
+retry:
+	TAILQ_FOREACH(ahc, &ahc_tailq, links) {
+		if (ahc->unit == unit) {
+			unit++;
+			goto retry;
+		}
+	}
+	return (unit);
+}
+
+/*
+ * Place the SCSI bus into a known state by either resetting it,
+ * or forcing transfer negotiations on the next command to any
+ * target.
+ */
+void
+ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc)
+{
+	int i;
+	int numtarg;
+
+	i = 0;
+	numtarg = 0;
+
+	if (aic7xxx_no_reset != 0)
+		ahc->flags &= ~(AHC_RESET_BUS_A|AHC_RESET_BUS_B);
+
+	if ((ahc->flags & AHC_RESET_BUS_A) != 0)
+		ahc_reset_channel(ahc, 'A', /*initiate_reset*/TRUE);
+	else
+		numtarg = (ahc->features & AHC_WIDE) ? 16 : 8;
+
+	if ((ahc->features & AHC_TWIN) != 0) {
+
+		if ((ahc->flags & AHC_RESET_BUS_B) != 0) {
+			ahc_reset_channel(ahc, 'B', /*initiate_reset*/TRUE);
+		} else {
+			if (numtarg == 0)
+				i = 8;
+			numtarg += 8;
+		}
+	}
+
+	/*
+	 * Force negotiation to async for all targets that
+	 * will not see an initial bus reset.
+	 */
+	for (; i < numtarg; i++) {
+		struct ahc_devinfo devinfo;
+		struct ahc_initiator_tinfo *tinfo;
+		struct ahc_tmode_tstate *tstate;
+		u_int our_id;
+		u_int target_id;
+		char channel;
+
+		channel = 'A';
+		our_id = ahc->our_id;
+		target_id = i;
+		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
+			channel = 'B';
+			our_id = ahc->our_id_b;
+			target_id = i % 8;
+		}
+		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
+					    target_id, &tstate);
+		ahc_compile_devinfo(&devinfo, our_id, target_id,
+				    CAM_LUN_WILDCARD, channel, ROLE_INITIATOR);
+		ahc_update_neg_request(ahc, &devinfo, tstate,
+				       tinfo, AHC_NEG_ALWAYS);
+	}
+	/* Give the bus some time to recover */
+	if ((ahc->flags & (AHC_RESET_BUS_A|AHC_RESET_BUS_B)) != 0) {
+		ahc_linux_freeze_simq(ahc);
+		init_timer(&ahc->platform_data->reset_timer);
+		ahc->platform_data->reset_timer.data = (u_long)ahc;
+		ahc->platform_data->reset_timer.expires =
+		    jiffies + (AIC7XXX_RESET_DELAY * HZ)/1000;
+		ahc->platform_data->reset_timer.function =
+		    ahc_linux_release_simq;
+		add_timer(&ahc->platform_data->reset_timer);
+	}
+}
+
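+/*
+ * Allocate and initialize the Linux specific per-controller state:
+ * queues, locks, the completion queue timer, DV semaphores, and the
+ * runq tasklet.
+ */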
+int
+ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
+{
+
+	ahc->platform_data =
+	    malloc(sizeof(struct ahc_platform_data), M_DEVBUF, M_NOWAIT);
+	if (ahc->platform_data == NULL)
+		return (ENOMEM);
+	memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
+	TAILQ_INIT(&ahc->platform_data->completeq);
+	TAILQ_INIT(&ahc->platform_data->device_runq);
+	ahc->platform_data->irq = AHC_LINUX_NOIRQ;
+	ahc->platform_data->hw_dma_mask = 0xFFFFFFFF;
+	ahc_lockinit(ahc);
+	ahc_done_lockinit(ahc);
+	init_timer(&ahc->platform_data->completeq_timer);
+	ahc->platform_data->completeq_timer.data = (u_long)ahc;
+	ahc->platform_data->completeq_timer.function =
+	    (ahc_linux_callback_t *)ahc_linux_thread_run_complete_queue;
+	init_MUTEX_LOCKED(&ahc->platform_data->eh_sem);
+	init_MUTEX_LOCKED(&ahc->platform_data->dv_sem);
+	init_MUTEX_LOCKED(&ahc->platform_data->dv_cmd_sem);
+	tasklet_init(&ahc->platform_data->runq_tasklet, ahc_runq_tasklet,
+		     (unsigned long)ahc);
+	ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
+	ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4;
+	if (aic7xxx_pci_parity == 0)
+		ahc->flags |= AHC_DISABLE_PCI_PERR;
+
+	return (0);
+}
+
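+/*
+ * Tear down all Linux specific per-controller state: stop the DV thread
+ * and runq tasklet, detach from the SCSI mid-layer, free target and
+ * device objects, and release interrupt and register resources.
+ */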
+void
+ahc_platform_free(struct ahc_softc *ahc)
+{
+	struct ahc_linux_target *targ;
+	struct ahc_linux_device *dev;
+	int i, j;
+
+	if (ahc->platform_data != NULL) {
+		del_timer_sync(&ahc->platform_data->completeq_timer);
+		ahc_linux_kill_dv_thread(ahc);
+		tasklet_kill(&ahc->platform_data->runq_tasklet);
+		if (ahc->platform_data->host != NULL) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+			scsi_remove_host(ahc->platform_data->host);
+#endif
+			scsi_host_put(ahc->platform_data->host);
+		}
+
+		/* destroy all of the device and target objects */
+		for (i = 0; i < AHC_NUM_TARGETS; i++) {
+			targ = ahc->platform_data->targets[i];
+			if (targ != NULL) {
+				/* Keep target around through the loop. */
+				targ->refcount++;
+				for (j = 0; j < AHC_NUM_LUNS; j++) {
+
+					if (targ->devices[j] == NULL)
+						continue;
+					dev = targ->devices[j];
+					ahc_linux_free_device(ahc, dev);
+				}
+				/*
+				 * Forcibly free the target now that
+				 * all devices are gone.
+				 */
+				ahc_linux_free_target(ahc, targ);
+ 			}
+ 		}
+
+		if (ahc->platform_data->irq != AHC_LINUX_NOIRQ)
+			free_irq(ahc->platform_data->irq, ahc);
+		if (ahc->tag == BUS_SPACE_PIO
+		 && ahc->bsh.ioport != 0)
+			release_region(ahc->bsh.ioport, 256);
+		if (ahc->tag == BUS_SPACE_MEMIO
+		 && ahc->bsh.maddr != NULL) {
+			iounmap(ahc->bsh.maddr);
+			release_mem_region(ahc->platform_data->mem_busaddr,
+					   0x1000);
+		}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+		/*
+		 * In 2.4 we detach from the scsi midlayer before the PCI
+		 * layer invokes our remove callback.  No per-instance
+		 * detach is provided, so we must reach inside the PCI
+		 * subsystem's internals and detach our driver manually.
+		 */
+		if (ahc->dev_softc != NULL)
+			ahc->dev_softc->driver = NULL;
+#endif
+		free(ahc->platform_data, M_DEVBUF);
+	}
+}
+
+void
+ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
+{
+	ahc_platform_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
+				SCB_GET_CHANNEL(ahc, scb),
+				SCB_GET_LUN(scb), SCB_LIST_NULL,
+				ROLE_UNKNOWN, CAM_REQUEUE_REQ);
+}
+
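+/*
+ * Update a device's queuing mode (none, basic, or tagged) and adjust its
+ * queue depth accordingly, informing the mid-layer where possible.
+ */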
+void
+ahc_platform_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
+		      ahc_queue_alg alg)
+{
+	struct ahc_linux_device *dev;
+	int was_queuing;
+	int now_queuing;
+
+	dev = ahc_linux_get_device(ahc, devinfo->channel - 'A',
+				   devinfo->target,
+				   devinfo->lun, /*alloc*/FALSE);
+	if (dev == NULL)
+		return;
+	was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED);
+	switch (alg) {
+	default:
+	case AHC_QUEUE_NONE:
+		now_queuing = 0;
+		break; 
+	case AHC_QUEUE_BASIC:
+		now_queuing = AHC_DEV_Q_BASIC;
+		break;
+	case AHC_QUEUE_TAGGED:
+		now_queuing = AHC_DEV_Q_TAGGED;
+		break;
+	}
+	if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) == 0
+	 && (was_queuing != now_queuing)
+	 && (dev->active != 0)) {
+		dev->flags |= AHC_DEV_FREEZE_TIL_EMPTY;
+		dev->qfrozen++;
+	}
+
+	dev->flags &= ~(AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED|AHC_DEV_PERIODIC_OTAG);
+	if (now_queuing) {
+		u_int usertags;
+
+		usertags = ahc_linux_user_tagdepth(ahc, devinfo);
+		if (!was_queuing) {
+			/*
+			 * Start out aggressively and allow our
+			 * dynamic queue depth algorithm to take
+			 * care of the rest.
+			 */
+			dev->maxtags = usertags;
+			dev->openings = dev->maxtags - dev->active;
+		}
+		if (dev->maxtags == 0) {
+			/*
+			 * Queueing is disabled by the user.
+			 */
+			dev->openings = 1;
+		} else if (alg == AHC_QUEUE_TAGGED) {
+			dev->flags |= AHC_DEV_Q_TAGGED;
+			if (aic7xxx_periodic_otag != 0)
+				dev->flags |= AHC_DEV_PERIODIC_OTAG;
+		} else
+			dev->flags |= AHC_DEV_Q_BASIC;
+	} else {
+		/* We can only have one opening. */
+		dev->maxtags = 0;
+		dev->openings =  1 - dev->active;
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	if (dev->scsi_device != NULL) {
+		switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) {
+		case AHC_DEV_Q_BASIC:
+			scsi_adjust_queue_depth(dev->scsi_device,
+						MSG_SIMPLE_TASK,
+						dev->openings + dev->active);
+			break;
+		case AHC_DEV_Q_TAGGED:
+			scsi_adjust_queue_depth(dev->scsi_device,
+						MSG_ORDERED_TASK,
+						dev->openings + dev->active);
+			break;
+		default:
+			/*
+			 * We allow the OS to queue 2 untagged transactions to
+			 * us at any time even though we can only execute them
+			 * serially on the controller/device.  This should
+			 * remove some latency.
+			 */
+			scsi_adjust_queue_depth(dev->scsi_device,
+						/*NON-TAGGED*/0,
+						/*queue depth*/2);
+			break;
+		}
+	}
+#endif
+}
+
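+/*
+ * Complete, with the given status, any commands still waiting on the
+ * busy queues of devices matching the channel/target/lun wildcards.
+ * Returns the number of commands completed.
+ */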
+int
+ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel,
+			int lun, u_int tag, role_t role, uint32_t status)
+{
+	int chan;
+	int maxchan;
+	int targ;
+	int maxtarg;
+	int clun;
+	int maxlun;
+	int count;
+
+	if (tag != SCB_LIST_NULL)
+		return (0);
+
+	chan = 0;
+	if (channel != ALL_CHANNELS) {
+		chan = channel - 'A';
+		maxchan = chan + 1;
+	} else {
+		maxchan = (ahc->features & AHC_TWIN) ? 2 : 1;
+	}
+	targ = 0;
+	if (target != CAM_TARGET_WILDCARD) {
+		targ = target;
+		maxtarg = targ + 1;
+	} else {
+		maxtarg = (ahc->features & AHC_WIDE) ? 16 : 8;
+	}
+	clun = 0;
+	if (lun != CAM_LUN_WILDCARD) {
+		clun = lun;
+		maxlun = clun + 1;
+	} else {
+		maxlun = AHC_NUM_LUNS;
+	}
+
+	count = 0;
+	for (; chan < maxchan; chan++) {
+
+		for (; targ < maxtarg; targ++) {
+
+			for (; clun < maxlun; clun++) {
+				struct ahc_linux_device *dev;
+				struct ahc_busyq *busyq;
+				struct ahc_cmd *acmd;
+
+				dev = ahc_linux_get_device(ahc, chan,
+							   targ, clun,
+							   /*alloc*/FALSE);
+				if (dev == NULL)
+					continue;
+
+				busyq = &dev->busyq;
+				while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
+					Scsi_Cmnd *cmd;
+
+					cmd = &acmd_scsi_cmd(acmd);
+					TAILQ_REMOVE(busyq, acmd,
+						     acmd_links.tqe);
+					count++;
+					cmd->result = status << 16;
+					ahc_linux_queue_cmd_complete(ahc, cmd);
+				}
+			}
+		}
+	}
+
+	return (count);
+}
+
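+/*
+ * Completion queue timer handler: clear the pending-timer flag and
+ * drain the completion queue.
+ */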
+static void
+ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc)
+{
+	u_long flags;
+
+	ahc_lock(ahc, &flags);
+	del_timer(&ahc->platform_data->completeq_timer);
+	ahc->platform_data->flags &= ~AHC_RUN_CMPLT_Q_TIMER;
+	ahc_linux_run_complete_queue(ahc);
+	ahc_unlock(ahc, &flags);
+}
+
+static void
+ahc_linux_start_dv(struct ahc_softc *ahc)
+{
+
+	/*
+	 * Freeze the simq and signal ahc_linux_queue to not let any
+	 * more commands through.
+	 */
+	if ((ahc->platform_data->flags & AHC_DV_ACTIVE) == 0) {
+#ifdef AHC_DEBUG
+		if (ahc_debug & AHC_SHOW_DV)
+			printf("%s: Waking DV thread\n", ahc_name(ahc));
+#endif
+
+		ahc->platform_data->flags |= AHC_DV_ACTIVE;
+		ahc_linux_freeze_simq(ahc);
+
+		/* Wake up the DV kthread */
+		up(&ahc->platform_data->dv_sem);
+	}
+}
+
+static void
+ahc_linux_kill_dv_thread(struct ahc_softc *ahc)
+{
+	u_long s;
+
+	ahc_lock(ahc, &s);
+	if (ahc->platform_data->dv_pid != 0) {
+		ahc->platform_data->flags |= AHC_DV_SHUTDOWN;
+		ahc_unlock(ahc, &s);
+		up(&ahc->platform_data->dv_sem);
+
+		/*
+		 * Use the eh_sem as an indicator that the
+		 * dv thread is exiting.  Note that the dv
+		 * thread must still return after performing
+		 * the up on our semaphore before it has
+		 * completely exited this module.  Unfortunately,
+		 * there seems to be no easy way to wait for the
+		 * exit of a thread for which you are not the
+		 * parent (dv threads are parented by init).
+		 * Cross your fingers...
+		 */
+		down(&ahc->platform_data->eh_sem);
+
+		/*
+		 * Mark the dv thread as already dead.  This
+		 * avoids attempting to kill it a second time.
+		 * This is necessary because we must kill the
+		 * DV thread before calling ahc_free() in the
+		 * module shutdown case to avoid bogus locking
+		 * in the SCSI mid-layer, but ahc_free() is
+		 * called without killing the DV thread in the
+		 * instance detach case, so ahc_platform_free()
+		 * calls us again to verify that the DV thread
+		 * is dead.
+		 */
+		ahc->platform_data->dv_pid = 0;
+	} else {
+		ahc_unlock(ahc, &s);
+	}
+}
+
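+/*
+ * Kernel thread that performs domain validation.  It sleeps on dv_sem
+ * until woken, waits for pending commands to drain and for DV to be the
+ * only reason the SIMQ is frozen, validates each target in turn, and
+ * then releases the SIMQ.
+ */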
+static int
+ahc_linux_dv_thread(void *data)
+{
+	struct	ahc_softc *ahc;
+	int	target;
+	u_long	s;
+
+	ahc = (struct ahc_softc *)data;
+
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV)
+		printf("Launching DV Thread\n");
+#endif
+
+	/*
+	 * Complete thread creation.
+	 */
+	lock_kernel();
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+	/*
+	 * Don't care about any signals.
+	 */
+	siginitsetinv(&current->blocked, 0);
+
+	daemonize();
+	sprintf(current->comm, "ahc_dv_%d", ahc->unit);
+#else
+	daemonize("ahc_dv_%d", ahc->unit);
+	current->flags |= PF_FREEZE;
+#endif
+	unlock_kernel();
+
+	while (1) {
+		/*
+		 * Use down_interruptible() rather than down() to
+		 * avoid inclusion in the load average.
+		 */
+		down_interruptible(&ahc->platform_data->dv_sem);
+
+		/* Check to see if we've been signaled to exit */
+		ahc_lock(ahc, &s);
+		if ((ahc->platform_data->flags & AHC_DV_SHUTDOWN) != 0) {
+			ahc_unlock(ahc, &s);
+			break;
+		}
+		ahc_unlock(ahc, &s);
+
+#ifdef AHC_DEBUG
+		if (ahc_debug & AHC_SHOW_DV)
+			printf("%s: Beginning Domain Validation\n",
+			       ahc_name(ahc));
+#endif
+
+		/*
+		 * Wait for any pending commands to drain before proceeding.
+		 */
+		ahc_lock(ahc, &s);
+		while (LIST_FIRST(&ahc->pending_scbs) != NULL) {
+			ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_EMPTY;
+			ahc_unlock(ahc, &s);
+			down_interruptible(&ahc->platform_data->dv_sem);
+			ahc_lock(ahc, &s);
+		}
+
+		/*
+		 * Wait for the SIMQ to be released so that DV is the
+		 * only reason the queue is frozen.
+		 */
+		while (AHC_DV_SIMQ_FROZEN(ahc) == 0) {
+			ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_RELEASE;
+			ahc_unlock(ahc, &s);
+			down_interruptible(&ahc->platform_data->dv_sem);
+			ahc_lock(ahc, &s);
+		}
+		ahc_unlock(ahc, &s);
+
+		for (target = 0; target < AHC_NUM_TARGETS; target++)
+			ahc_linux_dv_target(ahc, target);
+
+		ahc_lock(ahc, &s);
+		ahc->platform_data->flags &= ~AHC_DV_ACTIVE;
+		ahc_unlock(ahc, &s);
+
+		/*
+		 * Release the SIMQ so that normal commands are
+		 * allowed to continue on the bus.
+		 */
+		ahc_linux_release_simq((u_long)ahc);
+	}
+	up(&ahc->platform_data->eh_sem);
+	return (0);
+}
+
+#define AHC_LINUX_DV_INQ_SHORT_LEN	36
+#define AHC_LINUX_DV_INQ_LEN		256
+#define AHC_LINUX_DV_TIMEOUT		(HZ / 4)
+
+#define AHC_SET_DV_STATE(ahc, targ, newstate) \
+	ahc_set_dv_state(ahc, targ, newstate, __LINE__)
+
+static __inline void
+ahc_set_dv_state(struct ahc_softc *ahc, struct ahc_linux_target *targ,
+		 ahc_dv_state newstate, u_int line)
+{
+	ahc_dv_state oldstate;
+
+	oldstate = targ->dv_state;
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV)
+		printf("%s:%d: Going from state %d to state %d\n",
+		       ahc_name(ahc), line, oldstate, newstate);
+#endif
+
+	if (oldstate == newstate)
+		targ->dv_state_retry++;
+	else
+		targ->dv_state_retry = 0;
+	targ->dv_state = newstate;
+}
+
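+/*
+ * Run the domain validation state machine for a single target, issuing
+ * commands through a private scsi_device/scsi_cmnd pair and waiting for
+ * each to complete before transitioning state.
+ */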
+static void
+ahc_linux_dv_target(struct ahc_softc *ahc, u_int target_offset)
+{
+	struct	 ahc_devinfo devinfo;
+	struct	 ahc_linux_target *targ;
+	struct	 scsi_cmnd *cmd;
+	struct	 scsi_device *scsi_dev;
+	struct	 scsi_sense_data *sense;
+	uint8_t *buffer;
+	u_long	 s;
+	u_int	 timeout;
+	int	 echo_size;
+
+	sense = NULL;
+	buffer = NULL;
+	echo_size = 0;
+	ahc_lock(ahc, &s);
+	targ = ahc->platform_data->targets[target_offset];
+	if (targ == NULL || (targ->flags & AHC_DV_REQUIRED) == 0) {
+		ahc_unlock(ahc, &s);
+		return;
+	}
+	ahc_compile_devinfo(&devinfo,
+			    targ->channel == 0 ? ahc->our_id : ahc->our_id_b,
+			    targ->target, /*lun*/0, targ->channel + 'A',
+			    ROLE_INITIATOR);
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV) {
+		ahc_print_devinfo(ahc, &devinfo);
+		printf("Performing DV\n");
+	}
+#endif
+
+	ahc_unlock(ahc, &s);
+
+	cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
+	scsi_dev = malloc(sizeof(struct scsi_device), M_DEVBUF, M_WAITOK);
+	scsi_dev->host = ahc->platform_data->host;
+	scsi_dev->id = devinfo.target;
+	scsi_dev->lun = devinfo.lun;
+	scsi_dev->channel = devinfo.channel - 'A';
+	ahc->platform_data->dv_scsi_dev = scsi_dev;
+
+	AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_INQ_SHORT_ASYNC);
+
+	while (targ->dv_state != AHC_DV_STATE_EXIT) {
+		timeout = AHC_LINUX_DV_TIMEOUT;
+		switch (targ->dv_state) {
+		case AHC_DV_STATE_INQ_SHORT_ASYNC:
+		case AHC_DV_STATE_INQ_ASYNC:
+		case AHC_DV_STATE_INQ_ASYNC_VERIFY:
+			/*
+			 * Set things to async narrow to reduce the
+			 * chance that the INQ will fail.
+			 */
+			ahc_lock(ahc, &s);
+			ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
+					 AHC_TRANS_GOAL, /*paused*/FALSE);
+			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+				      AHC_TRANS_GOAL, /*paused*/FALSE);
+			ahc_unlock(ahc, &s);
+			timeout = 10 * HZ;
+			targ->flags &= ~AHC_INQ_VALID;
+			/* FALLTHROUGH */
+		case AHC_DV_STATE_INQ_VERIFY:
+		{
+			u_int inq_len;
+
+			if (targ->dv_state == AHC_DV_STATE_INQ_SHORT_ASYNC)
+				inq_len = AHC_LINUX_DV_INQ_SHORT_LEN;
+			else
+				inq_len = targ->inq_data->additional_length + 5;
+			ahc_linux_dv_inq(ahc, cmd, &devinfo, targ, inq_len);
+			break;
+		}
+		case AHC_DV_STATE_TUR:
+		case AHC_DV_STATE_BUSY:
+			timeout = 5 * HZ;
+			ahc_linux_dv_tur(ahc, cmd, &devinfo);
+			break;
+		case AHC_DV_STATE_REBD:
+			ahc_linux_dv_rebd(ahc, cmd, &devinfo, targ);
+			break;
+		case AHC_DV_STATE_WEB:
+			ahc_linux_dv_web(ahc, cmd, &devinfo, targ);
+			break;
+
+		case AHC_DV_STATE_REB:
+			ahc_linux_dv_reb(ahc, cmd, &devinfo, targ);
+			break;
+
+		case AHC_DV_STATE_SU:
+			ahc_linux_dv_su(ahc, cmd, &devinfo, targ);
+			timeout = 50 * HZ;
+			break;
+
+		default:
+			ahc_print_devinfo(ahc, &devinfo);
+			printf("Unknown DV state %d\n", targ->dv_state);
+			goto out;
+		}
+
+		/* Queue the command and wait for it to complete */
+		/* Abuse eh_timeout in the scsi_cmnd struct for our purposes */
+		init_timer(&cmd->eh_timeout);
+#ifdef AHC_DEBUG
+		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
+			/*
+			 * All of the printfs during negotiation
+			 * really slow down the negotiation.
+			 * Add a bit of time just to be safe.
+			 */
+			timeout += HZ;
+#endif
+		scsi_add_timer(cmd, timeout, ahc_linux_dv_timeout);
+		/*
+		 * In 2.5.X, it is assumed that all calls from the
+		 * "midlayer" (which we are emulating) will have the
+		 * ahc host lock held.  For other kernels, the
+		 * io_request_lock must be held.
+		 */
+#if AHC_SCSI_HAS_HOST_LOCK != 0
+		ahc_lock(ahc, &s);
+#else
+		spin_lock_irqsave(&io_request_lock, s);
+#endif
+		ahc_linux_queue(cmd, ahc_linux_dv_complete);
+#if AHC_SCSI_HAS_HOST_LOCK != 0
+		ahc_unlock(ahc, &s);
+#else
+		spin_unlock_irqrestore(&io_request_lock, s);
+#endif
+		down_interruptible(&ahc->platform_data->dv_cmd_sem);
+		/*
+		 * Wait for the SIMQ to be released so that DV is the
+		 * only reason the queue is frozen.
+		 */
+		ahc_lock(ahc, &s);
+		while (AHC_DV_SIMQ_FROZEN(ahc) == 0) {
+			ahc->platform_data->flags |= AHC_DV_WAIT_SIMQ_RELEASE;
+			ahc_unlock(ahc, &s);
+			down_interruptible(&ahc->platform_data->dv_sem);
+			ahc_lock(ahc, &s);
+		}
+		ahc_unlock(ahc, &s);
+
+		ahc_linux_dv_transition(ahc, cmd, &devinfo, targ);
+	}
+
+out:
+	if ((targ->flags & AHC_INQ_VALID) != 0
+	 && ahc_linux_get_device(ahc, devinfo.channel - 'A',
+				 devinfo.target, devinfo.lun,
+				 /*alloc*/FALSE) == NULL) {
+		/*
+		 * The DV state machine failed to configure this device.  
+		 * This is normal if DV is disabled.  Since we have inquiry
+		 * data, filter it and use the "optimistic" negotiation
+		 * parameters found in the inquiry string.
+		 */
+		ahc_linux_filter_inquiry(ahc, &devinfo);
+		if ((targ->flags & (AHC_BASIC_DV|AHC_ENHANCED_DV)) != 0) {
+			ahc_print_devinfo(ahc, &devinfo);
+			printf("DV failed to configure device.  "
+			       "Please file a bug report against "
+			       "this driver.\n");
+		}
+	}
+
+	if (cmd != NULL)
+		free(cmd, M_DEVBUF);
+
+	if (ahc->platform_data->dv_scsi_dev != NULL) {
+		free(ahc->platform_data->dv_scsi_dev, M_DEVBUF);
+		ahc->platform_data->dv_scsi_dev = NULL;
+	}
+
+	ahc_lock(ahc, &s);
+	if (targ->dv_buffer != NULL) {
+		free(targ->dv_buffer, M_DEVBUF);
+		targ->dv_buffer = NULL;
+	}
+	if (targ->dv_buffer1 != NULL) {
+		free(targ->dv_buffer1, M_DEVBUF);
+		targ->dv_buffer1 = NULL;
+	}
+	targ->flags &= ~AHC_DV_REQUIRED;
+	if (targ->refcount == 0)
+		ahc_linux_free_target(ahc, targ);
+	ahc_unlock(ahc, &s);
+}
+
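+/*
+ * Advance a target's domain validation state machine based on the status
+ * of the command that just completed.
+ */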
+static void
+ahc_linux_dv_transition(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
+			struct ahc_devinfo *devinfo,
+			struct ahc_linux_target *targ)
+{
+	u_int32_t status;
+
+	status = aic_error_action(cmd, targ->inq_data,
+				  ahc_cmd_get_transaction_status(cmd),
+				  ahc_cmd_get_scsi_status(cmd));
+	
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV) {
+		ahc_print_devinfo(ahc, devinfo);
+		printf("Entering ahc_linux_dv_transition, state= %d, "
+		       "status= 0x%x, cmd->result= 0x%x\n", targ->dv_state,
+		       status, cmd->result);
+	}
+#endif
+
+	switch (targ->dv_state) {
+	case AHC_DV_STATE_INQ_SHORT_ASYNC:
+	case AHC_DV_STATE_INQ_ASYNC:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+		{
+			AHC_SET_DV_STATE(ahc, targ, targ->dv_state+1);
+			break;
+		}
+		case SS_INQ_REFRESH:
+			AHC_SET_DV_STATE(ahc, targ,
+					 AHC_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_TUR:
+		case SS_RETRY:
+			AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
+			if (ahc_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ)
+				targ->dv_state_retry--;
+			if ((status & SS_ERRMASK) == EBUSY)
+				AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
+			if (targ->dv_state_retry < 10)
+				break;
+			/* FALLTHROUGH */
+		default:
+			AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+#ifdef AHC_DEBUG
+			if (ahc_debug & AHC_SHOW_DV) {
+				ahc_print_devinfo(ahc, devinfo);
+				printf("Failed DV inquiry, skipping\n");
+			}
+#endif
+			break;
+		}
+		break;
+	case AHC_DV_STATE_INQ_ASYNC_VERIFY:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+		{
+			u_int xportflags;
+			u_int spi3data;
+
+			if (memcmp(targ->inq_data, targ->dv_buffer,
+				   AHC_LINUX_DV_INQ_LEN) != 0) {
+				/*
+				 * Inquiry data must have changed.
+				 * Try from the top again.
+				 */
+				AHC_SET_DV_STATE(ahc, targ,
+						 AHC_DV_STATE_INQ_SHORT_ASYNC);
+				break;
+			}
+
+			AHC_SET_DV_STATE(ahc, targ, targ->dv_state+1);
+			targ->flags |= AHC_INQ_VALID;
+			if (ahc_linux_user_dv_setting(ahc) == 0)
+				break;
+
+			xportflags = targ->inq_data->flags;
+			if ((xportflags & (SID_Sync|SID_WBus16)) == 0)
+				break;
+
+			spi3data = targ->inq_data->spi3data;
+			switch (spi3data & SID_SPI_CLOCK_DT_ST) {
+			default:
+			case SID_SPI_CLOCK_ST:
+				/* Assume only basic DV is supported. */
+				targ->flags |= AHC_BASIC_DV;
+				break;
+			case SID_SPI_CLOCK_DT:
+			case SID_SPI_CLOCK_DT_ST:
+				targ->flags |= AHC_ENHANCED_DV;
+				break;
+			}
+			break;
+		}
+		case SS_INQ_REFRESH:
+			AHC_SET_DV_STATE(ahc, targ,
+					 AHC_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_TUR:
+		case SS_RETRY:
+			AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
+			if (ahc_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ)
+				targ->dv_state_retry--;
+
+			if ((status & SS_ERRMASK) == EBUSY)
+				AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
+			if (targ->dv_state_retry < 10)
+				break;
+			/* FALLTHROUGH */
+		default:
+			AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+#ifdef AHC_DEBUG
+			if (ahc_debug & AHC_SHOW_DV) {
+				ahc_print_devinfo(ahc, devinfo);
+				printf("Failed DV inquiry, skipping\n");
+			}
+#endif
+			break;
+		}
+		break;
+	case AHC_DV_STATE_INQ_VERIFY:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+		{
+
+			if (memcmp(targ->inq_data, targ->dv_buffer,
+				   AHC_LINUX_DV_INQ_LEN) == 0) {
+				AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+				break;
+			}
+#ifdef AHC_DEBUG
+			if (ahc_debug & AHC_SHOW_DV) {
+				int i;
+
+				ahc_print_devinfo(ahc, devinfo);
+				printf("Inquiry buffer mismatch:");
+				for (i = 0; i < AHC_LINUX_DV_INQ_LEN; i++) {
+					if ((i & 0xF) == 0)
+						printf("\n        ");
+					printf("0x%x:0x0%x ",
+					       ((uint8_t *)targ->inq_data)[i], 
+					       targ->dv_buffer[i]);
+				}
+				printf("\n");
+			}
+#endif
+
+			if (ahc_linux_fallback(ahc, devinfo) != 0) {
+				AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+				break;
+			}
+			/*
+			 * Do not count "falling back"
+			 * against our retries.
+			 */
+			targ->dv_state_retry = 0;
+			AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
+			break;
+		}
+		case SS_INQ_REFRESH:
+			AHC_SET_DV_STATE(ahc, targ,
+					 AHC_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_TUR:
+		case SS_RETRY:
+			AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
+			if (ahc_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ) {
+				targ->dv_state_retry--;
+			} else if ((status & SSQ_FALLBACK) != 0) {
+				if (ahc_linux_fallback(ahc, devinfo) != 0) {
+					AHC_SET_DV_STATE(ahc, targ,
+							 AHC_DV_STATE_EXIT);
+					break;
+				}
+				/*
+				 * Do not count "falling back"
+				 * against our retries.
+				 */
+				targ->dv_state_retry = 0;
+			} else if ((status & SS_ERRMASK) == EBUSY)
+				AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
+			if (targ->dv_state_retry < 10)
+				break;
+			/* FALLTHROUGH */
+		default:
+			AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+#ifdef AHC_DEBUG
+			if (ahc_debug & AHC_SHOW_DV) {
+				ahc_print_devinfo(ahc, devinfo);
+				printf("Failed DV inquiry, skipping\n");
+			}
+#endif
+			break;
+		}
+		break;
+
+	case AHC_DV_STATE_TUR:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+			if ((targ->flags & AHC_BASIC_DV) != 0) {
+				ahc_linux_filter_inquiry(ahc, devinfo);
+				AHC_SET_DV_STATE(ahc, targ,
+						 AHC_DV_STATE_INQ_VERIFY);
+			} else if ((targ->flags & AHC_ENHANCED_DV) != 0) {
+				AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_REBD);
+			} else {
+				AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+			}
+			break;
+		case SS_RETRY:
+		case SS_TUR:
+			if ((status & SS_ERRMASK) == EBUSY) {
+				AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_BUSY);
+				break;
+			}
+			AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
+			if (ahc_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ) {
+				targ->dv_state_retry--;
+			} else if ((status & SSQ_FALLBACK) != 0) {
+				if (ahc_linux_fallback(ahc, devinfo) != 0) {
+					AHC_SET_DV_STATE(ahc, targ,
+							 AHC_DV_STATE_EXIT);
+					break;
+				}
+				/*
+				 * Do not count "falling back"
+				 * against our retries.
+				 */
+				targ->dv_state_retry = 0;
+			}
+			if (targ->dv_state_retry >= 10) {
+#ifdef AHC_DEBUG
+				if (ahc_debug & AHC_SHOW_DV) {
+					ahc_print_devinfo(ahc, devinfo);
+					printf("DV TUR retries exhausted\n");
+				}
+#endif
+				AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+				break;
+			}
+			if (status & SSQ_DELAY)
+				ssleep(1);
+
+			break;
+		case SS_START:
+			AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_SU);
+			break;
+		case SS_INQ_REFRESH:
+			AHC_SET_DV_STATE(ahc, targ,
+					 AHC_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		default:
+			AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+			break;
+		}
+		break;
+
+	case AHC_DV_STATE_REBD:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+		{
+			uint32_t echo_size;
+
+			AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_WEB);
+			echo_size = scsi_3btoul(&targ->dv_buffer[1]);
+			echo_size &= 0x1FFF;
+#ifdef AHC_DEBUG
+			if (ahc_debug & AHC_SHOW_DV) {
+				ahc_print_devinfo(ahc, devinfo);
+				printf("Echo buffer size= %d\n", echo_size);
+			}
+#endif
+			if (echo_size == 0) {
+				AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+				break;
+			}
+
+			/* Generate the buffer pattern */
+			targ->dv_echo_size = echo_size;
+			ahc_linux_generate_dv_pattern(targ);
+			/*
+			 * Setup initial negotiation values.
+			 */
+			ahc_linux_filter_inquiry(ahc, devinfo);
+			break;
+		}
+		case SS_INQ_REFRESH:
+			AHC_SET_DV_STATE(ahc, targ,
+					 AHC_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_RETRY:
+			AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
+			if (ahc_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ)
+				targ->dv_state_retry--;
+			if (targ->dv_state_retry <= 10)
+				break;
+#ifdef AHC_DEBUG
+			if (ahc_debug & AHC_SHOW_DV) {
+				ahc_print_devinfo(ahc, devinfo);
+				printf("DV REBD retries exhausted\n");
+			}
+#endif
+			/* FALLTHROUGH */
+		case SS_FATAL:
+		default:
+			/*
+			 * Setup initial negotiation values
+			 * and try level 1 DV.
+			 */
+			ahc_linux_filter_inquiry(ahc, devinfo);
+			AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_INQ_VERIFY);
+			targ->dv_echo_size = 0;
+			break;
+		}
+		break;
+
+	case AHC_DV_STATE_WEB:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+			AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_REB);
+			break;
+		case SS_INQ_REFRESH:
+			AHC_SET_DV_STATE(ahc, targ,
+					 AHC_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_RETRY:
+			AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
+			if (ahc_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ) {
+				targ->dv_state_retry--;
+			} else if ((status & SSQ_FALLBACK) != 0) {
+				if (ahc_linux_fallback(ahc, devinfo) != 0) {
+					AHC_SET_DV_STATE(ahc, targ,
+							 AHC_DV_STATE_EXIT);
+					break;
+				}
+				/*
+				 * Do not count "falling back"
+				 * against our retries.
+				 */
+				targ->dv_state_retry = 0;
+			}
+			if (targ->dv_state_retry <= 10)
+				break;
+#ifdef AHC_DEBUG
+			if (ahc_debug & AHC_SHOW_DV) {
+				ahc_print_devinfo(ahc, devinfo);
+				printf("DV WEB retries exhausted\n");
+			}
+#endif
+			/* FALLTHROUGH */
+		default:
+			AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+			break;
+		}
+		break;
+
+	case AHC_DV_STATE_REB:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+			if (memcmp(targ->dv_buffer, targ->dv_buffer1,
+				   targ->dv_echo_size) != 0) {
+				if (ahc_linux_fallback(ahc, devinfo) != 0)
+					AHC_SET_DV_STATE(ahc, targ,
+							 AHC_DV_STATE_EXIT);
+				else
+					AHC_SET_DV_STATE(ahc, targ,
+							 AHC_DV_STATE_WEB);
+				break;
+			}
+			
+			if (targ->dv_buffer != NULL) {
+				free(targ->dv_buffer, M_DEVBUF);
+				targ->dv_buffer = NULL;
+			}
+			if (targ->dv_buffer1 != NULL) {
+				free(targ->dv_buffer1, M_DEVBUF);
+				targ->dv_buffer1 = NULL;
+			}
+			AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+			break;
+		case SS_INQ_REFRESH:
+			AHC_SET_DV_STATE(ahc, targ,
+					 AHC_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_RETRY:
+			AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
+			if (ahc_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ) {
+				targ->dv_state_retry--;
+			} else if ((status & SSQ_FALLBACK) != 0) {
+				if (ahc_linux_fallback(ahc, devinfo) != 0) {
+					AHC_SET_DV_STATE(ahc, targ,
+							 AHC_DV_STATE_EXIT);
+					break;
+				}
+				AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_WEB);
+			}
+			if (targ->dv_state_retry <= 10) {
+				if ((status & (SSQ_DELAY_RANDOM|SSQ_DELAY))!= 0)
+					msleep(ahc->our_id*1000/10);
+				break;
+			}
+#ifdef AHC_DEBUG
+			if (ahc_debug & AHC_SHOW_DV) {
+				ahc_print_devinfo(ahc, devinfo);
+				printf("DV REB retries exhausted\n");
+			}
+#endif
+			/* FALLTHROUGH */
+		default:
+			AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+			break;
+		}
+		break;
+
+	case AHC_DV_STATE_SU:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+		case SS_INQ_REFRESH:
+			AHC_SET_DV_STATE(ahc, targ,
+					 AHC_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		default:
+			AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+			break;
+		}
+		break;
+
+	case AHC_DV_STATE_BUSY:
+		switch (status & SS_MASK) {
+		case SS_NOP:
+		case SS_INQ_REFRESH:
+			AHC_SET_DV_STATE(ahc, targ,
+					 AHC_DV_STATE_INQ_SHORT_ASYNC);
+			break;
+		case SS_TUR:
+		case SS_RETRY:
+			AHC_SET_DV_STATE(ahc, targ, targ->dv_state);
+			if (ahc_cmd_get_transaction_status(cmd)
+			 == CAM_REQUEUE_REQ) {
+				targ->dv_state_retry--;
+			} else if (targ->dv_state_retry < 60) {
+				if ((status & SSQ_DELAY) != 0)
+					ssleep(1);
+			} else {
+#ifdef AHC_DEBUG
+				if (ahc_debug & AHC_SHOW_DV) {
+					ahc_print_devinfo(ahc, devinfo);
+					printf("DV BUSY retries exhausted\n");
+				}
+#endif
+				AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+			}
+			break;
+		default:
+			AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+			break;
+		}
+		break;
+
+	default:
+		printf("%s: Invalid DV completion state %d\n", ahc_name(ahc),
+		       targ->dv_state);
+		AHC_SET_DV_STATE(ahc, targ, AHC_DV_STATE_EXIT);
+		break;
+	}
+}
+
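+/*
+ * Initialize a scratch scsi_cmnd for a DV request: zero it, point it at
+ * the DV scsi_device, and route completion to ahc_linux_dv_complete so
+ * the DV state machine is woken when the command finishes.
+ */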
+static void
+ahc_linux_dv_fill_cmd(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
+		      struct ahc_devinfo *devinfo)
+{
+	memset(cmd, 0, sizeof(struct scsi_cmnd));
+	cmd->device = ahc->platform_data->dv_scsi_dev;
+	cmd->scsi_done = ahc_linux_dv_complete;
+}
+
+/*
+ * Synthesize an inquiry command.  On the return trip, it'll be
+ * sniffed and the device transfer settings set for us.
+ */
+static void
+ahc_linux_dv_inq(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
+		 struct ahc_devinfo *devinfo, struct ahc_linux_target *targ,
+		 u_int request_length)
+{
+
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV) {
+		ahc_print_devinfo(ahc, devinfo);
+		printf("Sending INQ\n");
+	}
+#endif
+	if (targ->inq_data == NULL)
+		targ->inq_data = malloc(AHC_LINUX_DV_INQ_LEN,
+					M_DEVBUF, M_WAITOK);
+	if (targ->dv_state > AHC_DV_STATE_INQ_ASYNC) {
+		if (targ->dv_buffer != NULL)
+			free(targ->dv_buffer, M_DEVBUF);
+		targ->dv_buffer = malloc(AHC_LINUX_DV_INQ_LEN,
+					 M_DEVBUF, M_WAITOK);
+	}
+
+	ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
+	cmd->sc_data_direction = SCSI_DATA_READ;
+	cmd->cmd_len = 6;
+	cmd->cmnd[0] = INQUIRY;
+	cmd->cmnd[4] = request_length;
+	cmd->request_bufflen = request_length;
+	if (targ->dv_state > AHC_DV_STATE_INQ_ASYNC)
+		cmd->request_buffer = targ->dv_buffer;
+	else
+		cmd->request_buffer = targ->inq_data;
+	memset(cmd->request_buffer, 0, AHC_LINUX_DV_INQ_LEN);
+}
+
+static void
+ahc_linux_dv_tur(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
+		 struct ahc_devinfo *devinfo)
+{
+
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV) {
+		ahc_print_devinfo(ahc, devinfo);
+		printf("Sending TUR\n");
+	}
+#endif
+	/* Do a TUR to clear out any non-fatal transitional state */
+	ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
+	cmd->sc_data_direction = SCSI_DATA_NONE;
+	cmd->cmd_len = 6;
+	cmd->cmnd[0] = TEST_UNIT_READY;
+}
+
+#define AHC_REBD_LEN 4
+
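+/*
+ * Build a READ BUFFER command in "read echo buffer descriptor" mode
+ * (0x0b).  The 4 byte descriptor returned by the target tells us the
+ * size of its echo buffer for the enhanced DV write/read tests.
+ */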
+static void
+ahc_linux_dv_rebd(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
+		 struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
+{
+
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV) {
+		ahc_print_devinfo(ahc, devinfo);
+		printf("Sending REBD\n");
+	}
+#endif
+	if (targ->dv_buffer != NULL)
+		free(targ->dv_buffer, M_DEVBUF);
+	targ->dv_buffer = malloc(AHC_REBD_LEN, M_DEVBUF, M_WAITOK);
+	ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
+	cmd->sc_data_direction = SCSI_DATA_READ;
+	cmd->cmd_len = 10;
+	cmd->cmnd[0] = READ_BUFFER;
+	cmd->cmnd[1] = 0x0b;
+	scsi_ulto3b(AHC_REBD_LEN, &cmd->cmnd[6]);
+	cmd->request_bufflen = AHC_REBD_LEN;
+	cmd->underflow = cmd->request_bufflen;
+	cmd->request_buffer = targ->dv_buffer;
+}
+
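+/*
+ * Build a WRITE BUFFER command in echo buffer mode (0x0a) that sends
+ * the generated DV pattern to the target's echo buffer.
+ */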
+static void
+ahc_linux_dv_web(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
+		 struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
+{
+
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV) {
+		ahc_print_devinfo(ahc, devinfo);
+		printf("Sending WEB\n");
+	}
+#endif
+	ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
+	cmd->sc_data_direction = SCSI_DATA_WRITE;
+	cmd->cmd_len = 10;
+	cmd->cmnd[0] = WRITE_BUFFER;
+	cmd->cmnd[1] = 0x0a;
+	scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
+	cmd->request_bufflen = targ->dv_echo_size;
+	cmd->underflow = cmd->request_bufflen;
+	cmd->request_buffer = targ->dv_buffer;
+}
+
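+/*
+ * Build a READ BUFFER command in echo buffer mode (0x0a) that reads the
+ * target's echo buffer back into dv_buffer1 so it can be compared
+ * against the pattern written during the WEB phase.
+ */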
+static void
+ahc_linux_dv_reb(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
+		 struct ahc_devinfo *devinfo, struct ahc_linux_target *targ)
+{
+
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV) {
+		ahc_print_devinfo(ahc, devinfo);
+		printf("Sending REB\n");
+	}
+#endif
+	ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
+	cmd->sc_data_direction = SCSI_DATA_READ;
+	cmd->cmd_len = 10;
+	cmd->cmnd[0] = READ_BUFFER;
+	cmd->cmnd[1] = 0x0a;
+	scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
+	cmd->request_bufflen = targ->dv_echo_size;
+	cmd->underflow = cmd->request_bufflen;
+	cmd->request_buffer = targ->dv_buffer1;
+}
+
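+/*
+ * Build a START STOP UNIT command to spin the device up, setting the
+ * load/eject bit only for removable media.
+ */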
+static void
+ahc_linux_dv_su(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
+		struct ahc_devinfo *devinfo,
+		struct ahc_linux_target *targ)
+{
+	u_int le;
+
+	le = SID_IS_REMOVABLE(targ->inq_data) ? SSS_LOEJ : 0;
+
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV) {
+		ahc_print_devinfo(ahc, devinfo);
+		printf("Sending SU\n");
+	}
+#endif
+	ahc_linux_dv_fill_cmd(ahc, cmd, devinfo);
+	cmd->sc_data_direction = SCSI_DATA_NONE;
+	cmd->cmd_len = 6;
+	cmd->cmnd[0] = START_STOP_UNIT;
+	cmd->cmnd[4] = le | SSS_START;
+}
+
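+/*
+ * Step the negotiation goal for this target down one notch after a DV
+ * failure.  Depending on the current speed this means dropping IU_REQ,
+ * then DT and the other PPR options, alternating between wide and
+ * narrow settings, and finally slowing the sync rate.  Returns non-zero
+ * when there is nothing slower left to try.
+ */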
+static int
+ahc_linux_fallback(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+	struct	ahc_linux_target *targ;
+	struct	ahc_initiator_tinfo *tinfo;
+	struct	ahc_transinfo *goal;
+	struct	ahc_tmode_tstate *tstate;
+	struct	ahc_syncrate *syncrate;
+	u_long	s;
+	u_int	width;
+	u_int	period;
+	u_int	offset;
+	u_int	ppr_options;
+	u_int	cur_speed;
+	u_int	wide_speed;
+	u_int	narrow_speed;
+	u_int	fallback_speed;
+
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV) {
+		ahc_print_devinfo(ahc, devinfo);
+		printf("Trying to fallback\n");
+	}
+#endif
+	ahc_lock(ahc, &s);
+	targ = ahc->platform_data->targets[devinfo->target_offset];
+	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
+				    devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+	goal = &tinfo->goal;
+	width = goal->width;
+	period = goal->period;
+	offset = goal->offset;
+	ppr_options = goal->ppr_options;
+	if (offset == 0)
+		period = AHC_ASYNC_XFER_PERIOD;
+	if (targ->dv_next_narrow_period == 0)
+		targ->dv_next_narrow_period = MAX(period, AHC_SYNCRATE_ULTRA2);
+	if (targ->dv_next_wide_period == 0)
+		targ->dv_next_wide_period = period;
+	if (targ->dv_max_width == 0)
+		targ->dv_max_width = width;
+	if (targ->dv_max_ppr_options == 0)
+		targ->dv_max_ppr_options = ppr_options;
+	if (targ->dv_last_ppr_options == 0)
+		targ->dv_last_ppr_options = ppr_options;
+
+	cur_speed = aic_calc_speed(width, period, offset, AHC_SYNCRATE_MIN);
+	wide_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_16_BIT,
+					  targ->dv_next_wide_period,
+					  MAX_OFFSET,
+					  AHC_SYNCRATE_MIN);
+	narrow_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_8_BIT,
+					    targ->dv_next_narrow_period,
+					    MAX_OFFSET,
+					    AHC_SYNCRATE_MIN);
+	fallback_speed = aic_calc_speed(width, period+1, offset,
+					AHC_SYNCRATE_MIN);
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV) {
+		printf("cur_speed= %d, wide_speed= %d, narrow_speed= %d, "
+		       "fallback_speed= %d\n", cur_speed, wide_speed,
+		       narrow_speed, fallback_speed);
+	}
+#endif
+
+	if (cur_speed > 160000) {
+		/*
+		 * Paced/DT/IU_REQ only transfer speeds.  All we
+		 * can do is fall back in terms of syncrate.
+		 */
+		period++;
+	} else if (cur_speed > 80000) {
+		if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
+			/*
+			 * Try without IU_REQ as it may be confusing
+			 * an expander.
+			 */
+			ppr_options &= ~MSG_EXT_PPR_IU_REQ;
+		} else {
+			/*
+			 * Paced/DT only transfer speeds.  All we
+			 * can do is fall back in terms of syncrate.
+			 */
+			period++;
+			ppr_options = targ->dv_max_ppr_options;
+		}
+	} else if (cur_speed > 3300) {
+
+		/*
+		 * In this range we have the following
+		 * options ordered from highest to
+		 * lowest desirability:
+		 *
+		 * o Wide/DT
+		 * o Wide/non-DT
+		 * o Narrow at a potentially higher sync rate.
+		 *
+		 * All modes are tested with and without IU_REQ
+		 * set since using IUs may confuse an expander.
+		 */
+		if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
+
+			ppr_options &= ~MSG_EXT_PPR_IU_REQ;
+		} else if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
+			/*
+			 * Try going non-DT.
+			 */
+			ppr_options = targ->dv_max_ppr_options;
+			ppr_options &= ~MSG_EXT_PPR_DT_REQ;
+		} else if (targ->dv_last_ppr_options != 0) {
+			/*
+			 * Try without QAS or any other PPR options.
+			 * We may need a non-PPR message to work with
+			 * an expander.  We look at the "last PPR options"
+			 * so we will perform this fallback even if the
+			 * target responded to our PPR negotiation with
+			 * no option bits set.
+			 */
+			ppr_options = 0;
+		} else if (width == MSG_EXT_WDTR_BUS_16_BIT) {
+			/*
+			 * If the next narrow speed is greater than
+			 * the next wide speed, fall back to narrow.
+			 * Otherwise fall back to the next DT/Wide setting.
+			 * The narrow async speed will always be smaller
+			 * than the wide async speed, so handle this case
+			 * specifically.
+			 */
+			ppr_options = targ->dv_max_ppr_options;
+			if (narrow_speed > fallback_speed
+			 || period >= AHC_ASYNC_XFER_PERIOD) {
+				targ->dv_next_wide_period = period+1;
+				width = MSG_EXT_WDTR_BUS_8_BIT;
+				period = targ->dv_next_narrow_period;
+			} else {
+				period++;
+			}
+		} else if ((ahc->features & AHC_WIDE) != 0
+			&& targ->dv_max_width != 0
+			&& wide_speed >= fallback_speed
+			&& (targ->dv_next_wide_period <= AHC_ASYNC_XFER_PERIOD
+			 || period >= AHC_ASYNC_XFER_PERIOD)) {
+
+			/*
+			 * We are narrow.  Try falling back
+			 * to the next wide speed with 
+			 * all supported ppr options set.
+			 */
+			targ->dv_next_narrow_period = period+1;
+			width = MSG_EXT_WDTR_BUS_16_BIT;
+			period = targ->dv_next_wide_period;
+			ppr_options = targ->dv_max_ppr_options;
+		} else {
+			/* Only narrow fallback is allowed. */
+			period++;
+			ppr_options = targ->dv_max_ppr_options;
+		}
+	} else {
+		ahc_unlock(ahc, &s);
+		return (-1);
+	}
+	offset = MAX_OFFSET;
+	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+				     AHC_SYNCRATE_DT);
+	ahc_set_width(ahc, devinfo, width, AHC_TRANS_GOAL, FALSE);
+	if (period == 0) {
+		period = 0;
+		offset = 0;
+		ppr_options = 0;
+		if (width == MSG_EXT_WDTR_BUS_8_BIT)
+			targ->dv_next_narrow_period = AHC_ASYNC_XFER_PERIOD;
+		else
+			targ->dv_next_wide_period = AHC_ASYNC_XFER_PERIOD;
+	}
+	ahc_set_syncrate(ahc, devinfo, syncrate, period, offset,
+			 ppr_options, AHC_TRANS_GOAL, FALSE);
+	targ->dv_last_ppr_options = ppr_options;
+	ahc_unlock(ahc, &s);
+	return (0);
+}
+
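+/*
+ * Timeout handler for DV commands.  Mark the SCB with a failing status,
+ * reset the bus to recover it, and freeze the SIMQ briefly so devices
+ * have time to settle after the reset.
+ */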
+static void
+ahc_linux_dv_timeout(struct scsi_cmnd *cmd)
+{
+	struct	ahc_softc *ahc;
+	struct	scb *scb;
+	u_long	flags;
+
+	ahc = *((struct ahc_softc **)cmd->device->host->hostdata);
+	ahc_lock(ahc, &flags);
+
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV) {
+		printf("%s: Timeout while doing DV command %x.\n",
+		       ahc_name(ahc), cmd->cmnd[0]);
+		ahc_dump_card_state(ahc);
+	}
+#endif
+	
+	/*
+	 * Guard against "done race".  No action is
+	 * required if we just completed.
+	 */
+	if ((scb = (struct scb *)cmd->host_scribble) == NULL) {
+		ahc_unlock(ahc, &flags);
+		return;
+	}
+
+	/*
+	 * Command has not completed.  Mark this
+	 * SCB as having failing status prior to
+	 * resetting the bus, so we get the correct
+	 * error code.
+	 */
+	if ((scb->flags & SCB_SENSE) != 0)
+		ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
+	else
+		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
+	ahc_reset_channel(ahc, cmd->device->channel + 'A', /*initiate*/TRUE);
+
+	/*
+	 * Add a minimal bus settle delay for devices that are slow to
+	 * respond after bus resets.
+	 */
+	ahc_linux_freeze_simq(ahc);
+	init_timer(&ahc->platform_data->reset_timer);
+	ahc->platform_data->reset_timer.data = (u_long)ahc;
+	ahc->platform_data->reset_timer.expires = jiffies + HZ / 2;
+	ahc->platform_data->reset_timer.function =
+	    (ahc_linux_callback_t *)ahc_linux_release_simq;
+	add_timer(&ahc->platform_data->reset_timer);
+	if (ahc_linux_next_device_to_run(ahc) != NULL)
+		ahc_schedule_runq(ahc);
+	ahc_linux_run_complete_queue(ahc);
+	ahc_unlock(ahc, &flags);
+}
+
+static void
+ahc_linux_dv_complete(struct scsi_cmnd *cmd)
+{
+	struct ahc_softc *ahc;
+
+	ahc = *((struct ahc_softc **)cmd->device->host->hostdata);
+
+	/* Delete the DV timer before it goes off! */
+	scsi_delete_timer(cmd);
+
+#ifdef AHC_DEBUG
+	if (ahc_debug & AHC_SHOW_DV)
+		printf("%s:%d:%d: Command completed, status= 0x%x\n",
+		       ahc_name(ahc), cmd->device->channel,
+		       cmd->device->id, cmd->result);
+#endif
+
+	/* Wake up the state machine */
+	up(&ahc->platform_data->dv_cmd_sem);
+}
+
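+/*
+ * Fill dv_buffer with the echo buffer test pattern (sequential bytes,
+ * alternating 0x0000/0xffff and 0x5555/0xaaaa words, then a walking-bit
+ * pattern) and allocate dv_buffer1 to receive the read-back copy.
+ */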
+static void
+ahc_linux_generate_dv_pattern(struct ahc_linux_target *targ)
+{
+	uint16_t b;
+	u_int	 i;
+	u_int	 j;
+
+	if (targ->dv_buffer != NULL)
+		free(targ->dv_buffer, M_DEVBUF);
+	targ->dv_buffer = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
+	if (targ->dv_buffer1 != NULL)
+		free(targ->dv_buffer1, M_DEVBUF);
+	targ->dv_buffer1 = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
+
+	i = 0;
+	b = 0x0001;
+	for (j = 0 ; i < targ->dv_echo_size; j++) {
+		if (j < 32) {
+			/*
+			 * 32 bytes of sequential numbers.
+			 */
+			targ->dv_buffer[i++] = j & 0xff;
+		} else if (j < 48) {
+			/*
+			 * 16 bytes of repeating 0x0000, 0xffff.
+			 */
+			targ->dv_buffer[i++] = (j & 0x02) ? 0xff : 0x00;
+		} else if (j < 64) {
+			/*
+			 * 16 bytes of repeating 0x5555, 0xaaaa.
+			 */
+			targ->dv_buffer[i++] = (j & 0x02) ? 0xaa : 0x55;
+		} else {
+			/*
+			 * Remaining buffer is filled with a repeating
+			 * pattern of:
+			 *
+			 *	 0xffff
+			 *	~0x0001 << shifted once in each loop.
+			 */
+			if (j & 0x02) {
+				if (j & 0x01) {
+					targ->dv_buffer[i++] = ~(b >> 8) & 0xff;
+					b <<= 1;
+					if (b == 0x0000)
+						b = 0x0001;
+				} else {
+					targ->dv_buffer[i++] = (~b & 0xff);
+				}
+			} else {
+				targ->dv_buffer[i++] = 0xff;
+			}
+		}
+	}
+}
+
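+/*
+ * Return the user requested tag depth for this device, taken from the
+ * aic7xxx_tag_info table and capped at AHC_MAX_QUEUE, or 0 when
+ * disconnection is disabled for this target.
+ */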
+static u_int
+ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+	static int warned_user;
+	u_int tags;
+
+	tags = 0;
+	if ((ahc->user_discenable & devinfo->target_mask) != 0) {
+		if (ahc->unit >= NUM_ELEMENTS(aic7xxx_tag_info)) {
+			if (warned_user == 0) {
+
+				printf(KERN_WARNING
+"aic7xxx: WARNING: Insufficient tag_info instances\n"
+"aic7xxx: for installed controllers. Using defaults\n"
+"aic7xxx: Please update the aic7xxx_tag_info array in\n"
+"aic7xxx: the aic7xxx_osm..c source file.\n");
+				warned_user++;
+			}
+			tags = AHC_MAX_QUEUE;
+		} else {
+			adapter_tag_info_t *tag_info;
+
+			tag_info = &aic7xxx_tag_info[ahc->unit];
+			tags = tag_info->tag_commands[devinfo->target_offset];
+			if (tags > AHC_MAX_QUEUE)
+				tags = AHC_MAX_QUEUE;
+		}
+	}
+	return (tags);
+}
+
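+/*
+ * Return the user requested DV setting for this controller, falling
+ * back to a default derived from the controller features and the
+ * SEEPROM configuration when no explicit setting was given.
+ */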
+static u_int
+ahc_linux_user_dv_setting(struct ahc_softc *ahc)
+{
+	static int warned_user;
+	int dv;
+
+	if (ahc->unit >= NUM_ELEMENTS(aic7xxx_dv_settings)) {
+		if (warned_user == 0) {
+
+			printf(KERN_WARNING
+"aic7xxx: WARNING: Insufficient dv settings instances\n"
+"aic7xxx: for installed controllers. Using defaults\n"
+"aic7xxx: Please update the aic7xxx_dv_settings array\n"
+"aic7xxx: in the aic7xxx_osm.c source file.\n");
+			warned_user++;
+		}
+		dv = -1;
+	} else {
+
+		dv = aic7xxx_dv_settings[ahc->unit];
+	}
+
+	if (dv < 0) {
+		u_long s;
+
+		/*
+		 * Apply the default.
+		 */
+		/*
+		 * XXX - Enable DV on non-U160 controllers once it
+		 *       has been tested there.
+		 */
+		ahc_lock(ahc, &s);
+		dv = (ahc->features & AHC_DT);
+		if (ahc->seep_config != 0
+		 && ahc->seep_config->signature >= CFSIGNATURE2)
+			dv = (ahc->seep_config->adapter_control & CFENABLEDV);
+		ahc_unlock(ahc, &s);
+	}
+	return (dv);
+}
+
+/*
+ * Determines the queue depth for a given device.
+ */
+static void
+ahc_linux_device_queue_depth(struct ahc_softc *ahc,
+			     struct ahc_linux_device *dev)
+{
+	struct	ahc_devinfo devinfo;
+	u_int	tags;
+
+	ahc_compile_devinfo(&devinfo,
+			    dev->target->channel == 0
+			  ? ahc->our_id : ahc->our_id_b,
+			    dev->target->target, dev->lun,
+			    dev->target->channel == 0 ? 'A' : 'B',
+			    ROLE_INITIATOR);
+	tags = ahc_linux_user_tagdepth(ahc, &devinfo);
+	if (tags != 0
+	 && dev->scsi_device != NULL
+	 && dev->scsi_device->tagged_supported != 0) {
+
+		ahc_set_tags(ahc, &devinfo, AHC_QUEUE_TAGGED);
+		ahc_print_devinfo(ahc, &devinfo);
+		printf("Tagged Queuing enabled.  Depth %d\n", tags);
+	} else {
+		ahc_set_tags(ahc, &devinfo, AHC_QUEUE_NONE);
+	}
+}
+
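+/*
+ * Drain the per-device busy queue: for each command, allocate an SCB,
+ * fill out the hardware SCB (SCSI ID, transfer settings, tag message,
+ * CDB and S/G list) and hand it to the controller.  Stops early when
+ * the device or controller queue is frozen or SCBs run out.
+ */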
+static void
+ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
+{
+	struct	 ahc_cmd *acmd;
+	struct	 scsi_cmnd *cmd;
+	struct	 scb *scb;
+	struct	 hardware_scb *hscb;
+	struct	 ahc_initiator_tinfo *tinfo;
+	struct	 ahc_tmode_tstate *tstate;
+	uint16_t mask;
+
+	if ((dev->flags & AHC_DEV_ON_RUN_LIST) != 0)
+		panic("running device on run list");
+
+	while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL
+	    && dev->openings > 0 && dev->qfrozen == 0) {
+
+		/*
+		 * Schedule us to run later.  The only reason we are not
+		 * running is because the whole controller Q is frozen.
+		 */
+		if (ahc->platform_data->qfrozen != 0
+	 	 && AHC_DV_SIMQ_FROZEN(ahc) == 0) {
+			TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
+					  dev, links);
+			dev->flags |= AHC_DEV_ON_RUN_LIST;
+			return;
+		}
+		/*
+		 * Get an scb to use.
+		 */
+		if ((scb = ahc_get_scb(ahc)) == NULL) {
+			TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
+					 dev, links);
+			dev->flags |= AHC_DEV_ON_RUN_LIST;
+			ahc->flags |= AHC_RESOURCE_SHORTAGE;
+			return;
+		}
+		TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
+		cmd = &acmd_scsi_cmd(acmd);
+		scb->io_ctx = cmd;
+		scb->platform_data->dev = dev;
+		hscb = scb->hscb;
+		cmd->host_scribble = (char *)scb;
+
+		/*
+		 * Fill out basics of the HSCB.
+		 */
+		hscb->control = 0;
+		hscb->scsiid = BUILD_SCSIID(ahc, cmd);
+		hscb->lun = cmd->device->lun;
+		mask = SCB_GET_TARGET_MASK(ahc, scb);
+		tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
+					    SCB_GET_OUR_ID(scb),
+					    SCB_GET_TARGET(ahc, scb), &tstate);
+		hscb->scsirate = tinfo->scsirate;
+		hscb->scsioffset = tinfo->curr.offset;
+		if ((tstate->ultraenb & mask) != 0)
+			hscb->control |= ULTRAENB;
+
+		if ((ahc->user_discenable & mask) != 0)
+			hscb->control |= DISCENB;
+
+	 	if (AHC_DV_CMD(cmd) != 0)
+			scb->flags |= SCB_SILENT;
+
+		if ((tstate->auto_negotiate & mask) != 0) {
+			scb->flags |= SCB_AUTO_NEGOTIATE;
+			scb->hscb->control |= MK_MESSAGE;
+		}
+
+		if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+			int	msg_bytes;
+			uint8_t tag_msgs[2];
+
+			msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
+			if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
+				hscb->control |= tag_msgs[0];
+				if (tag_msgs[0] == MSG_ORDERED_TASK)
+					dev->commands_since_idle_or_otag = 0;
+			} else
+#endif
+			if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
+			 && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
+				hscb->control |= MSG_ORDERED_TASK;
+				dev->commands_since_idle_or_otag = 0;
+			} else {
+				hscb->control |= MSG_SIMPLE_TASK;
+			}
+		}
+
+		hscb->cdb_len = cmd->cmd_len;
+		if (hscb->cdb_len <= 12) {
+			memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
+		} else {
+			memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
+			scb->flags |= SCB_CDB32_PTR;
+		}
+
+		scb->platform_data->xfer_len = 0;
+		ahc_set_residual(scb, 0);
+		ahc_set_sense_residual(scb, 0);
+		scb->sg_count = 0;
+		if (cmd->use_sg != 0) {
+			struct	ahc_dma_seg *sg;
+			struct	scatterlist *cur_seg;
+			struct	scatterlist *end_seg;
+			int	nseg;
+
+			cur_seg = (struct scatterlist *)cmd->request_buffer;
+			nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
+			    scsi_to_pci_dma_dir(cmd->sc_data_direction));
+			end_seg = cur_seg + nseg;
+			/* Copy the segments into the SG list. */
+			sg = scb->sg_list;
+			/*
+			 * The sg_count may be larger than nseg if
+			 * a transfer crosses a 32bit page.
+			 */ 
+			while (cur_seg < end_seg) {
+				dma_addr_t addr;
+				bus_size_t len;
+				int consumed;
+
+				addr = sg_dma_address(cur_seg);
+				len = sg_dma_len(cur_seg);
+				consumed = ahc_linux_map_seg(ahc, scb,
+							     sg, addr, len);
+				sg += consumed;
+				scb->sg_count += consumed;
+				cur_seg++;
+			}
+			sg--;
+			sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
+
+			/*
+			 * Reset the sg list pointer.
+			 */
+			scb->hscb->sgptr =
+			    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
+
+			/*
+			 * Copy the first SG into the "current"
+			 * data pointer area.
+			 */
+			scb->hscb->dataptr = scb->sg_list->addr;
+			scb->hscb->datacnt = scb->sg_list->len;
+		} else if (cmd->request_bufflen != 0) {
+			struct	 ahc_dma_seg *sg;
+			dma_addr_t addr;
+
+			sg = scb->sg_list;
+			addr = pci_map_single(ahc->dev_softc,
+			       cmd->request_buffer,
+			       cmd->request_bufflen,
+			       scsi_to_pci_dma_dir(cmd->sc_data_direction));
+			scb->platform_data->buf_busaddr = addr;
+			scb->sg_count = ahc_linux_map_seg(ahc, scb,
+							  sg, addr,
+							  cmd->request_bufflen);
+			sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
+
+			/*
+			 * Reset the sg list pointer.
+			 */
+			scb->hscb->sgptr =
+			    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
+
+			/*
+			 * Copy the first SG into the "current"
+			 * data pointer area.
+			 */
+			scb->hscb->dataptr = sg->addr;
+			scb->hscb->datacnt = sg->len;
+		} else {
+			scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
+			scb->hscb->dataptr = 0;
+			scb->hscb->datacnt = 0;
+			scb->sg_count = 0;
+		}
+
+		ahc_sync_sglist(ahc, scb, BUS_DMASYNC_PREWRITE);
+		LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
+		dev->openings--;
+		dev->active++;
+		dev->commands_issued++;
+		if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
+			dev->commands_since_idle_or_otag++;
+
+		/*
+		 * We only allow one untagged transaction
+		 * per target in the initiator role unless
+		 * we are storing a full busy target *lun*
+		 * table in SCB space.
+		 */
+		if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
+		 && (ahc->features & AHC_SCB_BTT) == 0) {
+			struct scb_tailq *untagged_q;
+			int target_offset;
+
+			target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
+			untagged_q = &(ahc->untagged_queues[target_offset]);
+			TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
+			scb->flags |= SCB_UNTAGGEDQ;
+			if (TAILQ_FIRST(untagged_q) != scb)
+				continue;
+		}
+		scb->flags |= SCB_ACTIVE;
+		ahc_queue_scb(ahc, scb);
+	}
+}
+
+/*
+ * SCSI controller interrupt handler.
+ */
+irqreturn_t
+ahc_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
+{
+	struct	ahc_softc *ahc;
+	u_long	flags;
+	int	ours;
+
+	ahc = (struct ahc_softc *) dev_id;
+	ahc_lock(ahc, &flags); 
+	ours = ahc_intr(ahc);
+	if (ahc_linux_next_device_to_run(ahc) != NULL)
+		ahc_schedule_runq(ahc);
+	ahc_linux_run_complete_queue(ahc);
+	ahc_unlock(ahc, &flags);
+	return IRQ_RETVAL(ours);
+}
+
+void
+ahc_platform_flushwork(struct ahc_softc *ahc)
+{
+
+	while (ahc_linux_run_complete_queue(ahc) != NULL)
+		;
+}
+
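+/*
+ * Allocate and initialize per-target state, marking the target as
+ * still requiring domain validation.
+ */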
+static struct ahc_linux_target*
+ahc_linux_alloc_target(struct ahc_softc *ahc, u_int channel, u_int target)
+{
+	struct ahc_linux_target *targ;
+	u_int target_offset;
+
+	target_offset = target;
+	if (channel != 0)
+		target_offset += 8;
+
+	targ = malloc(sizeof(*targ), M_DEVBUF, M_NOWAIT);
+	if (targ == NULL)
+		return (NULL);
+	memset(targ, 0, sizeof(*targ));
+	targ->channel = channel;
+	targ->target = target;
+	targ->ahc = ahc;
+	targ->flags = AHC_DV_REQUIRED;
+	ahc->platform_data->targets[target_offset] = targ;
+	return (targ);
+}
+
+static void
+ahc_linux_free_target(struct ahc_softc *ahc, struct ahc_linux_target *targ)
+{
+	struct ahc_devinfo devinfo;
+	struct ahc_initiator_tinfo *tinfo;
+	struct ahc_tmode_tstate *tstate;
+	u_int our_id;
+	u_int target_offset;
+	char channel;
+
+	/*
+	 * Force a negotiation to async/narrow on any
+	 * future command to this device unless a bus
+	 * reset occurs between now and that command.
+	 */
+	channel = 'A' + targ->channel;
+	our_id = ahc->our_id;
+	target_offset = targ->target;
+	if (targ->channel != 0) {
+		target_offset += 8;
+		our_id = ahc->our_id_b;
+	}
+	tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
+				    targ->target, &tstate);
+	ahc_compile_devinfo(&devinfo, our_id, targ->target, CAM_LUN_WILDCARD,
+			    channel, ROLE_INITIATOR);
+	ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
+			 AHC_TRANS_GOAL, /*paused*/FALSE);
+	ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
+		      AHC_TRANS_GOAL, /*paused*/FALSE);
+	ahc_update_neg_request(ahc, &devinfo, tstate, tinfo, AHC_NEG_ALWAYS);
+	ahc->platform_data->targets[target_offset] = NULL;
+	if (targ->inq_data != NULL)
+		free(targ->inq_data, M_DEVBUF);
+	if (targ->dv_buffer != NULL)
+		free(targ->dv_buffer, M_DEVBUF);
+	if (targ->dv_buffer1 != NULL)
+		free(targ->dv_buffer1, M_DEVBUF);
+	free(targ, M_DEVBUF);
+}
+
+static struct ahc_linux_device*
+ahc_linux_alloc_device(struct ahc_softc *ahc,
+		 struct ahc_linux_target *targ, u_int lun)
+{
+	struct ahc_linux_device *dev;
+
+	dev = malloc(sizeof(*dev), M_DEVBUF, M_NOWAIT);
+	if (dev == NULL)
+		return (NULL);
+	memset(dev, 0, sizeof(*dev));
+	init_timer(&dev->timer);
+	TAILQ_INIT(&dev->busyq);
+	dev->flags = AHC_DEV_UNCONFIGURED;
+	dev->lun = lun;
+	dev->target = targ;
+
+	/*
+	 * We start out life using untagged
+	 * transactions of which we allow one.
+	 */
+	dev->openings = 1;
+
+	/*
+	 * Set maxtags to 0.  This will be changed if we
+	 * later determine that we are dealing with
+	 * a tagged queuing capable device.
+	 */
+	dev->maxtags = 0;
+	
+	targ->refcount++;
+	targ->devices[lun] = dev;
+	return (dev);
+}
+
+static void
+__ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev)
+{
+	struct ahc_linux_target *targ;
+
+	targ = dev->target;
+	targ->devices[dev->lun] = NULL;
+	free(dev, M_DEVBUF);
+	targ->refcount--;
+	if (targ->refcount == 0
+	 && (targ->flags & AHC_DV_REQUIRED) == 0)
+		ahc_linux_free_target(ahc, targ);
+}
+
+static void
+ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev)
+{
+	del_timer_sync(&dev->timer);
+	__ahc_linux_free_device(ahc, dev);
+}
+
+void
+ahc_send_async(struct ahc_softc *ahc, char channel,
+	       u_int target, u_int lun, ac_code code, void *arg)
+{
+	switch (code) {
+	case AC_TRANSFER_NEG:
+	{
+		char	buf[80];
+		struct	ahc_linux_target *targ;
+		struct	info_str info;
+		struct	ahc_initiator_tinfo *tinfo;
+		struct	ahc_tmode_tstate *tstate;
+		int	target_offset;
+
+		info.buffer = buf;
+		info.length = sizeof(buf);
+		info.offset = 0;
+		info.pos = 0;
+		tinfo = ahc_fetch_transinfo(ahc, channel,
+						channel == 'A' ? ahc->our_id
+							       : ahc->our_id_b,
+						target, &tstate);
+
+		/*
+		 * Don't bother reporting results while
+		 * negotiations are still pending.
+		 */
+		if (tinfo->curr.period != tinfo->goal.period
+		 || tinfo->curr.width != tinfo->goal.width
+		 || tinfo->curr.offset != tinfo->goal.offset
+		 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
+			if (bootverbose == 0)
+				break;
+
+		/*
+		 * Don't bother reporting results that
+		 * are identical to those last reported.
+		 */
+		target_offset = target;
+		if (channel == 'B')
+			target_offset += 8;
+		targ = ahc->platform_data->targets[target_offset];
+		if (targ == NULL)
+			break;
+		if (tinfo->curr.period == targ->last_tinfo.period
+		 && tinfo->curr.width == targ->last_tinfo.width
+		 && tinfo->curr.offset == targ->last_tinfo.offset
+		 && tinfo->curr.ppr_options == targ->last_tinfo.ppr_options)
+			if (bootverbose == 0)
+				break;
+
+		targ->last_tinfo.period = tinfo->curr.period;
+		targ->last_tinfo.width = tinfo->curr.width;
+		targ->last_tinfo.offset = tinfo->curr.offset;
+		targ->last_tinfo.ppr_options = tinfo->curr.ppr_options;
+
+		printf("(%s:%c:", ahc_name(ahc), channel);
+		if (target == CAM_TARGET_WILDCARD)
+			printf("*): ");
+		else
+			printf("%d): ", target);
+		ahc_format_transinfo(&info, &tinfo->curr);
+		if (info.pos < info.length)
+			*info.buffer = '\0';
+		else
+			buf[info.length - 1] = '\0';
+		printf("%s", buf);
+		break;
+	}
+        case AC_SENT_BDR:
+	{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+		WARN_ON(lun != CAM_LUN_WILDCARD);
+		scsi_report_device_reset(ahc->platform_data->host,
+					 channel - 'A', target);
+#else
+		Scsi_Device *scsi_dev;
+
+		/*
+		 * Find the SCSI device associated with this
+		 * request and indicate that a UA is expected.
+		 */
+		for (scsi_dev = ahc->platform_data->host->host_queue;
+		     scsi_dev != NULL; scsi_dev = scsi_dev->next) {
+			if (channel - 'A' == scsi_dev->channel
+			 && target == scsi_dev->id
+			 && (lun == CAM_LUN_WILDCARD
+			  || lun == scsi_dev->lun)) {
+				scsi_dev->was_reset = 1;
+				scsi_dev->expecting_cc_ua = 1;
+			}
+		}
+#endif
+		break;
+	}
+        case AC_BUS_RESET:
+		if (ahc->platform_data->host != NULL) {
+			scsi_report_bus_reset(ahc->platform_data->host,
+					      channel - 'A');
+		}
+                break;
+        default:
+                panic("ahc_send_async: Unexpected async event");
+        }
+}
+
+/*
+ * Calls the higher level scsi done function and frees the scb.
+ */
+void
+ahc_done(struct ahc_softc *ahc, struct scb *scb)
+{
+	Scsi_Cmnd *cmd;
+	struct	   ahc_linux_device *dev;
+
+	LIST_REMOVE(scb, pending_links);
+	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
+		struct scb_tailq *untagged_q;
+		int target_offset;
+
+		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
+		untagged_q = &(ahc->untagged_queues[target_offset]);
+		TAILQ_REMOVE(untagged_q, scb, links.tqe);
+		ahc_run_untagged_queue(ahc, untagged_q);
+	}
+
+	if ((scb->flags & SCB_ACTIVE) == 0) {
+		printf("SCB %d done'd twice\n", scb->hscb->tag);
+		ahc_dump_card_state(ahc);
+		panic("Stopping for safety");
+	}
+	cmd = scb->io_ctx;
+	dev = scb->platform_data->dev;
+	dev->active--;
+	dev->openings++;
+	if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
+		cmd->result &= ~(CAM_DEV_QFRZN << 16);
+		dev->qfrozen--;
+	}
+	ahc_linux_unmap_scb(ahc, scb);
+
+	/*
+	 * Guard against stale sense data.
+	 * The Linux mid-layer assumes that sense
+	 * was retrieved anytime the first byte of
+	 * the sense buffer looks "sane".
+	 */
+	cmd->sense_buffer[0] = 0;
+	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
+		uint32_t amount_xferred;
+
+		amount_xferred =
+		    ahc_get_transfer_length(scb) - ahc_get_residual(scb);
+		if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
+#ifdef AHC_DEBUG
+			if ((ahc_debug & AHC_SHOW_MISC) != 0) {
+				ahc_print_path(ahc, scb);
+				printf("Set CAM_UNCOR_PARITY\n");
+			}
+#endif
+			ahc_set_transaction_status(scb, CAM_UNCOR_PARITY);
+#ifdef AHC_REPORT_UNDERFLOWS
+		/*
+		 * This code is disabled by default as some
+		 * clients of the SCSI system do not properly
+		 * initialize the underflow parameter.  This
+		 * results in spurious termination of commands
+		 * that complete as expected (e.g. underflow is
+		 * allowed as the command can return variable amounts
+		 * of data).
+		 */
+		} else if (amount_xferred < scb->io_ctx->underflow) {
+			u_int i;
+
+			ahc_print_path(ahc, scb);
+			printf("CDB:");
+			for (i = 0; i < scb->io_ctx->cmd_len; i++)
+				printf(" 0x%x", scb->io_ctx->cmnd[i]);
+			printf("\n");
+			ahc_print_path(ahc, scb);
+			printf("Saw underflow (%ld of %ld bytes). "
+			       "Treated as error\n",
+				ahc_get_residual(scb),
+				ahc_get_transfer_length(scb));
+			ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
+#endif
+		} else {
+			ahc_set_transaction_status(scb, CAM_REQ_CMP);
+		}
+	} else if (ahc_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
+		ahc_linux_handle_scsi_status(ahc, dev, scb);
+	} else if (ahc_get_transaction_status(scb) == CAM_SEL_TIMEOUT) {
+		dev->flags |= AHC_DEV_UNCONFIGURED;
+		if (AHC_DV_CMD(cmd) == FALSE)
+			dev->target->flags &= ~AHC_DV_REQUIRED;
+	}
+	/*
+	 * Start DV for devices that require it assuming the first command
+	 * sent does not result in a selection timeout.
+	 */
+	if (ahc_get_transaction_status(scb) != CAM_SEL_TIMEOUT
+	 && (dev->target->flags & AHC_DV_REQUIRED) != 0)
+		ahc_linux_start_dv(ahc);
+
+	if (dev->openings == 1
+	 && ahc_get_transaction_status(scb) == CAM_REQ_CMP
+	 && ahc_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
+		dev->tag_success_count++;
+	/*
+	 * Some devices deal with temporary internal resource
+	 * shortages by returning queue full.  When the queue
+	 * full occurs, we throttle back.  Slowly try to get
+	 * back to our previous queue depth.
+	 */
+	if ((dev->openings + dev->active) < dev->maxtags
+	 && dev->tag_success_count > AHC_TAG_SUCCESS_INTERVAL) {
+		dev->tag_success_count = 0;
+		dev->openings++;
+	}
+
+	if (dev->active == 0)
+		dev->commands_since_idle_or_otag = 0;
+
+	if (TAILQ_EMPTY(&dev->busyq)) {
+		if ((dev->flags & AHC_DEV_UNCONFIGURED) != 0
+		 && dev->active == 0
+	 	 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0)
+			ahc_linux_free_device(ahc, dev);
+	} else if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) {
+		TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links);
+		dev->flags |= AHC_DEV_ON_RUN_LIST;
+	}
+
+	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
+		printf("Recovery SCB completes\n");
+		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
+		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
+			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
+		if ((ahc->platform_data->flags & AHC_UP_EH_SEMAPHORE) != 0) {
+			ahc->platform_data->flags &= ~AHC_UP_EH_SEMAPHORE;
+			up(&ahc->platform_data->eh_sem);
+		}
+	}
+
+	ahc_free_scb(ahc, scb);
+	ahc_linux_queue_cmd_complete(ahc, cmd);
+
+	if ((ahc->platform_data->flags & AHC_DV_WAIT_SIMQ_EMPTY) != 0
+	 && LIST_FIRST(&ahc->pending_scbs) == NULL) {
+		ahc->platform_data->flags &= ~AHC_DV_WAIT_SIMQ_EMPTY;
+		up(&ahc->platform_data->dv_sem);
+	}
+		
+}
+
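+/*
+ * Process a non-zero SCSI status: copy sense data into the midlayer's
+ * command for CHECK CONDITION, throttle and possibly lock the tag depth
+ * on QUEUE FULL, and briefly freeze the device queue on BUSY.
+ */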
+static void
+ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
+			     struct ahc_linux_device *dev, struct scb *scb)
+{
+	struct	ahc_devinfo devinfo;
+
+	ahc_compile_devinfo(&devinfo,
+			    ahc->our_id,
+			    dev->target->target, dev->lun,
+			    dev->target->channel == 0 ? 'A' : 'B',
+			    ROLE_INITIATOR);
+	
+	/*
+	 * We don't currently trust the mid-layer to
+	 * properly deal with queue full or busy.  So,
+	 * when one occurs, we tell the mid-layer to
+	 * unconditionally requeue the command to us
+	 * so that we can retry it ourselves.  We also
+	 * implement our own throttling mechanism so
+	 * we don't clobber the device with too many
+	 * commands.
+	 */
+	switch (ahc_get_scsi_status(scb)) {
+	default:
+		break;
+	case SCSI_STATUS_CHECK_COND:
+	case SCSI_STATUS_CMD_TERMINATED:
+	{
+		Scsi_Cmnd *cmd;
+
+		/*
+		 * Copy sense information to the OS's cmd
+		 * structure if it is available.
+		 */
+		cmd = scb->io_ctx;
+		if (scb->flags & SCB_SENSE) {
+			u_int sense_size;
+
+			sense_size = MIN(sizeof(struct scsi_sense_data)
+				       - ahc_get_sense_residual(scb),
+					 sizeof(cmd->sense_buffer));
+			memcpy(cmd->sense_buffer,
+			       ahc_get_sense_buf(ahc, scb), sense_size);
+			if (sense_size < sizeof(cmd->sense_buffer))
+				memset(&cmd->sense_buffer[sense_size], 0,
+				       sizeof(cmd->sense_buffer) - sense_size);
+			cmd->result |= (DRIVER_SENSE << 24);
+#ifdef AHC_DEBUG
+			if (ahc_debug & AHC_SHOW_SENSE) {
+				int i;
+
+				printf("Copied %d bytes of sense data:",
+				       sense_size);
+				for (i = 0; i < sense_size; i++) {
+					if ((i & 0xF) == 0)
+						printf("\n");
+					printf("0x%x ", cmd->sense_buffer[i]);
+				}
+				printf("\n");
+			}
+#endif
+		}
+		break;
+	}
+	case SCSI_STATUS_QUEUE_FULL:
+	{
+		/*
+		 * By the time the core driver has returned this
+		 * command, all other commands that were queued
+		 * to us but not the device have been returned.
+		 * This ensures that dev->active is equal to
+		 * the number of commands actually queued to
+		 * the device.
+		 */
+		dev->tag_success_count = 0;
+		if (dev->active != 0) {
+			/*
+			 * Drop our opening count to the number
+			 * of commands currently outstanding.
+			 */
+			dev->openings = 0;
+/*
+			ahc_print_path(ahc, scb);
+			printf("Dropping tag count to %d\n", dev->active);
+ */
+			if (dev->active == dev->tags_on_last_queuefull) {
+
+				dev->last_queuefull_same_count++;
+				/*
+				 * If we repeatedly see a queue full
+				 * at the same queue depth, this
+				 * device has a fixed number of tag
+				 * slots.  Lock in this tag depth
+				 * so we stop seeing queue fulls from
+				 * this device.
+				 */
+				if (dev->last_queuefull_same_count
+				 == AHC_LOCK_TAGS_COUNT) {
+					dev->maxtags = dev->active;
+					ahc_print_path(ahc, scb);
+					printf("Locking max tag count at %d\n",
+					       dev->active);
+				}
+			} else {
+				dev->tags_on_last_queuefull = dev->active;
+				dev->last_queuefull_same_count = 0;
+			}
+			ahc_set_transaction_status(scb, CAM_REQUEUE_REQ);
+			ahc_set_scsi_status(scb, SCSI_STATUS_OK);
+			ahc_platform_set_tags(ahc, &devinfo,
+				     (dev->flags & AHC_DEV_Q_BASIC)
+				   ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
+			break;
+		}
+		/*
+		 * Drop down to a single opening, and treat this
+		 * as if the target returned BUSY SCSI status.
+		 */
+		dev->openings = 1;
+		ahc_set_scsi_status(scb, SCSI_STATUS_BUSY);
+		ahc_platform_set_tags(ahc, &devinfo,
+			     (dev->flags & AHC_DEV_Q_BASIC)
+			   ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
+		/* FALLTHROUGH */
+	}
+	case SCSI_STATUS_BUSY:
+	{
+		/*
+		 * Set a short timer to defer sending commands for
+		 * a bit since Linux will not delay in this case.
+		 */
+		if ((dev->flags & AHC_DEV_TIMER_ACTIVE) != 0) {
+			printf("%s:%c:%d: Device Timer still active during "
+			       "busy processing\n", ahc_name(ahc),
+				dev->target->channel, dev->target->target);
+			break;
+		}
+		dev->flags |= AHC_DEV_TIMER_ACTIVE;
+		dev->qfrozen++;
+		init_timer(&dev->timer);
+		dev->timer.data = (u_long)dev;
+		dev->timer.expires = jiffies + (HZ/2);
+		dev->timer.function = ahc_linux_dev_timed_unfreeze;
+		add_timer(&dev->timer);
+		break;
+	}
+	}
+}
+
+static void
+ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd)
+{
+	/*
+	 * Typically, the complete queue has very few entries
+	 * queued to it before the queue is emptied by
+	 * ahc_linux_run_complete_queue, so sorting the entries
+	 * by generation number should be inexpensive.
+	 * We perform the sort so that commands that complete
+	 * with an error are returned in the order originally
+	 * queued to the controller so that any subsequent retries
+	 * are performed in order.  The underlying ahc routines do
+	 * not guarantee the order that aborted commands will be
+	 * returned to us.
+	 */
+	struct ahc_completeq *completeq;
+	struct ahc_cmd *list_cmd;
+	struct ahc_cmd *acmd;
+
+	/*
+	 * Map CAM error codes into Linux Error codes.  We
+	 * avoid the conversion so that the DV code has the
+	 * full error information available when making
+	 * state change decisions.
+	 */
+	if (AHC_DV_CMD(cmd) == FALSE) {
+		u_int new_status;
+
+		switch (ahc_cmd_get_transaction_status(cmd)) {
+		case CAM_REQ_INPROG:
+		case CAM_REQ_CMP:
+		case CAM_SCSI_STATUS_ERROR:
+			new_status = DID_OK;
+			break;
+		case CAM_REQ_ABORTED:
+			new_status = DID_ABORT;
+			break;
+		case CAM_BUSY:
+			new_status = DID_BUS_BUSY;
+			break;
+		case CAM_REQ_INVALID:
+		case CAM_PATH_INVALID:
+			new_status = DID_BAD_TARGET;
+			break;
+		case CAM_SEL_TIMEOUT:
+			new_status = DID_NO_CONNECT;
+			break;
+		case CAM_SCSI_BUS_RESET:
+		case CAM_BDR_SENT:
+			new_status = DID_RESET;
+			break;
+		case CAM_UNCOR_PARITY:
+			new_status = DID_PARITY;
+			break;
+		case CAM_CMD_TIMEOUT:
+			new_status = DID_TIME_OUT;
+			break;
+		case CAM_UA_ABORT:
+		case CAM_REQ_CMP_ERR:
+		case CAM_AUTOSENSE_FAIL:
+		case CAM_NO_HBA:
+		case CAM_DATA_RUN_ERR:
+		case CAM_UNEXP_BUSFREE:
+		case CAM_SEQUENCE_FAIL:
+		case CAM_CCB_LEN_ERR:
+		case CAM_PROVIDE_FAIL:
+		case CAM_REQ_TERMIO:
+		case CAM_UNREC_HBA_ERROR:
+		case CAM_REQ_TOO_BIG:
+			new_status = DID_ERROR;
+			break;
+		case CAM_REQUEUE_REQ:
+			/*
+			 * If we want the request requeued, make sure there
+			 * are sufficient retries.  In the old scsi error code,
+			 * we used to be able to specify a result code that
+			 * bypassed the retry count.  Now we must use this
+			 * hack.  We also "fake" a check condition with
+			 * a sense code of ABORTED COMMAND.  This seems to
+			 * evoke a retry even if this command is being sent
+			 * via the eh thread.  Ick!  Ick!  Ick!
+			 */
+			if (cmd->retries > 0)
+				cmd->retries--;
+			new_status = DID_OK;
+			ahc_cmd_set_scsi_status(cmd, SCSI_STATUS_CHECK_COND);
+			cmd->result |= (DRIVER_SENSE << 24);
+			memset(cmd->sense_buffer, 0,
+			       sizeof(cmd->sense_buffer));
+			cmd->sense_buffer[0] = SSD_ERRCODE_VALID
+					     | SSD_CURRENT_ERROR;
+			cmd->sense_buffer[2] = SSD_KEY_ABORTED_COMMAND;
+			break;
+		default:
+			/* We should never get here */
+			new_status = DID_ERROR;
+			break;
+		}
+
+		ahc_cmd_set_transaction_status(cmd, new_status);
+	}
+
+	completeq = &ahc->platform_data->completeq;
+	list_cmd = TAILQ_FIRST(completeq);
+	acmd = (struct ahc_cmd *)cmd;
+	while (list_cmd != NULL
+	    && acmd_scsi_cmd(list_cmd).serial_number
+	     < acmd_scsi_cmd(acmd).serial_number)
+		list_cmd = TAILQ_NEXT(list_cmd, acmd_links.tqe);
+	if (list_cmd != NULL)
+		TAILQ_INSERT_BEFORE(list_cmd, acmd, acmd_links.tqe);
+	else
+		TAILQ_INSERT_TAIL(completeq, acmd, acmd_links.tqe);
+}
+
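+/*
+ * Trim our user negotiation goals to the capabilities advertised in the
+ * target's INQUIRY data (wide, sync, DT, QAS and IU support) and program
+ * the result as the new transfer negotiation goal.
+ */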
+static void
+ahc_linux_filter_inquiry(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
+{
+	struct	scsi_inquiry_data *sid;
+	struct	ahc_initiator_tinfo *tinfo;
+	struct	ahc_transinfo *user;
+	struct	ahc_transinfo *goal;
+	struct	ahc_transinfo *curr;
+	struct	ahc_tmode_tstate *tstate;
+	struct	ahc_syncrate *syncrate;
+	struct	ahc_linux_device *dev;
+	u_int	maxsync;
+	u_int	width;
+	u_int	period;
+	u_int	offset;
+	u_int	ppr_options;
+	u_int	trans_version;
+	u_int	prot_version;
+
+	/*
+	 * Determine if this lun actually exists.  If so,
+	 * hold on to its corresponding device structure.
+	 * If not, make sure we release the device and
+	 * don't bother processing the rest of this inquiry
+	 * command.
+	 */
+	dev = ahc_linux_get_device(ahc, devinfo->channel - 'A',
+				   devinfo->target, devinfo->lun,
+				   /*alloc*/TRUE);
+
+	sid = (struct scsi_inquiry_data *)dev->target->inq_data;
+	if (SID_QUAL(sid) == SID_QUAL_LU_CONNECTED) {
+
+		dev->flags &= ~AHC_DEV_UNCONFIGURED;
+	} else {
+		dev->flags |= AHC_DEV_UNCONFIGURED;
+		return;
+	}
+
+	/*
+	 * Update our notion of this device's transfer
+	 * negotiation capabilities.
+	 */
+	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
+				    devinfo->our_scsiid,
+				    devinfo->target, &tstate);
+	user = &tinfo->user;
+	goal = &tinfo->goal;
+	curr = &tinfo->curr;
+	width = user->width;
+	period = user->period;
+	offset = user->offset;
+	ppr_options = user->ppr_options;
+	trans_version = user->transport_version;
+	prot_version = MIN(user->protocol_version, SID_ANSI_REV(sid));
+
+	/*
+	 * Only attempt SPI3/4 once we've verified that
+	 * the device claims to support SPI3/4 features.
+	 */
+	if (prot_version < SCSI_REV_2)
+		trans_version = SID_ANSI_REV(sid);
+	else
+		trans_version = SCSI_REV_2;
+
+	if ((sid->flags & SID_WBus16) == 0)
+		width = MSG_EXT_WDTR_BUS_8_BIT;
+	if ((sid->flags & SID_Sync) == 0) {
+		period = 0;
+		offset = 0;
+		ppr_options = 0;
+	}
+	if ((sid->spi3data & SID_SPI_QAS) == 0)
+		ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
+	if ((sid->spi3data & SID_SPI_CLOCK_DT) == 0)
+		ppr_options &= MSG_EXT_PPR_QAS_REQ;
+	if ((sid->spi3data & SID_SPI_IUS) == 0)
+		ppr_options &= (MSG_EXT_PPR_DT_REQ
+			      | MSG_EXT_PPR_QAS_REQ);
+
+	if (prot_version > SCSI_REV_2
+	 && ppr_options != 0)
+		trans_version = user->transport_version;
+
+	ahc_validate_width(ahc, /*tinfo limit*/NULL, &width, ROLE_UNKNOWN);
+	if ((ahc->features & AHC_ULTRA2) != 0)
+		maxsync = AHC_SYNCRATE_DT;
+	else if ((ahc->features & AHC_ULTRA) != 0)
+		maxsync = AHC_SYNCRATE_ULTRA;
+	else
+		maxsync = AHC_SYNCRATE_FAST;
+
+	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, maxsync);
+	ahc_validate_offset(ahc, /*tinfo limit*/NULL, syncrate,
+			    &offset, width, ROLE_UNKNOWN);
+	if (offset == 0 || period == 0) {
+		period = 0;
+		offset = 0;
+		ppr_options = 0;
+	}
+	/* Apply our filtered user settings. */
+	curr->transport_version = trans_version;
+	curr->protocol_version = prot_version;
+	ahc_set_width(ahc, devinfo, width, AHC_TRANS_GOAL, /*paused*/FALSE);
+	ahc_set_syncrate(ahc, devinfo, syncrate, period,
+			 offset, ppr_options, AHC_TRANS_GOAL,
+			 /*paused*/FALSE);
+}
+
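+/*
+ * Recovery timer expired.  Release the error handling semaphore if a
+ * recovery request is still waiting on it.
+ */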
+static void
+ahc_linux_sem_timeout(u_long arg)
+{
+	struct	ahc_softc *ahc;
+	u_long	s;
+
+	ahc = (struct ahc_softc *)arg;
+
+	ahc_lock(ahc, &s);
+	if ((ahc->platform_data->flags & AHC_UP_EH_SEMAPHORE) != 0) {
+		ahc->platform_data->flags &= ~AHC_UP_EH_SEMAPHORE;
+		up(&ahc->platform_data->eh_sem);
+	}
+	ahc_unlock(ahc, &s);
+}
+
+static void
+ahc_linux_freeze_simq(struct ahc_softc *ahc)
+{
+	ahc->platform_data->qfrozen++;
+	if (ahc->platform_data->qfrozen == 1) {
+		scsi_block_requests(ahc->platform_data->host);
+
+		/* XXX What about Twin channels? */
+		ahc_platform_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
+					CAM_LUN_WILDCARD, SCB_LIST_NULL,
+					ROLE_INITIATOR, CAM_REQUEUE_REQ);
+	}
+}
+
+static void
+ahc_linux_release_simq(u_long arg)
+{
+	struct ahc_softc *ahc;
+	u_long s;
+	int    unblock_reqs;
+
+	ahc = (struct ahc_softc *)arg;
+
+	unblock_reqs = 0;
+	ahc_lock(ahc, &s);
+	if (ahc->platform_data->qfrozen > 0)
+		ahc->platform_data->qfrozen--;
+	if (ahc->platform_data->qfrozen == 0)
+		unblock_reqs = 1;
+	if (AHC_DV_SIMQ_FROZEN(ahc)
+	 && ((ahc->platform_data->flags & AHC_DV_WAIT_SIMQ_RELEASE) != 0)) {
+		ahc->platform_data->flags &= ~AHC_DV_WAIT_SIMQ_RELEASE;
+		up(&ahc->platform_data->dv_sem);
+	}
+	ahc_schedule_runq(ahc);
+	ahc_unlock(ahc, &s);
+	/*
+	 * There is still a race here.  The mid-layer
+	 * should keep its own freeze count and use
+	 * a bottom half handler to run the queues
+	 * so we can unblock with our own lock held.
+	 */
+	if (unblock_reqs)
+		scsi_unblock_requests(ahc->platform_data->host);
+}
+
+static void
+ahc_linux_dev_timed_unfreeze(u_long arg)
+{
+	struct ahc_linux_device *dev;
+	struct ahc_softc *ahc;
+	u_long s;
+
+	dev = (struct ahc_linux_device *)arg;
+	ahc = dev->target->ahc;
+	ahc_lock(ahc, &s);
+	dev->flags &= ~AHC_DEV_TIMER_ACTIVE;
+	if (dev->qfrozen > 0)
+		dev->qfrozen--;
+	if (dev->qfrozen == 0
+	 && (dev->flags & AHC_DEV_ON_RUN_LIST) == 0)
+		ahc_linux_run_device_queue(ahc, dev);
+	if (TAILQ_EMPTY(&dev->busyq)
+	 && dev->active == 0)
+		__ahc_linux_free_device(ahc, dev);
+	ahc_unlock(ahc, &s);
+}
+
+static int
+ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
+{
+	struct ahc_softc *ahc;
+	struct ahc_cmd *acmd;
+	struct ahc_cmd *list_acmd;
+	struct ahc_linux_device *dev;
+	struct scb *pending_scb;
+	u_long s;
+	u_int  saved_scbptr;
+	u_int  active_scb_index;
+	u_int  last_phase;
+	u_int  saved_scsiid;
+	u_int  cdb_byte;
+	int    retval;
+	int    was_paused;
+	int    paused;
+	int    wait;
+	int    disconnected;
+
+	pending_scb = NULL;
+	paused = FALSE;
+	wait = FALSE;
+	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
+	acmd = (struct ahc_cmd *)cmd;
+
+	printf("%s:%d:%d:%d: Attempting to queue a%s message\n",
+	       ahc_name(ahc), cmd->device->channel,
+	       cmd->device->id, cmd->device->lun,
+	       flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");
+
+	printf("CDB:");
+	for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
+		printf(" 0x%x", cmd->cmnd[cdb_byte]);
+	printf("\n");
+
+	/*
+	 * In all versions of Linux, we have to work around
+	 * a major flaw in how the mid-layer is locked down
+	 * if we are to sleep successfully in our error handler
+	 * while allowing our interrupt handler to run.  Since
+	 * the midlayer acquires either the io_request_lock or
+	 * our lock prior to calling us, we must use the
+	 * spin_unlock_irq() method for unlocking our lock.
+	 * This will force interrupts to be enabled on the
+	 * current CPU.  Since the EH thread should not have
+	 * been running with CPU interrupts disabled other than
+	 * by acquiring either the io_request_lock or our own
+	 * lock, this *should* be safe.
+	 */
+	ahc_midlayer_entrypoint_lock(ahc, &s);
+
+	/*
+	 * First determine if we currently own this command.
+	 * Start by searching the device queue.  If not found
+	 * there, check the pending_scb list.  If not found
+	 * at all, and the system wanted us to just abort the
+	 * command, return success.
+	 */
+	dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id,
+				   cmd->device->lun, /*alloc*/FALSE);
+
+	if (dev == NULL) {
+		/*
+		 * No target device for this command exists,
+		 * so we must not still own the command.
+		 */
+		printf("%s:%d:%d:%d: Is not an active device\n",
+		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
+		       cmd->device->lun);
+		retval = SUCCESS;
+		goto no_cmd;
+	}
+
+	TAILQ_FOREACH(list_acmd, &dev->busyq, acmd_links.tqe) {
+		if (list_acmd == acmd)
+			break;
+	}
+
+	if (list_acmd != NULL) {
+		printf("%s:%d:%d:%d: Command found on device queue\n",
+		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
+		       cmd->device->lun);
+		if (flag == SCB_ABORT) {
+			TAILQ_REMOVE(&dev->busyq, list_acmd, acmd_links.tqe);
+			cmd->result = DID_ABORT << 16;
+			ahc_linux_queue_cmd_complete(ahc, cmd);
+			retval = SUCCESS;
+			goto done;
+		}
+	}
+
+	if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
+	 && ahc_search_untagged_queues(ahc, cmd, cmd->device->id,
+				       cmd->device->channel + 'A',
+				       cmd->device->lun,
+				       CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) {
+		printf("%s:%d:%d:%d: Command found on untagged queue\n",
+		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
+		       cmd->device->lun);
+		retval = SUCCESS;
+		goto done;
+	}
+
+	/*
+	 * See if we can find a matching cmd in the pending list.
+	 */
+	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
+		if (pending_scb->io_ctx == cmd)
+			break;
+	}
+
+	if (pending_scb == NULL && flag == SCB_DEVICE_RESET) {
+
+		/* Any SCB for this device will do for a target reset */
+		LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
+		  	if (ahc_match_scb(ahc, pending_scb, cmd->device->id,
+					  cmd->device->channel + 'A',
+					  CAM_LUN_WILDCARD,
+					  SCB_LIST_NULL, ROLE_INITIATOR) == 0)
+				break;
+		}
+	}
+
+	if (pending_scb == NULL) {
+		printf("%s:%d:%d:%d: Command not found\n",
+		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
+		       cmd->device->lun);
+		goto no_cmd;
+	}
+
+	if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
+		/*
+		 * We can't queue two recovery actions using the same SCB
+		 */
+		retval = FAILED;
+		goto  done;
+	}
+
+	/*
+	 * Ensure that the card doesn't do anything
+	 * behind our back and that we didn't "just" miss
+	 * an interrupt that would affect this cmd.
+	 */
+	was_paused = ahc_is_paused(ahc);
+	ahc_pause_and_flushwork(ahc);
+	paused = TRUE;
+
+	if ((pending_scb->flags & SCB_ACTIVE) == 0) {
+		printf("%s:%d:%d:%d: Command already completed\n",
+		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
+		       cmd->device->lun);
+		goto no_cmd;
+	}
+
+	printf("%s: At time of recovery, card was %spaused\n",
+	       ahc_name(ahc), was_paused ? "" : "not ");
+	ahc_dump_card_state(ahc);
+
+	disconnected = TRUE;
+	if (flag == SCB_ABORT) {
+		if (ahc_search_qinfifo(ahc, cmd->device->id,
+				       cmd->device->channel + 'A',
+				       cmd->device->lun,
+				       pending_scb->hscb->tag,
+				       ROLE_INITIATOR, CAM_REQ_ABORTED,
+				       SEARCH_COMPLETE) > 0) {
+			printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
+			       ahc_name(ahc), cmd->device->channel,
+					cmd->device->id, cmd->device->lun);
+			retval = SUCCESS;
+			goto done;
+		}
+	} else if (ahc_search_qinfifo(ahc, cmd->device->id,
+				      cmd->device->channel + 'A',
+				      cmd->device->lun, pending_scb->hscb->tag,
+				      ROLE_INITIATOR, /*status*/0,
+				      SEARCH_COUNT) > 0) {
+		disconnected = FALSE;
+	}
+
+	if (disconnected && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
+		struct scb *bus_scb;
+
+		bus_scb = ahc_lookup_scb(ahc, ahc_inb(ahc, SCB_TAG));
+		if (bus_scb == pending_scb)
+			disconnected = FALSE;
+		else if (flag != SCB_ABORT
+		      && ahc_inb(ahc, SAVED_SCSIID) == pending_scb->hscb->scsiid
+		      && ahc_inb(ahc, SAVED_LUN) == SCB_GET_LUN(pending_scb))
+			disconnected = FALSE;
+	}
+
+	/*
+	 * At this point, pending_scb is the scb associated with the
+	 * passed in command.  That command is currently active on the
+	 * bus, is in the disconnected state, or we're hoping to find
+	 * a command for the same target active on the bus to abuse to
+	 * send a BDR.  Queue the appropriate message based on which of
+	 * these states we are in.
+	 */
+	last_phase = ahc_inb(ahc, LASTPHASE);
+	saved_scbptr = ahc_inb(ahc, SCBPTR);
+	active_scb_index = ahc_inb(ahc, SCB_TAG);
+	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
+	if (last_phase != P_BUSFREE
+	 && (pending_scb->hscb->tag == active_scb_index
+	  || (flag == SCB_DEVICE_RESET
+	   && SCSIID_TARGET(ahc, saved_scsiid) == cmd->device->id))) {
+
+		/*
+		 * We're active on the bus, so assert ATN
+		 * and hope that the target responds.
+		 */
+		pending_scb = ahc_lookup_scb(ahc, active_scb_index);
+		pending_scb->flags |= SCB_RECOVERY_SCB|flag;
+		ahc_outb(ahc, MSG_OUT, HOST_MSG);
+		ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
+		printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
+		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
+		       cmd->device->lun);
+		wait = TRUE;
+	} else if (disconnected) {
+
+		/*
+		 * Actually re-queue this SCB in an attempt
+		 * to select the device before it reconnects.
+		 * In either case (selection or reselection),
+		 * we will now issue the appropriate message
+		 * to the timed-out device.
+		 *
+		 * Set the MK_MESSAGE control bit indicating
+		 * that we desire to send a message.  We
+		 * also set the disconnected flag since
+		 * in the paging case there is no guarantee
+		 * that our SCB control byte matches the
+		 * version on the card.  We don't want the
+		 * sequencer to abort the command thinking
+		 * an unsolicited reselection occurred.
+		 */
+		pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
+		pending_scb->flags |= SCB_RECOVERY_SCB|flag;
+
+		/*
+		 * Remove any cached copy of this SCB in the
+		 * disconnected list in preparation for the
+		 * queuing of our abort SCB.  We use the
+		 * same element in the SCB, SCB_NEXT, for
+		 * both the qinfifo and the disconnected list.
+		 */
+		ahc_search_disc_list(ahc, cmd->device->id,
+				     cmd->device->channel + 'A',
+				     cmd->device->lun, pending_scb->hscb->tag,
+				     /*stop_on_first*/TRUE,
+				     /*remove*/TRUE,
+				     /*save_state*/FALSE);
+
+		/*
+		 * In the non-paging case, the sequencer will
+		 * never re-reference the in-core SCB.
+		 * To make sure we are notified during
+		 * reselection, set the MK_MESSAGE flag in
+		 * the card's copy of the SCB.
+		 */
+		if ((ahc->flags & AHC_PAGESCBS) == 0) {
+			ahc_outb(ahc, SCBPTR, pending_scb->hscb->tag);
+			ahc_outb(ahc, SCB_CONTROL,
+				 ahc_inb(ahc, SCB_CONTROL)|MK_MESSAGE);
+		}
+
+		/*
+		 * Clear out any entries in the QINFIFO first
+		 * so we are the next SCB for this target
+		 * to run.
+		 */
+		ahc_search_qinfifo(ahc, cmd->device->id,
+				   cmd->device->channel + 'A',
+				   cmd->device->lun, SCB_LIST_NULL,
+				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
+				   SEARCH_COMPLETE);
+		ahc_qinfifo_requeue_tail(ahc, pending_scb);
+		ahc_outb(ahc, SCBPTR, saved_scbptr);
+		ahc_print_path(ahc, pending_scb);
+		printf("Device is disconnected, re-queuing SCB\n");
+		wait = TRUE;
+	} else {
+		printf("%s:%d:%d:%d: Unable to deliver message\n",
+		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
+		       cmd->device->lun);
+		retval = FAILED;
+		goto done;
+	}
+
+no_cmd:
+	/*
+	 * Our assumption is that if we don't have the command, no
+	 * recovery action was required, so we return success.  Again,
+	 * the semantics of the mid-layer recovery engine are not
+	 * well defined, so this may change in time.
+	 */
+	retval = SUCCESS;
+done:
+	if (paused)
+		ahc_unpause(ahc);
+	if (wait) {
+		struct timer_list timer;
+		int ret;
+
+		ahc->platform_data->flags |= AHC_UP_EH_SEMAPHORE;
+		spin_unlock_irq(&ahc->platform_data->spin_lock);
+		init_timer(&timer);
+		timer.data = (u_long)ahc;
+		timer.expires = jiffies + (5 * HZ);
+		timer.function = ahc_linux_sem_timeout;
+		add_timer(&timer);
+		printf("Recovery code sleeping\n");
+		down(&ahc->platform_data->eh_sem);
+		printf("Recovery code awake\n");
+		ret = del_timer_sync(&timer);
+		if (ret == 0) {
+			printf("Timer Expired\n");
+			retval = FAILED;
+		}
+		spin_lock_irq(&ahc->platform_data->spin_lock);
+	}
+	ahc_schedule_runq(ahc);
+	ahc_linux_run_complete_queue(ahc);
+	ahc_midlayer_entrypoint_unlock(ahc, &s);
+	return (retval);
+}
+
+void
+ahc_platform_dump_card_state(struct ahc_softc *ahc)
+{
+	struct ahc_linux_device *dev;
+	int channel;
+	int maxchannel;
+	int target;
+	int maxtarget;
+	int lun;
+	int i;
+
+	maxchannel = (ahc->features & AHC_TWIN) ? 1 : 0;
+	maxtarget = (ahc->features & AHC_WIDE) ? 15 : 7;
+	for (channel = 0; channel <= maxchannel; channel++) {
+
+		for (target = 0; target <= maxtarget; target++) {
+
+			for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
+				struct ahc_cmd *acmd;
+
+				dev = ahc_linux_get_device(ahc, channel, target,
+							   lun, /*alloc*/FALSE);
+				if (dev == NULL)
+					continue;
+
+				printf("DevQ(%d:%d:%d): ",
+				       channel, target, lun);
+				i = 0;
+				TAILQ_FOREACH(acmd, &dev->busyq,
+					      acmd_links.tqe) {
+					if (i++ > AHC_SCB_MAX)
+						break;
+				}
+				printf("%d waiting\n", i);
+			}
+		}
+	}
+}
+
+static void ahc_linux_exit(void);
+
+static int __init
+ahc_linux_init(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	int rc = ahc_linux_detect(&aic7xxx_driver_template);
+	if (rc)
+		return rc;
+	ahc_linux_exit();
+	return -ENODEV;
+#else
+	scsi_register_module(MODULE_SCSI_HA, &aic7xxx_driver_template);
+	if (aic7xxx_driver_template.present == 0) {
+		scsi_unregister_module(MODULE_SCSI_HA,
+				       &aic7xxx_driver_template);
+		return (-ENODEV);
+	}
+
+	return (0);
+#endif
+}
+
+static void
+ahc_linux_exit(void)
+{
+	struct ahc_softc *ahc;
+
+	/*
+	 * Shutdown DV threads before going into the SCSI mid-layer.
+	 * This avoids situations where the mid-layer locks the entire
+	 * kernel so that waiting for our DV threads to exit leads
+	 * to deadlock.
+	 */
+	TAILQ_FOREACH(ahc, &ahc_tailq, links) {
+
+		ahc_linux_kill_dv_thread(ahc);
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+	/*
+	 * In 2.4 we have to unregister from the PCI core _after_
+	 * unregistering from the scsi midlayer to avoid dangling
+	 * references.
+	 */
+	scsi_unregister_module(MODULE_SCSI_HA, &aic7xxx_driver_template);
+#endif
+	ahc_linux_pci_exit();
+	ahc_linux_eisa_exit();
+}
+
+module_init(ahc_linux_init);
+module_exit(ahc_linux_exit);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
new file mode 100644
index 0000000..db3bd63
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -0,0 +1,1130 @@
+/*
+ * Adaptec AIC7xxx device driver for Linux.
+ *
+ * Copyright (c) 1994 John Aycock
+ *   The University of Calgary Department of Computer Science.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ * 
+ * Copyright (c) 2000-2003 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm.h#151 $
+ *
+ */
+#ifndef _AIC7XXX_LINUX_H_
+#define _AIC7XXX_LINUX_H_
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/smp_lock.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+
+#include <linux/interrupt.h> /* For tasklet support. */
+#include <linux/config.h>
+#include <linux/slab.h>
+
+/* Core SCSI definitions */
+#define AIC_LIB_PREFIX ahc
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+
+/* Name space conflict with BSD queue macros */
+#ifdef LIST_HEAD
+#undef LIST_HEAD
+#endif
+
+#include "cam.h"
+#include "queue.h"
+#include "scsi_message.h"
+#include "aiclib.h"
+
+/*********************************** Debugging ********************************/
+#ifdef CONFIG_AIC7XXX_DEBUG_ENABLE
+#ifdef CONFIG_AIC7XXX_DEBUG_MASK
+#define AHC_DEBUG 1
+#define AHC_DEBUG_OPTS CONFIG_AIC7XXX_DEBUG_MASK
+#else
+/*
+ * Compile in debugging code, but do not enable any printfs.
+ */
+#define AHC_DEBUG 1
+#endif
+/* No debugging code. */
+#endif
+
+/************************* Forward Declarations *******************************/
+struct ahc_softc;
+typedef struct pci_dev *ahc_dev_softc_t;
+typedef Scsi_Cmnd      *ahc_io_ctx_t;
+
+/******************************* Byte Order ***********************************/
+#define ahc_htobe16(x)	cpu_to_be16(x)
+#define ahc_htobe32(x)	cpu_to_be32(x)
+#define ahc_htobe64(x)	cpu_to_be64(x)
+#define ahc_htole16(x)	cpu_to_le16(x)
+#define ahc_htole32(x)	cpu_to_le32(x)
+#define ahc_htole64(x)	cpu_to_le64(x)
+
+#define ahc_be16toh(x)	be16_to_cpu(x)
+#define ahc_be32toh(x)	be32_to_cpu(x)
+#define ahc_be64toh(x)	be64_to_cpu(x)
+#define ahc_le16toh(x)	le16_to_cpu(x)
+#define ahc_le32toh(x)	le32_to_cpu(x)
+#define ahc_le64toh(x)	le64_to_cpu(x)
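+
+/*
+ * Illustrative example (not part of the driver): these wrappers simply
+ * map the BSD-style names onto the Linux cpu_to_xxx()/xxx_to_cpu()
+ * helpers.  On a little-endian host, ahc_htole16(0x1234) is a no-op
+ * returning 0x1234, while ahc_htobe16(0x1234) byte-swaps to 0x3412; on
+ * a big-endian host the roles are reversed.
+ */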
+
+#ifndef LITTLE_ENDIAN
+#define LITTLE_ENDIAN 1234
+#endif
+
+#ifndef BIG_ENDIAN
+#define BIG_ENDIAN 4321
+#endif
+
+#ifndef BYTE_ORDER
+#if defined(__BIG_ENDIAN)
+#define BYTE_ORDER BIG_ENDIAN
+#endif
+#if defined(__LITTLE_ENDIAN)
+#define BYTE_ORDER LITTLE_ENDIAN
+#endif
+#endif /* BYTE_ORDER */
+
+/************************* Configuration Data *********************************/
+extern u_int aic7xxx_no_probe;
+extern u_int aic7xxx_allow_memio;
+extern int aic7xxx_detect_complete;
+extern Scsi_Host_Template aic7xxx_driver_template;
+
+/***************************** Bus Space/DMA **********************************/
+
+typedef uint32_t bus_size_t;
+
+typedef enum {
+	BUS_SPACE_MEMIO,
+	BUS_SPACE_PIO
+} bus_space_tag_t;
+
+typedef union {
+	u_long		  ioport;
+	volatile uint8_t __iomem *maddr;
+} bus_space_handle_t;
+
+typedef struct bus_dma_segment
+{
+	dma_addr_t	ds_addr;
+	bus_size_t	ds_len;
+} bus_dma_segment_t;
+
+struct ahc_linux_dma_tag
+{
+	bus_size_t	alignment;
+	bus_size_t	boundary;
+	bus_size_t	maxsize;
+};
+typedef struct ahc_linux_dma_tag* bus_dma_tag_t;
+
+struct ahc_linux_dmamap
+{
+	dma_addr_t	bus_addr;
+};
+typedef struct ahc_linux_dmamap* bus_dmamap_t;
+
+typedef int bus_dma_filter_t(void*, dma_addr_t);
+typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
+
+#define BUS_DMA_WAITOK		0x0
+#define BUS_DMA_NOWAIT		0x1
+#define BUS_DMA_ALLOCNOW	0x2
+#define BUS_DMA_LOAD_SEGS	0x4	/*
+					 * Argument is an S/G list not
+					 * a single buffer.
+					 */
+
+#define BUS_SPACE_MAXADDR	0xFFFFFFFF
+#define BUS_SPACE_MAXADDR_32BIT	0xFFFFFFFF
+#define BUS_SPACE_MAXSIZE_32BIT	0xFFFFFFFF
+
+int	ahc_dma_tag_create(struct ahc_softc *, bus_dma_tag_t /*parent*/,
+			   bus_size_t /*alignment*/, bus_size_t /*boundary*/,
+			   dma_addr_t /*lowaddr*/, dma_addr_t /*highaddr*/,
+			   bus_dma_filter_t*/*filter*/, void */*filterarg*/,
+			   bus_size_t /*maxsize*/, int /*nsegments*/,
+			   bus_size_t /*maxsegsz*/, int /*flags*/,
+			   bus_dma_tag_t */*dma_tagp*/);
+
+void	ahc_dma_tag_destroy(struct ahc_softc *, bus_dma_tag_t /*tag*/);
+
+int	ahc_dmamem_alloc(struct ahc_softc *, bus_dma_tag_t /*dmat*/,
+			 void** /*vaddr*/, int /*flags*/,
+			 bus_dmamap_t* /*mapp*/);
+
+void	ahc_dmamem_free(struct ahc_softc *, bus_dma_tag_t /*dmat*/,
+			void* /*vaddr*/, bus_dmamap_t /*map*/);
+
+void	ahc_dmamap_destroy(struct ahc_softc *, bus_dma_tag_t /*tag*/,
+			   bus_dmamap_t /*map*/);
+
+int	ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t /*dmat*/,
+			bus_dmamap_t /*map*/, void * /*buf*/,
+			bus_size_t /*buflen*/, bus_dmamap_callback_t *,
+			void */*callback_arg*/, int /*flags*/);
+
+int	ahc_dmamap_unload(struct ahc_softc *, bus_dma_tag_t, bus_dmamap_t);
+
+/*
+ * Operations performed by ahc_dmamap_sync().
+ */
+#define BUS_DMASYNC_PREREAD	0x01	/* pre-read synchronization */
+#define BUS_DMASYNC_POSTREAD	0x02	/* post-read synchronization */
+#define BUS_DMASYNC_PREWRITE	0x04	/* pre-write synchronization */
+#define BUS_DMASYNC_POSTWRITE	0x08	/* post-write synchronization */
+
+/*
+ * XXX
+ * ahc_dmamap_sync is only used on buffers allocated with
+ * the pci_alloc_consistent() API.  Although I'm not sure how
+ * this works on architectures with a write buffer, Linux does
+ * not have an API to sync "coherent" memory.  Perhaps we need
+ * to do an mb()?
+ */
+#define ahc_dmamap_sync(ahc, dma_tag, dmamap, offset, len, op)
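+
+/*
+ * Hypothetical sketch (not used by the driver): if a sync ever proved
+ * necessary on a write-buffered architecture, the wrapper above would
+ * presumably reduce to a memory barrier, roughly:
+ *
+ *	#define ahc_dmamap_sync(ahc, dma_tag, dmamap, offset, len, op) mb()
+ */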
+
+/************************** Timer DataStructures ******************************/
+typedef struct timer_list ahc_timer_t;
+
+/********************************** Includes **********************************/
+#ifdef CONFIG_AIC7XXX_REG_PRETTY_PRINT
+#define AIC_DEBUG_REGISTERS 1
+#else
+#define AIC_DEBUG_REGISTERS 0
+#endif
+#include "aic7xxx.h"
+
+/***************************** Timer Facilities *******************************/
+#define ahc_timer_init init_timer
+#define ahc_timer_stop del_timer_sync
+typedef void ahc_linux_callback_t (u_long);  
+static __inline void ahc_timer_reset(ahc_timer_t *timer, int usec,
+				     ahc_callback_t *func, void *arg);
+static __inline void ahc_scb_timer_reset(struct scb *scb, u_int usec);
+
+static __inline void
+ahc_timer_reset(ahc_timer_t *timer, int usec, ahc_callback_t *func, void *arg)
+{
+	struct ahc_softc *ahc;
+
+	ahc = (struct ahc_softc *)arg;
+	del_timer(timer);
+	timer->data = (u_long)arg;
+	timer->expires = jiffies + (usec * HZ)/1000000;
+	timer->function = (ahc_linux_callback_t*)func;
+	add_timer(timer);
+}
+
+static __inline void
+ahc_scb_timer_reset(struct scb *scb, u_int usec)
+{
+	mod_timer(&scb->io_ctx->eh_timeout, jiffies + (usec * HZ)/1000000);
+}
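+
+/*
+ * Worked example (assuming HZ == 1000, purely for illustration): a
+ * request of usec = 5000000 (five seconds) converts to
+ * (5000000 * HZ) / 1000000 == 5000 jiffies, so the timer is set to
+ * fire at jiffies + 5000.  Requests shorter than one tick round down
+ * to zero jiffies.
+ */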
+
+/***************************** SMP support ************************************/
+#include <linux/spinlock.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) || defined(SCSI_HAS_HOST_LOCK))
+#define AHC_SCSI_HAS_HOST_LOCK 1
+#else
+#define AHC_SCSI_HAS_HOST_LOCK 0
+#endif
+
+#define AIC7XXX_DRIVER_VERSION "6.2.36"
+
+/**************************** Front End Queues ********************************/
+/*
+ * Data structure used to cast the Linux struct scsi_cmnd to something
+ * that allows us to use the queue macros.  The Linux structure has
+ * plenty of space to hold the links fields as required by the queue
+ * macros, but the queue macros require them to have the correct type.
+ */
+struct ahc_cmd_internal {
+	/* Area owned by the Linux scsi layer. */
+	uint8_t	private[offsetof(struct scsi_cmnd, SCp.Status)];
+	union {
+		STAILQ_ENTRY(ahc_cmd)	ste;
+		LIST_ENTRY(ahc_cmd)	le;
+		TAILQ_ENTRY(ahc_cmd)	tqe;
+	} links;
+	uint32_t			end;
+};
+
+struct ahc_cmd {
+	union {
+		struct ahc_cmd_internal	icmd;
+		struct scsi_cmnd	scsi_cmd;
+	} un;
+};
+
+#define acmd_icmd(cmd) ((cmd)->un.icmd)
+#define acmd_scsi_cmd(cmd) ((cmd)->un.scsi_cmd)
+#define acmd_links un.icmd.links
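+
+/*
+ * Minimal usage sketch (illustrative only): a struct scsi_cmnd handed
+ * down by the mid-layer is cast to struct ahc_cmd so that it can be
+ * linked onto a device's busy queue with the BSD queue macros, roughly:
+ *
+ *	struct ahc_cmd *acmd = (struct ahc_cmd *)cmd;
+ *
+ *	TAILQ_INSERT_TAIL(&dev->busyq, acmd, acmd_links.tqe);
+ *	...
+ *	TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
+ */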
+
+/*************************** Device Data Structures ***************************/
+/*
+ * A per probed device structure used to deal with some error recovery
+ * scenarios that the Linux mid-layer code just doesn't know how to
+ * handle.  The structure allocated for a device only becomes persistent
+ * after a successfully completed inquiry command to the target when
+ * that inquiry data indicates a lun is present.
+ */
+TAILQ_HEAD(ahc_busyq, ahc_cmd);
+typedef enum {
+	AHC_DEV_UNCONFIGURED	 = 0x01,
+	AHC_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
+	AHC_DEV_TIMER_ACTIVE	 = 0x04, /* Our timer is active */
+	AHC_DEV_ON_RUN_LIST	 = 0x08, /* Queued to be run later */
+	AHC_DEV_Q_BASIC		 = 0x10, /* Allow basic device queuing */
+	AHC_DEV_Q_TAGGED	 = 0x20, /* Allow full SCSI2 command queueing */
+	AHC_DEV_PERIODIC_OTAG	 = 0x40, /* Send OTAG to prevent starvation */
+	AHC_DEV_SLAVE_CONFIGURED = 0x80	 /* slave_configure() has been called */
+} ahc_linux_dev_flags;
+
+struct ahc_linux_target;
+struct ahc_linux_device {
+	TAILQ_ENTRY(ahc_linux_device) links;
+	struct		ahc_busyq busyq;
+
+	/*
+	 * The number of transactions currently
+	 * queued to the device.
+	 */
+	int			active;
+
+	/*
+	 * The currently allowed number of 
+	 * transactions that can be queued to
+	 * the device.  Must be signed for
+	 * conversion from tagged to untagged
+	 * mode where the device may have more
+	 * than one outstanding active transaction.
+	 */
+	int			openings;
+
+	/*
+	 * A positive count indicates that this
+	 * device's queue is halted.
+	 */
+	u_int			qfrozen;
+	
+	/*
+	 * Cumulative command counter.
+	 */
+	u_long			commands_issued;
+
+	/*
+	 * The number of tagged transactions when
+	 * running at our current opening level
+	 * that have been successfully received by
+	 * this device since the last QUEUE FULL.
+	 */
+	u_int			tag_success_count;
+#define AHC_TAG_SUCCESS_INTERVAL 50
+
+	ahc_linux_dev_flags	flags;
+
+	/*
+	 * Per device timer.
+	 */
+	struct timer_list	timer;
+
+	/*
+	 * The high limit for the tags variable.
+	 */
+	u_int			maxtags;
+
+	/*
+	 * The computed number of tags outstanding
+	 * at the time of the last QUEUE FULL event.
+	 */
+	u_int			tags_on_last_queuefull;
+
+	/*
+	 * How many times we have seen a queue full
+	 * with the same number of tags.  This is used
+	 * to stop our adaptive queue depth algorithm
+	 * on devices with a fixed number of tags.
+	 */
+	u_int			last_queuefull_same_count;
+#define AHC_LOCK_TAGS_COUNT 50
+
+	/*
+	 * How many transactions have been queued
+	 * without the device going idle.  We use
+	 * this statistic to determine when to issue
+	 * an ordered tag to prevent transaction
+	 * starvation.  This statistic is only updated
+	 * if the AHC_DEV_PERIODIC_OTAG flag is set
+	 * on this device.
+	 */
+	u_int			commands_since_idle_or_otag;
+#define AHC_OTAG_THRESH	500
+
+	int			lun;
+	Scsi_Device	       *scsi_device;
+	struct			ahc_linux_target *target;
+};
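+
+/*
+ * Rough sketch (illustrative only, not the driver's actual handler) of
+ * how the QUEUE FULL bookkeeping above can be used: when a QUEUE FULL
+ * arrives with the same number of outstanding tags as the previous one,
+ * the repeat count grows until the depth is locked.
+ *
+ *	if (dev->active == dev->tags_on_last_queuefull) {
+ *		if (++dev->last_queuefull_same_count == AHC_LOCK_TAGS_COUNT)
+ *			dev->maxtags = dev->active;
+ *	} else {
+ *		dev->tags_on_last_queuefull = dev->active;
+ *		dev->last_queuefull_same_count = 0;
+ *	}
+ */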
+
+typedef enum {
+	AHC_DV_REQUIRED		 = 0x01,
+	AHC_INQ_VALID		 = 0x02,
+	AHC_BASIC_DV		 = 0x04,
+	AHC_ENHANCED_DV		 = 0x08
+} ahc_linux_targ_flags;
+
+/* DV States */
+typedef enum {
+	AHC_DV_STATE_EXIT = 0,
+	AHC_DV_STATE_INQ_SHORT_ASYNC,
+	AHC_DV_STATE_INQ_ASYNC,
+	AHC_DV_STATE_INQ_ASYNC_VERIFY,
+	AHC_DV_STATE_TUR,
+	AHC_DV_STATE_REBD,
+	AHC_DV_STATE_INQ_VERIFY,
+	AHC_DV_STATE_WEB,
+	AHC_DV_STATE_REB,
+	AHC_DV_STATE_SU,
+	AHC_DV_STATE_BUSY
+} ahc_dv_state;
+
+struct ahc_linux_target {
+	struct ahc_linux_device	 *devices[AHC_NUM_LUNS];
+	int			  channel;
+	int			  target;
+	int			  refcount;
+	struct ahc_transinfo	  last_tinfo;
+	struct ahc_softc	 *ahc;
+	ahc_linux_targ_flags	  flags;
+	struct scsi_inquiry_data *inq_data;
+	/*
+	 * The next "fallback" period to use for narrow/wide transfers.
+	 */
+	uint8_t			  dv_next_narrow_period;
+	uint8_t			  dv_next_wide_period;
+	uint8_t			  dv_max_width;
+	uint8_t			  dv_max_ppr_options;
+	uint8_t			  dv_last_ppr_options;
+	u_int			  dv_echo_size;
+	ahc_dv_state		  dv_state;
+	u_int			  dv_state_retry;
+	char			 *dv_buffer;
+	char			 *dv_buffer1;
+};
+
+/********************* Definitions Required by the Core ***********************/
+/*
+ * Number of SG segments we require.  So long as the S/G segments for
+ * a particular transaction are allocated in a physically contiguous
+ * manner and are allocated below 4GB, the number of S/G segments is
+ * unrestricted.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+/*
+ * We dynamically adjust the number of segments in pre-2.5 kernels to
+ * avoid fragmentation issues in the SCSI mid-layer's private memory
+ * allocator.  See aic7xxx_osm.c ahc_linux_size_nseg() for details.
+ */
+extern u_int ahc_linux_nseg;
+#define	AHC_NSEG ahc_linux_nseg
+#define	AHC_LINUX_MIN_NSEG 64
+#else
+#define	AHC_NSEG 128
+#endif
+
+/*
+ * Per-SCB OSM storage.
+ */
+typedef enum {
+	AHC_UP_EH_SEMAPHORE = 0x1
+} ahc_linux_scb_flags;
+
+struct scb_platform_data {
+	struct ahc_linux_device	*dev;
+	dma_addr_t		 buf_busaddr;
+	uint32_t		 xfer_len;
+	uint32_t		 sense_resid;	/* Auto-Sense residual */
+	ahc_linux_scb_flags	 flags;
+};
+
+/*
+ * Define a structure used for each host adapter.  All members are
+ * aligned on a boundary >= the size of the member to honor the
+ * alignment restrictions of the various platforms supported by
+ * this driver.
+ */
+typedef enum {
+	AHC_DV_WAIT_SIMQ_EMPTY	 = 0x01,
+	AHC_DV_WAIT_SIMQ_RELEASE = 0x02,
+	AHC_DV_ACTIVE		 = 0x04,
+	AHC_DV_SHUTDOWN		 = 0x08,
+	AHC_RUN_CMPLT_Q_TIMER	 = 0x10
+} ahc_linux_softc_flags;
+
+TAILQ_HEAD(ahc_completeq, ahc_cmd);
+
+struct ahc_platform_data {
+	/*
+	 * Fields accessed from interrupt context.
+	 */
+	struct ahc_linux_target *targets[AHC_NUM_TARGETS]; 
+	TAILQ_HEAD(, ahc_linux_device) device_runq;
+	struct ahc_completeq	 completeq;
+
+	spinlock_t		 spin_lock;
+	struct tasklet_struct	 runq_tasklet;
+	u_int			 qfrozen;
+	pid_t			 dv_pid;
+	struct timer_list	 completeq_timer;
+	struct timer_list	 reset_timer;
+	struct semaphore	 eh_sem;
+	struct semaphore	 dv_sem;
+	struct semaphore	 dv_cmd_sem;	/* XXX This needs to be in
+						 * the target struct
+						 */
+	struct scsi_device	*dv_scsi_dev;
+	struct Scsi_Host        *host;		/* pointer to scsi host */
+#define AHC_LINUX_NOIRQ	((uint32_t)~0)
+	uint32_t		 irq;		/* IRQ for this adapter */
+	uint32_t		 bios_address;
+	uint32_t		 mem_busaddr;	/* Mem Base Addr */
+	uint64_t		 hw_dma_mask;
+	ahc_linux_softc_flags	 flags;
+};
+
+/************************** OS Utility Wrappers *******************************/
+#define printf printk
+#define M_NOWAIT GFP_ATOMIC
+#define M_WAITOK 0
+#define malloc(size, type, flags) kmalloc(size, flags)
+#define free(ptr, type) kfree(ptr)
+
+static __inline void ahc_delay(long);
+static __inline void
+ahc_delay(long usec)
+{
+	/*
+	 * udelay on Linux can have problems for
+	 * multi-millisecond waits.  Wait at most
+	 * 1024us per call.
+	 */
+	while (usec > 0) {
+		udelay(usec % 1024);
+		usec -= 1024;
+	}
+}
+
+
+/***************************** Low Level I/O **********************************/
+static __inline uint8_t ahc_inb(struct ahc_softc * ahc, long port);
+static __inline void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
+static __inline void ahc_outsb(struct ahc_softc * ahc, long port,
+			       uint8_t *, int count);
+static __inline void ahc_insb(struct ahc_softc * ahc, long port,
+			       uint8_t *, int count);
+
+static __inline uint8_t
+ahc_inb(struct ahc_softc * ahc, long port)
+{
+	uint8_t x;
+
+	if (ahc->tag == BUS_SPACE_MEMIO) {
+		x = readb(ahc->bsh.maddr + port);
+	} else {
+		x = inb(ahc->bsh.ioport + port);
+	}
+	mb();
+	return (x);
+}
+
+static __inline void
+ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
+{
+	if (ahc->tag == BUS_SPACE_MEMIO) {
+		writeb(val, ahc->bsh.maddr + port);
+	} else {
+		outb(val, ahc->bsh.ioport + port);
+	}
+	mb();
+}
+
+static __inline void
+ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
+{
+	int i;
+
+	/*
+	 * There is probably a more efficient way to do this on Linux
+	 * but we don't use this for anything speed critical and this
+	 * should work.
+	 */
+	for (i = 0; i < count; i++)
+		ahc_outb(ahc, port, *array++);
+}
+
+static __inline void
+ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
+{
+	int i;
+
+	/*
+	 * There is probably a more efficient way to do this on Linux
+	 * but we don't use this for anything speed critical and this
+	 * should work.
+	 */
+	for (i = 0; i < count; i++)
+		*array++ = ahc_inb(ahc, port);
+}
+
+/**************************** Initialization **********************************/
+int		ahc_linux_register_host(struct ahc_softc *,
+					Scsi_Host_Template *);
+
+uint64_t	ahc_linux_get_memsize(void);
+
+/*************************** Pretty Printing **********************************/
+struct info_str {
+	char *buffer;
+	int length;
+	off_t offset;
+	int pos;
+};
+
+void	ahc_format_transinfo(struct info_str *info,
+			     struct ahc_transinfo *tinfo);
+
+/******************************** Locking *************************************/
+/* Lock protecting internal data structures */
+static __inline void ahc_lockinit(struct ahc_softc *);
+static __inline void ahc_lock(struct ahc_softc *, unsigned long *flags);
+static __inline void ahc_unlock(struct ahc_softc *, unsigned long *flags);
+
+/* Lock acquisition and release of the above lock in midlayer entry points. */
+static __inline void ahc_midlayer_entrypoint_lock(struct ahc_softc *,
+						  unsigned long *flags);
+static __inline void ahc_midlayer_entrypoint_unlock(struct ahc_softc *,
+						    unsigned long *flags);
+
+/* Lock held during command completion to the upper layer */
+static __inline void ahc_done_lockinit(struct ahc_softc *);
+static __inline void ahc_done_lock(struct ahc_softc *, unsigned long *flags);
+static __inline void ahc_done_unlock(struct ahc_softc *, unsigned long *flags);
+
+/* Lock held during ahc_list manipulation and ahc softc frees */
+extern spinlock_t ahc_list_spinlock;
+static __inline void ahc_list_lockinit(void);
+static __inline void ahc_list_lock(unsigned long *flags);
+static __inline void ahc_list_unlock(unsigned long *flags);
+
+static __inline void
+ahc_lockinit(struct ahc_softc *ahc)
+{
+	spin_lock_init(&ahc->platform_data->spin_lock);
+}
+
+static __inline void
+ahc_lock(struct ahc_softc *ahc, unsigned long *flags)
+{
+	spin_lock_irqsave(&ahc->platform_data->spin_lock, *flags);
+}
+
+static __inline void
+ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
+{
+	spin_unlock_irqrestore(&ahc->platform_data->spin_lock, *flags);
+}
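+
+/*
+ * Typical usage sketch (illustrative only): callers bracket access to
+ * per-controller state with the helpers above.
+ *
+ *	u_long flags;
+ *
+ *	ahc_lock(ahc, &flags);
+ *	... manipulate ahc and ahc->platform_data ...
+ *	ahc_unlock(ahc, &flags);
+ */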
+
+static __inline void
+ahc_midlayer_entrypoint_lock(struct ahc_softc *ahc, unsigned long *flags)
+{
+	/*
+	 * In 2.5.X and some 2.4.X versions, the midlayer takes our
+	 * lock just before calling us, so we avoid locking again.
+	 * For other kernel versions, the io_request_lock is taken
+	 * just before our entry point is called.  In this case, we
+	 * trade the io_request_lock for our per-softc lock.
+	 */
+#if AHC_SCSI_HAS_HOST_LOCK == 0
+	spin_unlock(&io_request_lock);
+	spin_lock(&ahc->platform_data->spin_lock);
+#endif
+}
+
+static __inline void
+ahc_midlayer_entrypoint_unlock(struct ahc_softc *ahc, unsigned long *flags)
+{
+#if AHC_SCSI_HAS_HOST_LOCK == 0
+	spin_unlock(&ahc->platform_data->spin_lock);
+	spin_lock(&io_request_lock);
+#endif
+}
+
+static __inline void
+ahc_done_lockinit(struct ahc_softc *ahc)
+{
+	/*
+	 * In 2.5.X, our own lock is held during completions.
+	 * In previous versions, the io_request_lock is used.
+	 * In either case, we can't initialize this lock again.
+	 */
+}
+
+static __inline void
+ahc_done_lock(struct ahc_softc *ahc, unsigned long *flags)
+{
+#if AHC_SCSI_HAS_HOST_LOCK == 0
+	spin_lock_irqsave(&io_request_lock, *flags);
+#endif
+}
+
+static __inline void
+ahc_done_unlock(struct ahc_softc *ahc, unsigned long *flags)
+{
+#if AHC_SCSI_HAS_HOST_LOCK == 0
+	spin_unlock_irqrestore(&io_request_lock, *flags);
+#endif
+}
+
+static __inline void
+ahc_list_lockinit(void)
+{
+	spin_lock_init(&ahc_list_spinlock);
+}
+
+static __inline void
+ahc_list_lock(unsigned long *flags)
+{
+	spin_lock_irqsave(&ahc_list_spinlock, *flags);
+}
+
+static __inline void
+ahc_list_unlock(unsigned long *flags)
+{
+	spin_unlock_irqrestore(&ahc_list_spinlock, *flags);
+}
+
+/******************************* PCI Definitions ******************************/
+/*
+ * PCIM_xxx: mask to locate subfield in register
+ * PCIR_xxx: config register offset
+ * PCIC_xxx: device class
+ * PCIS_xxx: device subclass
+ * PCIP_xxx: device programming interface
+ * PCIV_xxx: PCI vendor ID (only required to fixup ancient devices)
+ * PCID_xxx: device ID
+ */
+#define PCIR_DEVVENDOR		0x00
+#define PCIR_VENDOR		0x00
+#define PCIR_DEVICE		0x02
+#define PCIR_COMMAND		0x04
+#define PCIM_CMD_PORTEN		0x0001
+#define PCIM_CMD_MEMEN		0x0002
+#define PCIM_CMD_BUSMASTEREN	0x0004
+#define PCIM_CMD_MWRICEN	0x0010
+#define PCIM_CMD_PERRESPEN	0x0040
+#define	PCIM_CMD_SERRESPEN	0x0100
+#define PCIR_STATUS		0x06
+#define PCIR_REVID		0x08
+#define PCIR_PROGIF		0x09
+#define PCIR_SUBCLASS		0x0a
+#define PCIR_CLASS		0x0b
+#define PCIR_CACHELNSZ		0x0c
+#define PCIR_LATTIMER		0x0d
+#define PCIR_HEADERTYPE		0x0e
+#define PCIM_MFDEV		0x80
+#define PCIR_BIST		0x0f
+#define PCIR_CAP_PTR		0x34
+
+/* config registers for header type 0 devices */
+#define PCIR_MAPS	0x10
+#define PCIR_SUBVEND_0	0x2c
+#define PCIR_SUBDEV_0	0x2e
+
+extern struct pci_driver aic7xxx_pci_driver;
+
+typedef enum
+{
+	AHC_POWER_STATE_D0,
+	AHC_POWER_STATE_D1,
+	AHC_POWER_STATE_D2,
+	AHC_POWER_STATE_D3
+} ahc_power_state;
+
+/**************************** VL/EISA Routines ********************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) \
+  && (defined(__i386__) || defined(__alpha__)) \
+  && (!defined(CONFIG_EISA)))
+#define CONFIG_EISA
+#endif
+
+#ifdef CONFIG_EISA
+extern uint32_t aic7xxx_probe_eisa_vl;
+int			 ahc_linux_eisa_init(void);
+void			 ahc_linux_eisa_exit(void);
+int			 aic7770_map_registers(struct ahc_softc *ahc,
+					       u_int port);
+int			 aic7770_map_int(struct ahc_softc *ahc, u_int irq);
+#else
+static inline int	ahc_linux_eisa_init(void) {
+	return -ENODEV;
+}
+static inline void	ahc_linux_eisa_exit(void) {
+}
+#endif
+
+/******************************* PCI Routines *********************************/
+#ifdef CONFIG_PCI
+int			 ahc_linux_pci_init(void);
+void			 ahc_linux_pci_exit(void);
+int			 ahc_pci_map_registers(struct ahc_softc *ahc);
+int			 ahc_pci_map_int(struct ahc_softc *ahc);
+
+static __inline uint32_t ahc_pci_read_config(ahc_dev_softc_t pci,
+					     int reg, int width);
+
+static __inline uint32_t
+ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
+{
+	switch (width) {
+	case 1:
+	{
+		uint8_t retval;
+
+		pci_read_config_byte(pci, reg, &retval);
+		return (retval);
+	}
+	case 2:
+	{
+		uint16_t retval;
+		pci_read_config_word(pci, reg, &retval);
+		return (retval);
+	}
+	case 4:
+	{
+		uint32_t retval;
+		pci_read_config_dword(pci, reg, &retval);
+		return (retval);
+	}
+	default:
+		panic("ahc_pci_read_config: Read size too big");
+		/* NOTREACHED */
+		return (0);
+	}
+}
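+
+/*
+ * Usage sketch (illustrative only): reading the 32-bit vendor/device
+ * word through the wrapper above.
+ *
+ *	uint32_t devvendor;
+ *
+ *	devvendor = ahc_pci_read_config(ahc->dev_softc, PCIR_DEVVENDOR, 4);
+ */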
+
+static __inline void ahc_pci_write_config(ahc_dev_softc_t pci,
+					  int reg, uint32_t value,
+					  int width);
+
+static __inline void
+ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
+{
+	switch (width) {
+	case 1:
+		pci_write_config_byte(pci, reg, value);
+		break;
+	case 2:
+		pci_write_config_word(pci, reg, value);
+		break;
+	case 4:
+		pci_write_config_dword(pci, reg, value);
+		break;
+	default:
+		panic("ahc_pci_write_config: Write size too big");
+		/* NOTREACHED */
+	}
+}
+
+static __inline int ahc_get_pci_function(ahc_dev_softc_t);
+static __inline int
+ahc_get_pci_function(ahc_dev_softc_t pci)
+{
+	return (PCI_FUNC(pci->devfn));
+}
+
+static __inline int ahc_get_pci_slot(ahc_dev_softc_t);
+static __inline int
+ahc_get_pci_slot(ahc_dev_softc_t pci)
+{
+	return (PCI_SLOT(pci->devfn));
+}
+
+static __inline int ahc_get_pci_bus(ahc_dev_softc_t);
+static __inline int
+ahc_get_pci_bus(ahc_dev_softc_t pci)
+{
+	return (pci->bus->number);
+}
+#else
+static inline int ahc_linux_pci_init(void) {
+	return 0;
+}
+static inline void ahc_linux_pci_exit(void) {
+}
+#endif
+
+static __inline void ahc_flush_device_writes(struct ahc_softc *);
+static __inline void
+ahc_flush_device_writes(struct ahc_softc *ahc)
+{
+	/* XXX Is this sufficient for all architectures??? */
+	ahc_inb(ahc, INTSTAT);
+}
+
+/**************************** Proc FS Support *********************************/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+int	ahc_linux_proc_info(char *, char **, off_t, int, int, int);
+#else
+int	ahc_linux_proc_info(struct Scsi_Host *, char *, char **,
+			    off_t, int, int);
+#endif
+
+/*************************** Domain Validation ********************************/
+#define AHC_DV_CMD(cmd) ((cmd)->scsi_done == ahc_linux_dv_complete)
+#define AHC_DV_SIMQ_FROZEN(ahc)					\
+	((((ahc)->platform_data->flags & AHC_DV_ACTIVE) != 0)	\
+	 && (ahc)->platform_data->qfrozen == 1)
+
+/*********************** Transaction Access Wrappers *************************/
+static __inline void ahc_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t);
+static __inline void ahc_set_transaction_status(struct scb *, uint32_t);
+static __inline void ahc_cmd_set_scsi_status(Scsi_Cmnd *, uint32_t);
+static __inline void ahc_set_scsi_status(struct scb *, uint32_t);
+static __inline uint32_t ahc_cmd_get_transaction_status(Scsi_Cmnd *cmd);
+static __inline uint32_t ahc_get_transaction_status(struct scb *);
+static __inline uint32_t ahc_cmd_get_scsi_status(Scsi_Cmnd *cmd);
+static __inline uint32_t ahc_get_scsi_status(struct scb *);
+static __inline void ahc_set_transaction_tag(struct scb *, int, u_int);
+static __inline u_long ahc_get_transfer_length(struct scb *);
+static __inline int ahc_get_transfer_dir(struct scb *);
+static __inline void ahc_set_residual(struct scb *, u_long);
+static __inline void ahc_set_sense_residual(struct scb *scb, u_long resid);
+static __inline u_long ahc_get_residual(struct scb *);
+static __inline u_long ahc_get_sense_residual(struct scb *);
+static __inline int ahc_perform_autosense(struct scb *);
+static __inline uint32_t ahc_get_sense_bufsize(struct ahc_softc *,
+					       struct scb *);
+static __inline void ahc_notify_xfer_settings_change(struct ahc_softc *,
+						     struct ahc_devinfo *);
+static __inline void ahc_platform_scb_free(struct ahc_softc *ahc,
+					   struct scb *scb);
+static __inline void ahc_freeze_scb(struct scb *scb);
+
+static __inline
+void ahc_cmd_set_transaction_status(Scsi_Cmnd *cmd, uint32_t status)
+{
+	cmd->result &= ~(CAM_STATUS_MASK << 16);
+	cmd->result |= status << 16;
+}
+
+static __inline
+void ahc_set_transaction_status(struct scb *scb, uint32_t status)
+{
+	ahc_cmd_set_transaction_status(scb->io_ctx,status);
+}
+
+static __inline
+void ahc_cmd_set_scsi_status(Scsi_Cmnd *cmd, uint32_t status)
+{
+	cmd->result &= ~0xFFFF;
+	cmd->result |= status;
+}
+
+static __inline
+void ahc_set_scsi_status(struct scb *scb, uint32_t status)
+{
+	ahc_cmd_set_scsi_status(scb->io_ctx, status);
+}
+
+static __inline
+uint32_t ahc_cmd_get_transaction_status(Scsi_Cmnd *cmd)
+{
+	return ((cmd->result >> 16) & CAM_STATUS_MASK);
+}
+
+static __inline
+uint32_t ahc_get_transaction_status(struct scb *scb)
+{
+	return (ahc_cmd_get_transaction_status(scb->io_ctx));
+}
+
+static __inline
+uint32_t ahc_cmd_get_scsi_status(Scsi_Cmnd *cmd)
+{
+	return (cmd->result & 0xFFFF);
+}
+
+static __inline
+uint32_t ahc_get_scsi_status(struct scb *scb)
+{
+	return (ahc_cmd_get_scsi_status(scb->io_ctx));
+}
+
+static __inline
+void ahc_set_transaction_tag(struct scb *scb, int enabled, u_int type)
+{
+	/*
+	 * Nothing to do for Linux as the incoming transaction
+	 * has no concept of tagged/non-tagged, etc.
+	 */
+}
+
+static __inline
+u_long ahc_get_transfer_length(struct scb *scb)
+{
+	return (scb->platform_data->xfer_len);
+}
+
+static __inline
+int ahc_get_transfer_dir(struct scb *scb)
+{
+	return (scb->io_ctx->sc_data_direction);
+}
+
+static __inline
+void ahc_set_residual(struct scb *scb, u_long resid)
+{
+	scb->io_ctx->resid = resid;
+}
+
+static __inline
+void ahc_set_sense_residual(struct scb *scb, u_long resid)
+{
+	scb->platform_data->sense_resid = resid;
+}
+
+static __inline
+u_long ahc_get_residual(struct scb *scb)
+{
+	return (scb->io_ctx->resid);
+}
+
+static __inline
+u_long ahc_get_sense_residual(struct scb *scb)
+{
+	return (scb->platform_data->sense_resid);
+}
+
+static __inline
+int ahc_perform_autosense(struct scb *scb)
+{
+	/*
+	 * We always perform autosense in Linux.
+	 * On other platforms this is set on a
+	 * per-transaction basis.
+	 */
+	return (1);
+}
+
+static __inline uint32_t
+ahc_get_sense_bufsize(struct ahc_softc *ahc, struct scb *scb)
+{
+	return (sizeof(struct scsi_sense_data));
+}
+
+static __inline void
+ahc_notify_xfer_settings_change(struct ahc_softc *ahc,
+				struct ahc_devinfo *devinfo)
+{
+	/* Nothing to do here for linux */
+}
+
+static __inline void
+ahc_platform_scb_free(struct ahc_softc *ahc, struct scb *scb)
+{
+	ahc->flags &= ~AHC_RESOURCE_SHORTAGE;
+}
+
+int	ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg);
+void	ahc_platform_free(struct ahc_softc *ahc);
+void	ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
+
+static __inline void
+ahc_freeze_scb(struct scb *scb)
+{
+	if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
+		scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
+		scb->platform_data->dev->qfrozen++;
+	}
+}
+
+void	ahc_platform_set_tags(struct ahc_softc *ahc,
+			      struct ahc_devinfo *devinfo, ahc_queue_alg);
+int	ahc_platform_abort_scbs(struct ahc_softc *ahc, int target,
+				char channel, int lun, u_int tag,
+				role_t role, uint32_t status);
+irqreturn_t
+	ahc_linux_isr(int irq, void *dev_id, struct pt_regs * regs);
+void	ahc_platform_flushwork(struct ahc_softc *ahc);
+int	ahc_softc_comp(struct ahc_softc *, struct ahc_softc *);
+void	ahc_done(struct ahc_softc*, struct scb*);
+void	ahc_send_async(struct ahc_softc *, char channel,
+		       u_int target, u_int lun, ac_code, void *);
+void	ahc_print_path(struct ahc_softc *, struct scb *);
+void	ahc_platform_dump_card_state(struct ahc_softc *ahc);
+
+#ifdef CONFIG_PCI
+#define AHC_PCI_CONFIG 1
+#else
+#define AHC_PCI_CONFIG 0
+#endif
+#define bootverbose aic7xxx_verbose
+extern u_int aic7xxx_verbose;
+#endif /* _AIC7XXX_LINUX_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
new file mode 100644
index 0000000..6f6674a
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -0,0 +1,389 @@
+/*
+ * Linux driver attachment glue for PCI based controllers.
+ *
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c#47 $
+ */
+
+#include "aic7xxx_osm.h"
+#include "aic7xxx_pci.h"
+
+static int	ahc_linux_pci_dev_probe(struct pci_dev *pdev,
+					const struct pci_device_id *ent);
+static int	ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc,
+						u_long *base);
+static int	ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
+						 u_long *bus_addr,
+						 uint8_t __iomem **maddr);
+static void	ahc_linux_pci_dev_remove(struct pci_dev *pdev);
+
+/*
+ * Define the macro locally since it differs between classes of chips.
+ */
+#define ID(x)	ID_C(x, PCI_CLASS_STORAGE_SCSI)
+
+static struct pci_device_id ahc_linux_pci_id_table[] = {
+	/* aic7850 based controllers */
+	ID(ID_AHA_2902_04_10_15_20C_30C),
+	/* aic7860 based controllers */
+	ID(ID_AHA_2930CU),
+	ID(ID_AHA_1480A & ID_DEV_VENDOR_MASK),
+	ID(ID_AHA_2940AU_0 & ID_DEV_VENDOR_MASK),
+	ID(ID_AHA_2940AU_CN & ID_DEV_VENDOR_MASK),
+	ID(ID_AHA_2930C_VAR & ID_DEV_VENDOR_MASK),
+	/* aic7870 based controllers */
+	ID(ID_AHA_2940),
+	ID(ID_AHA_3940),
+	ID(ID_AHA_398X),
+	ID(ID_AHA_2944),
+	ID(ID_AHA_3944),
+	ID(ID_AHA_4944),
+	/* aic7880 based controllers */
+	ID(ID_AHA_2940U & ID_DEV_VENDOR_MASK),
+	ID(ID_AHA_3940U & ID_DEV_VENDOR_MASK),
+	ID(ID_AHA_2944U & ID_DEV_VENDOR_MASK),
+	ID(ID_AHA_3944U & ID_DEV_VENDOR_MASK),
+	ID(ID_AHA_398XU & ID_DEV_VENDOR_MASK),
+	ID(ID_AHA_4944U & ID_DEV_VENDOR_MASK),
+	ID(ID_AHA_2930U & ID_DEV_VENDOR_MASK),
+	ID(ID_AHA_2940U_PRO & ID_DEV_VENDOR_MASK),
+	ID(ID_AHA_2940U_CN & ID_DEV_VENDOR_MASK),
+	/* aic7890 based controllers */
+	ID(ID_AHA_2930U2),
+	ID(ID_AHA_2940U2B),
+	ID(ID_AHA_2940U2_OEM),
+	ID(ID_AHA_2940U2),
+	ID(ID_AHA_2950U2B),
+	ID16(ID_AIC7890_ARO & ID_AIC7895_ARO_MASK),
+	ID(ID_AAA_131U2),
+	/* aic7890 based controllers */
+	ID(ID_AHA_29160),
+	ID(ID_AHA_29160_CPQ),
+	ID(ID_AHA_29160N),
+	ID(ID_AHA_29160C),
+	ID(ID_AHA_29160B),
+	ID(ID_AHA_19160B),
+	ID(ID_AIC7892_ARO),
+	/* aic7892 based controllers */
+	ID(ID_AHA_2940U_DUAL),
+	ID(ID_AHA_3940AU),
+	ID(ID_AHA_3944AU),
+	ID(ID_AIC7895_ARO),
+	ID(ID_AHA_3950U2B_0),
+	ID(ID_AHA_3950U2B_1),
+	ID(ID_AHA_3950U2D_0),
+	ID(ID_AHA_3950U2D_1),
+	ID(ID_AIC7896_ARO),
+	/* aic7899 based controllers */
+	ID(ID_AHA_3960D),
+	ID(ID_AHA_3960D_CPQ),
+	ID(ID_AIC7899_ARO),
+	/* Generic chip probes for devices we don't know exactly. */
+	ID(ID_AIC7850 & ID_DEV_VENDOR_MASK),
+	ID(ID_AIC7855 & ID_DEV_VENDOR_MASK),
+	ID(ID_AIC7859 & ID_DEV_VENDOR_MASK),
+	ID(ID_AIC7860 & ID_DEV_VENDOR_MASK),
+	ID(ID_AIC7870 & ID_DEV_VENDOR_MASK),
+	ID(ID_AIC7880 & ID_DEV_VENDOR_MASK),
+ 	ID16(ID_AIC7890 & ID_9005_GENERIC_MASK),
+ 	ID16(ID_AIC7892 & ID_9005_GENERIC_MASK),
+	ID(ID_AIC7895 & ID_DEV_VENDOR_MASK),
+	ID16(ID_AIC7896 & ID_9005_GENERIC_MASK),
+	ID16(ID_AIC7899 & ID_9005_GENERIC_MASK),
+	ID(ID_AIC7810 & ID_DEV_VENDOR_MASK),
+	ID(ID_AIC7815 & ID_DEV_VENDOR_MASK),
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, ahc_linux_pci_id_table);
+
+struct pci_driver aic7xxx_pci_driver = {
+	.name		= "aic7xxx",
+	.probe		= ahc_linux_pci_dev_probe,
+	.remove		= ahc_linux_pci_dev_remove,
+	.id_table	= ahc_linux_pci_id_table
+};
+
+static void
+ahc_linux_pci_dev_remove(struct pci_dev *pdev)
+{
+	struct ahc_softc *ahc;
+	u_long l;
+
+	/*
+	 * We should be able to just perform
+	 * the free directly, but check our
+	 * list for extra sanity.
+	 */
+	ahc_list_lock(&l);
+	ahc = ahc_find_softc((struct ahc_softc *)pci_get_drvdata(pdev));
+	if (ahc != NULL) {
+		u_long s;
+
+		TAILQ_REMOVE(&ahc_tailq, ahc, links);
+		ahc_list_unlock(&l);
+		ahc_lock(ahc, &s);
+		ahc_intr_enable(ahc, FALSE);
+		ahc_unlock(ahc, &s);
+		ahc_free(ahc);
+	} else
+		ahc_list_unlock(&l);
+}
+
+static int
+ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	char		 buf[80];
+	const uint64_t	 mask_39bit = 0x7FFFFFFFFFULL;
+	struct		 ahc_softc *ahc;
+	ahc_dev_softc_t	 pci;
+	struct		 ahc_pci_identity *entry;
+	char		*name;
+	int		 error;
+
+	/*
+	 * Some BIOSen report the same device multiple times.
+	 */
+	TAILQ_FOREACH(ahc, &ahc_tailq, links) {
+		struct pci_dev *probed_pdev;
+
+		probed_pdev = ahc->dev_softc;
+		if (probed_pdev->bus->number == pdev->bus->number
+		 && probed_pdev->devfn == pdev->devfn)
+			break;
+	}
+	if (ahc != NULL) {
+		/* Skip duplicate. */
+		return (-ENODEV);
+	}
+
+	pci = pdev;
+	entry = ahc_find_pci_device(pci);
+	if (entry == NULL)
+		return (-ENODEV);
+
+	/*
+	 * Allocate a softc for this card and
+	 * set it up for attachment by our
+	 * common detect routine.
+	 */
+	sprintf(buf, "ahc_pci:%d:%d:%d",
+		ahc_get_pci_bus(pci),
+		ahc_get_pci_slot(pci),
+		ahc_get_pci_function(pci));
+	name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
+	if (name == NULL)
+		return (-ENOMEM);
+	strcpy(name, buf);
+	ahc = ahc_alloc(NULL, name);
+	if (ahc == NULL)
+		return (-ENOMEM);
+	if (pci_enable_device(pdev)) {
+		ahc_free(ahc);
+		return (-ENODEV);
+	}
+	pci_set_master(pdev);
+
+	if (sizeof(dma_addr_t) > 4
+	 && ahc_linux_get_memsize() > 0x80000000
+	 && pci_set_dma_mask(pdev, mask_39bit) == 0) {
+		ahc->flags |= AHC_39BIT_ADDRESSING;
+		ahc->platform_data->hw_dma_mask = mask_39bit;
+	} else {
+		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+			printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
+			return (-ENODEV);
+		}
+		ahc->platform_data->hw_dma_mask = DMA_32BIT_MASK;
+	}
+	ahc->dev_softc = pci;
+	error = ahc_pci_config(ahc, entry);
+	if (error != 0) {
+		ahc_free(ahc);
+		return (-error);
+	}
+	pci_set_drvdata(pdev, ahc);
+	if (aic7xxx_detect_complete) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+		ahc_linux_register_host(ahc, &aic7xxx_driver_template);
+#else
+		printf("aic7xxx: ignoring PCI device found after "
+		       "initialization\n");
+		return (-ENODEV);
+#endif
+	}
+	return (0);
+}
+
+int
+ahc_linux_pci_init(void)
+{
+	/* Translate error or zero return into zero or one */
+	return pci_module_init(&aic7xxx_pci_driver) ? 0 : 1;
+}
+
+void
+ahc_linux_pci_exit(void)
+{
+	pci_unregister_driver(&aic7xxx_pci_driver);
+}
+
+static int
+ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, u_long *base)
+{
+	if (aic7xxx_allow_memio == 0)
+		return (ENOMEM);
+
+	*base = pci_resource_start(ahc->dev_softc, 0);
+	if (*base == 0)
+		return (ENOMEM);
+	if (request_region(*base, 256, "aic7xxx") == 0)
+		return (ENOMEM);
+	return (0);
+}
+
+static int
+ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
+				 u_long *bus_addr,
+				 uint8_t __iomem **maddr)
+{
+	u_long	start;
+	int	error;
+
+	error = 0;
+	start = pci_resource_start(ahc->dev_softc, 1);
+	if (start != 0) {
+		*bus_addr = start;
+		if (request_mem_region(start, 0x1000, "aic7xxx") == 0)
+			error = ENOMEM;
+		if (error == 0) {
+			*maddr = ioremap_nocache(start, 256);
+			if (*maddr == NULL) {
+				error = ENOMEM;
+				release_mem_region(start, 0x1000);
+			}
+		}
+	} else
+		error = ENOMEM;
+	return (error);
+}
+
+int
+ahc_pci_map_registers(struct ahc_softc *ahc)
+{
+	uint32_t command;
+	u_long	 base;
+	uint8_t	__iomem *maddr;
+	int	 error;
+
+	/*
+	 * If it's allowed, we prefer memory mapped access.
+	 */
+	command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, 4);
+	command &= ~(PCIM_CMD_PORTEN|PCIM_CMD_MEMEN);
+	base = 0;
+	maddr = NULL;
+	error = ahc_linux_pci_reserve_mem_region(ahc, &base, &maddr);
+	if (error == 0) {
+		ahc->platform_data->mem_busaddr = base;
+		ahc->tag = BUS_SPACE_MEMIO;
+		ahc->bsh.maddr = maddr;
+		ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
+				     command | PCIM_CMD_MEMEN, 4);
+
+		/*
+		 * Do a quick test to see if memory mapped
+		 * I/O is functioning correctly.
+		 */
+		if (ahc_pci_test_register_access(ahc) != 0) {
+
+			printf("aic7xxx: PCI Device %d:%d:%d "
+			       "failed memory mapped test.  Using PIO.\n",
+			       ahc_get_pci_bus(ahc->dev_softc),
+			       ahc_get_pci_slot(ahc->dev_softc),
+			       ahc_get_pci_function(ahc->dev_softc));
+			iounmap(maddr);
+			release_mem_region(ahc->platform_data->mem_busaddr,
+					   0x1000);
+			ahc->bsh.maddr = NULL;
+			maddr = NULL;
+		} else
+			command |= PCIM_CMD_MEMEN;
+	} else {
+		printf("aic7xxx: PCI%d:%d:%d MEM region 0x%lx "
+		       "unavailable. Cannot memory map device.\n",
+		       ahc_get_pci_bus(ahc->dev_softc),
+		       ahc_get_pci_slot(ahc->dev_softc),
+		       ahc_get_pci_function(ahc->dev_softc),
+		       base);
+	}
+
+	/*
+	 * We always prefer memory mapped access.
+	 */
+	if (maddr == NULL) {
+
+		error = ahc_linux_pci_reserve_io_region(ahc, &base);
+		if (error == 0) {
+			ahc->tag = BUS_SPACE_PIO;
+			ahc->bsh.ioport = base;
+			command |= PCIM_CMD_PORTEN;
+		} else {
+			printf("aic7xxx: PCI%d:%d:%d IO region 0x%lx[0..255] "
+			       "unavailable. Cannot map device.\n",
+			       ahc_get_pci_bus(ahc->dev_softc),
+			       ahc_get_pci_slot(ahc->dev_softc),
+			       ahc_get_pci_function(ahc->dev_softc),
+			       base);
+		}
+	}
+	ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 4);
+	return (error);
+}
+
+int
+ahc_pci_map_int(struct ahc_softc *ahc)
+{
+	int error;
+
+	error = request_irq(ahc->dev_softc->irq, ahc_linux_isr,
+			    SA_SHIRQ, "aic7xxx", ahc);
+	if (error == 0)
+		ahc->platform_data->irq = ahc->dev_softc->irq;
+	
+	return (-error);
+}
+
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
new file mode 100644
index 0000000..7ddcc97
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -0,0 +1,2407 @@
+/*
+ * Product specific probe and attach routines for:
+ *      3940, 2940, aic7895, aic7890, aic7880,
+ *	aic7870, aic7860 and aic7850 SCSI controllers
+ *
+ * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#69 $
+ *
+ * $FreeBSD$
+ */
+
+#ifdef __linux__
+#include "aic7xxx_osm.h"
+#include "aic7xxx_inline.h"
+#include "aic7xxx_93cx6.h"
+#else
+#include <dev/aic7xxx/aic7xxx_osm.h>
+#include <dev/aic7xxx/aic7xxx_inline.h>
+#include <dev/aic7xxx/aic7xxx_93cx6.h>
+#endif
+
+#include "aic7xxx_pci.h"
+
+static __inline uint64_t
+ahc_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor)
+{
+	uint64_t id;
+
+	id = subvendor
+	   | (subdevice << 16)
+	   | ((uint64_t)vendor << 32)
+	   | ((uint64_t)device << 48);
+
+	return (id);
+}
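+
+/*
+ * Layout example (hypothetical values, for illustration only): the
+ * 64-bit identifier carries the subvendor in bits 0-15, the subdevice
+ * in bits 16-31, the vendor in bits 32-47 and the device in bits 48-63,
+ * so ahc_compose_id(0x1234, 0x9004, 0x5678, 0x9004) yields
+ * 0x1234900456789004.
+ */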
+
+#define AHC_PCI_IOADDR	PCIR_MAPS	/* I/O Address */
+#define AHC_PCI_MEMADDR	(PCIR_MAPS + 4)	/* Mem I/O Address */
+
+#define DEVID_9005_TYPE(id) ((id) & 0xF)
+#define		DEVID_9005_TYPE_HBA		0x0	/* Standard Card */
+#define		DEVID_9005_TYPE_AAA		0x3	/* RAID Card */
+#define		DEVID_9005_TYPE_SISL		0x5	/* Container ROMB */
+#define		DEVID_9005_TYPE_MB		0xF	/* On Motherboard */
+
+#define DEVID_9005_MAXRATE(id) (((id) & 0x30) >> 4)
+#define		DEVID_9005_MAXRATE_U160		0x0
+#define		DEVID_9005_MAXRATE_ULTRA2	0x1
+#define		DEVID_9005_MAXRATE_ULTRA	0x2
+#define		DEVID_9005_MAXRATE_FAST		0x3
+
+#define DEVID_9005_MFUNC(id) (((id) & 0x40) >> 6)
+
+#define DEVID_9005_CLASS(id) (((id) & 0xFF00) >> 8)
+#define		DEVID_9005_CLASS_SPI		0x0	/* Parallel SCSI */
+
+#define SUBID_9005_TYPE(id) ((id) & 0xF)
+#define		SUBID_9005_TYPE_MB		0xF	/* On Motherboard */
+#define		SUBID_9005_TYPE_CARD		0x0	/* Standard Card */
+#define		SUBID_9005_TYPE_LCCARD		0x1	/* Low Cost Card */
+#define		SUBID_9005_TYPE_RAID		0x3	/* Combined with Raid */
+
+#define SUBID_9005_TYPE_KNOWN(id)			\
+	  ((((id) & 0xF) == SUBID_9005_TYPE_MB)		\
+	|| (((id) & 0xF) == SUBID_9005_TYPE_CARD)	\
+	|| (((id) & 0xF) == SUBID_9005_TYPE_LCCARD)	\
+	|| (((id) & 0xF) == SUBID_9005_TYPE_RAID))
+
+#define SUBID_9005_MAXRATE(id) (((id) & 0x30) >> 4)
+#define		SUBID_9005_MAXRATE_ULTRA2	0x0
+#define		SUBID_9005_MAXRATE_ULTRA	0x1
+#define		SUBID_9005_MAXRATE_U160		0x2
+#define		SUBID_9005_MAXRATE_RESERVED	0x3
+
+#define SUBID_9005_SEEPTYPE(id)						\
+	((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB)			\
+	 ? ((id) & 0xC0) >> 6						\
+	 : ((id) & 0x300) >> 8)
+#define		SUBID_9005_SEEPTYPE_NONE	0x0
+#define		SUBID_9005_SEEPTYPE_1K		0x1
+#define		SUBID_9005_SEEPTYPE_2K_4K	0x2
+#define		SUBID_9005_SEEPTYPE_RESERVED	0x3
+#define SUBID_9005_AUTOTERM(id)						\
+	((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB)			\
+	 ? (((id) & 0x400) >> 10) == 0					\
+	 : (((id) & 0x40) >> 6) == 0)
+
+#define SUBID_9005_NUMCHAN(id)						\
+	((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB)			\
+	 ? ((id) & 0x300) >> 8						\
+	 : ((id) & 0xC00) >> 10)
+
+#define SUBID_9005_LEGACYCONN(id)					\
+	((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB)			\
+	 ? 0								\
+	 : ((id) & 0x80) >> 7)
+
+#define SUBID_9005_MFUNCENB(id)						\
+	((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB)			\
+	 ? ((id) & 0x800) >> 11						\
+	 : ((id) & 0x1000) >> 12)
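+
+/*
+ * Worked example (hypothetical subsystem ID, for illustration only):
+ * for a card-type subsystem ID of 0x0050, SUBID_9005_TYPE() yields
+ * SUBID_9005_TYPE_CARD, SUBID_9005_MAXRATE() yields
+ * SUBID_9005_MAXRATE_ULTRA, SUBID_9005_SEEPTYPE() yields
+ * SUBID_9005_SEEPTYPE_NONE, and SUBID_9005_AUTOTERM() evaluates to 0.
+ */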
+/*
+ * Informational only.  Should use the chip register to be
+ * certain, but may be used in identification strings.
+ */
+#define SUBID_9005_CARD_SCSIWIDTH_MASK	0x2000
+#define SUBID_9005_CARD_PCIWIDTH_MASK	0x4000
+#define SUBID_9005_CARD_SEDIFF_MASK	0x8000
+
+static ahc_device_setup_t ahc_aic785X_setup;
+static ahc_device_setup_t ahc_aic7860_setup;
+static ahc_device_setup_t ahc_apa1480_setup;
+static ahc_device_setup_t ahc_aic7870_setup;
+static ahc_device_setup_t ahc_aha394X_setup;
+static ahc_device_setup_t ahc_aha494X_setup;
+static ahc_device_setup_t ahc_aha398X_setup;
+static ahc_device_setup_t ahc_aic7880_setup;
+static ahc_device_setup_t ahc_aha2940Pro_setup;
+static ahc_device_setup_t ahc_aha394XU_setup;
+static ahc_device_setup_t ahc_aha398XU_setup;
+static ahc_device_setup_t ahc_aic7890_setup;
+static ahc_device_setup_t ahc_aic7892_setup;
+static ahc_device_setup_t ahc_aic7895_setup;
+static ahc_device_setup_t ahc_aic7896_setup;
+static ahc_device_setup_t ahc_aic7899_setup;
+static ahc_device_setup_t ahc_aha29160C_setup;
+static ahc_device_setup_t ahc_raid_setup;
+static ahc_device_setup_t ahc_aha394XX_setup;
+static ahc_device_setup_t ahc_aha494XX_setup;
+static ahc_device_setup_t ahc_aha398XX_setup;
+
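+/*
+ * Table of recognized controllers.  Entries are matched in order
+ * against the composed 64-bit PCI ID; id_mask selects which ID
+ * fields must match, and an entry with a NULL name marks a device
+ * we deliberately decline to attach to.
+ */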
+struct ahc_pci_identity ahc_pci_ident_table [] =
+{
+	/* aic7850 based controllers */
+	{
+		ID_AHA_2902_04_10_15_20C_30C,
+		ID_ALL_MASK,
+		"Adaptec 2902/04/10/15/20C/30C SCSI adapter",
+		ahc_aic785X_setup
+	},
+	/* aic7860 based controllers */
+	{
+		ID_AHA_2930CU,
+		ID_ALL_MASK,
+		"Adaptec 2930CU SCSI adapter",
+		ahc_aic7860_setup
+	},
+	{
+		ID_AHA_1480A & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec 1480A Ultra SCSI adapter",
+		ahc_apa1480_setup
+	},
+	{
+		ID_AHA_2940AU_0 & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec 2940A Ultra SCSI adapter",
+		ahc_aic7860_setup
+	},
+	{
+		ID_AHA_2940AU_CN & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec 2940A/CN Ultra SCSI adapter",
+		ahc_aic7860_setup
+	},
+	{
+		ID_AHA_2930C_VAR & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec 2930C Ultra SCSI adapter (VAR)",
+		ahc_aic7860_setup
+	},
+	/* aic7870 based controllers */
+	{
+		ID_AHA_2940,
+		ID_ALL_MASK,
+		"Adaptec 2940 SCSI adapter",
+		ahc_aic7870_setup
+	},
+	{
+		ID_AHA_3940,
+		ID_ALL_MASK,
+		"Adaptec 3940 SCSI adapter",
+		ahc_aha394X_setup
+	},
+	{
+		ID_AHA_398X,
+		ID_ALL_MASK,
+		"Adaptec 398X SCSI RAID adapter",
+		ahc_aha398X_setup
+	},
+	{
+		ID_AHA_2944,
+		ID_ALL_MASK,
+		"Adaptec 2944 SCSI adapter",
+		ahc_aic7870_setup
+	},
+	{
+		ID_AHA_3944,
+		ID_ALL_MASK,
+		"Adaptec 3944 SCSI adapter",
+		ahc_aha394X_setup
+	},
+	{
+		ID_AHA_4944,
+		ID_ALL_MASK,
+		"Adaptec 4944 SCSI adapter",
+		ahc_aha494X_setup
+	},
+	/* aic7880 based controllers */
+	{
+		ID_AHA_2940U & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec 2940 Ultra SCSI adapter",
+		ahc_aic7880_setup
+	},
+	{
+		ID_AHA_3940U & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec 3940 Ultra SCSI adapter",
+		ahc_aha394XU_setup
+	},
+	{
+		ID_AHA_2944U & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec 2944 Ultra SCSI adapter",
+		ahc_aic7880_setup
+	},
+	{
+		ID_AHA_3944U & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec 3944 Ultra SCSI adapter",
+		ahc_aha394XU_setup
+	},
+	{
+		ID_AHA_398XU & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec 398X Ultra SCSI RAID adapter",
+		ahc_aha398XU_setup
+	},
+	{
+		/*
+		 * XXX Don't know the slot numbers
+		 * so we can't identify channels
+		 */
+		ID_AHA_4944U & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec 4944 Ultra SCSI adapter",
+		ahc_aic7880_setup
+	},
+	{
+		ID_AHA_2930U & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec 2930 Ultra SCSI adapter",
+		ahc_aic7880_setup
+	},
+	{
+		ID_AHA_2940U_PRO & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec 2940 Pro Ultra SCSI adapter",
+		ahc_aha2940Pro_setup
+	},
+	{
+		ID_AHA_2940U_CN & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec 2940/CN Ultra SCSI adapter",
+		ahc_aic7880_setup
+	},
+	/* Ignore all SISL (AAC on MB) based controllers. */
+	{
+		ID_9005_SISL_ID,
+		ID_9005_SISL_MASK,
+		NULL,
+		NULL
+	},
+	/* aic7890 based controllers */
+	{
+		ID_AHA_2930U2,
+		ID_ALL_MASK,
+		"Adaptec 2930 Ultra2 SCSI adapter",
+		ahc_aic7890_setup
+	},
+	{
+		ID_AHA_2940U2B,
+		ID_ALL_MASK,
+		"Adaptec 2940B Ultra2 SCSI adapter",
+		ahc_aic7890_setup
+	},
+	{
+		ID_AHA_2940U2_OEM,
+		ID_ALL_MASK,
+		"Adaptec 2940 Ultra2 SCSI adapter (OEM)",
+		ahc_aic7890_setup
+	},
+	{
+		ID_AHA_2940U2,
+		ID_ALL_MASK,
+		"Adaptec 2940 Ultra2 SCSI adapter",
+		ahc_aic7890_setup
+	},
+	{
+		ID_AHA_2950U2B,
+		ID_ALL_MASK,
+		"Adaptec 2950 Ultra2 SCSI adapter",
+		ahc_aic7890_setup
+	},
+	{
+		ID_AIC7890_ARO,
+		ID_ALL_MASK,
+		"Adaptec aic7890/91 Ultra2 SCSI adapter (ARO)",
+		ahc_aic7890_setup
+	},
+	{
+		ID_AAA_131U2,
+		ID_ALL_MASK,
+		"Adaptec AAA-131 Ultra2 RAID adapter",
+		ahc_aic7890_setup
+	},
+	/* aic7892 based controllers */
+	{
+		ID_AHA_29160,
+		ID_ALL_MASK,
+		"Adaptec 29160 Ultra160 SCSI adapter",
+		ahc_aic7892_setup
+	},
+	{
+		ID_AHA_29160_CPQ,
+		ID_ALL_MASK,
+		"Adaptec (Compaq OEM) 29160 Ultra160 SCSI adapter",
+		ahc_aic7892_setup
+	},
+	{
+		ID_AHA_29160N,
+		ID_ALL_MASK,
+		"Adaptec 29160N Ultra160 SCSI adapter",
+		ahc_aic7892_setup
+	},
+	{
+		ID_AHA_29160C,
+		ID_ALL_MASK,
+		"Adaptec 29160C Ultra160 SCSI adapter",
+		ahc_aha29160C_setup
+	},
+	{
+		ID_AHA_29160B,
+		ID_ALL_MASK,
+		"Adaptec 29160B Ultra160 SCSI adapter",
+		ahc_aic7892_setup
+	},
+	{
+		ID_AHA_19160B,
+		ID_ALL_MASK,
+		"Adaptec 19160B Ultra160 SCSI adapter",
+		ahc_aic7892_setup
+	},
+	{
+		ID_AIC7892_ARO,
+		ID_ALL_MASK,
+		"Adaptec aic7892 Ultra160 SCSI adapter (ARO)",
+		ahc_aic7892_setup
+	},
+	/* aic7895 based controllers */	
+	{
+		ID_AHA_2940U_DUAL,
+		ID_ALL_MASK,
+		"Adaptec 2940/DUAL Ultra SCSI adapter",
+		ahc_aic7895_setup
+	},
+	{
+		ID_AHA_3940AU,
+		ID_ALL_MASK,
+		"Adaptec 3940A Ultra SCSI adapter",
+		ahc_aic7895_setup
+	},
+	{
+		ID_AHA_3944AU,
+		ID_ALL_MASK,
+		"Adaptec 3944A Ultra SCSI adapter",
+		ahc_aic7895_setup
+	},
+	{
+		ID_AIC7895_ARO,
+		ID_AIC7895_ARO_MASK,
+		"Adaptec aic7895 Ultra SCSI adapter (ARO)",
+		ahc_aic7895_setup
+	},
+	/* aic7896/97 based controllers */	
+	{
+		ID_AHA_3950U2B_0,
+		ID_ALL_MASK,
+		"Adaptec 3950B Ultra2 SCSI adapter",
+		ahc_aic7896_setup
+	},
+	{
+		ID_AHA_3950U2B_1,
+		ID_ALL_MASK,
+		"Adaptec 3950B Ultra2 SCSI adapter",
+		ahc_aic7896_setup
+	},
+	{
+		ID_AHA_3950U2D_0,
+		ID_ALL_MASK,
+		"Adaptec 3950D Ultra2 SCSI adapter",
+		ahc_aic7896_setup
+	},
+	{
+		ID_AHA_3950U2D_1,
+		ID_ALL_MASK,
+		"Adaptec 3950D Ultra2 SCSI adapter",
+		ahc_aic7896_setup
+	},
+	{
+		ID_AIC7896_ARO,
+		ID_ALL_MASK,
+		"Adaptec aic7896/97 Ultra2 SCSI adapter (ARO)",
+		ahc_aic7896_setup
+	},
+	/* aic7899 based controllers */	
+	{
+		ID_AHA_3960D,
+		ID_ALL_MASK,
+		"Adaptec 3960D Ultra160 SCSI adapter",
+		ahc_aic7899_setup
+	},
+	{
+		ID_AHA_3960D_CPQ,
+		ID_ALL_MASK,
+		"Adaptec (Compaq OEM) 3960D Ultra160 SCSI adapter",
+		ahc_aic7899_setup
+	},
+	{
+		ID_AIC7899_ARO,
+		ID_ALL_MASK,
+		"Adaptec aic7899 Ultra160 SCSI adapter (ARO)",
+		ahc_aic7899_setup
+	},
+	/* Generic chip probes for devices we don't know 'exactly' */
+	{
+		ID_AIC7850 & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec aic7850 SCSI adapter",
+		ahc_aic785X_setup
+	},
+	{
+		ID_AIC7855 & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec aic7855 SCSI adapter",
+		ahc_aic785X_setup
+	},
+	{
+		ID_AIC7859 & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec aic7859 SCSI adapter",
+		ahc_aic7860_setup
+	},
+	{
+		ID_AIC7860 & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec aic7860 Ultra SCSI adapter",
+		ahc_aic7860_setup
+	},
+	{
+		ID_AIC7870 & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec aic7870 SCSI adapter",
+		ahc_aic7870_setup
+	},
+	{
+		ID_AIC7880 & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec aic7880 Ultra SCSI adapter",
+		ahc_aic7880_setup
+	},
+	{
+		ID_AIC7890 & ID_9005_GENERIC_MASK,
+		ID_9005_GENERIC_MASK,
+		"Adaptec aic7890/91 Ultra2 SCSI adapter",
+		ahc_aic7890_setup
+	},
+	{
+		ID_AIC7892 & ID_9005_GENERIC_MASK,
+		ID_9005_GENERIC_MASK,
+		"Adaptec aic7892 Ultra160 SCSI adapter",
+		ahc_aic7892_setup
+	},
+	{
+		ID_AIC7895 & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec aic7895 Ultra SCSI adapter",
+		ahc_aic7895_setup
+	},
+	{
+		ID_AIC7896 & ID_9005_GENERIC_MASK,
+		ID_9005_GENERIC_MASK,
+		"Adaptec aic7896/97 Ultra2 SCSI adapter",
+		ahc_aic7896_setup
+	},
+	{
+		ID_AIC7899 & ID_9005_GENERIC_MASK,
+		ID_9005_GENERIC_MASK,
+		"Adaptec aic7899 Ultra160 SCSI adapter",
+		ahc_aic7899_setup
+	},
+	{
+		ID_AIC7810 & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec aic7810 RAID memory controller",
+		ahc_raid_setup
+	},
+	{
+		ID_AIC7815 & ID_DEV_VENDOR_MASK,
+		ID_DEV_VENDOR_MASK,
+		"Adaptec aic7815 RAID memory controller",
+		ahc_raid_setup
+	}
+};
+
+const u_int ahc_num_pci_devs = NUM_ELEMENTS(ahc_pci_ident_table);
+		
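+/*
+ * PCI device (slot) numbers used to map each function of the
+ * multi-channel 394X/398X/494X boards to its SCSI channel.
+ */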
+#define AHC_394X_SLOT_CHANNEL_A	4
+#define AHC_394X_SLOT_CHANNEL_B	5
+
+#define AHC_398X_SLOT_CHANNEL_A	4
+#define AHC_398X_SLOT_CHANNEL_B	8
+#define AHC_398X_SLOT_CHANNEL_C	12
+
+#define AHC_494X_SLOT_CHANNEL_A	4
+#define AHC_494X_SLOT_CHANNEL_B	5
+#define AHC_494X_SLOT_CHANNEL_C	6
+#define AHC_494X_SLOT_CHANNEL_D	7
+
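+/*
+ * DEVCONFIG: Adaptec specific register at PCI config space offset 0x40
+ * controlling chip level bus behavior and external SCB RAM options.
+ */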
+#define	DEVCONFIG		0x40
+#define		PCIERRGENDIS	0x80000000ul
+#define		SCBSIZE32	0x00010000ul	/* aic789X only */
+#define		REXTVALID	0x00001000ul	/* ultra cards only */
+#define		MPORTMODE	0x00000400ul	/* aic7870+ only */
+#define		RAMPSM		0x00000200ul	/* aic7870+ only */
+#define		VOLSENSE	0x00000100ul
+#define		PCI64BIT	0x00000080ul	/* 64Bit PCI bus (Ultra2 Only)*/
+#define		SCBRAMSEL	0x00000080ul
+#define		MRDCEN		0x00000040ul
+#define		EXTSCBTIME	0x00000020ul	/* aic7870 only */
+#define		EXTSCBPEN	0x00000010ul	/* aic7870 only */
+#define		BERREN		0x00000008ul
+#define		DACEN		0x00000004ul
+#define		STPWLEVEL	0x00000002ul
+#define		DIFACTNEGEN	0x00000001ul	/* aic7870 only */
+
+#define	CSIZE_LATTIME		0x0c
+#define		CACHESIZE	0x0000003ful	/* only 5 bits */
+#define		LATTIME		0x0000ff00ul
+
+/* PCI STATUS definitions */
+#define	DPE	0x80
+#define SSE	0x40
+#define	RMA	0x20
+#define	RTA	0x10
+#define STA	0x08
+#define DPR	0x01
+
+static int ahc_9005_subdevinfo_valid(uint16_t vendor, uint16_t device,
+				     uint16_t subvendor, uint16_t subdevice);
+static int ahc_ext_scbram_present(struct ahc_softc *ahc);
+static void ahc_scbram_config(struct ahc_softc *ahc, int enable,
+				  int pcheck, int fast, int large);
+static void ahc_probe_ext_scbram(struct ahc_softc *ahc);
+static void check_extport(struct ahc_softc *ahc, u_int *sxfrctl1);
+static void ahc_parse_pci_eeprom(struct ahc_softc *ahc,
+				 struct seeprom_config *sc);
+static void configure_termination(struct ahc_softc *ahc,
+				  struct seeprom_descriptor *sd,
+				  u_int adapter_control,
+	 			  u_int *sxfrctl1);
+
+static void ahc_new_term_detect(struct ahc_softc *ahc,
+				int *enableSEC_low,
+				int *enableSEC_high,
+				int *enablePRI_low,
+				int *enablePRI_high,
+				int *eeprom_present);
+static void aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
+				 int *internal68_present,
+				 int *externalcable_present,
+				 int *eeprom_present);
+static void aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
+				 int *externalcable_present,
+				 int *eeprom_present);
+static void    write_brdctl(struct ahc_softc *ahc, uint8_t value);
+static uint8_t read_brdctl(struct ahc_softc *ahc);
+static void ahc_pci_intr(struct ahc_softc *ahc);
+static int  ahc_pci_chip_init(struct ahc_softc *ahc);
+static int  ahc_pci_suspend(struct ahc_softc *ahc);
+static int  ahc_pci_resume(struct ahc_softc *ahc);
+
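+/*
+ * Determine whether the subsystem vendor/device IDs of a controller
+ * using the 0x9005 Adaptec vendor ID can be trusted to describe the
+ * board.  Only standard and low-cost Adaptec cards are trusted;
+ * motherboard and RAID variants are not.
+ */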
+static int
+ahc_9005_subdevinfo_valid(uint16_t vendor, uint16_t device,
+			  uint16_t subvendor, uint16_t subdevice)
+{
+	int result;
+
+	/* Default to invalid. */
+	result = 0;
+	if (vendor == 0x9005
+	 && subvendor == 0x9005
+         && subdevice != device
+         && SUBID_9005_TYPE_KNOWN(subdevice) != 0) {
+
+		switch (SUBID_9005_TYPE(subdevice)) {
+		case SUBID_9005_TYPE_MB:
+			break;
+		case SUBID_9005_TYPE_CARD:
+		case SUBID_9005_TYPE_LCCARD:
+			/*
+			 * Currently only trust Adaptec cards to
+			 * get the sub device info correct.
+			 */
+			if (DEVID_9005_TYPE(device) == DEVID_9005_TYPE_HBA)
+				result = 1;
+			break;
+		case SUBID_9005_TYPE_RAID:
+			break;
+		default:
+			break;
+		}
+	}
+	return (result);
+}
+
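+/*
+ * Look up a controller in the ident table using its composed PCI ID.
+ * Returns NULL for devices we do not recognize or deliberately ignore.
+ */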
+struct ahc_pci_identity *
+ahc_find_pci_device(ahc_dev_softc_t pci)
+{
+	uint64_t  full_id;
+	uint16_t  device;
+	uint16_t  vendor;
+	uint16_t  subdevice;
+	uint16_t  subvendor;
+	struct	  ahc_pci_identity *entry;
+	u_int	  i;
+
+	vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
+	device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
+	subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
+	subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+	full_id = ahc_compose_id(device, vendor, subdevice, subvendor);
+
+	/*
+	 * If the second function is not hooked up, ignore it.
+	 * Unfortunately, not all MB vendors implement the
+	 * subdevice ID as per the Adaptec spec, so do our best
+	 * to sanity check it prior to accepting the subdevice
+	 * ID as valid.
+	 */
+	if (ahc_get_pci_function(pci) > 0
+	 && ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice)
+	 && SUBID_9005_MFUNCENB(subdevice) == 0)
+		return (NULL);
+
+	for (i = 0; i < ahc_num_pci_devs; i++) {
+		entry = &ahc_pci_ident_table[i];
+		if (entry->full_id == (full_id & entry->id_mask)) {
+			/* Honor exclusion entries. */
+			if (entry->name == NULL)
+				return (NULL);
+			return (entry);
+		}
+	}
+	return (NULL);
+}
+
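+/*
+ * Perform the PCI specific portion of controller initialization:
+ * map registers, apply chip/board specific settings, read the
+ * SEEPROM, configure termination, and run the core ahc_init().
+ */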
+int
+ahc_pci_config(struct ahc_softc *ahc, struct ahc_pci_identity *entry)
+{
+	u_long	 l;
+	u_int	 command;
+	u_int	 our_id;
+	u_int	 sxfrctl1;
+	u_int	 scsiseq;
+	u_int	 dscommand0;
+	uint32_t devconfig;
+	int	 error;
+	uint8_t	 sblkctl;
+
+	our_id = 0;
+	error = entry->setup(ahc);
+	if (error != 0)
+		return (error);
+	ahc->chip |= AHC_PCI;
+	ahc->description = entry->name;
+
+	pci_set_power_state(ahc->dev_softc, AHC_POWER_STATE_D0);
+
+	error = ahc_pci_map_registers(ahc);
+	if (error != 0)
+		return (error);
+
+	/*
+	 * Before we continue probing the card, ensure that
+	 * its interrupts are *disabled*.  We don't want
+	 * a misstep to hang the machine in an interrupt
+	 * storm.
+	 */
+	ahc_intr_enable(ahc, FALSE);
+
+	devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4);
+
+	/*
+	 * If we need to support high memory, enable dual
+	 * address cycles.  This bit must be set to enable
+	 * high address bit generation even if we are on a
+	 * 64bit bus (PCI64BIT set in devconfig).
+	 */
+	if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
+
+		if (bootverbose)
+			printf("%s: Enabling 39Bit Addressing\n",
+			       ahc_name(ahc));
+		devconfig |= DACEN;
+	}
+	
+	/* Ensure that pci error generation, a test feature, is disabled. */
+	devconfig |= PCIERRGENDIS;
+
+	ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4);
+
+	/* Ensure busmastering is enabled */
+	command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2);
+	command |= PCIM_CMD_BUSMASTEREN;
+
+	ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, /*bytes*/2);
+
+	/* On all PCI adapters, we allow SCB paging */
+	ahc->flags |= AHC_PAGESCBS;
+
+	error = ahc_softc_init(ahc);
+	if (error != 0)
+		return (error);
+
+	/*
+	 * Disable PCI parity error checking.  Users typically
+	 * do this to work around broken PCI chipsets that get
+	 * the parity timing wrong and thus generate lots of spurious
+	 * errors.  The chip only allows us to disable *all* parity
+	 * error reporting when doing this, so CIO bus, scb ram, and
+	 * scratch ram parity errors will be ignored too.
+	 */
+	if ((ahc->flags & AHC_DISABLE_PCI_PERR) != 0)
+		ahc->seqctl |= FAILDIS;
+
+	ahc->bus_intr = ahc_pci_intr;
+	ahc->bus_chip_init = ahc_pci_chip_init;
+	ahc->bus_suspend = ahc_pci_suspend;
+	ahc->bus_resume = ahc_pci_resume;
+
+	/* Remember how the card was set up in case there is no SEEPROM */
+	if ((ahc_inb(ahc, HCNTRL) & POWRDN) == 0) {
+		ahc_pause(ahc);
+		if ((ahc->features & AHC_ULTRA2) != 0)
+			our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
+		else
+			our_id = ahc_inb(ahc, SCSIID) & OID;
+		sxfrctl1 = ahc_inb(ahc, SXFRCTL1) & STPWEN;
+		scsiseq = ahc_inb(ahc, SCSISEQ);
+	} else {
+		sxfrctl1 = STPWEN;
+		our_id = 7;
+		scsiseq = 0;
+	}
+
+	error = ahc_reset(ahc, /*reinit*/FALSE);
+	if (error != 0)
+		return (ENXIO);
+
+	if ((ahc->features & AHC_DT) != 0) {
+		u_int sfunct;
+
+		/* Perform ALT-Mode Setup */
+		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
+		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
+		ahc_outb(ahc, OPTIONMODE,
+			 OPTIONMODE_DEFAULTS|AUTOACKEN|BUSFREEREV|EXPPHASEDIS);
+		ahc_outb(ahc, SFUNCT, sfunct);
+
+		/* Normal mode setup */
+		ahc_outb(ahc, CRCCONTROL1, CRCVALCHKEN|CRCENDCHKEN|CRCREQCHKEN
+					  |TARGCRCENDEN);
+	}
+
+	dscommand0 = ahc_inb(ahc, DSCOMMAND0);
+	dscommand0 |= MPARCKEN|CACHETHEN;
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+
+		/*
+		 * DPARCKEN doesn't work correctly on
+		 * some MBs so don't use it.
+		 */
+		dscommand0 &= ~DPARCKEN;
+	}
+
+	/*
+	 * Handle chips that must have cache line
+	 * streaming (dis/en)abled.
+	 */
+	if ((ahc->bugs & AHC_CACHETHEN_DIS_BUG) != 0)
+		dscommand0 |= CACHETHEN;
+
+	if ((ahc->bugs & AHC_CACHETHEN_BUG) != 0)
+		dscommand0 &= ~CACHETHEN;
+
+	ahc_outb(ahc, DSCOMMAND0, dscommand0);
+
+	ahc->pci_cachesize =
+	    ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME,
+				/*bytes*/1) & CACHESIZE;
+	ahc->pci_cachesize *= 4;
+
+	if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0
+	 && ahc->pci_cachesize == 4) {
+
+		ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
+				     0, /*bytes*/1);
+		ahc->pci_cachesize = 0;
+	}
+
+	/*
+	 * We cannot perform ULTRA speeds without the presence
+	 * of the external precision resistor.
+	 */
+	if ((ahc->features & AHC_ULTRA) != 0) {
+		uint32_t devconfig;
+
+		devconfig = ahc_pci_read_config(ahc->dev_softc,
+						DEVCONFIG, /*bytes*/4);
+		if ((devconfig & REXTVALID) == 0)
+			ahc->features &= ~AHC_ULTRA;
+	}
+
+	/* See if we have a SEEPROM and perform auto-term */
+	check_extport(ahc, &sxfrctl1);
+
+	/*
+	 * Take the LED out of diagnostic mode
+	 */
+	sblkctl = ahc_inb(ahc, SBLKCTL);
+	ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON)));
+
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		ahc_outb(ahc, DFF_THRSH, RD_DFTHRSH_MAX|WR_DFTHRSH_MAX);
+	} else {
+		ahc_outb(ahc, DSPCISTATUS, DFTHRSH_100);
+	}
+
+	if (ahc->flags & AHC_USEDEFAULTS) {
+		/*
+		 * PCI Adapter default setup
+		 * Should only be used if the adapter does not have
+		 * a SEEPROM.
+		 */
+		/* See if someone else set us up already */
+		if ((ahc->flags & AHC_NO_BIOS_INIT) == 0
+		 && scsiseq != 0) {
+			printf("%s: Using left over BIOS settings\n",
+				ahc_name(ahc));
+			ahc->flags &= ~AHC_USEDEFAULTS;
+			ahc->flags |= AHC_BIOS_ENABLED;
+		} else {
+			/*
+			 * Assume only one connector and always turn
+			 * on termination.
+			 */
+ 			our_id = 0x07;
+			sxfrctl1 = STPWEN;
+		}
+		ahc_outb(ahc, SCSICONF, our_id|ENSPCHK|RESET_SCSI);
+
+		ahc->our_id = our_id;
+	}
+
+	/*
+	 * Take a look to see if we have external SRAM.
+	 * We currently do not attempt to use SRAM that is
+	 * shared among multiple controllers.
+	 */
+	ahc_probe_ext_scbram(ahc);
+
+	/*
+	 * Record our termination setting for the
+	 * generic initialization routine.
+	 */
+	if ((sxfrctl1 & STPWEN) != 0)
+		ahc->flags |= AHC_TERM_ENB_A;
+
+	/*
+	 * Save chip register configuration data for chip resets
+	 * that occur during runtime and resume events.
+	 */
+	ahc->bus_softc.pci_softc.devconfig =
+	    ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4);
+	ahc->bus_softc.pci_softc.command =
+	    ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1);
+	ahc->bus_softc.pci_softc.csize_lattime =
+	    ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1);
+	ahc->bus_softc.pci_softc.dscommand0 = ahc_inb(ahc, DSCOMMAND0);
+	ahc->bus_softc.pci_softc.dspcistatus = ahc_inb(ahc, DSPCISTATUS);
+	if ((ahc->features & AHC_DT) != 0) {
+		u_int sfunct;
+
+		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
+		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
+		ahc->bus_softc.pci_softc.optionmode = ahc_inb(ahc, OPTIONMODE);
+		ahc->bus_softc.pci_softc.targcrccnt = ahc_inw(ahc, TARGCRCCNT);
+		ahc_outb(ahc, SFUNCT, sfunct);
+		ahc->bus_softc.pci_softc.crccontrol1 =
+		    ahc_inb(ahc, CRCCONTROL1);
+	}
+	if ((ahc->features & AHC_MULTI_FUNC) != 0)
+		ahc->bus_softc.pci_softc.scbbaddr = ahc_inb(ahc, SCBBADDR);
+
+	if ((ahc->features & AHC_ULTRA2) != 0)
+		ahc->bus_softc.pci_softc.dff_thrsh = ahc_inb(ahc, DFF_THRSH);
+
+	/* Core initialization */
+	error = ahc_init(ahc);
+	if (error != 0)
+		return (error);
+
+	/*
+	 * Allow interrupts now that we are completely setup.
+	 */
+	error = ahc_pci_map_int(ahc);
+	if (error != 0)
+		return (error);
+
+	ahc_list_lock(&l);
+	/*
+	 * Link this softc in with all other ahc instances.
+	 */
+	ahc_softc_insert(ahc);
+	ahc_list_unlock(&l);
+	return (0);
+}
+
+/*
+ * Test for the presence of external SRAM in an
+ * "unshared" configuration.
+ */
+static int
+ahc_ext_scbram_present(struct ahc_softc *ahc)
+{
+	u_int chip;
+	int ramps;
+	int single_user;
+	uint32_t devconfig;
+
+	chip = ahc->chip & AHC_CHIPID_MASK;
+	devconfig = ahc_pci_read_config(ahc->dev_softc,
+					DEVCONFIG, /*bytes*/4);
+	single_user = (devconfig & MPORTMODE) != 0;
+
+	if ((ahc->features & AHC_ULTRA2) != 0)
+		ramps = (ahc_inb(ahc, DSCOMMAND0) & RAMPS) != 0;
+	else if (chip == AHC_AIC7895 || chip == AHC_AIC7895C)
+		/*
+		 * External SCBRAM arbitration is flakey
+		 * on these chips.  Unfortunately this means
+		 * we don't use the extra SCB ram space on the
+		 * 3940AUW.
+		 */
+		ramps = 0;
+	else if (chip >= AHC_AIC7870)
+		ramps = (devconfig & RAMPSM) != 0;
+	else
+		ramps = 0;
+
+	if (ramps && single_user)
+		return (1);
+	return (0);
+}
+
+/*
+ * Enable external scbram.
+ */
+static void
+ahc_scbram_config(struct ahc_softc *ahc, int enable, int pcheck,
+		  int fast, int large)
+{
+	uint32_t devconfig;
+
+	if (ahc->features & AHC_MULTI_FUNC) {
+		/*
+		 * Set the SCB Base addr (highest address bit)
+		 * depending on which channel we are.
+		 */
+		ahc_outb(ahc, SCBBADDR, ahc_get_pci_function(ahc->dev_softc));
+	}
+
+	ahc->flags &= ~AHC_LSCBS_ENABLED;
+	if (large)
+		ahc->flags |= AHC_LSCBS_ENABLED;
+	devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4);
+	if ((ahc->features & AHC_ULTRA2) != 0) {
+		u_int dscommand0;
+
+		dscommand0 = ahc_inb(ahc, DSCOMMAND0);
+		if (enable)
+			dscommand0 &= ~INTSCBRAMSEL;
+		else
+			dscommand0 |= INTSCBRAMSEL;
+		if (large)
+			dscommand0 &= ~USCBSIZE32;
+		else
+			dscommand0 |= USCBSIZE32;
+		ahc_outb(ahc, DSCOMMAND0, dscommand0);
+	} else {
+		if (fast)
+			devconfig &= ~EXTSCBTIME;
+		else
+			devconfig |= EXTSCBTIME;
+		if (enable)
+			devconfig &= ~SCBRAMSEL;
+		else
+			devconfig |= SCBRAMSEL;
+		if (large)
+			devconfig &= ~SCBSIZE32;
+		else
+			devconfig |= SCBSIZE32;
+	}
+	if (pcheck)
+		devconfig |= EXTSCBPEN;
+	else
+		devconfig &= ~EXTSCBPEN;
+
+	ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4);
+}
+
+/*
+ * Take a look to see if we have external SRAM.
+ * We currently do not attempt to use SRAM that is
+ * shared among multiple controllers.
+ */
+static void
+ahc_probe_ext_scbram(struct ahc_softc *ahc)
+{
+	int num_scbs;
+	int test_num_scbs;
+	int enable;
+	int pcheck;
+	int fast;
+	int large;
+
+	enable = FALSE;
+	pcheck = FALSE;
+	fast = FALSE;
+	large = FALSE;
+	num_scbs = 0;
+	
+	if (ahc_ext_scbram_present(ahc) == 0)
+		goto done;
+
+	/*
+	 * Probe for the best parameters to use.
+	 */
+	ahc_scbram_config(ahc, /*enable*/TRUE, pcheck, fast, large);
+	num_scbs = ahc_probe_scbs(ahc);
+	if (num_scbs == 0) {
+		/* The SRAM wasn't really present. */
+		goto done;
+	}
+	enable = TRUE;
+
+	/*
+	 * Clear any outstanding parity error
+	 * and ensure that parity error reporting
+	 * is enabled.
+	 */
+	ahc_outb(ahc, SEQCTL, 0);
+	ahc_outb(ahc, CLRINT, CLRPARERR);
+	ahc_outb(ahc, CLRINT, CLRBRKADRINT);
+
+	/* Now see if we can do parity */
+	ahc_scbram_config(ahc, enable, /*pcheck*/TRUE, fast, large);
+	num_scbs = ahc_probe_scbs(ahc);
+	if ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0
+	 || (ahc_inb(ahc, ERROR) & MPARERR) == 0)
+		pcheck = TRUE;
+
+	/* Clear any resulting parity error */
+	ahc_outb(ahc, CLRINT, CLRPARERR);
+	ahc_outb(ahc, CLRINT, CLRBRKADRINT);
+
+	/* Now see if we can do fast timing */
+	ahc_scbram_config(ahc, enable, pcheck, /*fast*/TRUE, large);
+	test_num_scbs = ahc_probe_scbs(ahc);
+	if (test_num_scbs == num_scbs
+	 && ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0
+	  || (ahc_inb(ahc, ERROR) & MPARERR) == 0))
+		fast = TRUE;
+
+	/*
+	 * See if we can use large SCBs and still maintain
+	 * the same overall count of SCBs.
+	 */
+	if ((ahc->features & AHC_LARGE_SCBS) != 0) {
+		ahc_scbram_config(ahc, enable, pcheck, fast, /*large*/TRUE);
+		test_num_scbs = ahc_probe_scbs(ahc);
+		if (test_num_scbs >= num_scbs) {
+			large = TRUE;
+			num_scbs = test_num_scbs;
+	 		if (num_scbs >= 64) {
+				/*
+				 * We have enough space to move the
+				 * "busy targets table" into SCB space
+				 * and make it qualify all the way to the
+				 * lun level.
+				 */
+				ahc->flags |= AHC_SCB_BTT;
+			}
+		}
+	}
+done:
+	/*
+	 * Disable parity error reporting until we
+	 * can load instruction ram.
+	 */
+	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS);
+	/* Clear any latched parity error */
+	ahc_outb(ahc, CLRINT, CLRPARERR);
+	ahc_outb(ahc, CLRINT, CLRBRKADRINT);
+	if (bootverbose && enable) {
+		printf("%s: External SRAM, %s access%s, %dbytes/SCB\n",
+		       ahc_name(ahc), fast ? "fast" : "slow", 
+		       pcheck ? ", parity checking enabled" : "",
+		       large ? 64 : 32);
+	}
+	ahc_scbram_config(ahc, enable, pcheck, fast, large);
+}
+
+/*
+ * Perform some simple tests that should catch situations where
+ * our registers are invalidly mapped.
+ */
+int
+ahc_pci_test_register_access(struct ahc_softc *ahc)
+{
+	int	 error;
+	u_int	 status1;
+	uint32_t cmd;
+	uint8_t	 hcntrl;
+
+	error = EIO;
+
+	/*
+	 * Enable PCI error interrupt status, but suppress NMIs
+	 * generated by SERR raised due to target aborts.
+	 */
+	cmd = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2);
+	ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
+			     cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2);
+
+	/*
+	 * First a simple test to see if any
+	 * registers can be read.  Reading
+	 * HCNTRL has no side effects and has
+	 * at least one bit that is guaranteed to
+	 * be zero so it is a good register to
+	 * use for this test.
+	 */
+	hcntrl = ahc_inb(ahc, HCNTRL);
+	if (hcntrl == 0xFF)
+		goto fail;
+
+	/*
+	 * Next create a situation where write combining
+	 * or read prefetching could be initiated by the
+	 * CPU or host bridge.  Our device does not support
+	 * either, so look for data corruption and/or flagged
+	 * PCI errors.  First pause without causing another
+	 * chip reset.
+	 */
+	hcntrl &= ~CHIPRST;
+	ahc_outb(ahc, HCNTRL, hcntrl|PAUSE);
+	while (ahc_is_paused(ahc) == 0)
+		;
+
+	/* Clear any PCI errors that occurred before our driver attached. */
+	status1 = ahc_pci_read_config(ahc->dev_softc,
+				      PCIR_STATUS + 1, /*bytes*/1);
+	ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1,
+			     status1, /*bytes*/1);
+	ahc_outb(ahc, CLRINT, CLRPARERR);
+
+	ahc_outb(ahc, SEQCTL, PERRORDIS);
+	ahc_outb(ahc, SCBPTR, 0);
+	ahc_outl(ahc, SCB_BASE, 0x5aa555aa);
+	if (ahc_inl(ahc, SCB_BASE) != 0x5aa555aa)
+		goto fail;
+
+	status1 = ahc_pci_read_config(ahc->dev_softc,
+				      PCIR_STATUS + 1, /*bytes*/1);
+	if ((status1 & STA) != 0)
+		goto fail;
+
+	error = 0;
+
+fail:
+	/* Silently clear any latched errors. */
+	status1 = ahc_pci_read_config(ahc->dev_softc,
+				      PCIR_STATUS + 1, /*bytes*/1);
+	ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1,
+			     status1, /*bytes*/1);
+	ahc_outb(ahc, CLRINT, CLRPARERR);
+	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS);
+	ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2);
+	return (error);
+}
+
+/*
+ * Check the external port logic for a serial EEPROM
+ * and termination/cable detection controls.
+ */
+static void
+check_extport(struct ahc_softc *ahc, u_int *sxfrctl1)
+{
+	struct	seeprom_descriptor sd;
+	struct	seeprom_config *sc;
+	int	have_seeprom;
+	int	have_autoterm;
+
+	sd.sd_ahc = ahc;
+	sd.sd_control_offset = SEECTL;		
+	sd.sd_status_offset = SEECTL;		
+	sd.sd_dataout_offset = SEECTL;		
+	sc = ahc->seep_config;
+
+	/*
+	 * For some multi-channel devices, the c46 is simply too
+	 * small to work.  For the other controller types, we can
+	 * get our information from either SEEPROM type.  Set the
+	 * type to start our probe with accordingly.
+	 */
+	if (ahc->flags & AHC_LARGE_SEEPROM)
+		sd.sd_chip = C56_66;
+	else
+		sd.sd_chip = C46;
+
+	sd.sd_MS = SEEMS;
+	sd.sd_RDY = SEERDY;
+	sd.sd_CS = SEECS;
+	sd.sd_CK = SEECK;
+	sd.sd_DO = SEEDO;
+	sd.sd_DI = SEEDI;
+
+	have_seeprom = ahc_acquire_seeprom(ahc, &sd);
+	if (have_seeprom) {
+
+		if (bootverbose) 
+			printf("%s: Reading SEEPROM...", ahc_name(ahc));
+
+		for (;;) {
+			u_int start_addr;
+
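+			/*
+			 * Each channel's configuration occupies 32
+			 * 16-bit words of SEEPROM space; channel B
+			 * begins at word 32, etc.
+			 */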
+			start_addr = 32 * (ahc->channel - 'A');
+
+			have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc,
+							start_addr,
+							sizeof(*sc)/2);
+
+			if (have_seeprom)
+				have_seeprom = ahc_verify_cksum(sc);
+
+			if (have_seeprom != 0 || sd.sd_chip == C56_66) {
+				if (bootverbose) {
+					if (have_seeprom == 0)
+						printf ("checksum error\n");
+					else
+						printf ("done.\n");
+				}
+				break;
+			}
+			sd.sd_chip = C56_66;
+		}
+		ahc_release_seeprom(&sd);
+	}
+
+	if (!have_seeprom) {
+		/*
+		 * Pull scratch ram settings and treat them as
+		 * the contents of a SEEPROM if the 'ADPT'
+		 * signature is found in SCB2.
+		 * We manually compose the data as 16bit values
+		 * to avoid endian issues.
+		 */
+		ahc_outb(ahc, SCBPTR, 2);
+		if (ahc_inb(ahc, SCB_BASE) == 'A'
+		 && ahc_inb(ahc, SCB_BASE + 1) == 'D'
+		 && ahc_inb(ahc, SCB_BASE + 2) == 'P'
+		 && ahc_inb(ahc, SCB_BASE + 3) == 'T') {
+			uint16_t *sc_data;
+			int	  i;
+
+			sc_data = (uint16_t *)sc;
+			for (i = 0; i < 32; i++, sc_data++) {
+				int	j;
+
+				j = i * 2;
+				*sc_data = ahc_inb(ahc, SRAM_BASE + j)
+					 | ahc_inb(ahc, SRAM_BASE + j + 1) << 8;
+			}
+			have_seeprom = ahc_verify_cksum(sc);
+			if (have_seeprom)
+				ahc->flags |= AHC_SCB_CONFIG_USED;
+		}
+		/*
+		 * Clear any SCB parity errors in case this data and
+		 * its associated parity was not initialized by the BIOS
+		 */
+		ahc_outb(ahc, CLRINT, CLRPARERR);
+		ahc_outb(ahc, CLRINT, CLRBRKADRINT);
+	}
+
+	if (!have_seeprom) {
+		if (bootverbose)
+			printf("%s: No SEEPROM available.\n", ahc_name(ahc));
+		ahc->flags |= AHC_USEDEFAULTS;
+		free(ahc->seep_config, M_DEVBUF);
+		ahc->seep_config = NULL;
+		sc = NULL;
+	} else {
+		ahc_parse_pci_eeprom(ahc, sc);
+	}
+
+	/*
+	 * Cards that have the external logic necessary to talk to
+	 * a SEEPROM are almost certain to have the remaining logic
+	 * necessary for auto-termination control.  This assumption
+	 * hasn't failed yet...
+	 */
+	have_autoterm = have_seeprom;
+
+	/*
+	 * Some low-cost chips have SEEPROM and auto-term control built
+	 * in, instead of using a GAL.  They can tell us directly
+	 * if the termination logic is enabled.
+	 */
+	if ((ahc->features & AHC_SPIOCAP) != 0) {
+		if ((ahc_inb(ahc, SPIOCAP) & SSPIOCPS) == 0)
+			have_autoterm = FALSE;
+	}
+
+	if (have_autoterm) {
+		ahc->flags |= AHC_HAS_TERM_LOGIC;
+		ahc_acquire_seeprom(ahc, &sd);
+		configure_termination(ahc, &sd, sc->adapter_control, sxfrctl1);
+		ahc_release_seeprom(&sd);
+	} else if (have_seeprom) {
+		*sxfrctl1 &= ~STPWEN;
+		if ((sc->adapter_control & CFSTERM) != 0)
+			*sxfrctl1 |= STPWEN;
+		if (bootverbose)
+			printf("%s: Low byte termination %sabled\n",
+			       ahc_name(ahc),
+			       (*sxfrctl1 & STPWEN) ? "en" : "dis");
+	}
+}
+
+static void
+ahc_parse_pci_eeprom(struct ahc_softc *ahc, struct seeprom_config *sc)
+{
+	/*
+	 * Put the data we've collected down into SRAM
+	 * where ahc_init will find it.
+	 */
+	int	 i;
+	int	 max_targ = sc->max_targets & CFMAXTARG;
+	u_int	 scsi_conf;
+	uint16_t discenable;
+	uint16_t ultraenb;
+
+	discenable = 0;
+	ultraenb = 0;
+	if ((sc->adapter_control & CFULTRAEN) != 0) {
+		/*
+		 * Determine if this adapter has a "newstyle"
+		 * SEEPROM format.
+		 */
+		for (i = 0; i < max_targ; i++) {
+			if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0) {
+				ahc->flags |= AHC_NEWEEPROM_FMT;
+				break;
+			}
+		}
+	}
+
+	for (i = 0; i < max_targ; i++) {
+		u_int     scsirate;
+		uint16_t target_mask;
+
+		target_mask = 0x01 << i;
+		if (sc->device_flags[i] & CFDISC)
+			discenable |= target_mask;
+		if ((ahc->flags & AHC_NEWEEPROM_FMT) != 0) {
+			if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0)
+				ultraenb |= target_mask;
+		} else if ((sc->adapter_control & CFULTRAEN) != 0) {
+			ultraenb |= target_mask;
+		}
+		if ((sc->device_flags[i] & CFXFER) == 0x04
+		 && (ultraenb & target_mask) != 0) {
+			/* Treat 10MHz as a non-ultra speed */
+			sc->device_flags[i] &= ~CFXFER;
+		 	ultraenb &= ~target_mask;
+		}
+		if ((ahc->features & AHC_ULTRA2) != 0) {
+			u_int offset;
+
+			if (sc->device_flags[i] & CFSYNCH)
+				offset = MAX_OFFSET_ULTRA2;
+			else 
+				offset = 0;
+			ahc_outb(ahc, TARG_OFFSET + i, offset);
+
+			/*
+			 * The ultra enable bits contain the
+			 * high bit of the ultra2 sync rate
+			 * field.
+			 */
+			scsirate = (sc->device_flags[i] & CFXFER)
+				 | ((ultraenb & target_mask) ? 0x8 : 0x0);
+			if (sc->device_flags[i] & CFWIDEB)
+				scsirate |= WIDEXFER;
+		} else {
+			scsirate = (sc->device_flags[i] & CFXFER) << 4;
+			if (sc->device_flags[i] & CFSYNCH)
+				scsirate |= SOFS;
+			if (sc->device_flags[i] & CFWIDEB)
+				scsirate |= WIDEXFER;
+		}
+		ahc_outb(ahc, TARG_SCSIRATE + i, scsirate);
+	}
+	ahc->our_id = sc->brtime_id & CFSCSIID;
+
+	scsi_conf = (ahc->our_id & 0x7);
+	if (sc->adapter_control & CFSPARITY)
+		scsi_conf |= ENSPCHK;
+	if (sc->adapter_control & CFRESETB)
+		scsi_conf |= RESET_SCSI;
+
+	ahc->flags |= (sc->adapter_control & CFBOOTCHAN) >> CFBOOTCHANSHIFT;
+
+	if (sc->bios_control & CFEXTEND)
+		ahc->flags |= AHC_EXTENDED_TRANS_A;
+
+	if (sc->bios_control & CFBIOSEN)
+		ahc->flags |= AHC_BIOS_ENABLED;
+	if (ahc->features & AHC_ULTRA
+	 && (ahc->flags & AHC_NEWEEPROM_FMT) == 0) {
+		/* Should we enable Ultra mode? */
+		if (!(sc->adapter_control & CFULTRAEN))
+			/* Treat us as a non-ultra card */
+			ultraenb = 0;
+	}
+
+	if (sc->signature == CFSIGNATURE
+	 || sc->signature == CFSIGNATURE2) {
+		uint32_t devconfig;
+
+		/* Honor the STPWLEVEL settings */
+		devconfig = ahc_pci_read_config(ahc->dev_softc,
+						DEVCONFIG, /*bytes*/4);
+		devconfig &= ~STPWLEVEL;
+		if ((sc->bios_control & CFSTPWLEVEL) != 0)
+			devconfig |= STPWLEVEL;
+		ahc_pci_write_config(ahc->dev_softc, DEVCONFIG,
+				     devconfig, /*bytes*/4);
+	}
+	/* Set SCSICONF info */
+	ahc_outb(ahc, SCSICONF, scsi_conf);
+	ahc_outb(ahc, DISC_DSB, ~(discenable & 0xff));
+	ahc_outb(ahc, DISC_DSB + 1, ~((discenable >> 8) & 0xff));
+	ahc_outb(ahc, ULTRA_ENB, ultraenb & 0xff);
+	ahc_outb(ahc, ULTRA_ENB + 1, (ultraenb >> 8) & 0xff);
+}
+
+static void
+configure_termination(struct ahc_softc *ahc,
+		      struct seeprom_descriptor *sd,
+		      u_int adapter_control,
+		      u_int *sxfrctl1)
+{
+	uint8_t brddat;
+	
+	brddat = 0;
+
+	/*
+	 * Update the settings in sxfrctl1 to match the
+	 * termination settings 
+	 */
+	*sxfrctl1 = 0;
+	
+	/*
+	 * SEECS must be on for the GALs to latch
+	 * the data properly.  Be sure to leave MS
+	 * on or we will release the SEEPROM.
+	 */
+	SEEPROM_OUTB(sd, sd->sd_MS | sd->sd_CS);
+	if ((adapter_control & CFAUTOTERM) != 0
+	 || (ahc->features & AHC_NEW_TERMCTL) != 0) {
+		int internal50_present;
+		int internal68_present;
+		int externalcable_present;
+		int eeprom_present;
+		int enableSEC_low;
+		int enableSEC_high;
+		int enablePRI_low;
+		int enablePRI_high;
+		int sum;
+
+		enableSEC_low = 0;
+		enableSEC_high = 0;
+		enablePRI_low = 0;
+		enablePRI_high = 0;
+		if ((ahc->features & AHC_NEW_TERMCTL) != 0) {
+			ahc_new_term_detect(ahc, &enableSEC_low,
+					    &enableSEC_high,
+					    &enablePRI_low,
+					    &enablePRI_high,
+					    &eeprom_present);
+			if ((adapter_control & CFSEAUTOTERM) == 0) {
+				if (bootverbose)
+					printf("%s: Manual SE Termination\n",
+					       ahc_name(ahc));
+				enableSEC_low = (adapter_control & CFSELOWTERM);
+				enableSEC_high =
+				    (adapter_control & CFSEHIGHTERM);
+			}
+			if ((adapter_control & CFAUTOTERM) == 0) {
+				if (bootverbose)
+					printf("%s: Manual LVD Termination\n",
+					       ahc_name(ahc));
+				enablePRI_low = (adapter_control & CFSTERM);
+				enablePRI_high = (adapter_control & CFWSTERM);
+			}
+			/* Make the table calculations below happy */
+			internal50_present = 0;
+			internal68_present = 1;
+			externalcable_present = 1;
+		} else if ((ahc->features & AHC_SPIOCAP) != 0) {
+			aic785X_cable_detect(ahc, &internal50_present,
+					     &externalcable_present,
+					     &eeprom_present);
+			/* Can never support a wide connector. */
+			internal68_present = 0;
+		} else {
+			aic787X_cable_detect(ahc, &internal50_present,
+					     &internal68_present,
+					     &externalcable_present,
+					     &eeprom_present);
+		}
+
+		if ((ahc->features & AHC_WIDE) == 0)
+			internal68_present = 0;
+
+		if (bootverbose
+		 && (ahc->features & AHC_ULTRA2) == 0) {
+			printf("%s: internal 50 cable %s present",
+			       ahc_name(ahc),
+			       internal50_present ? "is":"not");
+
+			if ((ahc->features & AHC_WIDE) != 0)
+				printf(", internal 68 cable %s present",
+				       internal68_present ? "is":"not");
+			printf("\n%s: external cable %s present\n",
+			       ahc_name(ahc),
+			       externalcable_present ? "is":"not");
+		}
+		if (bootverbose)
+			printf("%s: BIOS eeprom %s present\n",
+			       ahc_name(ahc), eeprom_present ? "is" : "not");
+
+		if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) {
+			/*
+			 * The 50 pin connector is a separate bus,
+			 * so force it to always be terminated.
+			 * In the future, perform current sensing
+			 * to determine if we are in the middle of
+			 * a properly terminated bus.
+			 */
+			internal50_present = 0;
+		}
+
+		/*
+		 * Now set the termination based on what
+		 * we found.
+		 * Flash Enable = BRDDAT7
+		 * Secondary High Term Enable = BRDDAT6
+		 * Secondary Low Term Enable = BRDDAT5 (7890)
+		 * Primary High Term Enable = BRDDAT4 (7890)
+		 */
+		if ((ahc->features & AHC_ULTRA2) == 0
+		 && (internal50_present != 0)
+		 && (internal68_present != 0)
+		 && (externalcable_present != 0)) {
+			printf("%s: Illegal cable configuration!!. "
+			       "Only two connectors on the "
+			       "adapter may be used at a "
+			       "time!\n", ahc_name(ahc));
+
+			/*
+			 * Pretend there are no cables in the hope
+			 * that having all of the termination on
+			 * gives us a more stable bus.
+			 */
+		 	internal50_present = 0;
+			internal68_present = 0;
+			externalcable_present = 0;
+		}
+
+		if ((ahc->features & AHC_WIDE) != 0
+		 && ((externalcable_present == 0)
+		  || (internal68_present == 0)
+		  || (enableSEC_high != 0))) {
+			brddat |= BRDDAT6;
+			if (bootverbose) {
+				if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0)
+					printf("%s: 68 pin termination "
+					       "Enabled\n", ahc_name(ahc));
+				else
+					printf("%s: %sHigh byte termination "
+					       "Enabled\n", ahc_name(ahc),
+					       enableSEC_high ? "Secondary "
+							      : "");
+			}
+		}
+
+		sum = internal50_present + internal68_present
+		    + externalcable_present;
+		if (sum < 2 || (enableSEC_low != 0)) {
+			if ((ahc->features & AHC_ULTRA2) != 0)
+				brddat |= BRDDAT5;
+			else
+				*sxfrctl1 |= STPWEN;
+			if (bootverbose) {
+				if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0)
+					printf("%s: 50 pin termination "
+					       "Enabled\n", ahc_name(ahc));
+				else
+					printf("%s: %sLow byte termination "
+					       "Enabled\n", ahc_name(ahc),
+					       enableSEC_low ? "Secondary "
+							     : "");
+			}
+		}
+
+		if (enablePRI_low != 0) {
+			*sxfrctl1 |= STPWEN;
+			if (bootverbose)
+				printf("%s: Primary Low Byte termination "
+				       "Enabled\n", ahc_name(ahc));
+		}
+
+		/*
+		 * Setup STPWEN before setting up the rest of
+		 * the termination per the tech note on the U160 cards.
+		 */
+		ahc_outb(ahc, SXFRCTL1, *sxfrctl1);
+
+		if (enablePRI_high != 0) {
+			brddat |= BRDDAT4;
+			if (bootverbose)
+				printf("%s: Primary High Byte "
+				       "termination Enabled\n",
+				       ahc_name(ahc));
+		}
+		
+		write_brdctl(ahc, brddat);
+
+	} else {
+		if ((adapter_control & CFSTERM) != 0) {
+			*sxfrctl1 |= STPWEN;
+
+			if (bootverbose)
+				printf("%s: %sLow byte termination Enabled\n",
+				       ahc_name(ahc),
+				       (ahc->features & AHC_ULTRA2) ? "Primary "
+								    : "");
+		}
+
+		if ((adapter_control & CFWSTERM) != 0
+		 && (ahc->features & AHC_WIDE) != 0) {
+			brddat |= BRDDAT6;
+			if (bootverbose)
+				printf("%s: %sHigh byte termination Enabled\n",
+				       ahc_name(ahc),
+				       (ahc->features & AHC_ULTRA2)
+				     ? "Secondary " : "");
+		}
+
+		/*
+		 * Setup STPWEN before setting up the rest of
+		 * the termination per the tech note on the U160 cards.
+		 */
+		ahc_outb(ahc, SXFRCTL1, *sxfrctl1);
+
+		if ((ahc->features & AHC_WIDE) != 0)
+			write_brdctl(ahc, brddat);
+	}
+	SEEPROM_OUTB(sd, sd->sd_MS); /* Clear CS */
+}
+
+static void
+ahc_new_term_detect(struct ahc_softc *ahc, int *enableSEC_low,
+		    int *enableSEC_high, int *enablePRI_low,
+		    int *enablePRI_high, int *eeprom_present)
+{
+	uint8_t brdctl;
+
+	/*
+	 * BRDDAT7 = Eeprom
+	 * BRDDAT6 = Enable Secondary High Byte termination
+	 * BRDDAT5 = Enable Secondary Low Byte termination
+	 * BRDDAT4 = Enable Primary high byte termination
+	 * BRDDAT3 = Enable Primary low byte termination
+	 */
+	brdctl = read_brdctl(ahc);
+	*eeprom_present = brdctl & BRDDAT7;
+	*enableSEC_high = (brdctl & BRDDAT6);
+	*enableSEC_low = (brdctl & BRDDAT5);
+	*enablePRI_high = (brdctl & BRDDAT4);
+	*enablePRI_low = (brdctl & BRDDAT3);
+}
+
+static void
+aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
+		     int *internal68_present, int *externalcable_present,
+		     int *eeprom_present)
+{
+	uint8_t brdctl;
+
+	/*
+	 * First read the status of our cables.
+	 * Set the rom bank to 0 since the
+	 * bank setting serves as a multiplexor
+	 * for the cable detection logic.
+	 * BRDDAT5 controls the bank switch.
+	 */
+	write_brdctl(ahc, 0);
+
+	/*
+	 * Now read the state of the internal
+	 * connectors.  BRDDAT6 is INT50 and
+	 * BRDDAT7 is INT68.
+	 */
+	brdctl = read_brdctl(ahc);
+	*internal50_present = (brdctl & BRDDAT6) ? 0 : 1;
+	*internal68_present = (brdctl & BRDDAT7) ? 0 : 1;
+
+	/*
+	 * Set the rom bank to 1 and determine
+	 * the other signals.
+	 */
+	write_brdctl(ahc, BRDDAT5);
+
+	/*
+	 * Now read the state of the external
+	 * connectors.  BRDDAT6 is EXT68 and
+	 * BRDDAT7 is EPROMPS.
+	 */
+	brdctl = read_brdctl(ahc);
+	*externalcable_present = (brdctl & BRDDAT6) ? 0 : 1;
+	*eeprom_present = (brdctl & BRDDAT7) ? 1 : 0;
+}
+
+static void
+aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present,
+		     int *externalcable_present, int *eeprom_present)
+{
+	uint8_t brdctl;
+	uint8_t spiocap;
+
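+	/*
+	 * Switch the serial port I/O capability logic to external
+	 * board control mode (EXT_BRDCTL); the cable and EEPROM
+	 * sense bits are then read back through BRDCTL below.
+	 */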
+	spiocap = ahc_inb(ahc, SPIOCAP);
+	spiocap &= ~SOFTCMDEN;
+	spiocap |= EXT_BRDCTL;
+	ahc_outb(ahc, SPIOCAP, spiocap);
+	ahc_outb(ahc, BRDCTL, BRDRW|BRDCS);
+	ahc_flush_device_writes(ahc);
+	ahc_delay(500);
+	ahc_outb(ahc, BRDCTL, 0);
+	ahc_flush_device_writes(ahc);
+	ahc_delay(500);
+	brdctl = ahc_inb(ahc, BRDCTL);
+	*internal50_present = (brdctl & BRDDAT5) ? 0 : 1;
+	*externalcable_present = (brdctl & BRDDAT6) ? 0 : 1;
+	*eeprom_present = (ahc_inb(ahc, SPIOCAP) & EEPROM) ? 1 : 0;
+}
+	
+int
+ahc_acquire_seeprom(struct ahc_softc *ahc, struct seeprom_descriptor *sd)
+{
+	int wait;
+
+	if ((ahc->features & AHC_SPIOCAP) != 0
+	 && (ahc_inb(ahc, SPIOCAP) & SEEPROM) == 0)
+		return (0);
+
+	/*
+	 * Request access to the memory port.  When access is
+	 * granted, SEERDY will go high.  We use a 1 second
+	 * timeout, which should be about 1 second more than
+	 * is needed.  Reason: after the chip reset, there
+	 * should be no contention.
+	 */
+	SEEPROM_OUTB(sd, sd->sd_MS);
+	wait = 1000;  /* 1 second timeout in msec */
+	while (--wait && ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0)) {
+		ahc_delay(1000);  /* delay 1 msec */
+	}
+	if ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0) {
+		SEEPROM_OUTB(sd, 0); 
+		return (0);
+	}
+	return(1);
+}
+
+void
+ahc_release_seeprom(struct seeprom_descriptor *sd)
+{
+	/* Release access to the memory port and the serial EEPROM. */
+	SEEPROM_OUTB(sd, 0);
+}
+
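+/*
+ * Write a value to the external board control logic.  The chip
+ * select and strobe bits used to latch the data differ between the
+ * aic7895, Ultra2, and older chip families, so they are chosen per
+ * chip before the strobe is pulsed around the data value.
+ */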
+static void
+write_brdctl(struct ahc_softc *ahc, uint8_t value)
+{
+	uint8_t brdctl;
+
+	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) {
+		brdctl = BRDSTB;
+	 	if (ahc->channel == 'B')
+			brdctl |= BRDCS;
+	} else if ((ahc->features & AHC_ULTRA2) != 0) {
+		brdctl = 0;
+	} else {
+		brdctl = BRDSTB|BRDCS;
+	}
+	ahc_outb(ahc, BRDCTL, brdctl);
+	ahc_flush_device_writes(ahc);
+	brdctl |= value;
+	ahc_outb(ahc, BRDCTL, brdctl);
+	ahc_flush_device_writes(ahc);
+	if ((ahc->features & AHC_ULTRA2) != 0)
+		brdctl |= BRDSTB_ULTRA2;
+	else
+		brdctl &= ~BRDSTB;
+	ahc_outb(ahc, BRDCTL, brdctl);
+	ahc_flush_device_writes(ahc);
+	if ((ahc->features & AHC_ULTRA2) != 0)
+		brdctl = 0;
+	else
+		brdctl &= ~BRDCS;
+	ahc_outb(ahc, BRDCTL, brdctl);
+}
+
+static uint8_t
+read_brdctl(struct ahc_softc *ahc)
+{
+	uint8_t brdctl;
+	uint8_t value;
+
+	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) {
+		brdctl = BRDRW;
+	 	if (ahc->channel == 'B')
+			brdctl |= BRDCS;
+	} else if ((ahc->features & AHC_ULTRA2) != 0) {
+		brdctl = BRDRW_ULTRA2;
+	} else {
+		brdctl = BRDRW|BRDCS;
+	}
+	ahc_outb(ahc, BRDCTL, brdctl);
+	ahc_flush_device_writes(ahc);
+	value = ahc_inb(ahc, BRDCTL);
+	ahc_outb(ahc, BRDCTL, 0);
+	return (value);
+}
+
+static void
+ahc_pci_intr(struct ahc_softc *ahc)
+{
+	u_int error;
+	u_int status1;
+
+	error = ahc_inb(ahc, ERROR);
+	if ((error & PCIERRSTAT) == 0)
+		return;
+
+	status1 = ahc_pci_read_config(ahc->dev_softc,
+				      PCIR_STATUS + 1, /*bytes*/1);
+
+	printf("%s: PCI error Interrupt at seqaddr = 0x%x\n",
+	      ahc_name(ahc),
+	      ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
+
+	if (status1 & DPE) {
+		ahc->pci_target_perr_count++;
+		printf("%s: Data Parity Error Detected during address "
+		       "or write data phase\n", ahc_name(ahc));
+	}
+	if (status1 & SSE) {
+		printf("%s: Signal System Error Detected\n", ahc_name(ahc));
+	}
+	if (status1 & RMA) {
+		printf("%s: Received a Master Abort\n", ahc_name(ahc));
+	}
+	if (status1 & RTA) {
+		printf("%s: Received a Target Abort\n", ahc_name(ahc));
+	}
+	if (status1 & STA) {
+		printf("%s: Signaled a Target Abort\n", ahc_name(ahc));
+	}
+	if (status1 & DPR) {
+		printf("%s: Data Parity Error has been reported via PERR#\n",
+		       ahc_name(ahc));
+	}
+
+	/* Clear latched errors. */
+	ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1,
+			     status1, /*bytes*/1);
+
+	if ((status1 & (DPE|SSE|RMA|RTA|STA|DPR)) == 0) {
+		printf("%s: Latched PCIERR interrupt with "
+		       "no status bits set\n", ahc_name(ahc)); 
+	} else {
+		ahc_outb(ahc, CLRINT, CLRPARERR);
+	}
+
+	if (ahc->pci_target_perr_count > AHC_PCI_TARGET_PERR_THRESH) {
+		printf(
+"%s: WARNING WARNING WARNING WARNING\n"
+"%s: Too many PCI parity errors observed as a target.\n"
+"%s: Some device on this bus is generating bad parity.\n"
+"%s: This is an error *observed by*, not *generated by*, this controller.\n"
+"%s: PCI parity error checking has been disabled.\n"
+"%s: WARNING WARNING WARNING WARNING\n",
+		       ahc_name(ahc), ahc_name(ahc), ahc_name(ahc),
+		       ahc_name(ahc), ahc_name(ahc), ahc_name(ahc));
+		ahc->seqctl |= FAILDIS;
+		ahc_outb(ahc, SEQCTL, ahc->seqctl);
+	}
+	ahc_unpause(ahc);
+}
+
+static int
+ahc_pci_chip_init(struct ahc_softc *ahc)
+{
+	ahc_outb(ahc, DSCOMMAND0, ahc->bus_softc.pci_softc.dscommand0);
+	ahc_outb(ahc, DSPCISTATUS, ahc->bus_softc.pci_softc.dspcistatus);
+	if ((ahc->features & AHC_DT) != 0) {
+		u_int sfunct;
+
+		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
+		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
+		ahc_outb(ahc, OPTIONMODE, ahc->bus_softc.pci_softc.optionmode);
+		ahc_outw(ahc, TARGCRCCNT, ahc->bus_softc.pci_softc.targcrccnt);
+		ahc_outb(ahc, SFUNCT, sfunct);
+		ahc_outb(ahc, CRCCONTROL1,
+			 ahc->bus_softc.pci_softc.crccontrol1);
+	}
+	if ((ahc->features & AHC_MULTI_FUNC) != 0)
+		ahc_outb(ahc, SCBBADDR, ahc->bus_softc.pci_softc.scbbaddr);
+
+	if ((ahc->features & AHC_ULTRA2) != 0)
+		ahc_outb(ahc, DFF_THRSH, ahc->bus_softc.pci_softc.dff_thrsh);
+
+	return (ahc_chip_init(ahc));
+}
+
+static int
+ahc_pci_suspend(struct ahc_softc *ahc)
+{
+	return (ahc_suspend(ahc));
+}
+
+static int
+ahc_pci_resume(struct ahc_softc *ahc)
+{
+
+	pci_set_power_state(ahc->dev_softc, AHC_POWER_STATE_D0);
+
+	/*
+	 * We assume that the OS has restored our register
+	 * mappings, etc.  Just update the config space registers
+	 * that the OS doesn't know about and rely on our chip
+	 * reset handler to handle the rest.
+	 */
+	ahc_pci_write_config(ahc->dev_softc, DEVCONFIG,
+			     ahc->bus_softc.pci_softc.devconfig, /*bytes*/4);
+	ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
+			     ahc->bus_softc.pci_softc.command, /*bytes*/1);
+	ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
+			     ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1);
+	if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) {
+		struct	seeprom_descriptor sd;
+		u_int	sxfrctl1;
+
+		sd.sd_ahc = ahc;
+		sd.sd_control_offset = SEECTL;		
+		sd.sd_status_offset = SEECTL;		
+		sd.sd_dataout_offset = SEECTL;		
+
+		ahc_acquire_seeprom(ahc, &sd);
+		configure_termination(ahc, &sd,
+				      ahc->seep_config->adapter_control,
+				      &sxfrctl1);
+		ahc_release_seeprom(&sd);
+	}
+	return (ahc_resume(ahc));
+}
+
+static int
+ahc_aic785X_setup(struct ahc_softc *ahc)
+{
+	ahc_dev_softc_t pci;
+	uint8_t rev;
+
+	pci = ahc->dev_softc;
+	ahc->channel = 'A';
+	ahc->chip = AHC_AIC7850;
+	ahc->features = AHC_AIC7850_FE;
+	ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
+	rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
+	if (rev >= 1)
+		ahc->bugs |= AHC_PCI_2_1_RETRY_BUG;
+	ahc->instruction_ram_size = 512;
+	return (0);
+}
+
+static int
+ahc_aic7860_setup(struct ahc_softc *ahc)
+{
+	ahc_dev_softc_t pci;
+	uint8_t rev;
+
+	pci = ahc->dev_softc;
+	ahc->channel = 'A';
+	ahc->chip = AHC_AIC7860;
+	ahc->features = AHC_AIC7860_FE;
+	ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
+	rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
+	if (rev >= 1)
+		ahc->bugs |= AHC_PCI_2_1_RETRY_BUG;
+	ahc->instruction_ram_size = 512;
+	return (0);
+}
+
+static int
+ahc_apa1480_setup(struct ahc_softc *ahc)
+{
+	int error;
+
+	error = ahc_aic7860_setup(ahc);
+	if (error != 0)
+		return (error);
+	ahc->features |= AHC_REMOVABLE;
+	return (0);
+}
+
+static int
+ahc_aic7870_setup(struct ahc_softc *ahc)
+{
+
+	ahc->channel = 'A';
+	ahc->chip = AHC_AIC7870;
+	ahc->features = AHC_AIC7870_FE;
+	ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
+	ahc->instruction_ram_size = 512;
+	return (0);
+}
+
+static int
+ahc_aha394X_setup(struct ahc_softc *ahc)
+{
+	int error;
+
+	error = ahc_aic7870_setup(ahc);
+	if (error == 0)
+		error = ahc_aha394XX_setup(ahc);
+	return (error);
+}
+
+static int
+ahc_aha398X_setup(struct ahc_softc *ahc)
+{
+	int error;
+
+	error = ahc_aic7870_setup(ahc);
+	if (error == 0)
+		error = ahc_aha398XX_setup(ahc);
+	return (error);
+}
+
+static int
+ahc_aha494X_setup(struct ahc_softc *ahc)
+{
+	int error;
+
+	error = ahc_aic7870_setup(ahc);
+	if (error == 0)
+		error = ahc_aha494XX_setup(ahc);
+	return (error);
+}
+
+static int
+ahc_aic7880_setup(struct ahc_softc *ahc)
+{
+	ahc_dev_softc_t pci;
+	uint8_t rev;
+
+	pci = ahc->dev_softc;
+	ahc->channel = 'A';
+	ahc->chip = AHC_AIC7880;
+	ahc->features = AHC_AIC7880_FE;
+	ahc->bugs |= AHC_TMODE_WIDEODD_BUG;
+	rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
+	if (rev >= 1) {
+		ahc->bugs |= AHC_PCI_2_1_RETRY_BUG;
+	} else {
+		ahc->bugs |= AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG;
+	}
+	ahc->instruction_ram_size = 512;
+	return (0);
+}
+
+static int
+ahc_aha2940Pro_setup(struct ahc_softc *ahc)
+{
+
+	ahc->flags |= AHC_INT50_SPEEDFLEX;
+	return (ahc_aic7880_setup(ahc));
+}
+
+static int
+ahc_aha394XU_setup(struct ahc_softc *ahc)
+{
+	int error;
+
+	error = ahc_aic7880_setup(ahc);
+	if (error == 0)
+		error = ahc_aha394XX_setup(ahc);
+	return (error);
+}
+
+static int
+ahc_aha398XU_setup(struct ahc_softc *ahc)
+{
+	int error;
+
+	error = ahc_aic7880_setup(ahc);
+	if (error == 0)
+		error = ahc_aha398XX_setup(ahc);
+	return (error);
+}
+
+static int
+ahc_aic7890_setup(struct ahc_softc *ahc)
+{
+	ahc_dev_softc_t pci;
+	uint8_t rev;
+
+	pci = ahc->dev_softc;
+	ahc->channel = 'A';
+	ahc->chip = AHC_AIC7890;
+	ahc->features = AHC_AIC7890_FE;
+	ahc->flags |= AHC_NEWEEPROM_FMT;
+	rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
+	if (rev == 0)
+		ahc->bugs |= AHC_AUTOFLUSH_BUG|AHC_CACHETHEN_BUG;
+	ahc->instruction_ram_size = 768;
+	return (0);
+}
+
+static int
+ahc_aic7892_setup(struct ahc_softc *ahc)
+{
+
+	ahc->channel = 'A';
+	ahc->chip = AHC_AIC7892;
+	ahc->features = AHC_AIC7892_FE;
+	ahc->flags |= AHC_NEWEEPROM_FMT;
+	ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG;
+	ahc->instruction_ram_size = 1024;
+	return (0);
+}
+
+static int
+ahc_aic7895_setup(struct ahc_softc *ahc)
+{
+	ahc_dev_softc_t pci;
+	uint8_t rev;
+
+	pci = ahc->dev_softc;
+	ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A';
+	/*
+	 * The 'C' revision of the aic7895 has a few additional features.
+	 */
+	rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1);
+	if (rev >= 4) {
+		ahc->chip = AHC_AIC7895C;
+		ahc->features = AHC_AIC7895C_FE;
+	} else  {
+		u_int command;
+
+		ahc->chip = AHC_AIC7895;
+		ahc->features = AHC_AIC7895_FE;
+
+		/*
+		 * The BIOS disables the use of MWI transactions
+		 * since it does not have the MWI bug work around
+		 * we have.  Disabling MWI reduces performance, so
+		 * turn it on again.
+		 */
+		command = ahc_pci_read_config(pci, PCIR_COMMAND, /*bytes*/1);
+		command |= PCIM_CMD_MWRICEN;
+		ahc_pci_write_config(pci, PCIR_COMMAND, command, /*bytes*/1);
+		ahc->bugs |= AHC_PCI_MWI_BUG;
+	}
+	/*
+	 * XXX Does CACHETHEN really not work?  What about PCI retry
+	 * on C level chips?  Need to test, but for now, play it safe.
+	 */
+	ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_PCI_2_1_RETRY_BUG
+		  |  AHC_CACHETHEN_BUG;
+
+#if 0
+	uint32_t devconfig;
+
+	/*
+	 * Cachesize must also be zero due to stray DAC
+	 * problem when sitting behind some bridges.
+	 */
+	ahc_pci_write_config(pci, CSIZE_LATTIME, 0, /*bytes*/1);
+	devconfig = ahc_pci_read_config(pci, DEVCONFIG, /*bytes*/1);
+	devconfig |= MRDCEN;
+	ahc_pci_write_config(pci, DEVCONFIG, devconfig, /*bytes*/1);
+#endif
+	ahc->flags |= AHC_NEWEEPROM_FMT;
+	ahc->instruction_ram_size = 512;
+	return (0);
+}
+
+static int
+ahc_aic7896_setup(struct ahc_softc *ahc)
+{
+	ahc_dev_softc_t pci;
+
+	pci = ahc->dev_softc;
+	ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A';
+	ahc->chip = AHC_AIC7896;
+	ahc->features = AHC_AIC7896_FE;
+	ahc->flags |= AHC_NEWEEPROM_FMT;
+	ahc->bugs |= AHC_CACHETHEN_DIS_BUG;
+	ahc->instruction_ram_size = 768;
+	return (0);
+}
+
+static int
+ahc_aic7899_setup(struct ahc_softc *ahc)
+{
+	ahc_dev_softc_t pci;
+
+	pci = ahc->dev_softc;
+	ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A';
+	ahc->chip = AHC_AIC7899;
+	ahc->features = AHC_AIC7899_FE;
+	ahc->flags |= AHC_NEWEEPROM_FMT;
+	ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG;
+	ahc->instruction_ram_size = 1024;
+	return (0);
+}
+
+static int
+ahc_aha29160C_setup(struct ahc_softc *ahc)
+{
+	int error;
+
+	error = ahc_aic7899_setup(ahc);
+	if (error != 0)
+		return (error);
+	ahc->features |= AHC_REMOVABLE;
+	return (0);
+}
+
+static int
+ahc_raid_setup(struct ahc_softc *ahc)
+{
+	printf("RAID functionality unsupported\n");
+	return (ENXIO);
+}
+
+static int
+ahc_aha394XX_setup(struct ahc_softc *ahc)
+{
+	ahc_dev_softc_t pci;
+
+	pci = ahc->dev_softc;
+	switch (ahc_get_pci_slot(pci)) {
+	case AHC_394X_SLOT_CHANNEL_A:
+		ahc->channel = 'A';
+		break;
+	case AHC_394X_SLOT_CHANNEL_B:
+		ahc->channel = 'B';
+		break;
+	default:
+		printf("adapter at unexpected slot %d\n"
+		       "unable to map to a channel\n",
+		       ahc_get_pci_slot(pci));
+		ahc->channel = 'A';
+	}
+	return (0);
+}
+
+static int
+ahc_aha398XX_setup(struct ahc_softc *ahc)
+{
+	ahc_dev_softc_t pci;
+
+	pci = ahc->dev_softc;
+	switch (ahc_get_pci_slot(pci)) {
+	case AHC_398X_SLOT_CHANNEL_A:
+		ahc->channel = 'A';
+		break;
+	case AHC_398X_SLOT_CHANNEL_B:
+		ahc->channel = 'B';
+		break;
+	case AHC_398X_SLOT_CHANNEL_C:
+		ahc->channel = 'C';
+		break;
+	default:
+		printf("adapter at unexpected slot %d\n"
+		       "unable to map to a channel\n",
+		       ahc_get_pci_slot(pci));
+		ahc->channel = 'A';
+		break;
+	}
+	ahc->flags |= AHC_LARGE_SEEPROM;
+	return (0);
+}
+
+static int
+ahc_aha494XX_setup(struct ahc_softc *ahc)
+{
+	ahc_dev_softc_t pci;
+
+	pci = ahc->dev_softc;
+	switch (ahc_get_pci_slot(pci)) {
+	case AHC_494X_SLOT_CHANNEL_A:
+		ahc->channel = 'A';
+		break;
+	case AHC_494X_SLOT_CHANNEL_B:
+		ahc->channel = 'B';
+		break;
+	case AHC_494X_SLOT_CHANNEL_C:
+		ahc->channel = 'C';
+		break;
+	case AHC_494X_SLOT_CHANNEL_D:
+		ahc->channel = 'D';
+		break;
+	default:
+		printf("adapter at unexpected slot %d\n"
+		       "unable to map to a channel\n",
+		       ahc_get_pci_slot(pci));
+		ahc->channel = 'A';
+	}
+	ahc->flags |= AHC_LARGE_SEEPROM;
+	return (0);
+}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.h b/drivers/scsi/aic7xxx/aic7xxx_pci.h
new file mode 100644
index 0000000..be27fcb
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.h
@@ -0,0 +1,124 @@
+/*
+ * Adaptec AIC7xxx device driver for Linux.
+ *
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id$
+ *
+ */
+#ifndef _AIC7XXX_PCI_H_
+#define _AIC7XXX_PCI_H_
+
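+/*
+ * Each ID below packs a full PCI identity into one 64-bit value: device ID
+ * in bits 63-48, vendor ID in bits 47-32, subsystem device ID in bits 31-16,
+ * and subsystem vendor ID in bits 15-0 (e.g. the _CPQ entries end in the
+ * Compaq subvendor 0x0E11).  The *_MASK values select which of those fields
+ * must match when probing.
+ */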
+#define ID_ALL_MASK			0xFFFFFFFFFFFFFFFFull
+#define ID_DEV_VENDOR_MASK		0xFFFFFFFF00000000ull
+#define ID_9005_GENERIC_MASK		0xFFF0FFFF00000000ull
+#define ID_9005_SISL_MASK		0x000FFFFF00000000ull
+#define ID_9005_SISL_ID			0x0005900500000000ull
+#define ID_AIC7850			0x5078900400000000ull
+#define ID_AHA_2902_04_10_15_20C_30C	0x5078900478509004ull
+#define ID_AIC7855			0x5578900400000000ull
+#define ID_AIC7859			0x3860900400000000ull
+#define ID_AHA_2930CU			0x3860900438699004ull
+#define ID_AIC7860			0x6078900400000000ull
+#define ID_AIC7860C			0x6078900478609004ull
+#define ID_AHA_1480A			0x6075900400000000ull
+#define ID_AHA_2940AU_0			0x6178900400000000ull
+#define ID_AHA_2940AU_1			0x6178900478619004ull
+#define ID_AHA_2940AU_CN		0x2178900478219004ull
+#define ID_AHA_2930C_VAR		0x6038900438689004ull
+
+#define ID_AIC7870			0x7078900400000000ull
+#define ID_AHA_2940			0x7178900400000000ull
+#define ID_AHA_3940			0x7278900400000000ull
+#define ID_AHA_398X			0x7378900400000000ull
+#define ID_AHA_2944			0x7478900400000000ull
+#define ID_AHA_3944			0x7578900400000000ull
+#define ID_AHA_4944			0x7678900400000000ull
+
+#define ID_AIC7880			0x8078900400000000ull
+#define ID_AIC7880_B			0x8078900478809004ull
+#define ID_AHA_2940U			0x8178900400000000ull
+#define ID_AHA_3940U			0x8278900400000000ull
+#define ID_AHA_2944U			0x8478900400000000ull
+#define ID_AHA_3944U			0x8578900400000000ull
+#define ID_AHA_398XU			0x8378900400000000ull
+#define ID_AHA_4944U			0x8678900400000000ull
+#define ID_AHA_2940UB			0x8178900478819004ull
+#define ID_AHA_2930U			0x8878900478889004ull
+#define ID_AHA_2940U_PRO		0x8778900478879004ull
+#define ID_AHA_2940U_CN			0x0078900478009004ull
+
+#define ID_AIC7895			0x7895900478959004ull
+#define ID_AIC7895_ARO			0x7890900478939004ull
+#define ID_AIC7895_ARO_MASK		0xFFF0FFFFFFFFFFFFull
+#define ID_AHA_2940U_DUAL		0x7895900478919004ull
+#define ID_AHA_3940AU			0x7895900478929004ull
+#define ID_AHA_3944AU			0x7895900478949004ull
+
+#define ID_AIC7890			0x001F9005000F9005ull
+#define ID_AIC7890_ARO			0x00139005000F9005ull
+#define ID_AAA_131U2			0x0013900500039005ull
+#define ID_AHA_2930U2			0x0011900501819005ull
+#define ID_AHA_2940U2B			0x00109005A1009005ull
+#define ID_AHA_2940U2_OEM		0x0010900521809005ull
+#define ID_AHA_2940U2			0x00109005A1809005ull
+#define ID_AHA_2950U2B			0x00109005E1009005ull
+
+#define ID_AIC7892			0x008F9005FFFF9005ull
+#define ID_AIC7892_ARO			0x00839005FFFF9005ull
+#define ID_AHA_29160			0x00809005E2A09005ull
+#define ID_AHA_29160_CPQ		0x00809005E2A00E11ull
+#define ID_AHA_29160N			0x0080900562A09005ull
+#define ID_AHA_29160C			0x0080900562209005ull
+#define ID_AHA_29160B			0x00809005E2209005ull
+#define ID_AHA_19160B			0x0081900562A19005ull
+
+#define ID_AIC7896			0x005F9005FFFF9005ull
+#define ID_AIC7896_ARO			0x00539005FFFF9005ull
+#define ID_AHA_3950U2B_0		0x00509005FFFF9005ull
+#define ID_AHA_3950U2B_1		0x00509005F5009005ull
+#define ID_AHA_3950U2D_0		0x00519005FFFF9005ull
+#define ID_AHA_3950U2D_1		0x00519005B5009005ull
+
+#define ID_AIC7899			0x00CF9005FFFF9005ull
+#define ID_AIC7899_ARO			0x00C39005FFFF9005ull
+#define ID_AHA_3960D			0x00C09005F6209005ull
+#define ID_AHA_3960D_CPQ		0x00C09005F6200E11ull
+
+#define ID_AIC7810			0x1078900400000000ull
+#define ID_AIC7815			0x7815900400000000ull
+
+#endif /* _AIC7XXX_PCI_H_ */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
new file mode 100644
index 0000000..85e80ee
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright (c) 2000-2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * String handling code courtesy of Gerard Roudier's <groudier@club-internet.fr>
+ * sym driver.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7xxx_proc.c#29 $
+ */
+#include "aic7xxx_osm.h"
+#include "aic7xxx_inline.h"
+#include "aic7xxx_93cx6.h"
+
+static void	copy_mem_info(struct info_str *info, char *data, int len);
+static int	copy_info(struct info_str *info, char *fmt, ...);
+static void	ahc_dump_target_state(struct ahc_softc *ahc,
+				      struct info_str *info,
+				      u_int our_id, char channel,
+				      u_int target_id, u_int target_offset);
+static void	ahc_dump_device_state(struct info_str *info,
+				      struct ahc_linux_device *dev);
+static int	ahc_proc_write_seeprom(struct ahc_softc *ahc,
+				       char *buffer, int length);
+
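+/*
+ * Copy data into the /proc output buffer while honoring the offset/length
+ * window requested by the caller: bytes before the window are skipped and
+ * bytes past its end are truncated.  info->pos tracks the virtual file
+ * position across calls.
+ */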
+static void
+copy_mem_info(struct info_str *info, char *data, int len)
+{
+	if (info->pos + len > info->offset + info->length)
+		len = info->offset + info->length - info->pos;
+
+	if (info->pos + len < info->offset) {
+		info->pos += len;
+		return;
+	}
+
+	if (info->pos < info->offset) {
+		off_t partial;
+
+		partial = info->offset - info->pos;
+		data += partial;
+		info->pos += partial;
+		len  -= partial;
+	}
+
+	if (len > 0) {
+		memcpy(info->buffer, data, len);
+		info->pos += len;
+		info->buffer += len;
+	}
+}
+
+static int
+copy_info(struct info_str *info, char *fmt, ...)
+{
+	va_list args;
+	char buf[256];
+	int len;
+
+	va_start(args, fmt);
+	len = vsprintf(buf, fmt, args);
+	va_end(args);
+
+	copy_mem_info(info, buf, len);
+	return (len);
+}
+
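+/*
+ * Pretty-print one set of transfer parameters: the effective throughput
+ * (the negotiated sync rate, or the 3.3MB/s async baseline, scaled by the
+ * bus width), followed by the sync frequency, DT flag, offset, and width
+ * when they apply.
+ */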
+void
+ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo)
+{
+	u_int speed;
+	u_int freq;
+	u_int mb;
+
+	speed = 3300;
+	freq = 0;
+	if (tinfo->offset != 0) {
+		freq = aic_calc_syncsrate(tinfo->period);
+		speed = freq;
+	}
+	speed *= (0x01 << tinfo->width);
+	mb = speed / 1000;
+	if (mb > 0)
+		copy_info(info, "%d.%03dMB/s transfers", mb, speed % 1000);
+	else
+		copy_info(info, "%dKB/s transfers", speed);
+
+	if (freq != 0) {
+		copy_info(info, " (%d.%03dMHz%s, offset %d",
+			 freq / 1000, freq % 1000,
+			 (tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
+			 ? " DT" : "", tinfo->offset);
+	}
+
+	if (tinfo->width > 0) {
+		if (freq != 0) {
+			copy_info(info, ", ");
+		} else {
+			copy_info(info, " (");
+		}
+		copy_info(info, "%dbit)", 8 * (0x01 << tinfo->width));
+	} else if (freq != 0) {
+		copy_info(info, ")");
+	}
+	copy_info(info, "\n");
+}
+
+static void
+ahc_dump_target_state(struct ahc_softc *ahc, struct info_str *info,
+		      u_int our_id, char channel, u_int target_id,
+		      u_int target_offset)
+{
+	struct	ahc_linux_target *targ;
+	struct	ahc_initiator_tinfo *tinfo;
+	struct	ahc_tmode_tstate *tstate;
+	int	lun;
+
+	tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
+				    target_id, &tstate);
+	if ((ahc->features & AHC_TWIN) != 0)
+		copy_info(info, "Channel %c ", channel);
+	copy_info(info, "Target %d Negotiation Settings\n", target_id);
+	copy_info(info, "\tUser: ");
+	ahc_format_transinfo(info, &tinfo->user);
+	targ = ahc->platform_data->targets[target_offset];
+	if (targ == NULL)
+		return;
+
+	copy_info(info, "\tGoal: ");
+	ahc_format_transinfo(info, &tinfo->goal);
+	copy_info(info, "\tCurr: ");
+	ahc_format_transinfo(info, &tinfo->curr);
+
+	for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
+		struct ahc_linux_device *dev;
+
+		dev = targ->devices[lun];
+
+		if (dev == NULL)
+			continue;
+
+		ahc_dump_device_state(info, dev);
+	}
+}
+
+static void
+ahc_dump_device_state(struct info_str *info, struct ahc_linux_device *dev)
+{
+	copy_info(info, "\tChannel %c Target %d Lun %d Settings\n",
+		  dev->target->channel + 'A', dev->target->target, dev->lun);
+
+	copy_info(info, "\t\tCommands Queued %ld\n", dev->commands_issued);
+	copy_info(info, "\t\tCommands Active %d\n", dev->active);
+	copy_info(info, "\t\tCommand Openings %d\n", dev->openings);
+	copy_info(info, "\t\tMax Tagged Openings %d\n", dev->maxtags);
+	copy_info(info, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen);
+}
+
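+/*
+ * Handle a write to the /proc file.  The buffer must be exactly one
+ * seeprom_config image with a valid checksum; it is then written to the
+ * controller's serial EEPROM and read back into ahc->seep_config.  Returns
+ * the number of bytes written, or -EINVAL on failure.
+ */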
+static int
+ahc_proc_write_seeprom(struct ahc_softc *ahc, char *buffer, int length)
+{
+	struct seeprom_descriptor sd;
+	int have_seeprom;
+	u_long s;
+	int paused;
+	int written;
+
+	/* Default to failure. */
+	written = -EINVAL;
+	ahc_lock(ahc, &s);
+	paused = ahc_is_paused(ahc);
+	if (!paused)
+		ahc_pause(ahc);
+
+	if (length != sizeof(struct seeprom_config)) {
+		printf("ahc_proc_write_seeprom: incorrect buffer size\n");
+		goto done;
+	}
+
+	have_seeprom = ahc_verify_cksum((struct seeprom_config*)buffer);
+	if (have_seeprom == 0) {
+		printf("ahc_proc_write_seeprom: cksum verification failed\n");
+		goto done;
+	}
+
+	sd.sd_ahc = ahc;
+#if AHC_PCI_CONFIG > 0
+	if ((ahc->chip & AHC_PCI) != 0) {
+		sd.sd_control_offset = SEECTL;
+		sd.sd_status_offset = SEECTL;
+		sd.sd_dataout_offset = SEECTL;
+		if (ahc->flags & AHC_LARGE_SEEPROM)
+			sd.sd_chip = C56_66;
+		else
+			sd.sd_chip = C46;
+		sd.sd_MS = SEEMS;
+		sd.sd_RDY = SEERDY;
+		sd.sd_CS = SEECS;
+		sd.sd_CK = SEECK;
+		sd.sd_DO = SEEDO;
+		sd.sd_DI = SEEDI;
+		have_seeprom = ahc_acquire_seeprom(ahc, &sd);
+	} else
+#endif
+	if ((ahc->chip & AHC_VL) != 0) {
+		sd.sd_control_offset = SEECTL_2840;
+		sd.sd_status_offset = STATUS_2840;
+		sd.sd_dataout_offset = STATUS_2840;		
+		sd.sd_chip = C46;
+		sd.sd_MS = 0;
+		sd.sd_RDY = EEPROM_TF;
+		sd.sd_CS = CS_2840;
+		sd.sd_CK = CK_2840;
+		sd.sd_DO = DO_2840;
+		sd.sd_DI = DI_2840;
+		have_seeprom = TRUE;
+	} else {
+		printf("ahc_proc_write_seeprom: unsupported adapter type\n");
+		goto done;
+	}
+
+	if (!have_seeprom) {
+		printf("ahc_proc_write_seeprom: No Serial EEPROM\n");
+		goto done;
+	} else {
+		u_int start_addr;
+
+		if (ahc->seep_config == NULL) {
+			ahc->seep_config = malloc(sizeof(*ahc->seep_config),
+						  M_DEVBUF, M_NOWAIT);
+			if (ahc->seep_config == NULL) {
+				printf("aic7xxx: Unable to allocate serial "
+				       "eeprom buffer.  Write failing\n");
+				goto done;
+			}
+		}
+		printf("aic7xxx: Writing Serial EEPROM\n");
+		start_addr = 32 * (ahc->channel - 'A');
+		ahc_write_seeprom(&sd, (u_int16_t *)buffer, start_addr,
+				  sizeof(struct seeprom_config)/2);
+		ahc_read_seeprom(&sd, (uint16_t *)ahc->seep_config,
+				 start_addr, sizeof(struct seeprom_config)/2);
+#if AHC_PCI_CONFIG > 0
+		if ((ahc->chip & AHC_VL) == 0)
+			ahc_release_seeprom(&sd);
+#endif
+		written = length;
+	}
+
+done:
+	if (!paused)
+		ahc_unpause(ahc);
+	ahc_unlock(ahc, &s);
+	return (written);
+}
+
+/*
+ * Return information to handle /proc support for the driver.
+ */
+int
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ahc_linux_proc_info(char *buffer, char **start, off_t offset,
+		    int length, int hostno, int inout)
+#else
+ahc_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
+		    off_t offset, int length, int inout)
+#endif
+{
+	struct	ahc_softc *ahc;
+	struct	info_str info;
+	char	ahc_info[256];
+	u_long	s;
+	u_int	max_targ;
+	u_int	i;
+	int	retval;
+
+	retval = -EINVAL;
+	ahc_list_lock(&s);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+	TAILQ_FOREACH(ahc, &ahc_tailq, links) {
+		if (ahc->platform_data->host->host_no == hostno)
+			break;
+	}
+#else
+	ahc = ahc_find_softc(*(struct ahc_softc **)shost->hostdata);
+#endif
+
+	if (ahc == NULL)
+		goto done;
+
+	/* Has data been written to the file? */
+	if (inout == TRUE) {
+		retval = ahc_proc_write_seeprom(ahc, buffer, length);
+		goto done;
+	}
+
+	if (start)
+		*start = buffer;
+
+	info.buffer	= buffer;
+	info.length	= length;
+	info.offset	= offset;
+	info.pos	= 0;
+
+	copy_info(&info, "Adaptec AIC7xxx driver version: %s\n",
+		  AIC7XXX_DRIVER_VERSION);
+	copy_info(&info, "%s\n", ahc->description);
+	ahc_controller_info(ahc, ahc_info);
+	copy_info(&info, "%s\n", ahc_info);
+	copy_info(&info, "Allocated SCBs: %d, SG List Length: %d\n\n",
+		  ahc->scb_data->numscbs, AHC_NSEG);
+
+
+	if (ahc->seep_config == NULL)
+		copy_info(&info, "No Serial EEPROM\n");
+	else {
+		copy_info(&info, "Serial EEPROM:\n");
+		for (i = 0; i < sizeof(*ahc->seep_config)/2; i++) {
+			if (((i % 8) == 0) && (i != 0)) {
+				copy_info(&info, "\n");
+			}
+			copy_info(&info, "0x%.4x ",
+				  ((uint16_t*)ahc->seep_config)[i]);
+		}
+		copy_info(&info, "\n");
+	}
+	copy_info(&info, "\n");
+
+	max_targ = 15;
+	if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
+		max_targ = 7;
+
+	for (i = 0; i <= max_targ; i++) {
+		u_int our_id;
+		u_int target_id;
+		char channel;
+
+		channel = 'A';
+		our_id = ahc->our_id;
+		target_id = i;
+		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
+			channel = 'B';
+			our_id = ahc->our_id_b;
+			target_id = i % 8;
+		}
+
+		ahc_dump_target_state(ahc, &info, our_id,
+				      channel, target_id, i);
+	}
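+	/* Report only the bytes that fell inside the requested window. */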
+	retval = info.pos > info.offset ? info.pos - info.offset : 0;
+done:
+	ahc_list_unlock(&s);
+	return (retval);
+}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
new file mode 100644
index 0000000..7c1390e
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
@@ -0,0 +1,1787 @@
+/*
+ * DO NOT EDIT - This file is automatically generated
+ *		 from the following source files:
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $
+ */
+typedef int (ahc_reg_print_t)(u_int, u_int *, u_int);
+typedef struct ahc_reg_parse_entry {
+	char	*name;
+	uint8_t	 value;
+	uint8_t	 mask;
+} ahc_reg_parse_entry_t;
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsiseq_print;
+#else
+#define ahc_scsiseq_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCSISEQ", 0x00, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sxfrctl0_print;
+#else
+#define ahc_sxfrctl0_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SXFRCTL0", 0x01, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sxfrctl1_print;
+#else
+#define ahc_sxfrctl1_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SXFRCTL1", 0x02, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsisigo_print;
+#else
+#define ahc_scsisigo_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCSISIGO", 0x03, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsisigi_print;
+#else
+#define ahc_scsisigi_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCSISIGI", 0x03, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsirate_print;
+#else
+#define ahc_scsirate_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCSIRATE", 0x04, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsiid_print;
+#else
+#define ahc_scsiid_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCSIID", 0x05, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsidatl_print;
+#else
+#define ahc_scsidatl_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCSIDATL", 0x06, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsidath_print;
+#else
+#define ahc_scsidath_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCSIDATH", 0x07, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_stcnt_print;
+#else
+#define ahc_stcnt_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "STCNT", 0x08, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_optionmode_print;
+#else
+#define ahc_optionmode_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "OPTIONMODE", 0x08, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_targcrccnt_print;
+#else
+#define ahc_targcrccnt_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "TARGCRCCNT", 0x0a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_clrsint0_print;
+#else
+#define ahc_clrsint0_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CLRSINT0", 0x0b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sstat0_print;
+#else
+#define ahc_sstat0_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SSTAT0", 0x0b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_clrsint1_print;
+#else
+#define ahc_clrsint1_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CLRSINT1", 0x0c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sstat1_print;
+#else
+#define ahc_sstat1_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SSTAT1", 0x0c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sstat2_print;
+#else
+#define ahc_sstat2_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SSTAT2", 0x0d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sstat3_print;
+#else
+#define ahc_sstat3_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SSTAT3", 0x0e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsiid_ultra2_print;
+#else
+#define ahc_scsiid_ultra2_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCSIID_ULTRA2", 0x0f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_simode0_print;
+#else
+#define ahc_simode0_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SIMODE0", 0x10, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_simode1_print;
+#else
+#define ahc_simode1_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SIMODE1", 0x11, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsibusl_print;
+#else
+#define ahc_scsibusl_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCSIBUSL", 0x12, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsibush_print;
+#else
+#define ahc_scsibush_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCSIBUSH", 0x13, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sxfrctl2_print;
+#else
+#define ahc_sxfrctl2_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SXFRCTL2", 0x13, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_shaddr_print;
+#else
+#define ahc_shaddr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SHADDR", 0x14, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_seltimer_print;
+#else
+#define ahc_seltimer_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SELTIMER", 0x18, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_selid_print;
+#else
+#define ahc_selid_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SELID", 0x19, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scamctl_print;
+#else
+#define ahc_scamctl_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCAMCTL", 0x1a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_targid_print;
+#else
+#define ahc_targid_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "TARGID", 0x1b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_spiocap_print;
+#else
+#define ahc_spiocap_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SPIOCAP", 0x1b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_brdctl_print;
+#else
+#define ahc_brdctl_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "BRDCTL", 0x1d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_seectl_print;
+#else
+#define ahc_seectl_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SEECTL", 0x1e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sblkctl_print;
+#else
+#define ahc_sblkctl_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SBLKCTL", 0x1f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_busy_targets_print;
+#else
+#define ahc_busy_targets_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "BUSY_TARGETS", 0x20, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_ultra_enb_print;
+#else
+#define ahc_ultra_enb_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "ULTRA_ENB", 0x30, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_disc_dsb_print;
+#else
+#define ahc_disc_dsb_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DISC_DSB", 0x32, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_cmdsize_table_tail_print;
+#else
+#define ahc_cmdsize_table_tail_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CMDSIZE_TABLE_TAIL", 0x34, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_mwi_residual_print;
+#else
+#define ahc_mwi_residual_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "MWI_RESIDUAL", 0x38, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_next_queued_scb_print;
+#else
+#define ahc_next_queued_scb_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "NEXT_QUEUED_SCB", 0x39, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_msg_out_print;
+#else
+#define ahc_msg_out_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "MSG_OUT", 0x3a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dmaparams_print;
+#else
+#define ahc_dmaparams_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DMAPARAMS", 0x3b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_seq_flags_print;
+#else
+#define ahc_seq_flags_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SEQ_FLAGS", 0x3c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_saved_scsiid_print;
+#else
+#define ahc_saved_scsiid_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SAVED_SCSIID", 0x3d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_saved_lun_print;
+#else
+#define ahc_saved_lun_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SAVED_LUN", 0x3e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_lastphase_print;
+#else
+#define ahc_lastphase_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "LASTPHASE", 0x3f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_waiting_scbh_print;
+#else
+#define ahc_waiting_scbh_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "WAITING_SCBH", 0x40, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_disconnected_scbh_print;
+#else
+#define ahc_disconnected_scbh_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DISCONNECTED_SCBH", 0x41, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_free_scbh_print;
+#else
+#define ahc_free_scbh_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "FREE_SCBH", 0x42, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_complete_scbh_print;
+#else
+#define ahc_complete_scbh_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "COMPLETE_SCBH", 0x43, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_hscb_addr_print;
+#else
+#define ahc_hscb_addr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "HSCB_ADDR", 0x44, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_shared_data_addr_print;
+#else
+#define ahc_shared_data_addr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SHARED_DATA_ADDR", 0x48, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_kernel_qinpos_print;
+#else
+#define ahc_kernel_qinpos_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "KERNEL_QINPOS", 0x4c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_qinpos_print;
+#else
+#define ahc_qinpos_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "QINPOS", 0x4d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_qoutpos_print;
+#else
+#define ahc_qoutpos_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "QOUTPOS", 0x4e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_kernel_tqinpos_print;
+#else
+#define ahc_kernel_tqinpos_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "KERNEL_TQINPOS", 0x4f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_tqinpos_print;
+#else
+#define ahc_tqinpos_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "TQINPOS", 0x50, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_arg_1_print;
+#else
+#define ahc_arg_1_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "ARG_1", 0x51, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_arg_2_print;
+#else
+#define ahc_arg_2_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "ARG_2", 0x52, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_last_msg_print;
+#else
+#define ahc_last_msg_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "LAST_MSG", 0x53, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsiseq_template_print;
+#else
+#define ahc_scsiseq_template_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCSISEQ_TEMPLATE", 0x54, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_ha_274_biosglobal_print;
+#else
+#define ahc_ha_274_biosglobal_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "HA_274_BIOSGLOBAL", 0x56, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_seq_flags2_print;
+#else
+#define ahc_seq_flags2_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SEQ_FLAGS2", 0x57, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsiconf_print;
+#else
+#define ahc_scsiconf_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCSICONF", 0x5a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_intdef_print;
+#else
+#define ahc_intdef_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "INTDEF", 0x5c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_hostconf_print;
+#else
+#define ahc_hostconf_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "HOSTCONF", 0x5d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_ha_274_biosctrl_print;
+#else
+#define ahc_ha_274_biosctrl_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "HA_274_BIOSCTRL", 0x5f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_seqctl_print;
+#else
+#define ahc_seqctl_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SEQCTL", 0x60, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_seqram_print;
+#else
+#define ahc_seqram_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SEQRAM", 0x61, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_seqaddr0_print;
+#else
+#define ahc_seqaddr0_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SEQADDR0", 0x62, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_seqaddr1_print;
+#else
+#define ahc_seqaddr1_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SEQADDR1", 0x63, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_accum_print;
+#else
+#define ahc_accum_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "ACCUM", 0x64, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sindex_print;
+#else
+#define ahc_sindex_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SINDEX", 0x65, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dindex_print;
+#else
+#define ahc_dindex_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DINDEX", 0x66, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_allones_print;
+#else
+#define ahc_allones_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "ALLONES", 0x69, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_allzeros_print;
+#else
+#define ahc_allzeros_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "ALLZEROS", 0x6a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_none_print;
+#else
+#define ahc_none_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "NONE", 0x6a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_flags_print;
+#else
+#define ahc_flags_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "FLAGS", 0x6b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sindir_print;
+#else
+#define ahc_sindir_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SINDIR", 0x6c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dindir_print;
+#else
+#define ahc_dindir_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DINDIR", 0x6d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_function1_print;
+#else
+#define ahc_function1_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "FUNCTION1", 0x6e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_stack_print;
+#else
+#define ahc_stack_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "STACK", 0x6f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_targ_offset_print;
+#else
+#define ahc_targ_offset_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "TARG_OFFSET", 0x70, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sram_base_print;
+#else
+#define ahc_sram_base_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SRAM_BASE", 0x70, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_bctl_print;
+#else
+#define ahc_bctl_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "BCTL", 0x84, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dscommand0_print;
+#else
+#define ahc_dscommand0_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DSCOMMAND0", 0x84, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_bustime_print;
+#else
+#define ahc_bustime_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "BUSTIME", 0x85, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dscommand1_print;
+#else
+#define ahc_dscommand1_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DSCOMMAND1", 0x85, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_busspd_print;
+#else
+#define ahc_busspd_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "BUSSPD", 0x86, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_hs_mailbox_print;
+#else
+#define ahc_hs_mailbox_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "HS_MAILBOX", 0x86, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dspcistatus_print;
+#else
+#define ahc_dspcistatus_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DSPCISTATUS", 0x86, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_hcntrl_print;
+#else
+#define ahc_hcntrl_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "HCNTRL", 0x87, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_haddr_print;
+#else
+#define ahc_haddr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "HADDR", 0x88, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_hcnt_print;
+#else
+#define ahc_hcnt_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "HCNT", 0x8c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scbptr_print;
+#else
+#define ahc_scbptr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCBPTR", 0x90, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_intstat_print;
+#else
+#define ahc_intstat_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "INTSTAT", 0x91, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_clrint_print;
+#else
+#define ahc_clrint_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CLRINT", 0x92, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_error_print;
+#else
+#define ahc_error_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "ERROR", 0x92, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dfcntrl_print;
+#else
+#define ahc_dfcntrl_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DFCNTRL", 0x93, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dfstatus_print;
+#else
+#define ahc_dfstatus_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DFSTATUS", 0x94, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dfwaddr_print;
+#else
+#define ahc_dfwaddr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DFWADDR", 0x95, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dfraddr_print;
+#else
+#define ahc_dfraddr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DFRADDR", 0x97, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dfdat_print;
+#else
+#define ahc_dfdat_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DFDAT", 0x99, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scbcnt_print;
+#else
+#define ahc_scbcnt_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCBCNT", 0x9a, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_qinfifo_print;
+#else
+#define ahc_qinfifo_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "QINFIFO", 0x9b, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_qincnt_print;
+#else
+#define ahc_qincnt_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "QINCNT", 0x9c, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_qoutfifo_print;
+#else
+#define ahc_qoutfifo_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "QOUTFIFO", 0x9d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_crccontrol1_print;
+#else
+#define ahc_crccontrol1_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CRCCONTROL1", 0x9d, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_qoutcnt_print;
+#else
+#define ahc_qoutcnt_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "QOUTCNT", 0x9e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scsiphase_print;
+#else
+#define ahc_scsiphase_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCSIPHASE", 0x9e, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sfunct_print;
+#else
+#define ahc_sfunct_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SFUNCT", 0x9f, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_base_print;
+#else
+#define ahc_scb_base_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_BASE", 0xa0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_cdb_ptr_print;
+#else
+#define ahc_scb_cdb_ptr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_CDB_PTR", 0xa0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_residual_sgptr_print;
+#else
+#define ahc_scb_residual_sgptr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_RESIDUAL_SGPTR", 0xa4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_scsi_status_print;
+#else
+#define ahc_scb_scsi_status_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_SCSI_STATUS", 0xa8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_target_phases_print;
+#else
+#define ahc_scb_target_phases_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_TARGET_PHASES", 0xa9, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_target_data_dir_print;
+#else
+#define ahc_scb_target_data_dir_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_TARGET_DATA_DIR", 0xaa, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_target_itag_print;
+#else
+#define ahc_scb_target_itag_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_TARGET_ITAG", 0xab, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_dataptr_print;
+#else
+#define ahc_scb_dataptr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_DATAPTR", 0xac, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_datacnt_print;
+#else
+#define ahc_scb_datacnt_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_DATACNT", 0xb0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_sgptr_print;
+#else
+#define ahc_scb_sgptr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_SGPTR", 0xb4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_control_print;
+#else
+#define ahc_scb_control_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_CONTROL", 0xb8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_scsiid_print;
+#else
+#define ahc_scb_scsiid_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_SCSIID", 0xb9, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_lun_print;
+#else
+#define ahc_scb_lun_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_LUN", 0xba, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_tag_print;
+#else
+#define ahc_scb_tag_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_TAG", 0xbb, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_cdb_len_print;
+#else
+#define ahc_scb_cdb_len_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_CDB_LEN", 0xbc, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_scsirate_print;
+#else
+#define ahc_scb_scsirate_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_SCSIRATE", 0xbd, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_scsioffset_print;
+#else
+#define ahc_scb_scsioffset_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_SCSIOFFSET", 0xbe, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_next_print;
+#else
+#define ahc_scb_next_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_NEXT", 0xbf, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_64_spare_print;
+#else
+#define ahc_scb_64_spare_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_64_SPARE", 0xc0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_seectl_2840_print;
+#else
+#define ahc_seectl_2840_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SEECTL_2840", 0xc0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_status_2840_print;
+#else
+#define ahc_status_2840_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "STATUS_2840", 0xc1, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scb_64_btt_print;
+#else
+#define ahc_scb_64_btt_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCB_64_BTT", 0xd0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_cchaddr_print;
+#else
+#define ahc_cchaddr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CCHADDR", 0xe0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_cchcnt_print;
+#else
+#define ahc_cchcnt_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CCHCNT", 0xe8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_ccsgram_print;
+#else
+#define ahc_ccsgram_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CCSGRAM", 0xe9, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_ccsgaddr_print;
+#else
+#define ahc_ccsgaddr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CCSGADDR", 0xea, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_ccsgctl_print;
+#else
+#define ahc_ccsgctl_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CCSGCTL", 0xeb, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_ccscbram_print;
+#else
+#define ahc_ccscbram_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CCSCBRAM", 0xec, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_ccscbaddr_print;
+#else
+#define ahc_ccscbaddr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CCSCBADDR", 0xed, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_ccscbctl_print;
+#else
+#define ahc_ccscbctl_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CCSCBCTL", 0xee, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_ccscbcnt_print;
+#else
+#define ahc_ccscbcnt_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CCSCBCNT", 0xef, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_scbbaddr_print;
+#else
+#define ahc_scbbaddr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SCBBADDR", 0xf0, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_ccscbptr_print;
+#else
+#define ahc_ccscbptr_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "CCSCBPTR", 0xf1, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_hnscb_qoff_print;
+#else
+#define ahc_hnscb_qoff_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "HNSCB_QOFF", 0xf4, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_snscb_qoff_print;
+#else
+#define ahc_snscb_qoff_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SNSCB_QOFF", 0xf6, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sdscb_qoff_print;
+#else
+#define ahc_sdscb_qoff_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SDSCB_QOFF", 0xf8, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_qoff_ctlsta_print;
+#else
+#define ahc_qoff_ctlsta_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "QOFF_CTLSTA", 0xfa, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_dff_thrsh_print;
+#else
+#define ahc_dff_thrsh_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "DFF_THRSH", 0xfb, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sg_cache_shadow_print;
+#else
+#define ahc_sg_cache_shadow_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SG_CACHE_SHADOW", 0xfc, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahc_reg_print_t ahc_sg_cache_pre_print;
+#else
+#define ahc_sg_cache_pre_print(regvalue, cur_col, wrap) \
+    ahc_print_register(NULL, 0, "SG_CACHE_PRE", 0xfc, regvalue, cur_col, wrap)
+#endif
+
+
+#define	SCSISEQ         		0x00
+#define		TEMODE          	0x80
+#define		SCSIRSTO        	0x01
+
+#define	SXFRCTL0        		0x01
+#define		DFON            	0x80
+#define		DFPEXP          	0x40
+#define		FAST20          	0x20
+#define		CLRSTCNT        	0x10
+#define		SPIOEN          	0x08
+#define		SCAMEN          	0x04
+#define		CLRCHN          	0x02
+
+#define	SXFRCTL1        		0x02
+#define		STIMESEL        	0x18
+#define		BITBUCKET       	0x80
+#define		SWRAPEN         	0x40
+#define		ENSTIMER        	0x04
+#define		ACTNEGEN        	0x02
+#define		STPWEN          	0x01
+
+#define	SCSISIGO        		0x03
+#define		CDO             	0x80
+#define		IOO             	0x40
+#define		MSGO            	0x20
+#define		ATNO            	0x10
+#define		SELO            	0x08
+#define		BSYO            	0x04
+#define		REQO            	0x02
+#define		ACKO            	0x01
+
+#define	SCSISIGI        		0x03
+#define		P_DATAIN_DT     	0x60
+#define		P_DATAOUT_DT    	0x20
+#define		ATNI            	0x10
+#define		SELI            	0x08
+#define		BSYI            	0x04
+#define		REQI            	0x02
+#define		ACKI            	0x01
+
+#define	SCSIRATE        		0x04
+#define		SXFR            	0x70
+#define		SOFS            	0x0f
+#define		SXFR_ULTRA2     	0x0f
+#define		WIDEXFER        	0x80
+#define		ENABLE_CRC      	0x40
+#define		SINGLE_EDGE     	0x10
+
+#define	SCSIID          		0x05
+#define	SCSIOFFSET      		0x05
+#define		SOFS_ULTRA2     	0x7f
+
+#define	SCSIDATL        		0x06
+
+#define	SCSIDATH        		0x07
+
+#define	STCNT           		0x08
+
+#define	OPTIONMODE      		0x08
+#define		OPTIONMODE_DEFAULTS	0x03
+#define		AUTORATEEN      	0x80
+#define		AUTOACKEN       	0x40
+#define		ATNMGMNTEN      	0x20
+#define		BUSFREEREV      	0x10
+#define		EXPPHASEDIS     	0x08
+#define		SCSIDATL_IMGEN  	0x04
+#define		AUTO_MSGOUT_DE  	0x02
+#define		DIS_MSGIN_DUALEDGE	0x01
+
+#define	TARGCRCCNT      		0x0a
+
+#define	CLRSINT0        		0x0b
+#define		CLRSELDO        	0x40
+#define		CLRSELDI        	0x20
+#define		CLRSELINGO      	0x10
+#define		CLRIOERR        	0x08
+#define		CLRSWRAP        	0x08
+#define		CLRSPIORDY      	0x02
+
+#define	SSTAT0          		0x0b
+#define		TARGET          	0x80
+#define		SELDO           	0x40
+#define		SELDI           	0x20
+#define		SELINGO         	0x10
+#define		SWRAP           	0x08
+#define		IOERR           	0x08
+#define		SDONE           	0x04
+#define		SPIORDY         	0x02
+#define		DMADONE         	0x01
+
+#define	CLRSINT1        		0x0c
+#define		CLRSELTIMEO     	0x80
+#define		CLRATNO         	0x40
+#define		CLRSCSIRSTI     	0x20
+#define		CLRBUSFREE      	0x08
+#define		CLRSCSIPERR     	0x04
+#define		CLRPHASECHG     	0x02
+#define		CLRREQINIT      	0x01
+
+#define	SSTAT1          		0x0c
+#define		SELTO           	0x80
+#define		ATNTARG         	0x40
+#define		SCSIRSTI        	0x20
+#define		PHASEMIS        	0x10
+#define		BUSFREE         	0x08
+#define		SCSIPERR        	0x04
+#define		PHASECHG        	0x02
+#define		REQINIT         	0x01
+
+#define	SSTAT2          		0x0d
+#define		SFCNT           	0x1f
+#define		OVERRUN         	0x80
+#define		SHVALID         	0x40
+#define		EXP_ACTIVE      	0x10
+#define		CRCVALERR       	0x08
+#define		CRCENDERR       	0x04
+#define		CRCREQERR       	0x02
+#define		DUAL_EDGE_ERR   	0x01
+
+#define	SSTAT3          		0x0e
+#define		SCSICNT         	0xf0
+#define		U2OFFCNT        	0x7f
+#define		OFFCNT          	0x0f
+
+#define	SCSIID_ULTRA2   		0x0f
+
+#define	SIMODE0         		0x10
+#define		ENSELDO         	0x40
+#define		ENSELDI         	0x20
+#define		ENSELINGO       	0x10
+#define		ENIOERR         	0x08
+#define		ENSWRAP         	0x08
+#define		ENSDONE         	0x04
+#define		ENSPIORDY       	0x02
+#define		ENDMADONE       	0x01
+
+#define	SIMODE1         		0x11
+#define		ENSELTIMO       	0x80
+#define		ENATNTARG       	0x40
+#define		ENSCSIRST       	0x20
+#define		ENPHASEMIS      	0x10
+#define		ENBUSFREE       	0x08
+#define		ENSCSIPERR      	0x04
+#define		ENPHASECHG      	0x02
+#define		ENREQINIT       	0x01
+
+#define	SCSIBUSL        		0x12
+
+#define	SCSIBUSH        		0x13
+
+#define	SXFRCTL2        		0x13
+#define		ASYNC_SETUP     	0x07
+#define		AUTORSTDIS      	0x10
+#define		CMDDMAEN        	0x08
+
+#define	SHADDR          		0x14
+
+#define	SELTIMER        		0x18
+#define	TARGIDIN        		0x18
+#define		STAGE6          	0x20
+#define		STAGE5          	0x10
+#define		STAGE4          	0x08
+#define		STAGE3          	0x04
+#define		STAGE2          	0x02
+#define		STAGE1          	0x01
+
+#define	SELID           		0x19
+#define		SELID_MASK      	0xf0
+#define		ONEBIT          	0x08
+
+#define	SCAMCTL         		0x1a
+#define		SCAMLVL         	0x03
+#define		ENSCAMSELO      	0x80
+#define		CLRSCAMSELID    	0x40
+#define		ALTSTIM         	0x20
+#define		DFLTTID         	0x10
+
+#define	TARGID          		0x1b
+
+#define	SPIOCAP         		0x1b
+#define		SOFT1           	0x80
+#define		SOFT0           	0x40
+#define		SOFTCMDEN       	0x20
+#define		EXT_BRDCTL      	0x10
+#define		SEEPROM         	0x08
+#define		EEPROM          	0x04
+#define		ROM             	0x02
+#define		SSPIOCPS        	0x01
+
+#define	BRDCTL          		0x1d
+#define		BRDDAT7         	0x80
+#define		BRDDAT6         	0x40
+#define		BRDDAT5         	0x20
+#define		BRDDAT4         	0x10
+#define		BRDSTB          	0x10
+#define		BRDDAT3         	0x08
+#define		BRDCS           	0x08
+#define		BRDDAT2         	0x04
+#define		BRDRW           	0x04
+#define		BRDRW_ULTRA2    	0x02
+#define		BRDCTL1         	0x02
+#define		BRDCTL0         	0x01
+#define		BRDSTB_ULTRA2   	0x01
+
+#define	SEECTL          		0x1e
+#define		EXTARBACK       	0x80
+#define		EXTARBREQ       	0x40
+#define		SEEMS           	0x20
+#define		SEERDY          	0x10
+#define		SEECS           	0x08
+#define		SEECK           	0x04
+#define		SEEDO           	0x02
+#define		SEEDI           	0x01
+
+#define	SBLKCTL         		0x1f
+#define		DIAGLEDEN       	0x80
+#define		DIAGLEDON       	0x40
+#define		AUTOFLUSHDIS    	0x20
+#define		ENAB40          	0x08
+#define		SELBUSB         	0x08
+#define		ENAB20          	0x04
+#define		SELWIDE         	0x02
+#define		XCVR            	0x01
+
+#define	BUSY_TARGETS    		0x20
+#define	TARG_SCSIRATE   		0x20
+
+#define	ULTRA_ENB       		0x30
+#define	CMDSIZE_TABLE   		0x30
+
+#define	DISC_DSB        		0x32
+
+#define	CMDSIZE_TABLE_TAIL		0x34
+
+#define	MWI_RESIDUAL    		0x38
+#define	TARG_IMMEDIATE_SCB		0x38
+
+#define	NEXT_QUEUED_SCB 		0x39
+
+#define	MSG_OUT         		0x3a
+
+#define	DMAPARAMS       		0x3b
+#define		PRELOADEN       	0x80
+#define		WIDEODD         	0x40
+#define		SCSIEN          	0x20
+#define		SDMAEN          	0x10
+#define		SDMAENACK       	0x10
+#define		HDMAEN          	0x08
+#define		HDMAENACK       	0x08
+#define		DIRECTION       	0x04
+#define		FIFOFLUSH       	0x02
+#define		FIFORESET       	0x01
+
+#define	SEQ_FLAGS       		0x3c
+#define		NOT_IDENTIFIED  	0x80
+#define		NO_CDB_SENT     	0x40
+#define		TARGET_CMD_IS_TAGGED	0x40
+#define		DPHASE          	0x20
+#define		TARG_CMD_PENDING	0x10
+#define		CMDPHASE_PENDING	0x08
+#define		DPHASE_PENDING  	0x04
+#define		SPHASE_PENDING  	0x02
+#define		NO_DISCONNECT   	0x01
+
+#define	SAVED_SCSIID    		0x3d
+
+#define	SAVED_LUN       		0x3e
+
+#define	LASTPHASE       		0x3f
+#define		P_MESGIN        	0xe0
+#define		PHASE_MASK      	0xe0
+#define		P_STATUS        	0xc0
+#define		P_MESGOUT       	0xa0
+#define		P_COMMAND       	0x80
+#define		P_DATAIN        	0x40
+#define		P_BUSFREE       	0x01
+#define		P_DATAOUT       	0x00
+#define		CDI             	0x80
+#define		IOI             	0x40
+#define		MSGI            	0x20
+
+#define	WAITING_SCBH    		0x40
+
+#define	DISCONNECTED_SCBH		0x41
+
+#define	FREE_SCBH       		0x42
+
+#define	COMPLETE_SCBH   		0x43
+
+#define	HSCB_ADDR       		0x44
+
+#define	SHARED_DATA_ADDR		0x48
+
+#define	KERNEL_QINPOS   		0x4c
+
+#define	QINPOS          		0x4d
+
+#define	QOUTPOS         		0x4e
+
+#define	KERNEL_TQINPOS  		0x4f
+
+#define	TQINPOS         		0x50
+
+#define	ARG_1           		0x51
+#define	RETURN_1        		0x51
+#define		SEND_MSG        	0x80
+#define		SEND_SENSE      	0x40
+#define		SEND_REJ        	0x20
+#define		MSGOUT_PHASEMIS 	0x10
+#define		EXIT_MSG_LOOP   	0x08
+#define		CONT_MSG_LOOP   	0x04
+#define		CONT_TARG_SESSION	0x02
+
+#define	ARG_2           		0x52
+#define	RETURN_2        		0x52
+
+#define	LAST_MSG        		0x53
+
+#define	SCSISEQ_TEMPLATE		0x54
+#define		ENSELO          	0x40
+#define		ENSELI          	0x20
+#define		ENRSELI         	0x10
+#define		ENAUTOATNO      	0x08
+#define		ENAUTOATNI      	0x04
+#define		ENAUTOATNP      	0x02
+
+#define	HA_274_BIOSGLOBAL		0x56
+#define	INITIATOR_TAG   		0x56
+#define		HA_274_EXTENDED_TRANS	0x01
+
+#define	SEQ_FLAGS2      		0x57
+#define		TARGET_MSG_PENDING	0x02
+#define		SCB_DMA         	0x01
+
+#define	SCSICONF        		0x5a
+#define		HWSCSIID        	0x0f
+#define		HSCSIID         	0x07
+#define		TERM_ENB        	0x80
+#define		RESET_SCSI      	0x40
+#define		ENSPCHK         	0x20
+
+#define	INTDEF          		0x5c
+#define		VECTOR          	0x0f
+#define		EDGE_TRIG       	0x80
+
+#define	HOSTCONF        		0x5d
+
+#define	HA_274_BIOSCTRL 		0x5f
+#define		BIOSDISABLED    	0x30
+#define		BIOSMODE        	0x30
+#define		CHANNEL_B_PRIMARY	0x08
+
+#define	SEQCTL          		0x60
+#define		PERRORDIS       	0x80
+#define		PAUSEDIS        	0x40
+#define		FAILDIS         	0x20
+#define		FASTMODE        	0x10
+#define		BRKADRINTEN     	0x08
+#define		STEP            	0x04
+#define		SEQRESET        	0x02
+#define		LOADRAM         	0x01
+
+#define	SEQRAM          		0x61
+
+#define	SEQADDR0        		0x62
+
+#define	SEQADDR1        		0x63
+#define		SEQADDR1_MASK   	0x01
+
+#define	ACCUM           		0x64
+
+#define	SINDEX          		0x65
+
+#define	DINDEX          		0x66
+
+#define	ALLONES         		0x69
+
+#define	ALLZEROS        		0x6a
+
+#define	NONE            		0x6a
+
+#define	FLAGS           		0x6b
+#define		ZERO            	0x02
+#define		CARRY           	0x01
+
+#define	SINDIR          		0x6c
+
+#define	DINDIR          		0x6d
+
+#define	FUNCTION1       		0x6e
+
+#define	STACK           		0x6f
+
+#define	TARG_OFFSET     		0x70
+
+#define	SRAM_BASE       		0x70
+
+#define	BCTL            		0x84
+#define		ACE             	0x08
+#define		ENABLE          	0x01
+
+#define	DSCOMMAND0      		0x84
+#define		CACHETHEN       	0x80
+#define		DPARCKEN        	0x40
+#define		MPARCKEN        	0x20
+#define		EXTREQLCK       	0x10
+#define		INTSCBRAMSEL    	0x08
+#define		RAMPS           	0x04
+#define		USCBSIZE32      	0x02
+#define		CIOPARCKEN      	0x01
+
+#define	BUSTIME         		0x85
+#define		BOFF            	0xf0
+#define		BON             	0x0f
+
+#define	DSCOMMAND1      		0x85
+#define		DSLATT          	0xfc
+#define		HADDLDSEL1      	0x02
+#define		HADDLDSEL0      	0x01
+
+#define	BUSSPD          		0x86
+#define		DFTHRSH         	0xc0
+#define		DFTHRSH_75      	0x80
+#define		STBOFF          	0x38
+#define		STBON           	0x07
+
+#define	HS_MAILBOX      		0x86
+#define		HOST_MAILBOX    	0xf0
+#define		HOST_TQINPOS    	0x80
+#define		SEQ_MAILBOX     	0x0f
+
+#define	DSPCISTATUS     		0x86
+#define		DFTHRSH_100     	0xc0
+
+#define	HCNTRL          		0x87
+#define		POWRDN          	0x40
+#define		SWINT           	0x10
+#define		IRQMS           	0x08
+#define		PAUSE           	0x04
+#define		INTEN           	0x02
+#define		CHIPRST         	0x01
+#define		CHIPRSTACK      	0x01
+
+#define	HADDR           		0x88
+
+#define	HCNT            		0x8c
+
+#define	SCBPTR          		0x90
+
+#define	INTSTAT         		0x91
+#define		SEQINT_MASK     	0xf1
+#define		OUT_OF_RANGE    	0xe1
+#define		NO_FREE_SCB     	0xd1
+#define		SCB_MISMATCH    	0xc1
+#define		MISSED_BUSFREE  	0xb1
+#define		MKMSG_FAILED    	0xa1
+#define		DATA_OVERRUN    	0x91
+#define		PERR_DETECTED   	0x81
+#define		BAD_STATUS      	0x71
+#define		HOST_MSG_LOOP   	0x61
+#define		PDATA_REINIT    	0x51
+#define		IGN_WIDE_RES    	0x41
+#define		NO_MATCH        	0x31
+#define		PROTO_VIOLATION 	0x21
+#define		SEND_REJECT     	0x11
+#define		INT_PEND        	0x0f
+#define		BAD_PHASE       	0x01
+#define		BRKADRINT       	0x08
+#define		SCSIINT         	0x04
+#define		CMDCMPLT        	0x02
+#define		SEQINT          	0x01
+
+#define	CLRINT          		0x92
+#define		CLRPARERR       	0x10
+#define		CLRBRKADRINT    	0x08
+#define		CLRSCSIINT      	0x04
+#define		CLRCMDINT       	0x02
+#define		CLRSEQINT       	0x01
+
+#define	ERROR           		0x92
+#define		CIOPARERR       	0x80
+#define		PCIERRSTAT      	0x40
+#define		MPARERR         	0x20
+#define		DPARERR         	0x10
+#define		SQPARERR        	0x08
+#define		ILLOPCODE       	0x04
+#define		ILLSADDR        	0x02
+#define		ILLHADDR        	0x01
+
+#define	DFCNTRL         		0x93
+
+#define	DFSTATUS        		0x94
+#define		PRELOAD_AVAIL   	0x80
+#define		DFCACHETH       	0x40
+#define		FIFOQWDEMP      	0x20
+#define		MREQPEND        	0x10
+#define		HDONE           	0x08
+#define		DFTHRESH        	0x04
+#define		FIFOFULL        	0x02
+#define		FIFOEMP         	0x01
+
+#define	DFWADDR         		0x95
+
+#define	DFRADDR         		0x97
+
+#define	DFDAT           		0x99
+
+#define	SCBCNT          		0x9a
+#define		SCBCNT_MASK     	0x1f
+#define		SCBAUTO         	0x80
+
+#define	QINFIFO         		0x9b
+
+#define	QINCNT          		0x9c
+
+#define	QOUTFIFO        		0x9d
+
+#define	CRCCONTROL1     		0x9d
+#define		CRCONSEEN       	0x80
+#define		CRCVALCHKEN     	0x40
+#define		CRCENDCHKEN     	0x20
+#define		CRCREQCHKEN     	0x10
+#define		TARGCRCENDEN    	0x08
+#define		TARGCRCCNTEN    	0x04
+
+#define	QOUTCNT         		0x9e
+
+#define	SCSIPHASE       		0x9e
+#define		DATA_PHASE_MASK 	0x03
+#define		STATUS_PHASE    	0x20
+#define		COMMAND_PHASE   	0x10
+#define		MSG_IN_PHASE    	0x08
+#define		MSG_OUT_PHASE   	0x04
+#define		DATA_IN_PHASE   	0x02
+#define		DATA_OUT_PHASE  	0x01
+
+#define	SFUNCT          		0x9f
+#define		ALT_MODE        	0x80
+
+#define	SCB_BASE        		0xa0
+
+#define	SCB_CDB_PTR     		0xa0
+#define	SCB_RESIDUAL_DATACNT		0xa0
+#define	SCB_CDB_STORE   		0xa0
+
+#define	SCB_RESIDUAL_SGPTR		0xa4
+
+#define	SCB_SCSI_STATUS 		0xa8
+
+#define	SCB_TARGET_PHASES		0xa9
+
+#define	SCB_TARGET_DATA_DIR		0xaa
+
+#define	SCB_TARGET_ITAG 		0xab
+
+#define	SCB_DATAPTR     		0xac
+
+#define	SCB_DATACNT     		0xb0
+#define		SG_HIGH_ADDR_BITS	0x7f
+#define		SG_LAST_SEG     	0x80
+
+#define	SCB_SGPTR       		0xb4
+#define		SG_RESID_VALID  	0x04
+#define		SG_FULL_RESID   	0x02
+#define		SG_LIST_NULL    	0x01
+
+#define	SCB_CONTROL     		0xb8
+#define		SCB_TAG_TYPE    	0x03
+#define		STATUS_RCVD     	0x80
+#define		TARGET_SCB      	0x80
+#define		DISCENB         	0x40
+#define		TAG_ENB         	0x20
+#define		MK_MESSAGE      	0x10
+#define		ULTRAENB        	0x08
+#define		DISCONNECTED    	0x04
+
+#define	SCB_SCSIID      		0xb9
+#define		TID             	0xf0
+#define		TWIN_TID        	0x70
+#define		OID             	0x0f
+#define		TWIN_CHNLB      	0x80
+
+#define	SCB_LUN         		0xba
+#define		LID             	0x3f
+#define		SCB_XFERLEN_ODD 	0x80
+
+#define	SCB_TAG         		0xbb
+
+#define	SCB_CDB_LEN     		0xbc
+
+#define	SCB_SCSIRATE    		0xbd
+
+#define	SCB_SCSIOFFSET  		0xbe
+
+#define	SCB_NEXT        		0xbf
+
+#define	SCB_64_SPARE    		0xc0
+
+#define	SEECTL_2840     		0xc0
+#define		CS_2840         	0x04
+#define		CK_2840         	0x02
+#define		DO_2840         	0x01
+
+#define	STATUS_2840     		0xc1
+#define		BIOS_SEL        	0x60
+#define		ADSEL           	0x1e
+#define		EEPROM_TF       	0x80
+#define		DI_2840         	0x01
+
+#define	SCB_64_BTT      		0xd0
+
+#define	CCHADDR         		0xe0
+
+#define	CCHCNT          		0xe8
+
+#define	CCSGRAM         		0xe9
+
+#define	CCSGADDR        		0xea
+
+#define	CCSGCTL         		0xeb
+#define		CCSGDONE        	0x80
+#define		CCSGEN          	0x08
+#define		SG_FETCH_NEEDED 	0x02
+#define		CCSGRESET       	0x01
+
+#define	CCSCBRAM        		0xec
+
+#define	CCSCBADDR       		0xed
+
+#define	CCSCBCTL        		0xee
+#define		CCSCBDONE       	0x80
+#define		ARRDONE         	0x40
+#define		CCARREN         	0x10
+#define		CCSCBEN         	0x08
+#define		CCSCBDIR        	0x04
+#define		CCSCBRESET      	0x01
+
+#define	CCSCBCNT        		0xef
+
+#define	SCBBADDR        		0xf0
+
+#define	CCSCBPTR        		0xf1
+
+#define	HNSCB_QOFF      		0xf4
+
+#define	SNSCB_QOFF      		0xf6
+
+#define	SDSCB_QOFF      		0xf8
+
+#define	QOFF_CTLSTA     		0xfa
+#define		SCB_QSIZE       	0x07
+#define		SCB_QSIZE_256   	0x06
+#define		SCB_AVAIL       	0x40
+#define		SNSCB_ROLLOVER  	0x20
+#define		SDSCB_ROLLOVER  	0x10
+
+#define	DFF_THRSH       		0xfb
+#define		WR_DFTHRSH      	0x70
+#define		WR_DFTHRSH_MAX  	0x70
+#define		WR_DFTHRSH_90   	0x60
+#define		WR_DFTHRSH_85   	0x50
+#define		WR_DFTHRSH_75   	0x40
+#define		WR_DFTHRSH_63   	0x30
+#define		WR_DFTHRSH_50   	0x20
+#define		WR_DFTHRSH_25   	0x10
+#define		RD_DFTHRSH      	0x07
+#define		RD_DFTHRSH_MAX  	0x07
+#define		RD_DFTHRSH_90   	0x06
+#define		RD_DFTHRSH_85   	0x05
+#define		RD_DFTHRSH_75   	0x04
+#define		RD_DFTHRSH_63   	0x03
+#define		RD_DFTHRSH_50   	0x02
+#define		RD_DFTHRSH_25   	0x01
+#define		RD_DFTHRSH_MIN  	0x00
+#define		WR_DFTHRSH_MIN  	0x00
+
+#define	SG_CACHE_SHADOW 		0xfc
+#define		SG_ADDR_MASK    	0xf8
+#define		LAST_SEG        	0x02
+#define		LAST_SEG_DONE   	0x01
+
+#define	SG_CACHE_PRE    		0xfc
+
+
+#define	MAX_OFFSET_ULTRA2	0x7f
+#define	MAX_OFFSET_16BIT	0x08
+#define	BUS_8_BIT	0x00
+#define	TARGET_CMD_CMPLT	0xfe
+#define	STATUS_QUEUE_FULL	0x28
+#define	STATUS_BUSY	0x08
+#define	MAX_OFFSET_8BIT	0x0f
+#define	BUS_32_BIT	0x02
+#define	CCSGADDR_MAX	0x80
+#define	TID_SHIFT	0x04
+#define	SCB_DOWNLOAD_SIZE_64	0x30
+#define	HOST_MAILBOX_SHIFT	0x04
+#define	CMD_GROUP_CODE_SHIFT	0x05
+#define	CCSGRAM_MAXSEGS	0x10
+#define	SCB_LIST_NULL	0xff
+#define	SG_SIZEOF	0x08
+#define	SCB_DOWNLOAD_SIZE	0x20
+#define	SEQ_MAILBOX_SHIFT	0x00
+#define	TARGET_DATA_IN	0x01
+#define	HOST_MSG	0xff
+#define	MAX_OFFSET	0x7f
+#define	BUS_16_BIT	0x01
+#define	SCB_UPLOAD_SIZE	0x20
+#define	STACK_SIZE	0x04
+
+
+/* Downloaded Constant Definitions */
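+/*
+ * Note: these values are indices into the table of constants the driver
+ * computes at load time and patches into sequencer instructions that
+ * reference downloaded constants (DOWNLOAD_CONST_COUNT entries in all).
+ */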
+#define	INVERTED_CACHESIZE_MASK	0x03
+#define	SG_PREFETCH_ADDR_MASK	0x06
+#define	SG_PREFETCH_ALIGN_MASK	0x05
+#define	QOUTFIFO_OFFSET	0x00
+#define	SG_PREFETCH_CNT	0x04
+#define	CACHESIZE_MASK	0x02
+#define	QINFIFO_OFFSET	0x01
+#define	DOWNLOAD_CONST_COUNT	0x07
+
+
+/* Exported Labels */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
new file mode 100644
index 0000000..9c71377
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
@@ -0,0 +1,1681 @@
+/*
+ * DO NOT EDIT - This file is automatically generated
+ *		 from the following source files:
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $
+ */
+
+#include "aic7xxx_osm.h"
+
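+/*
+ * Each parse table below maps a register's bit-field names to their
+ * value/mask pairs.  The ahc_*_print() helpers hand their table (or NULL
+ * for registers with no named fields), the entry count, the register's
+ * name and address, and the current value to ahc_print_register(), which
+ * emits a human-readable dump of the decoded fields.
+ */
+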
+static ahc_reg_parse_entry_t SCSISEQ_parse_table[] = {
+	{ "SCSIRSTO",		0x01, 0x01 },
+	{ "ENAUTOATNP",		0x02, 0x02 },
+	{ "ENAUTOATNI",		0x04, 0x04 },
+	{ "ENAUTOATNO",		0x08, 0x08 },
+	{ "ENRSELI",		0x10, 0x10 },
+	{ "ENSELI",		0x20, 0x20 },
+	{ "ENSELO",		0x40, 0x40 },
+	{ "TEMODE",		0x80, 0x80 }
+};
+
+int
+ahc_scsiseq_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCSISEQ_parse_table, 8, "SCSISEQ",
+	    0x00, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SXFRCTL0_parse_table[] = {
+	{ "CLRCHN",		0x02, 0x02 },
+	{ "SCAMEN",		0x04, 0x04 },
+	{ "SPIOEN",		0x08, 0x08 },
+	{ "CLRSTCNT",		0x10, 0x10 },
+	{ "FAST20",		0x20, 0x20 },
+	{ "DFPEXP",		0x40, 0x40 },
+	{ "DFON",		0x80, 0x80 }
+};
+
+int
+ahc_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SXFRCTL0_parse_table, 7, "SXFRCTL0",
+	    0x01, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = {
+	{ "STPWEN",		0x01, 0x01 },
+	{ "ACTNEGEN",		0x02, 0x02 },
+	{ "ENSTIMER",		0x04, 0x04 },
+	{ "ENSPCHK",		0x20, 0x20 },
+	{ "SWRAPEN",		0x40, 0x40 },
+	{ "BITBUCKET",		0x80, 0x80 },
+	{ "STIMESEL",		0x18, 0x18 }
+};
+
+int
+ahc_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SXFRCTL1_parse_table, 7, "SXFRCTL1",
+	    0x02, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCSISIGO_parse_table[] = {
+	{ "ACKO",		0x01, 0x01 },
+	{ "REQO",		0x02, 0x02 },
+	{ "BSYO",		0x04, 0x04 },
+	{ "SELO",		0x08, 0x08 },
+	{ "ATNO",		0x10, 0x10 },
+	{ "MSGO",		0x20, 0x20 },
+	{ "IOO",		0x40, 0x40 },
+	{ "CDO",		0x80, 0x80 },
+	{ "P_DATAOUT",		0x00, 0x00 },
+	{ "P_DATAIN",		0x40, 0x40 },
+	{ "P_COMMAND",		0x80, 0x80 },
+	{ "P_MESGOUT",		0xa0, 0xa0 },
+	{ "P_STATUS",		0xc0, 0xc0 },
+	{ "PHASE_MASK",		0xe0, 0xe0 },
+	{ "P_MESGIN",		0xe0, 0xe0 }
+};
+
+int
+ahc_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCSISIGO_parse_table, 15, "SCSISIGO",
+	    0x03, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCSISIGI_parse_table[] = {
+	{ "ACKI",		0x01, 0x01 },
+	{ "REQI",		0x02, 0x02 },
+	{ "BSYI",		0x04, 0x04 },
+	{ "SELI",		0x08, 0x08 },
+	{ "ATNI",		0x10, 0x10 },
+	{ "MSGI",		0x20, 0x20 },
+	{ "IOI",		0x40, 0x40 },
+	{ "CDI",		0x80, 0x80 },
+	{ "P_DATAOUT",		0x00, 0x00 },
+	{ "P_DATAOUT_DT",	0x20, 0x20 },
+	{ "P_DATAIN",		0x40, 0x40 },
+	{ "P_DATAIN_DT",	0x60, 0x60 },
+	{ "P_COMMAND",		0x80, 0x80 },
+	{ "P_MESGOUT",		0xa0, 0xa0 },
+	{ "P_STATUS",		0xc0, 0xc0 },
+	{ "PHASE_MASK",		0xe0, 0xe0 },
+	{ "P_MESGIN",		0xe0, 0xe0 }
+};
+
+int
+ahc_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCSISIGI_parse_table, 17, "SCSISIGI",
+	    0x03, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCSIRATE_parse_table[] = {
+	{ "SINGLE_EDGE",	0x10, 0x10 },
+	{ "ENABLE_CRC",		0x40, 0x40 },
+	{ "WIDEXFER",		0x80, 0x80 },
+	{ "SXFR_ULTRA2",	0x0f, 0x0f },
+	{ "SOFS",		0x0f, 0x0f },
+	{ "SXFR",		0x70, 0x70 }
+};
+
+int
+ahc_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCSIRATE_parse_table, 6, "SCSIRATE",
+	    0x04, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCSIID_parse_table[] = {
+	{ "TWIN_CHNLB",		0x80, 0x80 },
+	{ "OID",		0x0f, 0x0f },
+	{ "TWIN_TID",		0x70, 0x70 },
+	{ "SOFS_ULTRA2",	0x7f, 0x7f },
+	{ "TID",		0xf0, 0xf0 }
+};
+
+int
+ahc_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCSIID_parse_table, 5, "SCSIID",
+	    0x05, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scsidatl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCSIDATL",
+	    0x06, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scsidath_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCSIDATH",
+	    0x07, regvalue, cur_col, wrap));
+}
+
+int
+ahc_stcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "STCNT",
+	    0x08, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = {
+	{ "DIS_MSGIN_DUALEDGE",	0x01, 0x01 },
+	{ "AUTO_MSGOUT_DE",	0x02, 0x02 },
+	{ "SCSIDATL_IMGEN",	0x04, 0x04 },
+	{ "EXPPHASEDIS",	0x08, 0x08 },
+	{ "BUSFREEREV",		0x10, 0x10 },
+	{ "ATNMGMNTEN",		0x20, 0x20 },
+	{ "AUTOACKEN",		0x40, 0x40 },
+	{ "AUTORATEEN",		0x80, 0x80 },
+	{ "OPTIONMODE_DEFAULTS",0x03, 0x03 }
+};
+
+int
+ahc_optionmode_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(OPTIONMODE_parse_table, 9, "OPTIONMODE",
+	    0x08, regvalue, cur_col, wrap));
+}
+
+int
+ahc_targcrccnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "TARGCRCCNT",
+	    0x0a, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t CLRSINT0_parse_table[] = {
+	{ "CLRSPIORDY",		0x02, 0x02 },
+	{ "CLRSWRAP",		0x08, 0x08 },
+	{ "CLRIOERR",		0x08, 0x08 },
+	{ "CLRSELINGO",		0x10, 0x10 },
+	{ "CLRSELDI",		0x20, 0x20 },
+	{ "CLRSELDO",		0x40, 0x40 }
+};
+
+int
+ahc_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(CLRSINT0_parse_table, 6, "CLRSINT0",
+	    0x0b, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SSTAT0_parse_table[] = {
+	{ "DMADONE",		0x01, 0x01 },
+	{ "SPIORDY",		0x02, 0x02 },
+	{ "SDONE",		0x04, 0x04 },
+	{ "SWRAP",		0x08, 0x08 },
+	{ "IOERR",		0x08, 0x08 },
+	{ "SELINGO",		0x10, 0x10 },
+	{ "SELDI",		0x20, 0x20 },
+	{ "SELDO",		0x40, 0x40 },
+	{ "TARGET",		0x80, 0x80 }
+};
+
+int
+ahc_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SSTAT0_parse_table, 9, "SSTAT0",
+	    0x0b, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t CLRSINT1_parse_table[] = {
+	{ "CLRREQINIT",		0x01, 0x01 },
+	{ "CLRPHASECHG",	0x02, 0x02 },
+	{ "CLRSCSIPERR",	0x04, 0x04 },
+	{ "CLRBUSFREE",		0x08, 0x08 },
+	{ "CLRSCSIRSTI",	0x20, 0x20 },
+	{ "CLRATNO",		0x40, 0x40 },
+	{ "CLRSELTIMEO",	0x80, 0x80 }
+};
+
+int
+ahc_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
+	    0x0c, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SSTAT1_parse_table[] = {
+	{ "REQINIT",		0x01, 0x01 },
+	{ "PHASECHG",		0x02, 0x02 },
+	{ "SCSIPERR",		0x04, 0x04 },
+	{ "BUSFREE",		0x08, 0x08 },
+	{ "PHASEMIS",		0x10, 0x10 },
+	{ "SCSIRSTI",		0x20, 0x20 },
+	{ "ATNTARG",		0x40, 0x40 },
+	{ "SELTO",		0x80, 0x80 }
+};
+
+int
+ahc_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SSTAT1_parse_table, 8, "SSTAT1",
+	    0x0c, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SSTAT2_parse_table[] = {
+	{ "DUAL_EDGE_ERR",	0x01, 0x01 },
+	{ "CRCREQERR",		0x02, 0x02 },
+	{ "CRCENDERR",		0x04, 0x04 },
+	{ "CRCVALERR",		0x08, 0x08 },
+	{ "EXP_ACTIVE",		0x10, 0x10 },
+	{ "SHVALID",		0x40, 0x40 },
+	{ "OVERRUN",		0x80, 0x80 },
+	{ "SFCNT",		0x1f, 0x1f }
+};
+
+int
+ahc_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SSTAT2_parse_table, 8, "SSTAT2",
+	    0x0d, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SSTAT3_parse_table[] = {
+	{ "OFFCNT",		0x0f, 0x0f },
+	{ "U2OFFCNT",		0x7f, 0x7f },
+	{ "SCSICNT",		0xf0, 0xf0 }
+};
+
+int
+ahc_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SSTAT3_parse_table, 3, "SSTAT3",
+	    0x0e, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = {
+	{ "OID",		0x0f, 0x0f },
+	{ "TID",		0xf0, 0xf0 }
+};
+
+int
+ahc_scsiid_ultra2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCSIID_ULTRA2_parse_table, 2, "SCSIID_ULTRA2",
+	    0x0f, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SIMODE0_parse_table[] = {
+	{ "ENDMADONE",		0x01, 0x01 },
+	{ "ENSPIORDY",		0x02, 0x02 },
+	{ "ENSDONE",		0x04, 0x04 },
+	{ "ENSWRAP",		0x08, 0x08 },
+	{ "ENIOERR",		0x08, 0x08 },
+	{ "ENSELINGO",		0x10, 0x10 },
+	{ "ENSELDI",		0x20, 0x20 },
+	{ "ENSELDO",		0x40, 0x40 }
+};
+
+int
+ahc_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SIMODE0_parse_table, 8, "SIMODE0",
+	    0x10, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SIMODE1_parse_table[] = {
+	{ "ENREQINIT",		0x01, 0x01 },
+	{ "ENPHASECHG",		0x02, 0x02 },
+	{ "ENSCSIPERR",		0x04, 0x04 },
+	{ "ENBUSFREE",		0x08, 0x08 },
+	{ "ENPHASEMIS",		0x10, 0x10 },
+	{ "ENSCSIRST",		0x20, 0x20 },
+	{ "ENATNTARG",		0x40, 0x40 },
+	{ "ENSELTIMO",		0x80, 0x80 }
+};
+
+int
+ahc_simode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SIMODE1_parse_table, 8, "SIMODE1",
+	    0x11, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scsibusl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCSIBUSL",
+	    0x12, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scsibush_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCSIBUSH",
+	    0x13, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SXFRCTL2_parse_table[] = {
+	{ "CMDDMAEN",		0x08, 0x08 },
+	{ "AUTORSTDIS",		0x10, 0x10 },
+	{ "ASYNC_SETUP",	0x07, 0x07 }
+};
+
+int
+ahc_sxfrctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SXFRCTL2_parse_table, 3, "SXFRCTL2",
+	    0x13, regvalue, cur_col, wrap));
+}
+
+int
+ahc_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SHADDR",
+	    0x14, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SELTIMER_parse_table[] = {
+	{ "STAGE1",		0x01, 0x01 },
+	{ "STAGE2",		0x02, 0x02 },
+	{ "STAGE3",		0x04, 0x04 },
+	{ "STAGE4",		0x08, 0x08 },
+	{ "STAGE5",		0x10, 0x10 },
+	{ "STAGE6",		0x20, 0x20 }
+};
+
+int
+ahc_seltimer_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SELTIMER_parse_table, 6, "SELTIMER",
+	    0x18, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SELID_parse_table[] = {
+	{ "ONEBIT",		0x08, 0x08 },
+	{ "SELID_MASK",		0xf0, 0xf0 }
+};
+
+int
+ahc_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SELID_parse_table, 2, "SELID",
+	    0x19, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCAMCTL_parse_table[] = {
+	{ "DFLTTID",		0x10, 0x10 },
+	{ "ALTSTIM",		0x20, 0x20 },
+	{ "CLRSCAMSELID",	0x40, 0x40 },
+	{ "ENSCAMSELO",		0x80, 0x80 },
+	{ "SCAMLVL",		0x03, 0x03 }
+};
+
+int
+ahc_scamctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCAMCTL_parse_table, 5, "SCAMCTL",
+	    0x1a, regvalue, cur_col, wrap));
+}
+
+int
+ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "TARGID",
+	    0x1b, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SPIOCAP_parse_table[] = {
+	{ "SSPIOCPS",		0x01, 0x01 },
+	{ "ROM",		0x02, 0x02 },
+	{ "EEPROM",		0x04, 0x04 },
+	{ "SEEPROM",		0x08, 0x08 },
+	{ "EXT_BRDCTL",		0x10, 0x10 },
+	{ "SOFTCMDEN",		0x20, 0x20 },
+	{ "SOFT0",		0x40, 0x40 },
+	{ "SOFT1",		0x80, 0x80 }
+};
+
+int
+ahc_spiocap_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SPIOCAP_parse_table, 8, "SPIOCAP",
+	    0x1b, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t BRDCTL_parse_table[] = {
+	{ "BRDCTL0",		0x01, 0x01 },
+	{ "BRDSTB_ULTRA2",	0x01, 0x01 },
+	{ "BRDCTL1",		0x02, 0x02 },
+	{ "BRDRW_ULTRA2",	0x02, 0x02 },
+	{ "BRDRW",		0x04, 0x04 },
+	{ "BRDDAT2",		0x04, 0x04 },
+	{ "BRDCS",		0x08, 0x08 },
+	{ "BRDDAT3",		0x08, 0x08 },
+	{ "BRDSTB",		0x10, 0x10 },
+	{ "BRDDAT4",		0x10, 0x10 },
+	{ "BRDDAT5",		0x20, 0x20 },
+	{ "BRDDAT6",		0x40, 0x40 },
+	{ "BRDDAT7",		0x80, 0x80 }
+};
+
+int
+ahc_brdctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(BRDCTL_parse_table, 13, "BRDCTL",
+	    0x1d, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SEECTL_parse_table[] = {
+	{ "SEEDI",		0x01, 0x01 },
+	{ "SEEDO",		0x02, 0x02 },
+	{ "SEECK",		0x04, 0x04 },
+	{ "SEECS",		0x08, 0x08 },
+	{ "SEERDY",		0x10, 0x10 },
+	{ "SEEMS",		0x20, 0x20 },
+	{ "EXTARBREQ",		0x40, 0x40 },
+	{ "EXTARBACK",		0x80, 0x80 }
+};
+
+int
+ahc_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SEECTL_parse_table, 8, "SEECTL",
+	    0x1e, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SBLKCTL_parse_table[] = {
+	{ "XCVR",		0x01, 0x01 },
+	{ "SELWIDE",		0x02, 0x02 },
+	{ "ENAB20",		0x04, 0x04 },
+	{ "SELBUSB",		0x08, 0x08 },
+	{ "ENAB40",		0x08, 0x08 },
+	{ "AUTOFLUSHDIS",	0x20, 0x20 },
+	{ "DIAGLEDON",		0x40, 0x40 },
+	{ "DIAGLEDEN",		0x80, 0x80 }
+};
+
+int
+ahc_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SBLKCTL_parse_table, 8, "SBLKCTL",
+	    0x1f, regvalue, cur_col, wrap));
+}
+
+int
+ahc_busy_targets_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "BUSY_TARGETS",
+	    0x20, regvalue, cur_col, wrap));
+}
+
+int
+ahc_ultra_enb_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "ULTRA_ENB",
+	    0x30, regvalue, cur_col, wrap));
+}
+
+int
+ahc_disc_dsb_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "DISC_DSB",
+	    0x32, regvalue, cur_col, wrap));
+}
+
+int
+ahc_cmdsize_table_tail_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "CMDSIZE_TABLE_TAIL",
+	    0x34, regvalue, cur_col, wrap));
+}
+
+int
+ahc_mwi_residual_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "MWI_RESIDUAL",
+	    0x38, regvalue, cur_col, wrap));
+}
+
+int
+ahc_next_queued_scb_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "NEXT_QUEUED_SCB",
+	    0x39, regvalue, cur_col, wrap));
+}
+
+int
+ahc_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "MSG_OUT",
+	    0x3a, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = {
+	{ "FIFORESET",		0x01, 0x01 },
+	{ "FIFOFLUSH",		0x02, 0x02 },
+	{ "DIRECTION",		0x04, 0x04 },
+	{ "HDMAEN",		0x08, 0x08 },
+	{ "HDMAENACK",		0x08, 0x08 },
+	{ "SDMAEN",		0x10, 0x10 },
+	{ "SDMAENACK",		0x10, 0x10 },
+	{ "SCSIEN",		0x20, 0x20 },
+	{ "WIDEODD",		0x40, 0x40 },
+	{ "PRELOADEN",		0x80, 0x80 }
+};
+
+int
+ahc_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(DMAPARAMS_parse_table, 10, "DMAPARAMS",
+	    0x3b, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
+	{ "NO_DISCONNECT",	0x01, 0x01 },
+	{ "SPHASE_PENDING",	0x02, 0x02 },
+	{ "DPHASE_PENDING",	0x04, 0x04 },
+	{ "CMDPHASE_PENDING",	0x08, 0x08 },
+	{ "TARG_CMD_PENDING",	0x10, 0x10 },
+	{ "DPHASE",		0x20, 0x20 },
+	{ "NO_CDB_SENT",	0x40, 0x40 },
+	{ "TARGET_CMD_IS_TAGGED",0x40, 0x40 },
+	{ "NOT_IDENTIFIED",	0x80, 0x80 }
+};
+
+int
+ahc_seq_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SEQ_FLAGS_parse_table, 9, "SEQ_FLAGS",
+	    0x3c, regvalue, cur_col, wrap));
+}
+
+int
+ahc_saved_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SAVED_SCSIID",
+	    0x3d, regvalue, cur_col, wrap));
+}
+
+int
+ahc_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SAVED_LUN",
+	    0x3e, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t LASTPHASE_parse_table[] = {
+	{ "MSGI",		0x20, 0x20 },
+	{ "IOI",		0x40, 0x40 },
+	{ "CDI",		0x80, 0x80 },
+	{ "P_DATAOUT",		0x00, 0x00 },
+	{ "P_BUSFREE",		0x01, 0x01 },
+	{ "P_DATAIN",		0x40, 0x40 },
+	{ "P_COMMAND",		0x80, 0x80 },
+	{ "P_MESGOUT",		0xa0, 0xa0 },
+	{ "P_STATUS",		0xc0, 0xc0 },
+	{ "PHASE_MASK",		0xe0, 0xe0 },
+	{ "P_MESGIN",		0xe0, 0xe0 }
+};
+
+int
+ahc_lastphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(LASTPHASE_parse_table, 11, "LASTPHASE",
+	    0x3f, regvalue, cur_col, wrap));
+}
+
+int
+ahc_waiting_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "WAITING_SCBH",
+	    0x40, regvalue, cur_col, wrap));
+}
+
+int
+ahc_disconnected_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "DISCONNECTED_SCBH",
+	    0x41, regvalue, cur_col, wrap));
+}
+
+int
+ahc_free_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "FREE_SCBH",
+	    0x42, regvalue, cur_col, wrap));
+}
+
+int
+ahc_complete_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "COMPLETE_SCBH",
+	    0x43, regvalue, cur_col, wrap));
+}
+
+int
+ahc_hscb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "HSCB_ADDR",
+	    0x44, regvalue, cur_col, wrap));
+}
+
+int
+ahc_shared_data_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SHARED_DATA_ADDR",
+	    0x48, regvalue, cur_col, wrap));
+}
+
+int
+ahc_kernel_qinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "KERNEL_QINPOS",
+	    0x4c, regvalue, cur_col, wrap));
+}
+
+int
+ahc_qinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "QINPOS",
+	    0x4d, regvalue, cur_col, wrap));
+}
+
+int
+ahc_qoutpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "QOUTPOS",
+	    0x4e, regvalue, cur_col, wrap));
+}
+
+int
+ahc_kernel_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "KERNEL_TQINPOS",
+	    0x4f, regvalue, cur_col, wrap));
+}
+
+int
+ahc_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "TQINPOS",
+	    0x50, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t ARG_1_parse_table[] = {
+	{ "CONT_TARG_SESSION",	0x02, 0x02 },
+	{ "CONT_MSG_LOOP",	0x04, 0x04 },
+	{ "EXIT_MSG_LOOP",	0x08, 0x08 },
+	{ "MSGOUT_PHASEMIS",	0x10, 0x10 },
+	{ "SEND_REJ",		0x20, 0x20 },
+	{ "SEND_SENSE",		0x40, 0x40 },
+	{ "SEND_MSG",		0x80, 0x80 }
+};
+
+int
+ahc_arg_1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(ARG_1_parse_table, 7, "ARG_1",
+	    0x51, regvalue, cur_col, wrap));
+}
+
+int
+ahc_arg_2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "ARG_2",
+	    0x52, regvalue, cur_col, wrap));
+}
+
+int
+ahc_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "LAST_MSG",
+	    0x53, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
+	{ "ENAUTOATNP",		0x02, 0x02 },
+	{ "ENAUTOATNI",		0x04, 0x04 },
+	{ "ENAUTOATNO",		0x08, 0x08 },
+	{ "ENRSELI",		0x10, 0x10 },
+	{ "ENSELI",		0x20, 0x20 },
+	{ "ENSELO",		0x40, 0x40 }
+};
+
+int
+ahc_scsiseq_template_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCSISEQ_TEMPLATE_parse_table, 6, "SCSISEQ_TEMPLATE",
+	    0x54, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = {
+	{ "HA_274_EXTENDED_TRANS",0x01, 0x01 }
+};
+
+int
+ahc_ha_274_biosglobal_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(HA_274_BIOSGLOBAL_parse_table, 1, "HA_274_BIOSGLOBAL",
+	    0x56, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
+	{ "SCB_DMA",		0x01, 0x01 },
+	{ "TARGET_MSG_PENDING",	0x02, 0x02 }
+};
+
+int
+ahc_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SEQ_FLAGS2_parse_table, 2, "SEQ_FLAGS2",
+	    0x57, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCSICONF_parse_table[] = {
+	{ "ENSPCHK",		0x20, 0x20 },
+	{ "RESET_SCSI",		0x40, 0x40 },
+	{ "TERM_ENB",		0x80, 0x80 },
+	{ "HSCSIID",		0x07, 0x07 },
+	{ "HWSCSIID",		0x0f, 0x0f }
+};
+
+int
+ahc_scsiconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCSICONF_parse_table, 5, "SCSICONF",
+	    0x5a, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t INTDEF_parse_table[] = {
+	{ "EDGE_TRIG",		0x80, 0x80 },
+	{ "VECTOR",		0x0f, 0x0f }
+};
+
+int
+ahc_intdef_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(INTDEF_parse_table, 2, "INTDEF",
+	    0x5c, regvalue, cur_col, wrap));
+}
+
+int
+ahc_hostconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "HOSTCONF",
+	    0x5d, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = {
+	{ "CHANNEL_B_PRIMARY",	0x08, 0x08 },
+	{ "BIOSMODE",		0x30, 0x30 },
+	{ "BIOSDISABLED",	0x30, 0x30 }
+};
+
+int
+ahc_ha_274_biosctrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(HA_274_BIOSCTRL_parse_table, 3, "HA_274_BIOSCTRL",
+	    0x5f, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SEQCTL_parse_table[] = {
+	{ "LOADRAM",		0x01, 0x01 },
+	{ "SEQRESET",		0x02, 0x02 },
+	{ "STEP",		0x04, 0x04 },
+	{ "BRKADRINTEN",	0x08, 0x08 },
+	{ "FASTMODE",		0x10, 0x10 },
+	{ "FAILDIS",		0x20, 0x20 },
+	{ "PAUSEDIS",		0x40, 0x40 },
+	{ "PERRORDIS",		0x80, 0x80 }
+};
+
+int
+ahc_seqctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SEQCTL_parse_table, 8, "SEQCTL",
+	    0x60, regvalue, cur_col, wrap));
+}
+
+int
+ahc_seqram_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SEQRAM",
+	    0x61, regvalue, cur_col, wrap));
+}
+
+int
+ahc_seqaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SEQADDR0",
+	    0x62, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SEQADDR1_parse_table[] = {
+	{ "SEQADDR1_MASK",	0x01, 0x01 }
+};
+
+int
+ahc_seqaddr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SEQADDR1_parse_table, 1, "SEQADDR1",
+	    0x63, regvalue, cur_col, wrap));
+}
+
+int
+ahc_accum_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "ACCUM",
+	    0x64, regvalue, cur_col, wrap));
+}
+
+int
+ahc_sindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SINDEX",
+	    0x65, regvalue, cur_col, wrap));
+}
+
+int
+ahc_dindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "DINDEX",
+	    0x66, regvalue, cur_col, wrap));
+}
+
+int
+ahc_allones_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "ALLONES",
+	    0x69, regvalue, cur_col, wrap));
+}
+
+int
+ahc_allzeros_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "ALLZEROS",
+	    0x6a, regvalue, cur_col, wrap));
+}
+
+int
+ahc_none_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "NONE",
+	    0x6a, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t FLAGS_parse_table[] = {
+	{ "CARRY",		0x01, 0x01 },
+	{ "ZERO",		0x02, 0x02 }
+};
+
+int
+ahc_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(FLAGS_parse_table, 2, "FLAGS",
+	    0x6b, regvalue, cur_col, wrap));
+}
+
+int
+ahc_sindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SINDIR",
+	    0x6c, regvalue, cur_col, wrap));
+}
+
+int
+ahc_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "DINDIR",
+	    0x6d, regvalue, cur_col, wrap));
+}
+
+int
+ahc_function1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "FUNCTION1",
+	    0x6e, regvalue, cur_col, wrap));
+}
+
+int
+ahc_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "STACK",
+	    0x6f, regvalue, cur_col, wrap));
+}
+
+int
+ahc_targ_offset_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "TARG_OFFSET",
+	    0x70, regvalue, cur_col, wrap));
+}
+
+int
+ahc_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SRAM_BASE",
+	    0x70, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t BCTL_parse_table[] = {
+	{ "ENABLE",		0x01, 0x01 },
+	{ "ACE",		0x08, 0x08 }
+};
+
+int
+ahc_bctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(BCTL_parse_table, 2, "BCTL",
+	    0x84, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
+	{ "CIOPARCKEN",		0x01, 0x01 },
+	{ "USCBSIZE32",		0x02, 0x02 },
+	{ "RAMPS",		0x04, 0x04 },
+	{ "INTSCBRAMSEL",	0x08, 0x08 },
+	{ "EXTREQLCK",		0x10, 0x10 },
+	{ "MPARCKEN",		0x20, 0x20 },
+	{ "DPARCKEN",		0x40, 0x40 },
+	{ "CACHETHEN",		0x80, 0x80 }
+};
+
+int
+ahc_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(DSCOMMAND0_parse_table, 8, "DSCOMMAND0",
+	    0x84, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t BUSTIME_parse_table[] = {
+	{ "BON",		0x0f, 0x0f },
+	{ "BOFF",		0xf0, 0xf0 }
+};
+
+int
+ahc_bustime_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(BUSTIME_parse_table, 2, "BUSTIME",
+	    0x85, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = {
+	{ "HADDLDSEL0",		0x01, 0x01 },
+	{ "HADDLDSEL1",		0x02, 0x02 },
+	{ "DSLATT",		0xfc, 0xfc }
+};
+
+int
+ahc_dscommand1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(DSCOMMAND1_parse_table, 3, "DSCOMMAND1",
+	    0x85, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t BUSSPD_parse_table[] = {
+	{ "STBON",		0x07, 0x07 },
+	{ "STBOFF",		0x38, 0x38 },
+	{ "DFTHRSH_75",		0x80, 0x80 },
+	{ "DFTHRSH",		0xc0, 0xc0 },
+	{ "DFTHRSH_100",	0xc0, 0xc0 }
+};
+
+int
+ahc_busspd_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(BUSSPD_parse_table, 5, "BUSSPD",
+	    0x86, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
+	{ "SEQ_MAILBOX",	0x0f, 0x0f },
+	{ "HOST_TQINPOS",	0x80, 0x80 },
+	{ "HOST_MAILBOX",	0xf0, 0xf0 }
+};
+
+int
+ahc_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(HS_MAILBOX_parse_table, 3, "HS_MAILBOX",
+	    0x86, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = {
+	{ "DFTHRSH_100",	0xc0, 0xc0 }
+};
+
+int
+ahc_dspcistatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(DSPCISTATUS_parse_table, 1, "DSPCISTATUS",
+	    0x86, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t HCNTRL_parse_table[] = {
+	{ "CHIPRST",		0x01, 0x01 },
+	{ "CHIPRSTACK",		0x01, 0x01 },
+	{ "INTEN",		0x02, 0x02 },
+	{ "PAUSE",		0x04, 0x04 },
+	{ "IRQMS",		0x08, 0x08 },
+	{ "SWINT",		0x10, 0x10 },
+	{ "POWRDN",		0x40, 0x40 }
+};
+
+int
+ahc_hcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(HCNTRL_parse_table, 7, "HCNTRL",
+	    0x87, regvalue, cur_col, wrap));
+}
+
+int
+ahc_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "HADDR",
+	    0x88, regvalue, cur_col, wrap));
+}
+
+int
+ahc_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "HCNT",
+	    0x8c, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCBPTR",
+	    0x90, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t INTSTAT_parse_table[] = {
+	{ "SEQINT",		0x01, 0x01 },
+	{ "CMDCMPLT",		0x02, 0x02 },
+	{ "SCSIINT",		0x04, 0x04 },
+	{ "BRKADRINT",		0x08, 0x08 },
+	{ "BAD_PHASE",		0x01, 0x01 },
+	{ "INT_PEND",		0x0f, 0x0f },
+	{ "SEND_REJECT",	0x11, 0x11 },
+	{ "PROTO_VIOLATION",	0x21, 0x21 },
+	{ "NO_MATCH",		0x31, 0x31 },
+	{ "IGN_WIDE_RES",	0x41, 0x41 },
+	{ "PDATA_REINIT",	0x51, 0x51 },
+	{ "HOST_MSG_LOOP",	0x61, 0x61 },
+	{ "BAD_STATUS",		0x71, 0x71 },
+	{ "PERR_DETECTED",	0x81, 0x81 },
+	{ "DATA_OVERRUN",	0x91, 0x91 },
+	{ "MKMSG_FAILED",	0xa1, 0xa1 },
+	{ "MISSED_BUSFREE",	0xb1, 0xb1 },
+	{ "SCB_MISMATCH",	0xc1, 0xc1 },
+	{ "NO_FREE_SCB",	0xd1, 0xd1 },
+	{ "OUT_OF_RANGE",	0xe1, 0xe1 },
+	{ "SEQINT_MASK",	0xf1, 0xf1 }
+};
+
+int
+ahc_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(INTSTAT_parse_table, 21, "INTSTAT",
+	    0x91, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t CLRINT_parse_table[] = {
+	{ "CLRSEQINT",		0x01, 0x01 },
+	{ "CLRCMDINT",		0x02, 0x02 },
+	{ "CLRSCSIINT",		0x04, 0x04 },
+	{ "CLRBRKADRINT",	0x08, 0x08 },
+	{ "CLRPARERR",		0x10, 0x10 }
+};
+
+int
+ahc_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(CLRINT_parse_table, 5, "CLRINT",
+	    0x92, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t ERROR_parse_table[] = {
+	{ "ILLHADDR",		0x01, 0x01 },
+	{ "ILLSADDR",		0x02, 0x02 },
+	{ "ILLOPCODE",		0x04, 0x04 },
+	{ "SQPARERR",		0x08, 0x08 },
+	{ "DPARERR",		0x10, 0x10 },
+	{ "MPARERR",		0x20, 0x20 },
+	{ "PCIERRSTAT",		0x40, 0x40 },
+	{ "CIOPARERR",		0x80, 0x80 }
+};
+
+int
+ahc_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(ERROR_parse_table, 8, "ERROR",
+	    0x92, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t DFCNTRL_parse_table[] = {
+	{ "FIFORESET",		0x01, 0x01 },
+	{ "FIFOFLUSH",		0x02, 0x02 },
+	{ "DIRECTION",		0x04, 0x04 },
+	{ "HDMAEN",		0x08, 0x08 },
+	{ "HDMAENACK",		0x08, 0x08 },
+	{ "SDMAEN",		0x10, 0x10 },
+	{ "SDMAENACK",		0x10, 0x10 },
+	{ "SCSIEN",		0x20, 0x20 },
+	{ "WIDEODD",		0x40, 0x40 },
+	{ "PRELOADEN",		0x80, 0x80 }
+};
+
+int
+ahc_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(DFCNTRL_parse_table, 10, "DFCNTRL",
+	    0x93, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t DFSTATUS_parse_table[] = {
+	{ "FIFOEMP",		0x01, 0x01 },
+	{ "FIFOFULL",		0x02, 0x02 },
+	{ "DFTHRESH",		0x04, 0x04 },
+	{ "HDONE",		0x08, 0x08 },
+	{ "MREQPEND",		0x10, 0x10 },
+	{ "FIFOQWDEMP",		0x20, 0x20 },
+	{ "DFCACHETH",		0x40, 0x40 },
+	{ "PRELOAD_AVAIL",	0x80, 0x80 }
+};
+
+int
+ahc_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(DFSTATUS_parse_table, 8, "DFSTATUS",
+	    0x94, regvalue, cur_col, wrap));
+}
+
+int
+ahc_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "DFWADDR",
+	    0x95, regvalue, cur_col, wrap));
+}
+
+int
+ahc_dfraddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "DFRADDR",
+	    0x97, regvalue, cur_col, wrap));
+}
+
+int
+ahc_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "DFDAT",
+	    0x99, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCBCNT_parse_table[] = {
+	{ "SCBAUTO",		0x80, 0x80 },
+	{ "SCBCNT_MASK",	0x1f, 0x1f }
+};
+
+int
+ahc_scbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCBCNT_parse_table, 2, "SCBCNT",
+	    0x9a, regvalue, cur_col, wrap));
+}
+
+int
+ahc_qinfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "QINFIFO",
+	    0x9b, regvalue, cur_col, wrap));
+}
+
+int
+ahc_qincnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "QINCNT",
+	    0x9c, regvalue, cur_col, wrap));
+}
+
+int
+ahc_qoutfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "QOUTFIFO",
+	    0x9d, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = {
+	{ "TARGCRCCNTEN",	0x04, 0x04 },
+	{ "TARGCRCENDEN",	0x08, 0x08 },
+	{ "CRCREQCHKEN",	0x10, 0x10 },
+	{ "CRCENDCHKEN",	0x20, 0x20 },
+	{ "CRCVALCHKEN",	0x40, 0x40 },
+	{ "CRCONSEEN",		0x80, 0x80 }
+};
+
+int
+ahc_crccontrol1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(CRCCONTROL1_parse_table, 6, "CRCCONTROL1",
+	    0x9d, regvalue, cur_col, wrap));
+}
+
+int
+ahc_qoutcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "QOUTCNT",
+	    0x9e, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
+	{ "DATA_OUT_PHASE",	0x01, 0x01 },
+	{ "DATA_IN_PHASE",	0x02, 0x02 },
+	{ "MSG_OUT_PHASE",	0x04, 0x04 },
+	{ "MSG_IN_PHASE",	0x08, 0x08 },
+	{ "COMMAND_PHASE",	0x10, 0x10 },
+	{ "STATUS_PHASE",	0x20, 0x20 },
+	{ "DATA_PHASE_MASK",	0x03, 0x03 }
+};
+
+int
+ahc_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCSIPHASE_parse_table, 7, "SCSIPHASE",
+	    0x9e, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SFUNCT_parse_table[] = {
+	{ "ALT_MODE",		0x80, 0x80 }
+};
+
+int
+ahc_sfunct_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SFUNCT_parse_table, 1, "SFUNCT",
+	    0x9f, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_BASE",
+	    0xa0, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_cdb_ptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_CDB_PTR",
+	    0xa0, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_residual_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_RESIDUAL_SGPTR",
+	    0xa4, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_scsi_status_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_SCSI_STATUS",
+	    0xa8, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_target_phases_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_TARGET_PHASES",
+	    0xa9, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_target_data_dir_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_TARGET_DATA_DIR",
+	    0xaa, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_target_itag_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_TARGET_ITAG",
+	    0xab, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_DATAPTR",
+	    0xac, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
+	{ "SG_LAST_SEG",	0x80, 0x80 },
+	{ "SG_HIGH_ADDR_BITS",	0x7f, 0x7f }
+};
+
+int
+ahc_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCB_DATACNT_parse_table, 2, "SCB_DATACNT",
+	    0xb0, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
+	{ "SG_LIST_NULL",	0x01, 0x01 },
+	{ "SG_FULL_RESID",	0x02, 0x02 },
+	{ "SG_RESID_VALID",	0x04, 0x04 }
+};
+
+int
+ahc_scb_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCB_SGPTR_parse_table, 3, "SCB_SGPTR",
+	    0xb4, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
+	{ "DISCONNECTED",	0x04, 0x04 },
+	{ "ULTRAENB",		0x08, 0x08 },
+	{ "MK_MESSAGE",		0x10, 0x10 },
+	{ "TAG_ENB",		0x20, 0x20 },
+	{ "DISCENB",		0x40, 0x40 },
+	{ "TARGET_SCB",		0x80, 0x80 },
+	{ "STATUS_RCVD",	0x80, 0x80 },
+	{ "SCB_TAG_TYPE",	0x03, 0x03 }
+};
+
+int
+ahc_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCB_CONTROL_parse_table, 8, "SCB_CONTROL",
+	    0xb8, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
+	{ "TWIN_CHNLB",		0x80, 0x80 },
+	{ "OID",		0x0f, 0x0f },
+	{ "TWIN_TID",		0x70, 0x70 },
+	{ "TID",		0xf0, 0xf0 }
+};
+
+int
+ahc_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCB_SCSIID_parse_table, 4, "SCB_SCSIID",
+	    0xb9, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SCB_LUN_parse_table[] = {
+	{ "SCB_XFERLEN_ODD",	0x80, 0x80 },
+	{ "LID",		0x3f, 0x3f }
+};
+
+int
+ahc_scb_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SCB_LUN_parse_table, 2, "SCB_LUN",
+	    0xba, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_TAG",
+	    0xbb, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_cdb_len_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_CDB_LEN",
+	    0xbc, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_SCSIRATE",
+	    0xbd, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_scsioffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_SCSIOFFSET",
+	    0xbe, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_next_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_NEXT",
+	    0xbf, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_64_spare_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_64_SPARE",
+	    0xc0, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
+	{ "DO_2840",		0x01, 0x01 },
+	{ "CK_2840",		0x02, 0x02 },
+	{ "CS_2840",		0x04, 0x04 }
+};
+
+int
+ahc_seectl_2840_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SEECTL_2840_parse_table, 3, "SEECTL_2840",
+	    0xc0, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t STATUS_2840_parse_table[] = {
+	{ "DI_2840",		0x01, 0x01 },
+	{ "EEPROM_TF",		0x80, 0x80 },
+	{ "ADSEL",		0x1e, 0x1e },
+	{ "BIOS_SEL",		0x60, 0x60 }
+};
+
+int
+ahc_status_2840_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(STATUS_2840_parse_table, 4, "STATUS_2840",
+	    0xc1, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scb_64_btt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCB_64_BTT",
+	    0xd0, regvalue, cur_col, wrap));
+}
+
+int
+ahc_cchaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "CCHADDR",
+	    0xe0, regvalue, cur_col, wrap));
+}
+
+int
+ahc_cchcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "CCHCNT",
+	    0xe8, regvalue, cur_col, wrap));
+}
+
+int
+ahc_ccsgram_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "CCSGRAM",
+	    0xe9, regvalue, cur_col, wrap));
+}
+
+int
+ahc_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "CCSGADDR",
+	    0xea, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t CCSGCTL_parse_table[] = {
+	{ "CCSGRESET",		0x01, 0x01 },
+	{ "SG_FETCH_NEEDED",	0x02, 0x02 },
+	{ "CCSGEN",		0x08, 0x08 },
+	{ "CCSGDONE",		0x80, 0x80 }
+};
+
+int
+ahc_ccsgctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(CCSGCTL_parse_table, 4, "CCSGCTL",
+	    0xeb, regvalue, cur_col, wrap));
+}
+
+int
+ahc_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "CCSCBRAM",
+	    0xec, regvalue, cur_col, wrap));
+}
+
+int
+ahc_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "CCSCBADDR",
+	    0xed, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = {
+	{ "CCSCBRESET",		0x01, 0x01 },
+	{ "CCSCBDIR",		0x04, 0x04 },
+	{ "CCSCBEN",		0x08, 0x08 },
+	{ "CCARREN",		0x10, 0x10 },
+	{ "ARRDONE",		0x40, 0x40 },
+	{ "CCSCBDONE",		0x80, 0x80 }
+};
+
+int
+ahc_ccscbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(CCSCBCTL_parse_table, 6, "CCSCBCTL",
+	    0xee, regvalue, cur_col, wrap));
+}
+
+int
+ahc_ccscbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "CCSCBCNT",
+	    0xef, regvalue, cur_col, wrap));
+}
+
+int
+ahc_scbbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SCBBADDR",
+	    0xf0, regvalue, cur_col, wrap));
+}
+
+int
+ahc_ccscbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "CCSCBPTR",
+	    0xf1, regvalue, cur_col, wrap));
+}
+
+int
+ahc_hnscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "HNSCB_QOFF",
+	    0xf4, regvalue, cur_col, wrap));
+}
+
+int
+ahc_snscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SNSCB_QOFF",
+	    0xf6, regvalue, cur_col, wrap));
+}
+
+int
+ahc_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(NULL, 0, "SDSCB_QOFF",
+	    0xf8, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
+	{ "SDSCB_ROLLOVER",	0x10, 0x10 },
+	{ "SNSCB_ROLLOVER",	0x20, 0x20 },
+	{ "SCB_AVAIL",		0x40, 0x40 },
+	{ "SCB_QSIZE_256",	0x06, 0x06 },
+	{ "SCB_QSIZE",		0x07, 0x07 }
+};
+
+int
+ahc_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(QOFF_CTLSTA_parse_table, 5, "QOFF_CTLSTA",
+	    0xfa, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = {
+	{ "RD_DFTHRSH_MIN",	0x00, 0x00 },
+	{ "WR_DFTHRSH_MIN",	0x00, 0x00 },
+	{ "RD_DFTHRSH_25",	0x01, 0x01 },
+	{ "RD_DFTHRSH_50",	0x02, 0x02 },
+	{ "RD_DFTHRSH_63",	0x03, 0x03 },
+	{ "RD_DFTHRSH_75",	0x04, 0x04 },
+	{ "RD_DFTHRSH_85",	0x05, 0x05 },
+	{ "RD_DFTHRSH_90",	0x06, 0x06 },
+	{ "RD_DFTHRSH",		0x07, 0x07 },
+	{ "RD_DFTHRSH_MAX",	0x07, 0x07 },
+	{ "WR_DFTHRSH_25",	0x10, 0x10 },
+	{ "WR_DFTHRSH_50",	0x20, 0x20 },
+	{ "WR_DFTHRSH_63",	0x30, 0x30 },
+	{ "WR_DFTHRSH_75",	0x40, 0x40 },
+	{ "WR_DFTHRSH_85",	0x50, 0x50 },
+	{ "WR_DFTHRSH_90",	0x60, 0x60 },
+	{ "WR_DFTHRSH",		0x70, 0x70 },
+	{ "WR_DFTHRSH_MAX",	0x70, 0x70 }
+};
+
+int
+ahc_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(DFF_THRSH_parse_table, 18, "DFF_THRSH",
+	    0xfb, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
+	{ "LAST_SEG_DONE",	0x01, 0x01 },
+	{ "LAST_SEG",		0x02, 0x02 },
+	{ "SG_ADDR_MASK",	0xf8, 0xf8 }
+};
+
+int
+ahc_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SG_CACHE_SHADOW_parse_table, 3, "SG_CACHE_SHADOW",
+	    0xfc, regvalue, cur_col, wrap));
+}
+
+static ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
+	{ "LAST_SEG_DONE",	0x01, 0x01 },
+	{ "LAST_SEG",		0x02, 0x02 },
+	{ "SG_ADDR_MASK",	0xf8, 0xf8 }
+};
+
+int
+ahc_sg_cache_pre_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+	return (ahc_print_register(SG_CACHE_PRE_parse_table, 3, "SG_CACHE_PRE",
+	    0xfc, regvalue, cur_col, wrap));
+}
+
diff --git a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
new file mode 100644
index 0000000..cf41136
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
@@ -0,0 +1,1307 @@
+/*
+ * DO NOT EDIT - This file is automatically generated
+ *		 from the following source files:
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $
+ */
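+
+/*
+ * Assembled sequencer program for the adapter's on-chip sequencer.
+ * Each four-byte group encodes one sequencer instruction; the driver
+ * downloads this image into sequencer RAM (through the SEQRAM port,
+ * with LOADRAM set in SEQCTL) when it initializes the controller.
+ */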
+static uint8_t seqprog[] = {
+	0xb2, 0x00, 0x00, 0x08,
+	0xf7, 0x11, 0x22, 0x08,
+	0x00, 0x65, 0xec, 0x59,
+	0xf7, 0x01, 0x02, 0x08,
+	0xff, 0x6a, 0x24, 0x08,
+	0x40, 0x00, 0x40, 0x68,
+	0x08, 0x1f, 0x3e, 0x10,
+	0x40, 0x00, 0x40, 0x68,
+	0xff, 0x40, 0x3c, 0x60,
+	0x08, 0x1f, 0x3e, 0x10,
+	0x60, 0x0b, 0x42, 0x68,
+	0x40, 0xfa, 0x12, 0x78,
+	0x01, 0x4d, 0xc8, 0x30,
+	0x00, 0x4c, 0x12, 0x70,
+	0x01, 0x39, 0xa2, 0x30,
+	0x00, 0x6a, 0xc0, 0x5e,
+	0x01, 0x51, 0x20, 0x31,
+	0x01, 0x57, 0xae, 0x00,
+	0x0d, 0x6a, 0x76, 0x00,
+	0x00, 0x51, 0x12, 0x5e,
+	0x01, 0x51, 0xc8, 0x30,
+	0x00, 0x39, 0xc8, 0x60,
+	0x00, 0xbb, 0x30, 0x70,
+	0xc1, 0x6a, 0xd8, 0x5e,
+	0x01, 0xbf, 0x72, 0x30,
+	0x01, 0x40, 0x7e, 0x31,
+	0x01, 0x90, 0x80, 0x30,
+	0x01, 0xf6, 0xd4, 0x30,
+	0x01, 0x4d, 0x9a, 0x18,
+	0xfe, 0x57, 0xae, 0x08,
+	0x01, 0x40, 0x20, 0x31,
+	0x00, 0x65, 0xcc, 0x58,
+	0x60, 0x0b, 0x40, 0x78,
+	0x08, 0x6a, 0x18, 0x00,
+	0x08, 0x11, 0x22, 0x00,
+	0x60, 0x0b, 0x00, 0x78,
+	0x40, 0x0b, 0xfa, 0x68,
+	0x80, 0x0b, 0xb6, 0x78,
+	0x20, 0x6a, 0x16, 0x00,
+	0xa4, 0x6a, 0x06, 0x00,
+	0x08, 0x6a, 0x78, 0x00,
+	0x01, 0x50, 0xc8, 0x30,
+	0xe0, 0x6a, 0xcc, 0x00,
+	0x48, 0x6a, 0xfc, 0x5d,
+	0x01, 0x6a, 0xdc, 0x01,
+	0x88, 0x6a, 0xcc, 0x00,
+	0x48, 0x6a, 0xfc, 0x5d,
+	0x01, 0x6a, 0x26, 0x01,
+	0xf0, 0x19, 0x7a, 0x08,
+	0x0f, 0x18, 0xc8, 0x08,
+	0x0f, 0x0f, 0xc8, 0x08,
+	0x0f, 0x05, 0xc8, 0x08,
+	0x00, 0x3d, 0x7a, 0x00,
+	0x08, 0x1f, 0x6e, 0x78,
+	0x80, 0x3d, 0x7a, 0x00,
+	0x01, 0x3d, 0xd8, 0x31,
+	0x01, 0x3d, 0x32, 0x31,
+	0x10, 0x03, 0x4e, 0x79,
+	0x00, 0x65, 0xf2, 0x58,
+	0x80, 0x66, 0xae, 0x78,
+	0x01, 0x66, 0xd8, 0x31,
+	0x01, 0x66, 0x32, 0x31,
+	0x3f, 0x66, 0x7c, 0x08,
+	0x40, 0x66, 0x82, 0x68,
+	0x01, 0x3c, 0x78, 0x00,
+	0x10, 0x03, 0x9e, 0x78,
+	0x00, 0x65, 0xf2, 0x58,
+	0xe0, 0x66, 0xc8, 0x18,
+	0x00, 0x65, 0xaa, 0x50,
+	0xdd, 0x66, 0xc8, 0x18,
+	0x00, 0x65, 0xaa, 0x48,
+	0x01, 0x66, 0xd8, 0x31,
+	0x01, 0x66, 0x32, 0x31,
+	0x10, 0x03, 0x4e, 0x79,
+	0x00, 0x65, 0xf2, 0x58,
+	0x01, 0x66, 0xd8, 0x31,
+	0x01, 0x66, 0x32, 0x31,
+	0x01, 0x66, 0xac, 0x30,
+	0x40, 0x3c, 0x78, 0x00,
+	0xff, 0x6a, 0xd8, 0x01,
+	0xff, 0x6a, 0x32, 0x01,
+	0x10, 0x3c, 0x78, 0x00,
+	0x02, 0x57, 0x40, 0x69,
+	0x10, 0x03, 0x3e, 0x69,
+	0x00, 0x65, 0x20, 0x41,
+	0x02, 0x57, 0xae, 0x00,
+	0x00, 0x65, 0x9e, 0x40,
+	0x61, 0x6a, 0xd8, 0x5e,
+	0x08, 0x51, 0x20, 0x71,
+	0x02, 0x0b, 0xb2, 0x78,
+	0x00, 0x65, 0xae, 0x40,
+	0x1a, 0x01, 0x02, 0x00,
+	0xf0, 0x19, 0x7a, 0x08,
+	0x0f, 0x0f, 0xc8, 0x08,
+	0x0f, 0x05, 0xc8, 0x08,
+	0x00, 0x3d, 0x7a, 0x00,
+	0x08, 0x1f, 0xc4, 0x78,
+	0x80, 0x3d, 0x7a, 0x00,
+	0x20, 0x6a, 0x16, 0x00,
+	0x00, 0x65, 0xcc, 0x41,
+	0x00, 0x65, 0xb2, 0x5e,
+	0x00, 0x65, 0x12, 0x40,
+	0x20, 0x11, 0xd2, 0x68,
+	0x20, 0x6a, 0x18, 0x00,
+	0x20, 0x11, 0x22, 0x00,
+	0xf7, 0x1f, 0xca, 0x08,
+	0x80, 0xb9, 0xd8, 0x78,
+	0x08, 0x65, 0xca, 0x00,
+	0x01, 0x65, 0x3e, 0x30,
+	0x01, 0xb9, 0x1e, 0x30,
+	0x7f, 0xb9, 0x0a, 0x08,
+	0x01, 0xb9, 0x0a, 0x30,
+	0x01, 0x54, 0xca, 0x30,
+	0x80, 0xb8, 0xe6, 0x78,
+	0x80, 0x65, 0xca, 0x00,
+	0x01, 0x65, 0x00, 0x34,
+	0x01, 0x54, 0x00, 0x34,
+	0x08, 0xb8, 0xee, 0x78,
+	0x20, 0x01, 0x02, 0x00,
+	0x02, 0xbd, 0x08, 0x34,
+	0x01, 0xbd, 0x08, 0x34,
+	0x08, 0x01, 0x02, 0x00,
+	0x02, 0x0b, 0xf4, 0x78,
+	0xf7, 0x01, 0x02, 0x08,
+	0x01, 0x06, 0xcc, 0x34,
+	0xb2, 0x00, 0x00, 0x08,
+	0x01, 0x40, 0x20, 0x31,
+	0x01, 0xbf, 0x80, 0x30,
+	0x01, 0xb9, 0x7a, 0x30,
+	0x3f, 0xba, 0x7c, 0x08,
+	0x00, 0x65, 0xea, 0x58,
+	0x80, 0x0b, 0xc4, 0x79,
+	0x12, 0x01, 0x02, 0x00,
+	0x01, 0xab, 0xac, 0x30,
+	0xe4, 0x6a, 0x6e, 0x5d,
+	0x40, 0x6a, 0x16, 0x00,
+	0x80, 0x3e, 0x84, 0x5d,
+	0x20, 0xb8, 0x18, 0x79,
+	0x20, 0x6a, 0x84, 0x5d,
+	0x00, 0xab, 0x84, 0x5d,
+	0x01, 0xa9, 0x78, 0x30,
+	0x10, 0xb8, 0x20, 0x79,
+	0xe4, 0x6a, 0x6e, 0x5d,
+	0x00, 0x65, 0xae, 0x40,
+	0x10, 0x03, 0x3c, 0x69,
+	0x08, 0x3c, 0x5a, 0x69,
+	0x04, 0x3c, 0x92, 0x69,
+	0x02, 0x3c, 0x98, 0x69,
+	0x01, 0x3c, 0x44, 0x79,
+	0xff, 0x6a, 0x70, 0x00,
+	0x00, 0x65, 0xa4, 0x59,
+	0x00, 0x6a, 0xc0, 0x5e,
+	0xff, 0x38, 0x30, 0x71,
+	0x0d, 0x6a, 0x76, 0x00,
+	0x00, 0x38, 0x12, 0x5e,
+	0x00, 0x65, 0xea, 0x58,
+	0x12, 0x01, 0x02, 0x00,
+	0x00, 0x65, 0x18, 0x41,
+	0xa4, 0x6a, 0x06, 0x00,
+	0x00, 0x65, 0xf2, 0x58,
+	0xfd, 0x57, 0xae, 0x08,
+	0x00, 0x65, 0xae, 0x40,
+	0xe4, 0x6a, 0x6e, 0x5d,
+	0x20, 0x3c, 0x4a, 0x79,
+	0x02, 0x6a, 0x84, 0x5d,
+	0x04, 0x6a, 0x84, 0x5d,
+	0x01, 0x03, 0x4c, 0x69,
+	0xf7, 0x11, 0x22, 0x08,
+	0xff, 0x6a, 0x24, 0x08,
+	0xff, 0x6a, 0x06, 0x08,
+	0x01, 0x6a, 0x7e, 0x00,
+	0x00, 0x65, 0xa4, 0x59,
+	0x00, 0x65, 0x04, 0x40,
+	0x80, 0x86, 0xc8, 0x08,
+	0x01, 0x4f, 0xc8, 0x30,
+	0x00, 0x50, 0x6c, 0x61,
+	0xc4, 0x6a, 0x6e, 0x5d,
+	0x40, 0x3c, 0x68, 0x79,
+	0x28, 0x6a, 0x84, 0x5d,
+	0x00, 0x65, 0x4c, 0x41,
+	0x08, 0x6a, 0x84, 0x5d,
+	0x00, 0x65, 0x4c, 0x41,
+	0x84, 0x6a, 0x6e, 0x5d,
+	0x00, 0x65, 0xf2, 0x58,
+	0x01, 0x66, 0xc8, 0x30,
+	0x01, 0x64, 0xd8, 0x31,
+	0x01, 0x64, 0x32, 0x31,
+	0x5b, 0x64, 0xc8, 0x28,
+	0x30, 0x64, 0xca, 0x18,
+	0x01, 0x6c, 0xc8, 0x30,
+	0xff, 0x64, 0x8e, 0x79,
+	0x08, 0x01, 0x02, 0x00,
+	0x02, 0x0b, 0x80, 0x79,
+	0x01, 0x64, 0x86, 0x61,
+	0xf7, 0x01, 0x02, 0x08,
+	0x01, 0x06, 0xd8, 0x31,
+	0x01, 0x06, 0x32, 0x31,
+	0xff, 0x64, 0xc8, 0x18,
+	0xff, 0x64, 0x80, 0x69,
+	0xf7, 0x3c, 0x78, 0x08,
+	0x00, 0x65, 0x20, 0x41,
+	0x40, 0xaa, 0x7e, 0x10,
+	0x04, 0xaa, 0x6e, 0x5d,
+	0x00, 0x65, 0x56, 0x42,
+	0xc4, 0x6a, 0x6e, 0x5d,
+	0xc0, 0x6a, 0x7e, 0x00,
+	0x00, 0xa8, 0x84, 0x5d,
+	0xe4, 0x6a, 0x06, 0x00,
+	0x00, 0x6a, 0x84, 0x5d,
+	0x00, 0x65, 0x4c, 0x41,
+	0x10, 0x3c, 0xa8, 0x69,
+	0x00, 0xbb, 0x8a, 0x44,
+	0x18, 0x6a, 0xda, 0x01,
+	0x01, 0x69, 0xd8, 0x31,
+	0x1c, 0x6a, 0xd0, 0x01,
+	0x09, 0xee, 0xdc, 0x01,
+	0x80, 0xee, 0xb0, 0x79,
+	0xff, 0x6a, 0xdc, 0x09,
+	0x01, 0x93, 0x26, 0x01,
+	0x03, 0x6a, 0x2a, 0x01,
+	0x01, 0x69, 0x32, 0x31,
+	0x1c, 0x6a, 0xe0, 0x5d,
+	0x0a, 0x93, 0x26, 0x01,
+	0x00, 0x65, 0xa8, 0x5e,
+	0x01, 0x50, 0xa0, 0x18,
+	0x02, 0x6a, 0x22, 0x05,
+	0x1a, 0x01, 0x02, 0x00,
+	0x80, 0x6a, 0x74, 0x00,
+	0x40, 0x6a, 0x78, 0x00,
+	0x40, 0x6a, 0x16, 0x00,
+	0x00, 0x65, 0xd8, 0x5d,
+	0x01, 0x3f, 0xc8, 0x30,
+	0xbf, 0x64, 0x56, 0x7a,
+	0x80, 0x64, 0x9e, 0x73,
+	0xa0, 0x64, 0x00, 0x74,
+	0xc0, 0x64, 0xf4, 0x73,
+	0xe0, 0x64, 0x30, 0x74,
+	0x01, 0x6a, 0xd8, 0x5e,
+	0x00, 0x65, 0xcc, 0x41,
+	0xf7, 0x11, 0x22, 0x08,
+	0x01, 0x06, 0xd4, 0x30,
+	0xff, 0x6a, 0x24, 0x08,
+	0xf7, 0x01, 0x02, 0x08,
+	0x09, 0x0c, 0xe6, 0x79,
+	0x08, 0x0c, 0x04, 0x68,
+	0xb1, 0x6a, 0xd8, 0x5e,
+	0xff, 0x6a, 0x26, 0x09,
+	0x12, 0x01, 0x02, 0x00,
+	0x02, 0x6a, 0x08, 0x30,
+	0xff, 0x6a, 0x08, 0x08,
+	0xdf, 0x01, 0x02, 0x08,
+	0x01, 0x6a, 0x7e, 0x00,
+	0xc0, 0x6a, 0x78, 0x04,
+	0xff, 0x6a, 0xc8, 0x08,
+	0x08, 0xa4, 0x48, 0x19,
+	0x00, 0xa5, 0x4a, 0x21,
+	0x00, 0xa6, 0x4c, 0x21,
+	0x00, 0xa7, 0x4e, 0x25,
+	0x08, 0xeb, 0xdc, 0x7e,
+	0x80, 0xeb, 0x06, 0x7a,
+	0xff, 0x6a, 0xd6, 0x09,
+	0x08, 0xeb, 0x0a, 0x6a,
+	0xff, 0x6a, 0xd4, 0x0c,
+	0x80, 0xa3, 0xdc, 0x6e,
+	0x88, 0xeb, 0x20, 0x72,
+	0x08, 0xeb, 0xdc, 0x6e,
+	0x04, 0xea, 0x24, 0xe2,
+	0x08, 0xee, 0xdc, 0x6e,
+	0x04, 0x6a, 0xd0, 0x81,
+	0x05, 0xa4, 0xc0, 0x89,
+	0x03, 0xa5, 0xc2, 0x31,
+	0x09, 0x6a, 0xd6, 0x05,
+	0x00, 0x65, 0x08, 0x5a,
+	0x06, 0xa4, 0xd4, 0x89,
+	0x80, 0x94, 0xdc, 0x7e,
+	0x07, 0xe9, 0x10, 0x31,
+	0x01, 0xe9, 0x46, 0x31,
+	0x00, 0xa3, 0xba, 0x5e,
+	0x00, 0x65, 0xfa, 0x59,
+	0x01, 0xa4, 0xca, 0x30,
+	0x80, 0xa3, 0x34, 0x7a,
+	0x02, 0x65, 0xca, 0x00,
+	0x01, 0x65, 0xf8, 0x31,
+	0x80, 0x93, 0x26, 0x01,
+	0xff, 0x6a, 0xd4, 0x0c,
+	0x01, 0x8c, 0xc8, 0x30,
+	0x00, 0x88, 0xc8, 0x18,
+	0x02, 0x64, 0xc8, 0x88,
+	0xff, 0x64, 0xdc, 0x7e,
+	0xff, 0x8d, 0x4a, 0x6a,
+	0xff, 0x8e, 0x4a, 0x6a,
+	0x03, 0x8c, 0xd4, 0x98,
+	0x00, 0x65, 0xdc, 0x56,
+	0x01, 0x64, 0x70, 0x30,
+	0xff, 0x64, 0xc8, 0x10,
+	0x01, 0x64, 0xc8, 0x18,
+	0x00, 0x8c, 0x18, 0x19,
+	0xff, 0x8d, 0x1a, 0x21,
+	0xff, 0x8e, 0x1c, 0x25,
+	0xc0, 0x3c, 0x5a, 0x7a,
+	0x21, 0x6a, 0xd8, 0x5e,
+	0xa8, 0x6a, 0x76, 0x00,
+	0x79, 0x6a, 0x76, 0x00,
+	0x40, 0x3f, 0x62, 0x6a,
+	0x04, 0x3b, 0x76, 0x00,
+	0x04, 0x6a, 0xd4, 0x81,
+	0x20, 0x3c, 0x6a, 0x7a,
+	0x51, 0x6a, 0xd8, 0x5e,
+	0x00, 0x65, 0x82, 0x42,
+	0x20, 0x3c, 0x78, 0x00,
+	0x00, 0xb3, 0xba, 0x5e,
+	0x07, 0xac, 0x10, 0x31,
+	0x05, 0xb3, 0x46, 0x31,
+	0x88, 0x6a, 0xcc, 0x00,
+	0xac, 0x6a, 0xee, 0x5d,
+	0xa3, 0x6a, 0xcc, 0x00,
+	0xb3, 0x6a, 0xf2, 0x5d,
+	0x00, 0x65, 0x3a, 0x5a,
+	0xfd, 0xa4, 0x48, 0x09,
+	0x03, 0x8c, 0x10, 0x30,
+	0x00, 0x65, 0xe6, 0x5d,
+	0x01, 0xa4, 0x94, 0x7a,
+	0x04, 0x3b, 0x76, 0x08,
+	0x01, 0x3b, 0x26, 0x31,
+	0x80, 0x02, 0x04, 0x00,
+	0x10, 0x0c, 0x8a, 0x7a,
+	0x03, 0x9e, 0x8c, 0x6a,
+	0x7f, 0x02, 0x04, 0x08,
+	0x91, 0x6a, 0xd8, 0x5e,
+	0x00, 0x65, 0xcc, 0x41,
+	0x01, 0xa4, 0xca, 0x30,
+	0x80, 0xa3, 0x9a, 0x7a,
+	0x02, 0x65, 0xca, 0x00,
+	0x01, 0x65, 0xf8, 0x31,
+	0x01, 0x3b, 0x26, 0x31,
+	0x00, 0x65, 0x0e, 0x5a,
+	0x01, 0xfc, 0xa8, 0x6a,
+	0x80, 0x0b, 0x9e, 0x6a,
+	0x10, 0x0c, 0x9e, 0x7a,
+	0x20, 0x93, 0x9e, 0x6a,
+	0x02, 0x93, 0x26, 0x01,
+	0x02, 0xfc, 0xb2, 0x7a,
+	0x40, 0x0d, 0xc6, 0x6a,
+	0x01, 0xa4, 0x48, 0x01,
+	0x00, 0x65, 0xc6, 0x42,
+	0x40, 0x0d, 0xb8, 0x6a,
+	0x00, 0x65, 0x0e, 0x5a,
+	0x00, 0x65, 0xaa, 0x42,
+	0x80, 0xfc, 0xc2, 0x7a,
+	0x80, 0xa4, 0xc2, 0x6a,
+	0xff, 0xa5, 0x4a, 0x19,
+	0xff, 0xa6, 0x4c, 0x21,
+	0xff, 0xa7, 0x4e, 0x21,
+	0xf8, 0xfc, 0x48, 0x09,
+	0x7f, 0xa3, 0x46, 0x09,
+	0x04, 0x3b, 0xe2, 0x6a,
+	0x02, 0x93, 0x26, 0x01,
+	0x01, 0x94, 0xc8, 0x7a,
+	0x01, 0x94, 0xc8, 0x7a,
+	0x01, 0x94, 0xc8, 0x7a,
+	0x01, 0x94, 0xc8, 0x7a,
+	0x01, 0x94, 0xc8, 0x7a,
+	0x01, 0xa4, 0xe0, 0x7a,
+	0x01, 0xfc, 0xd6, 0x7a,
+	0x01, 0x94, 0xe2, 0x6a,
+	0x01, 0x94, 0xe2, 0x6a,
+	0x01, 0x94, 0xe2, 0x6a,
+	0x00, 0x65, 0x82, 0x42,
+	0x01, 0x94, 0xe0, 0x7a,
+	0x10, 0x94, 0xe2, 0x6a,
+	0xd7, 0x93, 0x26, 0x09,
+	0x28, 0x93, 0xe6, 0x6a,
+	0x01, 0x85, 0x0a, 0x01,
+	0x02, 0xfc, 0xee, 0x6a,
+	0x01, 0x14, 0x46, 0x31,
+	0xff, 0x6a, 0x10, 0x09,
+	0xfe, 0x85, 0x0a, 0x09,
+	0xff, 0x38, 0xfc, 0x6a,
+	0x80, 0xa3, 0xfc, 0x7a,
+	0x80, 0x0b, 0xfa, 0x7a,
+	0x04, 0x3b, 0xfc, 0x7a,
+	0xbf, 0x3b, 0x76, 0x08,
+	0x01, 0x3b, 0x26, 0x31,
+	0x00, 0x65, 0x0e, 0x5a,
+	0x01, 0x0b, 0x0a, 0x6b,
+	0x10, 0x0c, 0xfe, 0x7a,
+	0x04, 0x93, 0x08, 0x6b,
+	0x01, 0x94, 0x06, 0x7b,
+	0x10, 0x94, 0x08, 0x6b,
+	0xc7, 0x93, 0x26, 0x09,
+	0x01, 0x99, 0xd4, 0x30,
+	0x38, 0x93, 0x0c, 0x6b,
+	0xff, 0x08, 0x5a, 0x6b,
+	0xff, 0x09, 0x5a, 0x6b,
+	0xff, 0x0a, 0x5a, 0x6b,
+	0xff, 0x38, 0x28, 0x7b,
+	0x04, 0x14, 0x10, 0x31,
+	0x01, 0x38, 0x18, 0x31,
+	0x02, 0x6a, 0x1a, 0x31,
+	0x88, 0x6a, 0xcc, 0x00,
+	0x14, 0x6a, 0xf4, 0x5d,
+	0x00, 0x38, 0xe0, 0x5d,
+	0xff, 0x6a, 0x70, 0x08,
+	0x00, 0x65, 0x54, 0x43,
+	0x80, 0xa3, 0x2e, 0x7b,
+	0x01, 0xa4, 0x48, 0x01,
+	0x00, 0x65, 0x5a, 0x43,
+	0x08, 0xeb, 0x34, 0x7b,
+	0x00, 0x65, 0x0e, 0x5a,
+	0x08, 0xeb, 0x30, 0x6b,
+	0x07, 0xe9, 0x10, 0x31,
+	0x01, 0xe9, 0xca, 0x30,
+	0x01, 0x65, 0x46, 0x31,
+	0x00, 0x6a, 0xba, 0x5e,
+	0x88, 0x6a, 0xcc, 0x00,
+	0xa4, 0x6a, 0xf4, 0x5d,
+	0x08, 0x6a, 0xe0, 0x5d,
+	0x0d, 0x93, 0x26, 0x01,
+	0x00, 0x65, 0xa8, 0x5e,
+	0x88, 0x6a, 0xcc, 0x00,
+	0x00, 0x65, 0x8a, 0x5e,
+	0x01, 0x99, 0x46, 0x31,
+	0x00, 0xa3, 0xba, 0x5e,
+	0x01, 0x88, 0x10, 0x31,
+	0x00, 0x65, 0x3a, 0x5a,
+	0x00, 0x65, 0xfa, 0x59,
+	0x03, 0x8c, 0x10, 0x30,
+	0x00, 0x65, 0xe6, 0x5d,
+	0x80, 0x0b, 0x82, 0x6a,
+	0x80, 0x0b, 0x62, 0x6b,
+	0x01, 0x0c, 0x5c, 0x7b,
+	0x10, 0x0c, 0x82, 0x7a,
+	0x03, 0x9e, 0x82, 0x6a,
+	0x00, 0x65, 0x04, 0x5a,
+	0x00, 0x6a, 0xba, 0x5e,
+	0x01, 0xa4, 0x82, 0x6b,
+	0xff, 0x38, 0x78, 0x7b,
+	0x01, 0x38, 0xc8, 0x30,
+	0x00, 0x08, 0x40, 0x19,
+	0xff, 0x6a, 0xc8, 0x08,
+	0x00, 0x09, 0x42, 0x21,
+	0x00, 0x0a, 0x44, 0x21,
+	0xff, 0x6a, 0x70, 0x08,
+	0x00, 0x65, 0x7a, 0x43,
+	0x03, 0x08, 0x40, 0x31,
+	0x03, 0x08, 0x40, 0x31,
+	0x01, 0x08, 0x40, 0x31,
+	0x01, 0x09, 0x42, 0x31,
+	0x01, 0x0a, 0x44, 0x31,
+	0xfd, 0xb4, 0x68, 0x09,
+	0x12, 0x01, 0x02, 0x00,
+	0x12, 0x01, 0x02, 0x00,
+	0x04, 0x3c, 0xcc, 0x79,
+	0xfb, 0x3c, 0x78, 0x08,
+	0x04, 0x93, 0x20, 0x79,
+	0x01, 0x0c, 0x8e, 0x6b,
+	0x80, 0xba, 0x20, 0x79,
+	0x80, 0x04, 0x20, 0x79,
+	0xe4, 0x6a, 0x6e, 0x5d,
+	0x23, 0x6a, 0x84, 0x5d,
+	0x01, 0x6a, 0x84, 0x5d,
+	0x00, 0x65, 0x20, 0x41,
+	0x00, 0x65, 0xcc, 0x41,
+	0x80, 0x3c, 0xa2, 0x7b,
+	0x21, 0x6a, 0xd8, 0x5e,
+	0x01, 0xbc, 0x18, 0x31,
+	0x02, 0x6a, 0x1a, 0x31,
+	0x02, 0x6a, 0xf8, 0x01,
+	0x01, 0xbc, 0x10, 0x30,
+	0x02, 0x6a, 0x12, 0x30,
+	0x01, 0xbc, 0x10, 0x30,
+	0xff, 0x6a, 0x12, 0x08,
+	0xff, 0x6a, 0x14, 0x08,
+	0xf3, 0xbc, 0xd4, 0x18,
+	0xa0, 0x6a, 0xc8, 0x53,
+	0x04, 0xa0, 0x10, 0x31,
+	0xac, 0x6a, 0x26, 0x01,
+	0x04, 0xa0, 0x10, 0x31,
+	0x03, 0x08, 0x18, 0x31,
+	0x88, 0x6a, 0xcc, 0x00,
+	0xa0, 0x6a, 0xf4, 0x5d,
+	0x00, 0xbc, 0xe0, 0x5d,
+	0x3d, 0x6a, 0x26, 0x01,
+	0x00, 0x65, 0xe0, 0x43,
+	0xff, 0x6a, 0x10, 0x09,
+	0xa4, 0x6a, 0x26, 0x01,
+	0x0c, 0xa0, 0x32, 0x31,
+	0x05, 0x6a, 0x26, 0x01,
+	0x35, 0x6a, 0x26, 0x01,
+	0x0c, 0xa0, 0x32, 0x31,
+	0x36, 0x6a, 0x26, 0x01,
+	0x02, 0x93, 0x26, 0x01,
+	0x35, 0x6a, 0x26, 0x01,
+	0x00, 0x65, 0x9c, 0x5e,
+	0x00, 0x65, 0x9c, 0x5e,
+	0x02, 0x93, 0x26, 0x01,
+	0xbf, 0x3c, 0x78, 0x08,
+	0x04, 0x0b, 0xe6, 0x6b,
+	0x10, 0x0c, 0xe2, 0x7b,
+	0x01, 0x03, 0xe6, 0x6b,
+	0x20, 0x93, 0xe8, 0x6b,
+	0x04, 0x0b, 0xee, 0x6b,
+	0x40, 0x3c, 0x78, 0x00,
+	0xc7, 0x93, 0x26, 0x09,
+	0x38, 0x93, 0xf0, 0x6b,
+	0x00, 0x65, 0xcc, 0x41,
+	0x80, 0x3c, 0x56, 0x6c,
+	0x01, 0x06, 0x50, 0x31,
+	0x80, 0xb8, 0x70, 0x01,
+	0x00, 0x65, 0xcc, 0x41,
+	0x10, 0x3f, 0x06, 0x00,
+	0x10, 0x6a, 0x06, 0x00,
+	0x01, 0x3a, 0xca, 0x30,
+	0x80, 0x65, 0x1c, 0x64,
+	0x10, 0xb8, 0x40, 0x6c,
+	0xc0, 0x3e, 0xca, 0x00,
+	0x40, 0xb8, 0x0c, 0x6c,
+	0xbf, 0x65, 0xca, 0x08,
+	0x20, 0xb8, 0x20, 0x7c,
+	0x01, 0x65, 0x0c, 0x30,
+	0x00, 0x65, 0xd8, 0x5d,
+	0xa0, 0x3f, 0x28, 0x64,
+	0x23, 0xb8, 0x0c, 0x08,
+	0x00, 0x65, 0xd8, 0x5d,
+	0xa0, 0x3f, 0x28, 0x64,
+	0x00, 0xbb, 0x20, 0x44,
+	0xff, 0x65, 0x20, 0x64,
+	0x00, 0x65, 0x40, 0x44,
+	0x40, 0x6a, 0x18, 0x00,
+	0x01, 0x65, 0x0c, 0x30,
+	0x00, 0x65, 0xd8, 0x5d,
+	0xa0, 0x3f, 0xfc, 0x73,
+	0x40, 0x6a, 0x18, 0x00,
+	0x01, 0x3a, 0xa6, 0x30,
+	0x08, 0x6a, 0x74, 0x00,
+	0x00, 0x65, 0xcc, 0x41,
+	0x64, 0x6a, 0x68, 0x5d,
+	0x80, 0x64, 0xd8, 0x6c,
+	0x04, 0x64, 0x9a, 0x74,
+	0x02, 0x64, 0xaa, 0x74,
+	0x00, 0x6a, 0x60, 0x74,
+	0x03, 0x64, 0xc8, 0x74,
+	0x23, 0x64, 0x48, 0x74,
+	0x08, 0x64, 0x5c, 0x74,
+	0x61, 0x6a, 0xd8, 0x5e,
+	0x00, 0x65, 0xd8, 0x5d,
+	0x08, 0x51, 0xce, 0x71,
+	0x00, 0x65, 0x40, 0x44,
+	0x80, 0x04, 0x5a, 0x7c,
+	0x51, 0x6a, 0x5e, 0x5d,
+	0x01, 0x51, 0x5a, 0x64,
+	0x01, 0xa4, 0x52, 0x7c,
+	0x80, 0xba, 0x5c, 0x6c,
+	0x41, 0x6a, 0xd8, 0x5e,
+	0x00, 0x65, 0x5c, 0x44,
+	0x21, 0x6a, 0xd8, 0x5e,
+	0x00, 0x65, 0x5c, 0x44,
+	0x07, 0x6a, 0x54, 0x5d,
+	0x01, 0x06, 0xd4, 0x30,
+	0x00, 0x65, 0xcc, 0x41,
+	0x80, 0xb8, 0x56, 0x7c,
+	0xc0, 0x3c, 0x6a, 0x7c,
+	0x80, 0x3c, 0x56, 0x6c,
+	0xff, 0xa8, 0x6a, 0x6c,
+	0x40, 0x3c, 0x56, 0x6c,
+	0x10, 0xb8, 0x6e, 0x7c,
+	0xa1, 0x6a, 0xd8, 0x5e,
+	0x01, 0xb4, 0x74, 0x6c,
+	0x02, 0xb4, 0x76, 0x6c,
+	0x01, 0xa4, 0x76, 0x7c,
+	0xff, 0xa8, 0x86, 0x7c,
+	0x04, 0xb4, 0x68, 0x01,
+	0x01, 0x6a, 0x76, 0x00,
+	0x00, 0xbb, 0x12, 0x5e,
+	0xff, 0xa8, 0x86, 0x7c,
+	0x71, 0x6a, 0xd8, 0x5e,
+	0x40, 0x51, 0x86, 0x64,
+	0x00, 0x65, 0xb2, 0x5e,
+	0x00, 0x65, 0xde, 0x41,
+	0x00, 0xbb, 0x8a, 0x5c,
+	0x00, 0x65, 0xde, 0x41,
+	0x00, 0x65, 0xb2, 0x5e,
+	0x01, 0x65, 0xa2, 0x30,
+	0x01, 0xf8, 0xc8, 0x30,
+	0x01, 0x4e, 0xc8, 0x30,
+	0x00, 0x6a, 0xb6, 0xdd,
+	0x00, 0x51, 0xc8, 0x5d,
+	0x01, 0x4e, 0x9c, 0x18,
+	0x02, 0x6a, 0x22, 0x05,
+	0xc0, 0x3c, 0x56, 0x6c,
+	0x04, 0xb8, 0x70, 0x01,
+	0x00, 0x65, 0xd4, 0x5e,
+	0x20, 0xb8, 0xde, 0x69,
+	0x01, 0xbb, 0xa2, 0x30,
+	0x3f, 0xba, 0x7c, 0x08,
+	0x00, 0xb9, 0xce, 0x5c,
+	0x00, 0x65, 0xde, 0x41,
+	0x01, 0x06, 0xd4, 0x30,
+	0x20, 0x3c, 0xcc, 0x79,
+	0x20, 0x3c, 0x5c, 0x7c,
+	0x01, 0xa4, 0xb8, 0x7c,
+	0x01, 0xb4, 0x68, 0x01,
+	0x00, 0x65, 0xcc, 0x41,
+	0x00, 0x65, 0x5c, 0x44,
+	0x04, 0x14, 0x58, 0x31,
+	0x01, 0x06, 0xd4, 0x30,
+	0x08, 0xa0, 0x60, 0x31,
+	0xac, 0x6a, 0xcc, 0x00,
+	0x14, 0x6a, 0xf4, 0x5d,
+	0x01, 0x06, 0xd4, 0x30,
+	0xa0, 0x6a, 0xec, 0x5d,
+	0x00, 0x65, 0xcc, 0x41,
+	0xdf, 0x3c, 0x78, 0x08,
+	0x12, 0x01, 0x02, 0x00,
+	0x00, 0x65, 0x5c, 0x44,
+	0x4c, 0x65, 0xcc, 0x28,
+	0x01, 0x3e, 0x20, 0x31,
+	0xd0, 0x66, 0xcc, 0x18,
+	0x20, 0x66, 0xcc, 0x18,
+	0x01, 0x51, 0xda, 0x34,
+	0x4c, 0x3d, 0xca, 0x28,
+	0x3f, 0x64, 0x7c, 0x08,
+	0xd0, 0x65, 0xca, 0x18,
+	0x01, 0x3e, 0x20, 0x31,
+	0x30, 0x65, 0xd4, 0x18,
+	0x00, 0x65, 0xe6, 0x4c,
+	0xe1, 0x6a, 0x22, 0x01,
+	0xff, 0x6a, 0xd4, 0x08,
+	0x20, 0x65, 0xd4, 0x18,
+	0x00, 0x65, 0xee, 0x54,
+	0xe1, 0x6a, 0x22, 0x01,
+	0xff, 0x6a, 0xd4, 0x08,
+	0x20, 0x65, 0xca, 0x18,
+	0xe0, 0x65, 0xd4, 0x18,
+	0x00, 0x65, 0xf8, 0x4c,
+	0xe1, 0x6a, 0x22, 0x01,
+	0xff, 0x6a, 0xd4, 0x08,
+	0xd0, 0x65, 0xd4, 0x18,
+	0x00, 0x65, 0x00, 0x55,
+	0xe1, 0x6a, 0x22, 0x01,
+	0xff, 0x6a, 0xd4, 0x08,
+	0x01, 0x6c, 0xa2, 0x30,
+	0xff, 0x51, 0x12, 0x75,
+	0x00, 0x51, 0x8e, 0x5d,
+	0x01, 0x51, 0x20, 0x31,
+	0x00, 0x65, 0x34, 0x45,
+	0x3f, 0xba, 0xc8, 0x08,
+	0x00, 0x3e, 0x34, 0x75,
+	0x00, 0x65, 0xb0, 0x5e,
+	0x80, 0x3c, 0x78, 0x00,
+	0x01, 0x06, 0xd4, 0x30,
+	0x00, 0x65, 0xd8, 0x5d,
+	0x01, 0x3c, 0x78, 0x00,
+	0xe0, 0x3f, 0x50, 0x65,
+	0x02, 0x3c, 0x78, 0x00,
+	0x20, 0x12, 0x50, 0x65,
+	0x51, 0x6a, 0x5e, 0x5d,
+	0x00, 0x51, 0x8e, 0x5d,
+	0x51, 0x6a, 0x5e, 0x5d,
+	0x01, 0x51, 0x20, 0x31,
+	0x04, 0x3c, 0x78, 0x00,
+	0x01, 0xb9, 0xc8, 0x30,
+	0x00, 0x3d, 0x4e, 0x65,
+	0x08, 0x3c, 0x78, 0x00,
+	0x3f, 0xba, 0xc8, 0x08,
+	0x00, 0x3e, 0x4e, 0x65,
+	0x10, 0x3c, 0x78, 0x00,
+	0x04, 0xb8, 0x4e, 0x7d,
+	0xfb, 0xb8, 0x70, 0x09,
+	0x20, 0xb8, 0x44, 0x6d,
+	0x01, 0x90, 0xc8, 0x30,
+	0xff, 0x6a, 0xa2, 0x00,
+	0x00, 0x3d, 0xce, 0x5c,
+	0x01, 0x64, 0x20, 0x31,
+	0xff, 0x6a, 0x78, 0x08,
+	0x00, 0x65, 0xea, 0x58,
+	0x10, 0xb8, 0x5c, 0x7c,
+	0xff, 0x6a, 0x54, 0x5d,
+	0x00, 0x65, 0x5c, 0x44,
+	0x00, 0x65, 0xb0, 0x5e,
+	0x31, 0x6a, 0xd8, 0x5e,
+	0x00, 0x65, 0x5c, 0x44,
+	0x10, 0x3f, 0x06, 0x00,
+	0x10, 0x6a, 0x06, 0x00,
+	0x01, 0x65, 0x74, 0x34,
+	0x81, 0x6a, 0xd8, 0x5e,
+	0x00, 0x65, 0x60, 0x45,
+	0x01, 0x06, 0xd4, 0x30,
+	0x01, 0x0c, 0x60, 0x7d,
+	0x04, 0x0c, 0x5a, 0x6d,
+	0xe0, 0x03, 0x7e, 0x08,
+	0xe0, 0x3f, 0xcc, 0x61,
+	0x01, 0x65, 0xcc, 0x30,
+	0x01, 0x12, 0xda, 0x34,
+	0x01, 0x06, 0xd4, 0x34,
+	0x01, 0x03, 0x6e, 0x6d,
+	0x40, 0x03, 0xcc, 0x08,
+	0x01, 0x65, 0x06, 0x30,
+	0x40, 0x65, 0xc8, 0x08,
+	0x00, 0x66, 0x7c, 0x75,
+	0x40, 0x65, 0x7c, 0x7d,
+	0x00, 0x65, 0x7c, 0x5d,
+	0xff, 0x6a, 0xd4, 0x08,
+	0xff, 0x6a, 0xd4, 0x08,
+	0xff, 0x6a, 0xd4, 0x08,
+	0xff, 0x6a, 0xd4, 0x0c,
+	0x08, 0x01, 0x02, 0x00,
+	0x02, 0x0b, 0x86, 0x7d,
+	0x01, 0x65, 0x0c, 0x30,
+	0x02, 0x0b, 0x8a, 0x7d,
+	0xf7, 0x01, 0x02, 0x0c,
+	0x01, 0x65, 0xc8, 0x30,
+	0xff, 0x41, 0xae, 0x75,
+	0x01, 0x41, 0x20, 0x31,
+	0xff, 0x6a, 0xa4, 0x00,
+	0x00, 0x65, 0x9e, 0x45,
+	0xff, 0xbf, 0xae, 0x75,
+	0x01, 0x90, 0xa4, 0x30,
+	0x01, 0xbf, 0x20, 0x31,
+	0x00, 0xbb, 0x98, 0x65,
+	0xff, 0x52, 0xac, 0x75,
+	0x01, 0xbf, 0xcc, 0x30,
+	0x01, 0x90, 0xca, 0x30,
+	0x01, 0x52, 0x20, 0x31,
+	0x01, 0x66, 0x7e, 0x31,
+	0x01, 0x65, 0x20, 0x35,
+	0x01, 0xbf, 0x82, 0x34,
+	0x01, 0x64, 0xa2, 0x30,
+	0x00, 0x6a, 0xc0, 0x5e,
+	0x0d, 0x6a, 0x76, 0x00,
+	0x00, 0x51, 0x12, 0x46,
+	0x01, 0x65, 0xa4, 0x30,
+	0xe0, 0x6a, 0xcc, 0x00,
+	0x48, 0x6a, 0x06, 0x5e,
+	0x01, 0x6a, 0xd0, 0x01,
+	0x01, 0x6a, 0xdc, 0x05,
+	0x88, 0x6a, 0xcc, 0x00,
+	0x48, 0x6a, 0x06, 0x5e,
+	0x01, 0x6a, 0xe0, 0x5d,
+	0x01, 0x6a, 0x26, 0x05,
+	0x01, 0x65, 0xd8, 0x31,
+	0x09, 0xee, 0xdc, 0x01,
+	0x80, 0xee, 0xcc, 0x7d,
+	0xff, 0x6a, 0xdc, 0x0d,
+	0x01, 0x65, 0x32, 0x31,
+	0x0a, 0x93, 0x26, 0x01,
+	0x00, 0x65, 0xa8, 0x46,
+	0x81, 0x6a, 0xd8, 0x5e,
+	0x01, 0x0c, 0xd8, 0x7d,
+	0x04, 0x0c, 0xd6, 0x6d,
+	0xe0, 0x03, 0x06, 0x08,
+	0xe0, 0x03, 0x7e, 0x0c,
+	0x01, 0x65, 0x18, 0x31,
+	0xff, 0x6a, 0x1a, 0x09,
+	0xff, 0x6a, 0x1c, 0x0d,
+	0x01, 0x8c, 0x10, 0x30,
+	0x01, 0x8d, 0x12, 0x30,
+	0x01, 0x8e, 0x14, 0x34,
+	0x01, 0x6c, 0xda, 0x30,
+	0x01, 0x6c, 0xda, 0x30,
+	0x01, 0x6c, 0xda, 0x30,
+	0x01, 0x6c, 0xda, 0x30,
+	0x01, 0x6c, 0xda, 0x30,
+	0x01, 0x6c, 0xda, 0x30,
+	0x01, 0x6c, 0xda, 0x30,
+	0x01, 0x6c, 0xda, 0x34,
+	0x3d, 0x64, 0xa4, 0x28,
+	0x55, 0x64, 0xc8, 0x28,
+	0x00, 0x65, 0x06, 0x46,
+	0x2e, 0x64, 0xa4, 0x28,
+	0x66, 0x64, 0xc8, 0x28,
+	0x00, 0x6c, 0xda, 0x18,
+	0x01, 0x52, 0xc8, 0x30,
+	0x00, 0x6c, 0xda, 0x20,
+	0xff, 0x6a, 0xc8, 0x08,
+	0x00, 0x6c, 0xda, 0x20,
+	0x00, 0x6c, 0xda, 0x24,
+	0x01, 0x65, 0xc8, 0x30,
+	0xe0, 0x6a, 0xcc, 0x00,
+	0x44, 0x6a, 0x02, 0x5e,
+	0x01, 0x90, 0xe2, 0x31,
+	0x04, 0x3b, 0x26, 0x7e,
+	0x30, 0x6a, 0xd0, 0x01,
+	0x20, 0x6a, 0xd0, 0x01,
+	0x1d, 0x6a, 0xdc, 0x01,
+	0xdc, 0xee, 0x22, 0x66,
+	0x00, 0x65, 0x3e, 0x46,
+	0x20, 0x6a, 0xd0, 0x01,
+	0x01, 0x6a, 0xdc, 0x01,
+	0x20, 0xa0, 0xd8, 0x31,
+	0x09, 0xee, 0xdc, 0x01,
+	0x80, 0xee, 0x2e, 0x7e,
+	0x11, 0x6a, 0xdc, 0x01,
+	0x50, 0xee, 0x32, 0x66,
+	0x20, 0x6a, 0xd0, 0x01,
+	0x09, 0x6a, 0xdc, 0x01,
+	0x88, 0xee, 0x38, 0x66,
+	0x19, 0x6a, 0xdc, 0x01,
+	0xd8, 0xee, 0x3c, 0x66,
+	0xff, 0x6a, 0xdc, 0x09,
+	0x18, 0xee, 0x40, 0x6e,
+	0xff, 0x6a, 0xd4, 0x0c,
+	0x88, 0x6a, 0xcc, 0x00,
+	0x44, 0x6a, 0x02, 0x5e,
+	0x20, 0x6a, 0xe0, 0x5d,
+	0x01, 0x3b, 0x26, 0x31,
+	0x04, 0x3b, 0x5a, 0x6e,
+	0xa0, 0x6a, 0xca, 0x00,
+	0x20, 0x65, 0xc8, 0x18,
+	0x00, 0x65, 0x98, 0x5e,
+	0x00, 0x65, 0x52, 0x66,
+	0x0a, 0x93, 0x26, 0x01,
+	0x00, 0x65, 0xa8, 0x46,
+	0xa0, 0x6a, 0xcc, 0x00,
+	0xff, 0x6a, 0xc8, 0x08,
+	0x20, 0x94, 0x5e, 0x6e,
+	0x10, 0x94, 0x60, 0x6e,
+	0x08, 0x94, 0x7a, 0x6e,
+	0x08, 0x94, 0x7a, 0x6e,
+	0x08, 0x94, 0x7a, 0x6e,
+	0xff, 0x8c, 0xc8, 0x10,
+	0xc1, 0x64, 0xc8, 0x18,
+	0xf8, 0x64, 0xc8, 0x08,
+	0x01, 0x99, 0xda, 0x30,
+	0x00, 0x66, 0x6e, 0x66,
+	0xc0, 0x66, 0xaa, 0x76,
+	0x60, 0x66, 0xc8, 0x18,
+	0x3d, 0x64, 0xc8, 0x28,
+	0x00, 0x65, 0x5e, 0x46,
+	0xf7, 0x93, 0x26, 0x09,
+	0x08, 0x93, 0x7c, 0x6e,
+	0x00, 0x62, 0xc4, 0x18,
+	0x00, 0x65, 0xa8, 0x5e,
+	0x00, 0x65, 0x88, 0x5e,
+	0x00, 0x65, 0x88, 0x5e,
+	0x00, 0x65, 0x88, 0x5e,
+	0x01, 0x99, 0xda, 0x30,
+	0x01, 0x99, 0xda, 0x30,
+	0x01, 0x99, 0xda, 0x30,
+	0x01, 0x99, 0xda, 0x30,
+	0x01, 0x99, 0xda, 0x30,
+	0x01, 0x99, 0xda, 0x30,
+	0x01, 0x99, 0xda, 0x30,
+	0x01, 0x99, 0xda, 0x34,
+	0x01, 0x6c, 0x32, 0x31,
+	0x01, 0x6c, 0x32, 0x31,
+	0x01, 0x6c, 0x32, 0x31,
+	0x01, 0x6c, 0x32, 0x31,
+	0x01, 0x6c, 0x32, 0x31,
+	0x01, 0x6c, 0x32, 0x31,
+	0x01, 0x6c, 0x32, 0x31,
+	0x01, 0x6c, 0x32, 0x35,
+	0x08, 0x94, 0xa8, 0x7e,
+	0xf7, 0x93, 0x26, 0x09,
+	0x08, 0x93, 0xac, 0x6e,
+	0xff, 0x6a, 0xd4, 0x0c,
+	0x04, 0xb8, 0xd4, 0x6e,
+	0x01, 0x42, 0x7e, 0x31,
+	0xff, 0x6a, 0x76, 0x01,
+	0x01, 0x90, 0x84, 0x34,
+	0xff, 0x6a, 0x76, 0x05,
+	0x01, 0x85, 0x0a, 0x01,
+	0x7f, 0x65, 0x10, 0x09,
+	0xfe, 0x85, 0x0a, 0x0d,
+	0xff, 0x42, 0xd0, 0x66,
+	0xff, 0x41, 0xc8, 0x66,
+	0xd1, 0x6a, 0xd8, 0x5e,
+	0xff, 0x6a, 0xca, 0x04,
+	0x01, 0x41, 0x20, 0x31,
+	0x01, 0xbf, 0x82, 0x30,
+	0x01, 0x6a, 0x76, 0x00,
+	0x00, 0xbb, 0x12, 0x46,
+	0x01, 0x42, 0x20, 0x31,
+	0x01, 0xbf, 0x84, 0x34,
+	0x01, 0x41, 0x7e, 0x31,
+	0x01, 0x90, 0x82, 0x34,
+	0x01, 0x65, 0x22, 0x31,
+	0xff, 0x6a, 0xd4, 0x08,
+	0xff, 0x6a, 0xd4, 0x0c
+};
+
+typedef int ahc_patch_func_t (struct ahc_softc *ahc);
+static ahc_patch_func_t ahc_patch23_func;
+
+static int
+ahc_patch23_func(struct ahc_softc *ahc)
+{
+	return ((ahc->bugs & AHC_SCBCHAN_UPLOAD_BUG) != 0);
+}
+
+static ahc_patch_func_t ahc_patch22_func;
+
+static int
+ahc_patch22_func(struct ahc_softc *ahc)
+{
+	return ((ahc->features & AHC_CMD_CHAN) == 0);
+}
+
+static ahc_patch_func_t ahc_patch21_func;
+
+static int
+ahc_patch21_func(struct ahc_softc *ahc)
+{
+	return ((ahc->features & AHC_QUEUE_REGS) == 0);
+}
+
+static ahc_patch_func_t ahc_patch20_func;
+
+static int
+ahc_patch20_func(struct ahc_softc *ahc)
+{
+	return ((ahc->features & AHC_WIDE) != 0);
+}
+
+static ahc_patch_func_t ahc_patch19_func;
+
+static int
+ahc_patch19_func(struct ahc_softc *ahc)
+{
+	return ((ahc->flags & AHC_SCB_BTT) != 0);
+}
+
+static ahc_patch_func_t ahc_patch18_func;
+
+static int
+ahc_patch18_func(struct ahc_softc *ahc)
+{
+	return ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0);
+}
+
+static ahc_patch_func_t ahc_patch17_func;
+
+static int
+ahc_patch17_func(struct ahc_softc *ahc)
+{
+	return ((ahc->flags & AHC_TMODE_WIDEODD_BUG) != 0);
+}
+
+static ahc_patch_func_t ahc_patch16_func;
+
+static int
+ahc_patch16_func(struct ahc_softc *ahc)
+{
+	return ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0);
+}
+
+static ahc_patch_func_t ahc_patch15_func;
+
+static int
+ahc_patch15_func(struct ahc_softc *ahc)
+{
+	return ((ahc->features & AHC_ULTRA2) == 0);
+}
+
+static ahc_patch_func_t ahc_patch14_func;
+
+static int
+ahc_patch14_func(struct ahc_softc *ahc)
+{
+	return ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0);
+}
+
+static ahc_patch_func_t ahc_patch13_func;
+
+static int
+ahc_patch13_func(struct ahc_softc *ahc)
+{
+	return ((ahc->flags & AHC_39BIT_ADDRESSING) != 0);
+}
+
+static ahc_patch_func_t ahc_patch12_func;
+
+static int
+ahc_patch12_func(struct ahc_softc *ahc)
+{
+	return ((ahc->features & AHC_HS_MAILBOX) != 0);
+}
+
+static ahc_patch_func_t ahc_patch11_func;
+
+static int
+ahc_patch11_func(struct ahc_softc *ahc)
+{
+	return ((ahc->features & AHC_ULTRA) != 0);
+}
+
+static ahc_patch_func_t ahc_patch10_func;
+
+static int
+ahc_patch10_func(struct ahc_softc *ahc)
+{
+	return ((ahc->features & AHC_MULTI_TID) != 0);
+}
+
+static ahc_patch_func_t ahc_patch9_func;
+
+static int
+ahc_patch9_func(struct ahc_softc *ahc)
+{
+	return ((ahc->features & AHC_CMD_CHAN) != 0);
+}
+
+static ahc_patch_func_t ahc_patch8_func;
+
+static int
+ahc_patch8_func(struct ahc_softc *ahc)
+{
+	return ((ahc->flags & AHC_INITIATORROLE) != 0);
+}
+
+static ahc_patch_func_t ahc_patch7_func;
+
+static int
+ahc_patch7_func(struct ahc_softc *ahc)
+{
+	return ((ahc->flags & AHC_TARGETROLE) != 0);
+}
+
+static ahc_patch_func_t ahc_patch6_func;
+
+static int
+ahc_patch6_func(struct ahc_softc *ahc)
+{
+	return ((ahc->features & AHC_DT) == 0);
+}
+
+static ahc_patch_func_t ahc_patch5_func;
+
+static int
+ahc_patch5_func(struct ahc_softc *ahc)
+{
+	return ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0);
+}
+
+static ahc_patch_func_t ahc_patch4_func;
+
+static int
+ahc_patch4_func(struct ahc_softc *ahc)
+{
+	return ((ahc->flags & AHC_PAGESCBS) != 0);
+}
+
+static ahc_patch_func_t ahc_patch3_func;
+
+static int
+ahc_patch3_func(struct ahc_softc *ahc)
+{
+	return ((ahc->features & AHC_QUEUE_REGS) != 0);
+}
+
+static ahc_patch_func_t ahc_patch2_func;
+
+static int
+ahc_patch2_func(struct ahc_softc *ahc)
+{
+	return ((ahc->features & AHC_TWIN) != 0);
+}
+
+static ahc_patch_func_t ahc_patch1_func;
+
+static int
+ahc_patch1_func(struct ahc_softc *ahc)
+{
+	return ((ahc->features & AHC_ULTRA2) != 0);
+}
+
+static ahc_patch_func_t ahc_patch0_func;
+
+static int
+ahc_patch0_func(struct ahc_softc *ahc)
+{
+	return (0);
+}
+
+static struct patch {
+	ahc_patch_func_t		*patch_func;
+	uint32_t		 begin		:10,
+				 skip_instr	:10,
+				 skip_patch	:12;
+} patches[] = {
+	{ ahc_patch1_func, 4, 1, 1 },
+	{ ahc_patch2_func, 6, 2, 1 },
+	{ ahc_patch2_func, 9, 1, 1 },
+	{ ahc_patch3_func, 11, 1, 2 },
+	{ ahc_patch0_func, 12, 2, 1 },
+	{ ahc_patch4_func, 15, 1, 2 },
+	{ ahc_patch0_func, 16, 1, 1 },
+	{ ahc_patch5_func, 22, 2, 1 },
+	{ ahc_patch3_func, 27, 1, 2 },
+	{ ahc_patch0_func, 28, 1, 1 },
+	{ ahc_patch6_func, 34, 1, 1 },
+	{ ahc_patch7_func, 37, 54, 19 },
+	{ ahc_patch8_func, 37, 1, 1 },
+	{ ahc_patch9_func, 42, 3, 2 },
+	{ ahc_patch0_func, 45, 3, 1 },
+	{ ahc_patch10_func, 49, 1, 2 },
+	{ ahc_patch0_func, 50, 2, 3 },
+	{ ahc_patch1_func, 50, 1, 2 },
+	{ ahc_patch0_func, 51, 1, 1 },
+	{ ahc_patch2_func, 53, 2, 1 },
+	{ ahc_patch9_func, 55, 1, 2 },
+	{ ahc_patch0_func, 56, 1, 1 },
+	{ ahc_patch9_func, 60, 1, 2 },
+	{ ahc_patch0_func, 61, 1, 1 },
+	{ ahc_patch9_func, 71, 1, 2 },
+	{ ahc_patch0_func, 72, 1, 1 },
+	{ ahc_patch9_func, 75, 1, 2 },
+	{ ahc_patch0_func, 76, 1, 1 },
+	{ ahc_patch9_func, 79, 1, 2 },
+	{ ahc_patch0_func, 80, 1, 1 },
+	{ ahc_patch8_func, 91, 9, 4 },
+	{ ahc_patch1_func, 93, 1, 2 },
+	{ ahc_patch0_func, 94, 1, 1 },
+	{ ahc_patch2_func, 96, 2, 1 },
+	{ ahc_patch2_func, 105, 4, 1 },
+	{ ahc_patch1_func, 109, 1, 2 },
+	{ ahc_patch0_func, 110, 2, 3 },
+	{ ahc_patch2_func, 110, 1, 2 },
+	{ ahc_patch0_func, 111, 1, 1 },
+	{ ahc_patch7_func, 112, 4, 2 },
+	{ ahc_patch0_func, 116, 1, 1 },
+	{ ahc_patch11_func, 117, 2, 1 },
+	{ ahc_patch1_func, 119, 1, 2 },
+	{ ahc_patch0_func, 120, 1, 1 },
+	{ ahc_patch7_func, 121, 4, 1 },
+	{ ahc_patch7_func, 131, 95, 11 },
+	{ ahc_patch4_func, 151, 1, 1 },
+	{ ahc_patch1_func, 168, 1, 1 },
+	{ ahc_patch12_func, 173, 1, 2 },
+	{ ahc_patch0_func, 174, 1, 1 },
+	{ ahc_patch9_func, 185, 1, 2 },
+	{ ahc_patch0_func, 186, 1, 1 },
+	{ ahc_patch9_func, 195, 1, 2 },
+	{ ahc_patch0_func, 196, 1, 1 },
+	{ ahc_patch9_func, 212, 6, 2 },
+	{ ahc_patch0_func, 218, 6, 1 },
+	{ ahc_patch8_func, 226, 20, 2 },
+	{ ahc_patch1_func, 241, 1, 1 },
+	{ ahc_patch1_func, 248, 1, 2 },
+	{ ahc_patch0_func, 249, 2, 2 },
+	{ ahc_patch11_func, 250, 1, 1 },
+	{ ahc_patch9_func, 258, 27, 3 },
+	{ ahc_patch1_func, 274, 10, 2 },
+	{ ahc_patch13_func, 277, 1, 1 },
+	{ ahc_patch14_func, 285, 14, 1 },
+	{ ahc_patch1_func, 301, 1, 2 },
+	{ ahc_patch0_func, 302, 1, 1 },
+	{ ahc_patch9_func, 305, 1, 1 },
+	{ ahc_patch13_func, 310, 1, 1 },
+	{ ahc_patch9_func, 311, 2, 2 },
+	{ ahc_patch0_func, 313, 4, 1 },
+	{ ahc_patch14_func, 317, 1, 1 },
+	{ ahc_patch15_func, 319, 2, 3 },
+	{ ahc_patch9_func, 319, 1, 2 },
+	{ ahc_patch0_func, 320, 1, 1 },
+	{ ahc_patch6_func, 325, 1, 2 },
+	{ ahc_patch0_func, 326, 1, 1 },
+	{ ahc_patch1_func, 330, 47, 11 },
+	{ ahc_patch6_func, 337, 2, 4 },
+	{ ahc_patch7_func, 337, 1, 1 },
+	{ ahc_patch8_func, 338, 1, 1 },
+	{ ahc_patch0_func, 339, 1, 1 },
+	{ ahc_patch16_func, 340, 1, 1 },
+	{ ahc_patch6_func, 356, 6, 3 },
+	{ ahc_patch16_func, 356, 5, 1 },
+	{ ahc_patch0_func, 362, 7, 1 },
+	{ ahc_patch13_func, 372, 5, 1 },
+	{ ahc_patch0_func, 377, 52, 17 },
+	{ ahc_patch14_func, 377, 1, 1 },
+	{ ahc_patch7_func, 379, 2, 2 },
+	{ ahc_patch17_func, 380, 1, 1 },
+	{ ahc_patch9_func, 383, 1, 1 },
+	{ ahc_patch18_func, 390, 1, 1 },
+	{ ahc_patch14_func, 395, 9, 3 },
+	{ ahc_patch9_func, 396, 3, 2 },
+	{ ahc_patch0_func, 399, 3, 1 },
+	{ ahc_patch9_func, 407, 6, 2 },
+	{ ahc_patch0_func, 413, 9, 2 },
+	{ ahc_patch13_func, 413, 1, 1 },
+	{ ahc_patch13_func, 422, 2, 1 },
+	{ ahc_patch14_func, 424, 1, 1 },
+	{ ahc_patch9_func, 426, 1, 2 },
+	{ ahc_patch0_func, 427, 1, 1 },
+	{ ahc_patch7_func, 428, 1, 1 },
+	{ ahc_patch7_func, 429, 1, 1 },
+	{ ahc_patch8_func, 430, 3, 3 },
+	{ ahc_patch6_func, 431, 1, 2 },
+	{ ahc_patch0_func, 432, 1, 1 },
+	{ ahc_patch9_func, 433, 1, 1 },
+	{ ahc_patch15_func, 434, 1, 2 },
+	{ ahc_patch13_func, 434, 1, 1 },
+	{ ahc_patch14_func, 436, 9, 4 },
+	{ ahc_patch9_func, 436, 1, 1 },
+	{ ahc_patch9_func, 443, 2, 1 },
+	{ ahc_patch0_func, 445, 4, 3 },
+	{ ahc_patch9_func, 445, 1, 2 },
+	{ ahc_patch0_func, 446, 3, 1 },
+	{ ahc_patch1_func, 450, 2, 1 },
+	{ ahc_patch7_func, 452, 10, 2 },
+	{ ahc_patch0_func, 462, 1, 1 },
+	{ ahc_patch8_func, 463, 118, 22 },
+	{ ahc_patch1_func, 465, 3, 2 },
+	{ ahc_patch0_func, 468, 5, 3 },
+	{ ahc_patch9_func, 468, 2, 2 },
+	{ ahc_patch0_func, 470, 3, 1 },
+	{ ahc_patch1_func, 475, 2, 2 },
+	{ ahc_patch0_func, 477, 6, 3 },
+	{ ahc_patch9_func, 477, 2, 2 },
+	{ ahc_patch0_func, 479, 3, 1 },
+	{ ahc_patch1_func, 485, 2, 2 },
+	{ ahc_patch0_func, 487, 9, 7 },
+	{ ahc_patch9_func, 487, 5, 6 },
+	{ ahc_patch19_func, 487, 1, 2 },
+	{ ahc_patch0_func, 488, 1, 1 },
+	{ ahc_patch19_func, 490, 1, 2 },
+	{ ahc_patch0_func, 491, 1, 1 },
+	{ ahc_patch0_func, 492, 4, 1 },
+	{ ahc_patch6_func, 497, 3, 2 },
+	{ ahc_patch0_func, 500, 1, 1 },
+	{ ahc_patch6_func, 510, 1, 2 },
+	{ ahc_patch0_func, 511, 1, 1 },
+	{ ahc_patch20_func, 548, 7, 1 },
+	{ ahc_patch3_func, 583, 1, 2 },
+	{ ahc_patch0_func, 584, 1, 1 },
+	{ ahc_patch21_func, 587, 1, 1 },
+	{ ahc_patch8_func, 589, 106, 33 },
+	{ ahc_patch4_func, 591, 1, 1 },
+	{ ahc_patch1_func, 597, 2, 2 },
+	{ ahc_patch0_func, 599, 1, 1 },
+	{ ahc_patch1_func, 602, 1, 2 },
+	{ ahc_patch0_func, 603, 1, 1 },
+	{ ahc_patch9_func, 604, 3, 3 },
+	{ ahc_patch15_func, 605, 1, 1 },
+	{ ahc_patch0_func, 607, 4, 1 },
+	{ ahc_patch19_func, 616, 2, 2 },
+	{ ahc_patch0_func, 618, 1, 1 },
+	{ ahc_patch19_func, 622, 10, 3 },
+	{ ahc_patch5_func, 624, 8, 1 },
+	{ ahc_patch0_func, 632, 9, 2 },
+	{ ahc_patch5_func, 633, 8, 1 },
+	{ ahc_patch4_func, 643, 1, 2 },
+	{ ahc_patch0_func, 644, 1, 1 },
+	{ ahc_patch19_func, 645, 1, 2 },
+	{ ahc_patch0_func, 646, 3, 2 },
+	{ ahc_patch4_func, 648, 1, 1 },
+	{ ahc_patch5_func, 649, 1, 1 },
+	{ ahc_patch5_func, 652, 1, 1 },
+	{ ahc_patch5_func, 654, 1, 1 },
+	{ ahc_patch4_func, 656, 2, 2 },
+	{ ahc_patch0_func, 658, 2, 1 },
+	{ ahc_patch5_func, 660, 1, 1 },
+	{ ahc_patch5_func, 663, 1, 1 },
+	{ ahc_patch5_func, 666, 1, 1 },
+	{ ahc_patch19_func, 670, 1, 1 },
+	{ ahc_patch19_func, 673, 1, 1 },
+	{ ahc_patch4_func, 679, 1, 1 },
+	{ ahc_patch6_func, 682, 1, 2 },
+	{ ahc_patch0_func, 683, 1, 1 },
+	{ ahc_patch7_func, 695, 16, 1 },
+	{ ahc_patch4_func, 711, 20, 1 },
+	{ ahc_patch9_func, 732, 4, 2 },
+	{ ahc_patch0_func, 736, 4, 1 },
+	{ ahc_patch9_func, 740, 4, 2 },
+	{ ahc_patch0_func, 744, 3, 1 },
+	{ ahc_patch6_func, 750, 1, 1 },
+	{ ahc_patch22_func, 752, 14, 1 },
+	{ ahc_patch7_func, 766, 3, 1 },
+	{ ahc_patch9_func, 778, 24, 8 },
+	{ ahc_patch19_func, 782, 1, 2 },
+	{ ahc_patch0_func, 783, 1, 1 },
+	{ ahc_patch15_func, 788, 4, 2 },
+	{ ahc_patch0_func, 792, 7, 3 },
+	{ ahc_patch23_func, 792, 5, 2 },
+	{ ahc_patch0_func, 797, 2, 1 },
+	{ ahc_patch0_func, 802, 42, 3 },
+	{ ahc_patch18_func, 814, 18, 2 },
+	{ ahc_patch0_func, 832, 1, 1 },
+	{ ahc_patch4_func, 856, 1, 1 },
+	{ ahc_patch4_func, 857, 3, 2 },
+	{ ahc_patch0_func, 860, 1, 1 },
+	{ ahc_patch13_func, 861, 3, 1 },
+	{ ahc_patch4_func, 864, 12, 1 }
+};
+
+static struct cs {
+	uint16_t	begin;
+	uint16_t	end;
+} critical_sections[] = {
+	{ 11, 18 },
+	{ 21, 30 },
+	{ 711, 727 },
+	{ 857, 860 },
+	{ 864, 870 },
+	{ 872, 874 },
+	{ 874, 876 }
+};
+
+static const int num_critical_sections = sizeof(critical_sections)
+				       / sizeof(*critical_sections);
diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile
new file mode 100644
index 0000000..8c91fda
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/Makefile
@@ -0,0 +1,78 @@
+PROG=	aicasm
+
+.SUFFIXES= .l .y .c .h
+
+CSRCS=	aicasm.c aicasm_symbol.c
+YSRCS=	aicasm_gram.y aicasm_macro_gram.y
+LSRCS=	aicasm_scan.l aicasm_macro_scan.l
+
+GENHDRS=	aicdb.h $(YSRCS:.y=.h)
+GENSRCS=	$(YSRCS:.y=.c) $(LSRCS:.l=.c)
+
+SRCS=	${CSRCS} ${GENSRCS}
+LIBS=	-ldb
+clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG)
+# Override default kernel CFLAGS.  This is a userland app.
+AICASM_CFLAGS:= -I/usr/include -I.
+YFLAGS= -d
+
+NOMAN=	noman
+
+ifneq ($(HOSTCC),)
+AICASM_CC= $(HOSTCC)
+else
+AICASM_CC= $(CC)
+endif
+
+ifdef DEBUG
+CFLAGS+= -DDEBUG -g
+YFLAGS+= -t -v
+LFLAGS= -d
+endif
+
+$(PROG):  ${GENHDRS} $(SRCS)
+	$(AICASM_CC) $(AICASM_CFLAGS) $(SRCS) -o $(PROG) $(LIBS)
+
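+# Generate aicdb.h by probing for whichever Berkeley DB 1.85 compatibility
+# header is installed, from the db4 layout down to a bare db_185.h.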
+aicdb.h:
+	@if [ -e "/usr/include/db4/db_185.h" ]; then		\
+		echo "#include <db4/db_185.h>" > aicdb.h;	\
+	 elif [ -e "/usr/include/db3/db_185.h" ]; then		\
+		echo "#include <db3/db_185.h>" > aicdb.h;	\
+	 elif [ -e "/usr/include/db2/db_185.h" ]; then		\
+		echo "#include <db2/db_185.h>" > aicdb.h;	\
+	 elif [ -e "/usr/include/db1/db_185.h" ]; then		\
+		echo "#include <db1/db_185.h>" > aicdb.h;	\
+	 elif [ -e "/usr/include/db/db_185.h" ]; then		\
+		echo "#include <db/db_185.h>" > aicdb.h;	\
+	 elif [ -e "/usr/include/db_185.h" ]; then		\
+		echo "#include <db_185.h>" > aicdb.h;		\
+	 else							\
+		echo "*** Install db development libraries";	\
+	 fi
+
+clean:
+	rm -f $(clean-files)
+
+# Create a dependency chain in generated files
+# to avoid concurrent invocations of the single
+# rule that builds them all.
+aicasm_gram.c: aicasm_gram.h
+aicasm_gram.c aicasm_gram.h: aicasm_gram.y
+	$(YACC) $(YFLAGS) -b $(<:.y=) $<
+	mv $(<:.y=).tab.c $(<:.y=.c)
+	mv $(<:.y=).tab.h $(<:.y=.h)
+
+# Create a dependency chain in generated files
+# to avoid concurrent invocations of the single
+# rule that builds them all.
+aicasm_macro_gram.c: aicasm_macro_gram.h
+aicasm_macro_gram.c aicasm_macro_gram.h: aicasm_macro_gram.y
+	$(YACC) $(YFLAGS) -b $(<:.y=) -p mm $<
+	mv $(<:.y=).tab.c $(<:.y=.c)
+	mv $(<:.y=).tab.h $(<:.y=.h)
+
+aicasm_scan.c: aicasm_scan.l
+	$(LEX) $(LFLAGS) -o$@ $<
+
+aicasm_macro_scan.c: aicasm_macro_scan.l
+	$(LEX) $(LFLAGS) -Pmm -o$@ $<
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
new file mode 100644
index 0000000..c346394
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -0,0 +1,835 @@
+/*
+ * Aic7xxx SCSI host adapter firmware assembler
+ *
+ * Copyright (c) 1997, 1998, 2000, 2001 Justin T. Gibbs.
+ * Copyright (c) 2001, 2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm.c#22 $
+ *
+ * $FreeBSD$
+ */
+#include <sys/types.h>
+#include <sys/mman.h>
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <regex.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+#include <unistd.h>
+
+#if linux
+#include <endian.h>
+#else
+#include <machine/endian.h>
+#endif
+
+#include "aicasm.h"
+#include "aicasm_symbol.h"
+#include "aicasm_insformat.h"
+
+typedef struct patch {
+	STAILQ_ENTRY(patch) links;
+	int		patch_func;
+	u_int		begin;
+	u_int		skip_instr;
+	u_int		skip_patch;
+} patch_t;
+
+STAILQ_HEAD(patch_list, patch) patches;
+
+static void usage(void);
+static void back_patch(void);
+static void output_code(void);
+static void output_listing(char *ifilename);
+static void dump_scope(scope_t *scope);
+static void emit_patch(scope_t *scope, int patch);
+static int check_patch(patch_t **start_patch, int start_instr,
+		       int *skip_addr, int *func_vals);
+
+struct path_list search_path;
+int includes_search_curdir;
+char *appname;
+char *stock_include_file;
+FILE *ofile;
+char *ofilename;
+char *regfilename;
+FILE *regfile;
+char *listfilename;
+FILE *listfile;
+char *regdiagfilename;
+FILE *regdiagfile;
+int   src_mode;
+int   dst_mode;
+
+static STAILQ_HEAD(,instruction) seq_program;
+struct cs_tailq cs_tailq;
+struct scope_list scope_stack;
+symlist_t patch_functions;
+
+#if DEBUG
+extern int yy_flex_debug;
+extern int mm_flex_debug;
+extern int yydebug;
+extern int mmdebug;
+#endif
+extern FILE *yyin;
+extern int yyparse(void);
+
+int main(int argc, char *argv[]);
+
+int
+main(int argc, char *argv[])
+{
+	extern char *optarg;
+	extern int optind;
+	int  ch;
+	int  retval;
+	char *inputfilename;
+	scope_t *sentinel;
+
+	STAILQ_INIT(&patches);
+	SLIST_INIT(&search_path);
+	STAILQ_INIT(&seq_program);
+	TAILQ_INIT(&cs_tailq);
+	SLIST_INIT(&scope_stack);
+
+	/* Set sentinel scope node */
+	sentinel = scope_alloc();
+	sentinel->type = SCOPE_ROOT;
+	
+	includes_search_curdir = 1;
+	appname = *argv;
+	regfile = NULL;
+	listfile = NULL;
+#if DEBUG
+	yy_flex_debug = 0;
+	mm_flex_debug = 0;
+	yydebug = 0;
+	mmdebug = 0;
+#endif
+	while ((ch = getopt(argc, argv, "d:i:l:n:o:p:r:I:")) != -1) {
+		switch(ch) {
+		case 'd':
+#if DEBUG
+			if (strcmp(optarg, "s") == 0) {
+				yy_flex_debug = 1;
+				mm_flex_debug = 1;
+			} else if (strcmp(optarg, "p") == 0) {
+				yydebug = 1;
+				mmdebug = 1;
+			} else {
+				fprintf(stderr, "%s: -d Requires either an "
+					"'s' or 'p' argument\n", appname);
+				usage();
+			}
+#else
+			stop("-d: Assembler not built with debugging "
+			     "information", EX_SOFTWARE);
+#endif
+			break;
+		case 'i':
+			stock_include_file = optarg;
+			break;
+		case 'l':
+			/* Create a program listing */
+			if ((listfile = fopen(optarg, "w")) == NULL) {
+				perror(optarg);
+				stop(NULL, EX_CANTCREAT);
+			}
+			listfilename = optarg;
+			break;
+		case 'n':
+			/* Don't complain about the -nostdinc directive */
+			if (strcmp(optarg, "ostdinc")) {
+				fprintf(stderr, "%s: Unknown option -%c%s\n",
+					appname, ch, optarg);
+				usage();
+				/* NOTREACHED */
+			}
+			break;
+		case 'o':
+			if ((ofile = fopen(optarg, "w")) == NULL) {
+				perror(optarg);
+				stop(NULL, EX_CANTCREAT);
+			}
+			ofilename = optarg;
+			break;
+		case 'p':
+			/* Create Register Diagnostic "printing" Functions */
+			if ((regdiagfile = fopen(optarg, "w")) == NULL) {
+				perror(optarg);
+				stop(NULL, EX_CANTCREAT);
+			}
+			regdiagfilename = optarg;
+			break;
+		case 'r':
+			if ((regfile = fopen(optarg, "w")) == NULL) {
+				perror(optarg);
+				stop(NULL, EX_CANTCREAT);
+			}
+			regfilename = optarg;
+			break;
+		case 'I':
+		{
+			path_entry_t include_dir;
+
+			if (strcmp(optarg, "-") == 0) {
+				if (includes_search_curdir == 0) {
+					fprintf(stderr, "%s: Warning - '-I-' "
+							"specified multiple "
+							"times\n", appname);
+				}
+				includes_search_curdir = 0;
+				for (include_dir = SLIST_FIRST(&search_path);
+				     include_dir != NULL;
+				     include_dir = SLIST_NEXT(include_dir,
+							      links))
+					/*
+					 * All entries before a '-I-' only
+					 * apply to includes specified with
+					 * quotes instead of "<>".
+					 */
+					include_dir->quoted_includes_only = 1;
+			} else {
+				include_dir =
+				    (path_entry_t)malloc(sizeof(*include_dir));
+				if (include_dir == NULL) {
+					perror(optarg);
+					stop(NULL, EX_OSERR);
+				}
+				include_dir->directory = strdup(optarg);
+				if (include_dir->directory == NULL) {
+					perror(optarg);
+					stop(NULL, EX_OSERR);
+				}
+				include_dir->quoted_includes_only = 0;
+				SLIST_INSERT_HEAD(&search_path, include_dir,
+						  links);
+			}
+			break;
+		}
+		case '?':
+		default:
+			usage();
+			/* NOTREACHED */
+		}
+	}
+	argc -= optind;
+	argv += optind;
+
+	if (argc != 1) {
+		fprintf(stderr, "%s: No input file specified\n", appname);
+		usage();
+		/* NOTREACHED */
+	}
+
+	if (regdiagfile != NULL
+	 && (regfile == NULL || stock_include_file == NULL)) {
+		fprintf(stderr,
+			"%s: The -p option requires the -r and -i options.\n",
+			appname);
+		usage();
+		/* NOTREACHED */
+	}
+	symtable_open();
+	inputfilename = *argv;
+	include_file(*argv, SOURCE_FILE);
+	retval = yyparse();
+	if (retval == 0) {
+		if (SLIST_FIRST(&scope_stack) == NULL
+		 || SLIST_FIRST(&scope_stack)->type != SCOPE_ROOT) {
+			stop("Unterminated conditional expression", EX_DATAERR);
+			/* NOTREACHED */
+		}
+
+		/* Process outermost scope */
+		process_scope(SLIST_FIRST(&scope_stack));
+		/*
+		 * Descend the tree of scopes and insert/emit
+		 * patches as appropriate.  We perform a depth-first
+		 * traversal, recursively handling each scope.
+		 */
+		/* start at the root scope */
+		dump_scope(SLIST_FIRST(&scope_stack));
+
+		/* Patch up forward jump addresses */
+		back_patch();
+
+		if (ofile != NULL)
+			output_code();
+		if (regfile != NULL)
+			symtable_dump(regfile, regdiagfile);
+		if (listfile != NULL)
+			output_listing(inputfilename);
+	}
+
+	stop(NULL, 0);
+	/* NOTREACHED */
+	return (0);
+}
+
+static void
+usage()
+{
+
+	(void)fprintf(stderr,
+"usage: %-16s [-nostdinc] [-I-] [-I directory] [-o output_file]\n"
+"	[-r register_output_file [-p register_diag_file -i includefile]]\n"
+"	[-l program_list_file]\n"
+"	input_file\n", appname);
+	exit(EX_USAGE);
+}
+
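+/*
+ * Walk the assembled program and resolve forward label references:
+ * any instruction that recorded a jump label has the label's final
+ * address added to its address field.
+ */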
+static void
+back_patch()
+{
+	struct instruction *cur_instr;
+
+	for (cur_instr = STAILQ_FIRST(&seq_program);
+	     cur_instr != NULL;
+	     cur_instr = STAILQ_NEXT(cur_instr, links)) {
+		if (cur_instr->patch_label != NULL) {
+			struct ins_format3 *f3_instr;
+			u_int address;
+
+			if (cur_instr->patch_label->type != LABEL) {
+				char buf[255];
+
+				snprintf(buf, sizeof(buf),
+					 "Undefined label %s",
+					 cur_instr->patch_label->name);
+				stop(buf, EX_DATAERR);
+				/* NOTREACHED */
+			}
+			f3_instr = &cur_instr->format.format3;
+			address = f3_instr->address;
+			address += cur_instr->patch_label->info.linfo->address;
+			f3_instr->address = address;
+		}
+	}
+}
+
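+/*
+ * Emit the assembled sequencer program along with the patch function,
+ * patch, and critical section tables to the output file.
+ */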
+static void
+output_code()
+{
+	struct instruction *cur_instr;
+	patch_t *cur_patch;
+	critical_section_t *cs;
+	symbol_node_t *cur_node;
+	int instrcount;
+
+	instrcount = 0;
+	fprintf(ofile,
+"/*\n"
+" * DO NOT EDIT - This file is automatically generated\n"
+" *		 from the following source files:\n"
+" *\n"
+"%s */\n", versions);
+
+	fprintf(ofile, "static uint8_t seqprog[] = {\n");
+	for (cur_instr = STAILQ_FIRST(&seq_program);
+	     cur_instr != NULL;
+	     cur_instr = STAILQ_NEXT(cur_instr, links)) {
+
+		fprintf(ofile, "%s\t0x%02x, 0x%02x, 0x%02x, 0x%02x",
+			cur_instr == STAILQ_FIRST(&seq_program) ? "" : ",\n",
+#if BYTE_ORDER == LITTLE_ENDIAN
+			cur_instr->format.bytes[0],
+			cur_instr->format.bytes[1],
+			cur_instr->format.bytes[2],
+			cur_instr->format.bytes[3]);
+#else
+			cur_instr->format.bytes[3],
+			cur_instr->format.bytes[2],
+			cur_instr->format.bytes[1],
+			cur_instr->format.bytes[0]);
+#endif
+		instrcount++;
+	}
+	fprintf(ofile, "\n};\n\n");
+
+	if (patch_arg_list == NULL)
+		stop("Patch argument list not defined",
+		     EX_DATAERR);
+
+	/*
+	 *  Output patch information.  Patch functions first.
+	 */
+	fprintf(ofile,
+"typedef int %spatch_func_t (%s);\n", prefix, patch_arg_list);
+
+	for (cur_node = SLIST_FIRST(&patch_functions);
+	     cur_node != NULL;
+	     cur_node = SLIST_NEXT(cur_node,links)) {
+		fprintf(ofile,
+"static %spatch_func_t %spatch%d_func;\n"
+"\n"
+"static int\n"
+"%spatch%d_func(%s)\n"
+"{\n"
+"	return (%s);\n"
+"}\n\n",
+			prefix,
+			prefix,
+			cur_node->symbol->info.condinfo->func_num,
+			prefix,
+			cur_node->symbol->info.condinfo->func_num,
+			patch_arg_list,
+			cur_node->symbol->name);
+	}
+
+	fprintf(ofile,
+"static struct patch {\n"
+"	%spatch_func_t		*patch_func;\n"
+"	uint32_t		 begin		:10,\n"
+"				 skip_instr	:10,\n"
+"				 skip_patch	:12;\n"
+"} patches[] = {\n", prefix);
+
+	for (cur_patch = STAILQ_FIRST(&patches);
+	     cur_patch != NULL;
+	     cur_patch = STAILQ_NEXT(cur_patch,links)) {
+		fprintf(ofile, "%s\t{ %spatch%d_func, %d, %d, %d }",
+			cur_patch == STAILQ_FIRST(&patches) ? "" : ",\n",
+			prefix,
+			cur_patch->patch_func, cur_patch->begin,
+			cur_patch->skip_instr, cur_patch->skip_patch);
+	}
+
+	fprintf(ofile, "\n};\n\n");
+
+	fprintf(ofile,
+"static struct cs {\n"
+"	uint16_t	begin;\n"
+"	uint16_t	end;\n"
+"} critical_sections[] = {\n");
+
+	for (cs = TAILQ_FIRST(&cs_tailq);
+	     cs != NULL;
+	     cs = TAILQ_NEXT(cs, links)) {
+		fprintf(ofile, "%s\t{ %d, %d }",
+			cs == TAILQ_FIRST(&cs_tailq) ? "" : ",\n",
+			cs->begin_addr, cs->end_addr);
+	}
+
+	fprintf(ofile, "\n};\n\n");
+
+	fprintf(ofile,
+"static const int num_critical_sections = sizeof(critical_sections)\n"
+"				       / sizeof(*critical_sections);\n");
+
+	fprintf(stderr, "%s: %d instructions used\n", appname, instrcount);
+}
+
+static void
+dump_scope(scope_t *scope)
+{
+	scope_t *cur_scope;
+
+	/*
+	 * Emit the first patch for this scope
+	 */
+	emit_patch(scope, 0);
+
+	/*
+	 * Dump each scope within this one.
+	 */
+	cur_scope = TAILQ_FIRST(&scope->inner_scope);
+
+	while (cur_scope != NULL) {
+
+		dump_scope(cur_scope);
+
+		cur_scope = TAILQ_NEXT(cur_scope, scope_links);
+	}
+
+	/*
+	 * Emit the second, closing, patch for this scope
+	 */
+	emit_patch(scope, 1);
+}
+
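+/*
+ * Append one of a scope's two patch records (0 = head, 1 = tail) to the
+ * global patch list, skipping records that cover no instructions.
+ */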
+void
+emit_patch(scope_t *scope, int patch)
+{
+	patch_info_t *pinfo;
+	patch_t *new_patch;
+
+	pinfo = &scope->patches[patch];
+
+	if (pinfo->skip_instr == 0)
+		/* No-Op patch */
+		return;
+
+	new_patch = (patch_t *)malloc(sizeof(*new_patch));
+
+	if (new_patch == NULL)
+		stop("Could not malloc patch structure", EX_OSERR);
+
+	memset(new_patch, 0, sizeof(*new_patch));
+
+	if (patch == 0) {
+		new_patch->patch_func = scope->func_num;
+		new_patch->begin = scope->begin_addr;
+	} else {
+		new_patch->patch_func = 0;
+		new_patch->begin = scope->end_addr;
+	}
+	new_patch->skip_instr = pinfo->skip_instr;
+	new_patch->skip_patch = pinfo->skip_patch;
+	STAILQ_INSERT_TAIL(&patches, new_patch, links);
+}
+
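+/*
+ * Produce a listing that interleaves the original source with the
+ * assembled instructions.  The user is asked for the truth value of
+ * each patch condition so that instructions removed by patching can
+ * be skipped.
+ */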
+void
+output_listing(char *ifilename)
+{
+	char buf[1024];
+	FILE *ifile;
+	struct instruction *cur_instr;
+	patch_t *cur_patch;
+	symbol_node_t *cur_func;
+	int *func_values;
+	int instrcount;
+	int instrptr;
+	int line;
+	int func_count;
+	int skip_addr;
+
+	instrcount = 0;
+	instrptr = 0;
+	line = 1;
+	skip_addr = 0;
+	if ((ifile = fopen(ifilename, "r")) == NULL) {
+		perror(ifilename);
+		stop(NULL, EX_DATAERR);
+	}
+
+	/*
+	 * Determine which options to apply to this listing.
+	 */
+	for (func_count = 0, cur_func = SLIST_FIRST(&patch_functions);
+	    cur_func != NULL;
+	    cur_func = SLIST_NEXT(cur_func, links))
+		func_count++;
+
+	func_values = NULL;
+	if (func_count != 0) {
+		func_values = (int *)malloc(func_count * sizeof(int));
+
+		if (func_values == NULL)
+			stop("Could not malloc", EX_OSERR);
+		
+		func_values[0] = 0; /* FALSE func */
+		func_count--;
+
+		/*
+		 * Ask the user to fill in the return values for
+		 * the rest of the functions.
+		 */
+		
+		
+		for (cur_func = SLIST_FIRST(&patch_functions);
+		     cur_func != NULL && SLIST_NEXT(cur_func, links) != NULL;
+		     cur_func = SLIST_NEXT(cur_func, links), func_count--) {
+			int input;
+			
+			fprintf(stdout, "\n(%s)\n", cur_func->symbol->name);
+			fprintf(stdout,
+				"Enter the return value for "
+				"this expression[T/F]:");
+
+			while (1) {
+
+				input = getchar();
+				input = toupper(input);
+
+				if (input == 'T') {
+					func_values[func_count] = 1;
+					break;
+				} else if (input == 'F') {
+					func_values[func_count] = 0;
+					break;
+				}
+			}
+			if (isatty(fileno(stdin)) == 0)
+				putchar(input);
+		}
+		fprintf(stdout, "\nThanks!\n");
+	}
+
+	/* Now output the listing */
+	cur_patch = STAILQ_FIRST(&patches);
+	for (cur_instr = STAILQ_FIRST(&seq_program);
+	     cur_instr != NULL;
+	     cur_instr = STAILQ_NEXT(cur_instr, links), instrcount++) {
+
+		if (check_patch(&cur_patch, instrcount,
+				&skip_addr, func_values) == 0) {
+			/*
+			 * Don't count this instruction as it is in a
+			 * patch that was removed.
+			 */
+			continue;
+		}
+
+		while (line < cur_instr->srcline) {
+			fgets(buf, sizeof(buf), ifile);
+			fprintf(listfile, "\t\t%s", buf);
+			line++;
+		}
+		fprintf(listfile, "%03x %02x%02x%02x%02x", instrptr,
+#if BYTE_ORDER == LITTLE_ENDIAN
+			cur_instr->format.bytes[0],
+			cur_instr->format.bytes[1],
+			cur_instr->format.bytes[2],
+			cur_instr->format.bytes[3]);
+#else
+			cur_instr->format.bytes[3],
+			cur_instr->format.bytes[2],
+			cur_instr->format.bytes[1],
+			cur_instr->format.bytes[0]);
+#endif
+		fgets(buf, sizeof(buf), ifile);
+		fprintf(listfile, "\t%s", buf);
+		line++;
+		instrptr++;
+	}
+	/* Dump the remainder of the file */
+	while(fgets(buf, sizeof(buf), ifile) != NULL)
+		fprintf(listfile, "\t\t%s", buf);
+
+	fclose(ifile);
+}
+
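+/*
+ * Determine whether the instruction at start_instr survives patching.
+ * Returns 0 while inside a rejected patch region and 1 otherwise,
+ * advancing *start_patch past any patches evaluated at this address.
+ */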
+static int
+check_patch(patch_t **start_patch, int start_instr,
+	    int *skip_addr, int *func_vals)
+{
+	patch_t *cur_patch;
+
+	cur_patch = *start_patch;
+
+	while (cur_patch != NULL && start_instr == cur_patch->begin) {
+		if (func_vals[cur_patch->patch_func] == 0) {
+			int skip;
+
+			/* Start rejecting code */
+			*skip_addr = start_instr + cur_patch->skip_instr;
+			for (skip = cur_patch->skip_patch;
+			     skip > 0 && cur_patch != NULL;
+			     skip--)
+				cur_patch = STAILQ_NEXT(cur_patch, links);
+		} else {
+			/* Accepted this patch.  Advance to the next
+			 * one and wait for our instruction pointer to
+			 * hit this point.
+			 */
+			cur_patch = STAILQ_NEXT(cur_patch, links);
+		}
+	}
+
+	*start_patch = cur_patch;
+	if (start_instr < *skip_addr)
+		/* Still skipping */
+		return (0);
+
+	return (1);
+}
+
+/*
+ * Print out error information if appropriate, and clean up before
+ * terminating the program.
+ */
+void
+stop(const char *string, int err_code)
+{
+	if (string != NULL) {
+		fprintf(stderr, "%s: ", appname);
+		if (yyfilename != NULL) {
+			fprintf(stderr, "Stopped at file %s, line %d - ",
+				yyfilename, yylineno);
+		}
+		fprintf(stderr, "%s\n", string);
+	}
+
+	if (ofile != NULL) {
+		fclose(ofile);
+		if (err_code != 0) {
+			fprintf(stderr, "%s: Removing %s due to error\n",
+				appname, ofilename);
+			unlink(ofilename);
+		}
+	}
+
+	if (regfile != NULL) {
+		fclose(regfile);
+		if (err_code != 0) {
+			fprintf(stderr, "%s: Removing %s due to error\n",
+				appname, regfilename);
+			unlink(regfilename);
+		}
+	}
+
+	if (listfile != NULL) {
+		fclose(listfile);
+		if (err_code != 0) {
+			fprintf(stderr, "%s: Removing %s due to error\n",
+				appname, listfilename);
+			unlink(listfilename);
+		}
+	}
+
+	symlist_free(&patch_functions);
+	symtable_close();
+
+	exit(err_code);
+}
+
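+/*
+ * Allocate a zeroed instruction, tag it with the current source line,
+ * and append it to the sequencer program list.
+ */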
+struct instruction *
+seq_alloc()
+{
+	struct instruction *new_instr;
+
+	new_instr = (struct instruction *)malloc(sizeof(struct instruction));
+	if (new_instr == NULL)
+		stop("Unable to malloc instruction object", EX_SOFTWARE);
+	memset(new_instr, 0, sizeof(*new_instr));
+	STAILQ_INSERT_TAIL(&seq_program, new_instr, links);
+	new_instr->srcline = yylineno;
+	return new_instr;
+}
+
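+/*
+ * Allocate a zeroed critical section record and append it to the
+ * global critical section list.
+ */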
+critical_section_t *
+cs_alloc()
+{
+	critical_section_t *new_cs;
+
+	new_cs = (critical_section_t *)malloc(sizeof(critical_section_t));
+	if (new_cs == NULL)
+		stop("Unable to malloc critical_section object", EX_SOFTWARE);
+	memset(new_cs, 0, sizeof(*new_cs));
+	
+	TAILQ_INSERT_TAIL(&cs_tailq, new_cs, links);
+	return new_cs;
+}
+
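+/*
+ * Allocate a zeroed scope, link it into the current scope's inner
+ * scope list, and push it onto the scope stack as the new current
+ * scope.
+ */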
+scope_t *
+scope_alloc()
+{
+	scope_t *new_scope;
+
+	new_scope = (scope_t *)malloc(sizeof(scope_t));
+	if (new_scope == NULL)
+		stop("Unable to malloc scope object", EX_SOFTWARE);
+	memset(new_scope, 0, sizeof(*new_scope));
+	TAILQ_INIT(&new_scope->inner_scope);
+	
+	if (SLIST_FIRST(&scope_stack) != NULL) {
+		TAILQ_INSERT_TAIL(&SLIST_FIRST(&scope_stack)->inner_scope,
+				  new_scope, scope_links);
+	}
+	/* The new scope is now the current scope */
+	SLIST_INSERT_HEAD(&scope_stack, new_scope, scope_stack_links);
+	return new_scope;
+}
+
+void
+process_scope(scope_t *scope)
+{
+	/*
+	 * We are "leaving" this scope.  We should now have
+	 * enough information to process the lists of scopes
+	 * we encapsulate.
+	 */
+	scope_t *cur_scope;
+	u_int skip_patch_count;
+	u_int skip_instr_count;
+
+	cur_scope = TAILQ_LAST(&scope->inner_scope, scope_tailq);
+	skip_patch_count = 0;
+	skip_instr_count = 0;
+	while (cur_scope != NULL) {
+		u_int patch0_patch_skip;
+
+		patch0_patch_skip = 0;
+		switch (cur_scope->type) {
+		case SCOPE_IF:
+		case SCOPE_ELSE_IF:
+			if (skip_instr_count != 0) {
+				/* Create a tail patch */
+				patch0_patch_skip++;
+				cur_scope->patches[1].skip_patch =
+				    skip_patch_count + 1;
+				cur_scope->patches[1].skip_instr =
+				    skip_instr_count;
+			}
+
+			/* Count Head patch */
+			patch0_patch_skip++;
+
+			/* Count any patches contained in our inner scope */
+			patch0_patch_skip += cur_scope->inner_scope_patches;
+
+			cur_scope->patches[0].skip_patch = patch0_patch_skip;
+			cur_scope->patches[0].skip_instr =
+			    cur_scope->end_addr - cur_scope->begin_addr;
+
+			skip_instr_count += cur_scope->patches[0].skip_instr;
+
+			skip_patch_count += patch0_patch_skip;
+			if (cur_scope->type == SCOPE_IF) {
+				scope->inner_scope_patches += skip_patch_count;
+				skip_patch_count = 0;
+				skip_instr_count = 0;
+			}
+			break;
+		case SCOPE_ELSE:
+			/* Count any patches contained in our inner scope */
+			skip_patch_count += cur_scope->inner_scope_patches;
+
+			skip_instr_count += cur_scope->end_addr
+					  - cur_scope->begin_addr;
+			break;
+		case SCOPE_ROOT:
+			stop("Unexpected scope type encountered", EX_SOFTWARE);
+			/* NOTREACHED */
+		}
+
+		cur_scope = TAILQ_PREV(cur_scope, scope_tailq, scope_links);
+	}
+}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.h b/drivers/scsi/aic7xxx/aicasm/aicasm.h
new file mode 100644
index 0000000..51678dd
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.h
@@ -0,0 +1,95 @@
+/*
+ * Assembler for the sequencer program downloaded to Aic7xxx SCSI host adapters
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * Copyright (c) 2001, 2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm.h#14 $
+ *
+ * $FreeBSD$
+ */
+
+#ifdef __linux__
+#include "../queue.h"
+#else
+#include <sys/queue.h>
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+typedef struct path_entry {
+	char	*directory;
+	int	quoted_includes_only;
+	SLIST_ENTRY(path_entry) links;
+} *path_entry_t;
+
+typedef enum {  
+	QUOTED_INCLUDE,
+	BRACKETED_INCLUDE,
+	SOURCE_FILE
+} include_type;
+
+SLIST_HEAD(path_list, path_entry);
+
+extern struct path_list search_path;
+extern struct cs_tailq cs_tailq;
+extern struct scope_list scope_stack;
+extern struct symlist patch_functions;
+extern int includes_search_curdir;		/* False if we've seen -I- */
+extern char *appname;
+extern char *stock_include_file;
+extern int yylineno;
+extern char *yyfilename;
+extern char *prefix;
+extern char *patch_arg_list;
+extern char *versions;
+extern int   src_mode;
+extern int   dst_mode;
+struct symbol;
+
+void stop(const char *errstring, int err_code);
+void include_file(char *file_name, include_type type);
+void expand_macro(struct symbol *macro_symbol);
+struct instruction *seq_alloc(void);
+struct critical_section *cs_alloc(void);
+struct scope *scope_alloc(void);
+void process_scope(struct scope *);
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
new file mode 100644
index 0000000..67e046d
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -0,0 +1,1945 @@
+%{
+/*
+ * Parser for the Aic7xxx SCSI Host adapter sequencer assembler.
+ *
+ * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs.
+ * Copyright (c) 2001, 2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_gram.y#29 $
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+
+#include <inttypes.h>
+#include <regex.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+
+#ifdef __linux__
+#include "../queue.h"
+#else
+#include <sys/queue.h>
+#endif
+
+#include "aicasm.h"
+#include "aicasm_symbol.h"
+#include "aicasm_insformat.h"
+
+int yylineno;
+char *yyfilename;
+char stock_prefix[] = "aic_";
+char *prefix = stock_prefix;
+char *patch_arg_list;
+char *versions;
+static char errbuf[255];
+static char regex_pattern[255];
+static symbol_t *cur_symbol;
+static symbol_t *field_symbol;
+static symbol_t *scb_or_sram_symbol;
+static symtype cur_symtype;
+static symbol_ref_t accumulator;
+static symbol_ref_t mode_ptr;
+static symbol_ref_t allones;
+static symbol_ref_t allzeros;
+static symbol_ref_t none;
+static symbol_ref_t sindex;
+static int instruction_ptr;
+static int num_srams;
+static int sram_or_scb_offset;
+static int download_constant_count;
+static int in_critical_section;
+static u_int enum_increment;
+static u_int enum_next_value;
+
+static void process_field(int field_type, symbol_t *sym, int mask);
+static void initialize_symbol(symbol_t *symbol);
+static void add_macro_arg(const char *argtext, int position);
+static void add_macro_body(const char *bodytext);
+static void process_register(symbol_t **p_symbol);
+static void format_1_instr(int opcode, symbol_ref_t *dest,
+			   expression_t *immed, symbol_ref_t *src, int ret);
+static void format_2_instr(int opcode, symbol_ref_t *dest,
+			   expression_t *places, symbol_ref_t *src, int ret);
+static void format_3_instr(int opcode, symbol_ref_t *src,
+			   expression_t *immed, symbol_ref_t *address);
+static void test_readable_symbol(symbol_t *symbol);
+static void test_writable_symbol(symbol_t *symbol);
+static void type_check(symbol_t *symbol, expression_t *expression, int and_op);
+static void make_expression(expression_t *immed, int value);
+static void add_conditional(symbol_t *symbol);
+static void add_version(const char *verstring);
+static int  is_download_const(expression_t *immed);
+
+#define SRAM_SYMNAME "SRAM_BASE"
+#define SCB_SYMNAME "SCB_BASE"
+%}
+
+%union {
+	u_int		value;
+	char		*str;
+	symbol_t	*sym;
+	symbol_ref_t	sym_ref;
+	expression_t	expression;
+}
+
+%token T_REGISTER
+
+%token <value> T_CONST
+
+%token T_EXPORT
+
+%token T_DOWNLOAD
+
+%token T_SCB
+
+%token T_SRAM
+
+%token T_ALIAS
+
+%token T_SIZE
+
+%token T_EXPR_LSHIFT
+
+%token T_EXPR_RSHIFT
+
+%token <value> T_ADDRESS
+
+%token T_ACCESS_MODE
+
+%token T_MODES
+
+%token T_DEFINE
+
+%token T_SET_SRC_MODE
+
+%token T_SET_DST_MODE
+
+%token <value> T_MODE
+
+%token T_BEGIN_CS
+
+%token T_END_CS
+
+%token T_FIELD
+
+%token T_ENUM
+
+%token T_MASK
+
+%token <value> T_NUMBER
+
+%token <str> T_PATH T_STRING T_ARG T_MACROBODY
+
+%token <sym> T_CEXPR
+
+%token T_EOF T_INCLUDE T_VERSION T_PREFIX T_PATCH_ARG_LIST
+
+%token <value> T_SHR T_SHL T_ROR T_ROL
+
+%token <value> T_MVI T_MOV T_CLR T_BMOV
+
+%token <value> T_JMP T_JC T_JNC T_JE T_JNE T_JNZ T_JZ T_CALL
+
+%token <value> T_ADD T_ADC
+
+%token <value> T_INC T_DEC
+
+%token <value> T_STC T_CLC
+
+%token <value> T_CMP T_NOT T_XOR
+
+%token <value> T_TEST T_AND
+
+%token <value> T_OR
+
+%token T_RET
+
+%token T_NOP
+
+%token T_ACCUM T_ALLONES T_ALLZEROS T_NONE T_SINDEX T_MODE_PTR
+
+%token T_A
+
+%token <sym> T_SYMBOL
+
+%token T_NL
+
+%token T_IF T_ELSE T_ELSE_IF T_ENDIF
+
+%type <sym_ref> reg_symbol address destination source opt_source
+
+%type <expression> expression immediate immediate_or_a
+
+%type <value> export ret f1_opcode f2_opcode jmp_jc_jnc_call jz_jnz je_jne
+
+%type <value> mode_value mode_list macro_arglist
+
+%left '|'
+%left '&'
+%left T_EXPR_LSHIFT T_EXPR_RSHIFT
+%left '+' '-'
+%left '*' '/'
+%right '~'
+%nonassoc UMINUS
+%%
+
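+/*
+ * A source file is an arbitrary sequence of the top-level constructs
+ * below; each alternative also appears with a leading "program" so
+ * the sequence can repeat.
+ */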
+program:
+	include
+|	program include
+|	prefix
+|	program prefix
+|	patch_arg_list
+|	program patch_arg_list
+|	version
+|	program version
+|	register
+|	program register
+|	constant
+|	program constant
+|	macrodefn
+|	program macrodefn
+|	scratch_ram
+|	program scratch_ram
+|	scb
+|	program scb
+|	label
+|	program label
+|	set_src_mode
+|	program set_src_mode
+|	set_dst_mode
+|	program set_dst_mode
+|	critical_section_start
+|	program critical_section_start
+|	critical_section_end
+|	program critical_section_end
+|	conditional
+|	program conditional
+|	code
+|	program code
+;
+
+include:
+	T_INCLUDE '<' T_PATH '>'
+	{
+		include_file($3, BRACKETED_INCLUDE);
+	}
+|	T_INCLUDE '"' T_PATH '"'
+	{
+		include_file($3, QUOTED_INCLUDE);
+	}
+;
+
+prefix:
+	T_PREFIX '=' T_STRING
+	{
+		if (prefix != stock_prefix)
+			stop("Prefix multiply defined",
+			     EX_DATAERR);
+		prefix = strdup($3);
+		if (prefix == NULL)
+			stop("Unable to record prefix", EX_SOFTWARE);
+	}
+;
+
+patch_arg_list:
+	T_PATCH_ARG_LIST '=' T_STRING
+	{
+		if (patch_arg_list != NULL)
+			stop("Patch argument list multiply defined",
+			     EX_DATAERR);
+		patch_arg_list = strdup($3);
+		if (patch_arg_list == NULL)
+			stop("Unable to record patch arg list", EX_SOFTWARE);
+	}
+;
+
+version:
+	T_VERSION '=' T_STRING
+	{ add_version($3); }
+;
+
+register:
+	T_REGISTER { cur_symtype = REGISTER; } reg_definition
+;
+
+reg_definition:
+	T_SYMBOL '{'
+		{
+			if ($1->type != UNINITIALIZED) {
+				stop("Register multiply defined", EX_DATAERR);
+				/* NOTREACHED */
+			}
+			cur_symbol = $1; 
+			cur_symbol->type = cur_symtype;
+			initialize_symbol(cur_symbol);
+		}
+		reg_attribute_list
+	'}'
+		{                    
+			/*
+			 * Default to allowing everything in for registers
+			 * with no bit or mask definitions.
+			 */
+			if (cur_symbol->info.rinfo->valid_bitmask == 0)
+				cur_symbol->info.rinfo->valid_bitmask = 0xFF;
+
+			if (cur_symbol->info.rinfo->size == 0)
+				cur_symbol->info.rinfo->size = 1;
+
+			/*
+			 * SCB and SRAM locations whose address was not
+			 * set are placed at the next free offset in
+			 * their space.  This might be useful for
+			 * registers too.
+			 */
+			if (cur_symbol->type != REGISTER) {
+				if (cur_symbol->info.rinfo->address == 0)
+					cur_symbol->info.rinfo->address =
+					    sram_or_scb_offset;
+				sram_or_scb_offset +=
+				    cur_symbol->info.rinfo->size;
+			}
+			cur_symbol = NULL;
+		}
+;
+
+reg_attribute_list:
+	reg_attribute
+|	reg_attribute_list reg_attribute
+;
+
+reg_attribute:		
+	reg_address
+|	size
+|	access_mode
+|	modes
+|	field_defn
+|	enum_defn
+|	mask_defn
+|	alias
+|	accumulator
+|	mode_pointer
+|	allones
+|	allzeros
+|	none
+|	sindex
+;
+
+reg_address:
+	T_ADDRESS T_NUMBER
+	{
+		cur_symbol->info.rinfo->address = $2;
+	}
+;
+
+size:
+	T_SIZE T_NUMBER
+	{
+		cur_symbol->info.rinfo->size = $2;
+		if (scb_or_sram_symbol != NULL) {
+			u_int max_addr;
+			u_int sym_max_addr;
+
+			max_addr = scb_or_sram_symbol->info.rinfo->address
+				 + scb_or_sram_symbol->info.rinfo->size;
+			sym_max_addr = cur_symbol->info.rinfo->address
+				     + cur_symbol->info.rinfo->size;
+
+			if (sym_max_addr > max_addr)
+				stop("SCB or SRAM space exhausted", EX_DATAERR);
+		}
+	}
+;
+
+access_mode:
+	T_ACCESS_MODE T_MODE
+	{
+		cur_symbol->info.rinfo->mode = $2;
+	}
+;
+
+modes:
+	T_MODES mode_list
+	{
+		cur_symbol->info.rinfo->modes = $2;
+	}
+;
+
+mode_list:
+	mode_value
+	{
+		$$ = $1;
+	}
+|	mode_list ',' mode_value
+	{
+		$$ = $1 | $3;
+	}
+;
+
+mode_value:
+	T_NUMBER
+	{
+		if ($1 > 4) {
+			stop("Valid register modes range between 0 and 4.",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+
+		$$ = (0x1 << $1);
+	}
+|	T_SYMBOL
+	{
+		symbol_t *symbol;
+
+		symbol = $1;
+		if (symbol->type != CONST) {
+			stop("Only \"const\" symbols allowed in "
+			     "mode definitions.", EX_DATAERR);
+			/* NOTREACHED */
+		}
+		if (symbol->info.cinfo->value > 4) {
+			stop("Valid register modes range between 0 and 4.",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		$$ = (0x1 << symbol->info.cinfo->value);
+	}
+;
+
+field_defn:
+	T_FIELD
+		{
+			field_symbol = NULL;
+			enum_next_value = 0;
+			enum_increment = 1;
+		}
+	'{' enum_entry_list '}'
+|	T_FIELD T_SYMBOL expression
+		{
+			process_field(FIELD, $2, $3.value);
+			field_symbol = $2;
+			enum_next_value = 0;
+			enum_increment = 0x01 << (ffs($3.value) - 1);
+		}
+	'{' enum_entry_list '}'
+|	T_FIELD T_SYMBOL expression
+	{
+		process_field(FIELD, $2, $3.value);
+	}
+;
+
+enum_defn:
+	T_ENUM
+		{
+			field_symbol = NULL;
+			enum_next_value = 0;
+			enum_increment = 1;
+		}
+	'{' enum_entry_list '}'
+|	T_ENUM T_SYMBOL expression
+		{
+			process_field(ENUM, $2, $3.value);
+			field_symbol = $2;
+			enum_next_value = 0;
+			enum_increment = 0x01 << (ffs($3.value) - 1);
+		}
+	'{' enum_entry_list '}'
+;
+
+enum_entry_list:
+	enum_entry
+|	enum_entry_list ',' enum_entry
+;
+
+enum_entry:
+	T_SYMBOL
+	{
+		process_field(ENUM_ENTRY, $1, enum_next_value);
+		enum_next_value += enum_increment;
+	}
+|	T_SYMBOL expression
+	{
+		process_field(ENUM_ENTRY, $1, $2.value);
+		enum_next_value = $2.value + enum_increment;
+	}
+;
+
+mask_defn:
+	T_MASK T_SYMBOL expression
+	{
+		process_field(MASK, $2, $3.value);
+	}
+;
+
+alias:
+	T_ALIAS	T_SYMBOL
+	{
+		if ($2->type != UNINITIALIZED) {
+			stop("Re-definition of register alias",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		$2->type = ALIAS;
+		initialize_symbol($2);
+		$2->info.ainfo->parent = cur_symbol;
+	}
+;
+
+accumulator:
+	T_ACCUM
+	{
+		if (accumulator.symbol != NULL) {
+			stop("Only one accumulator definition allowed",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		accumulator.symbol = cur_symbol;
+	}
+;
+
+mode_pointer:
+	T_MODE_PTR
+	{
+		if (mode_ptr.symbol != NULL) {
+			stop("Only one mode pointer definition allowed",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		mode_ptr.symbol = cur_symbol;
+	}
+;
+
+allones:
+	T_ALLONES
+	{
+		if (allones.symbol != NULL) {
+			stop("Only one definition of allones allowed",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		allones.symbol = cur_symbol;
+	}
+;
+
+allzeros:
+	T_ALLZEROS
+	{
+		if (allzeros.symbol != NULL) {
+			stop("Only one definition of allzeros allowed",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		allzeros.symbol = cur_symbol;
+	}
+;
+
+none:
+	T_NONE
+	{
+		if (none.symbol != NULL) {
+			stop("Only one definition of none allowed",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		none.symbol = cur_symbol;
+	}
+;
+
+sindex:
+	T_SINDEX
+	{
+		if (sindex.symbol != NULL) {
+			stop("Only one definition of sindex allowed",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		sindex.symbol = cur_symbol;
+	}
+;
+
+expression:
+	expression '|' expression
+	{
+		 $$.value = $1.value | $3.value;
+		 symlist_merge(&$$.referenced_syms,
+			       &$1.referenced_syms,
+			       &$3.referenced_syms);
+	}
+|	expression '&' expression
+	{
+		$$.value = $1.value & $3.value;
+		symlist_merge(&$$.referenced_syms,
+			       &$1.referenced_syms,
+			       &$3.referenced_syms);
+	}
+|	expression '+' expression
+	{
+		$$.value = $1.value + $3.value;
+		symlist_merge(&$$.referenced_syms,
+			       &$1.referenced_syms,
+			       &$3.referenced_syms);
+	}
+|	expression '-' expression
+	{
+		$$.value = $1.value - $3.value;
+		symlist_merge(&($$.referenced_syms),
+			       &($1.referenced_syms),
+			       &($3.referenced_syms));
+	}
+|	expression '*' expression
+	{
+		$$.value = $1.value * $3.value;
+		symlist_merge(&($$.referenced_syms),
+			       &($1.referenced_syms),
+			       &($3.referenced_syms));
+	}
+|	expression '/' expression
+	{
+		$$.value = $1.value / $3.value;
+		symlist_merge(&($$.referenced_syms),
+			       &($1.referenced_syms),
+			       &($3.referenced_syms));
+	}
+| 	expression T_EXPR_LSHIFT expression
+	{
+		$$.value = $1.value << $3.value;
+		symlist_merge(&$$.referenced_syms,
+			       &$1.referenced_syms,
+			       &$3.referenced_syms);
+	}
+| 	expression T_EXPR_RSHIFT expression
+	{
+		$$.value = $1.value >> $3.value;
+		symlist_merge(&$$.referenced_syms,
+			       &$1.referenced_syms,
+			       &$3.referenced_syms);
+	}
+|	'(' expression ')'
+	{
+		$$ = $2;
+	}
+|	'~' expression
+	{
+		$$ = $2;
+		$$.value = (~$$.value) & 0xFF;
+	}
+|	'-' expression %prec UMINUS
+	{
+		$$ = $2;
+		$$.value = -$$.value;
+	}
+|	T_NUMBER
+	{
+		$$.value = $1;
+		SLIST_INIT(&$$.referenced_syms);
+	}
+|	T_SYMBOL
+	{
+		symbol_t *symbol;
+
+		symbol = $1;
+		switch (symbol->type) {
+		case ALIAS:
+			symbol = $1->info.ainfo->parent;
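+			/* FALLTHROUGH - an alias resolves to its parent register */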
+		case REGISTER:
+		case SCBLOC:
+		case SRAMLOC:
+			$$.value = symbol->info.rinfo->address;
+			break;
+		case MASK:
+		case FIELD:
+		case ENUM:
+		case ENUM_ENTRY:
+			$$.value = symbol->info.finfo->value;
+			break;
+		case DOWNLOAD_CONST:
+		case CONST:
+			$$.value = symbol->info.cinfo->value;
+			break;
+		case UNINITIALIZED:
+		default:
+		{
+			snprintf(errbuf, sizeof(errbuf),
+				 "Undefined symbol %s referenced",
+				 symbol->name);
+			stop(errbuf, EX_DATAERR);
+			/* NOTREACHED */
+			break;
+		}
+		}
+		SLIST_INIT(&$$.referenced_syms);
+		symlist_add(&$$.referenced_syms, symbol, SYMLIST_INSERT_HEAD);
+	}
+;
+
+constant:
+	T_CONST T_SYMBOL expression 
+	{
+		if ($2->type != UNINITIALIZED) {
+			stop("Re-definition of symbol as a constant",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		$2->type = CONST;
+		initialize_symbol($2);
+		$2->info.cinfo->value = $3.value;
+	}
+|	T_CONST T_SYMBOL T_DOWNLOAD
+	{
+		if ($1) {
+			stop("Invalid downloaded constant declaration",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		if ($2->type != UNINITIALIZED) {
+			stop("Re-definition of symbol as a downloaded constant",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		$2->type = DOWNLOAD_CONST;
+		initialize_symbol($2);
+		$2->info.cinfo->value = download_constant_count++;
+	}
+;
+
+macrodefn_prologue:
+	T_DEFINE T_SYMBOL
+	{
+		if ($2->type != UNINITIALIZED) {
+			stop("Re-definition of symbol as a macro",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		cur_symbol = $2;
+		cur_symbol->type = MACRO;
+		initialize_symbol(cur_symbol);
+	}
+;
+
+macrodefn:
+	macrodefn_prologue T_MACROBODY
+	{
+		add_macro_body($2);
+	}
+|	macrodefn_prologue '(' macro_arglist ')' T_MACROBODY
+	{
+		add_macro_body($5);
+		cur_symbol->info.macroinfo->narg = $3;
+	}
+;
+
+macro_arglist:
+	{
+		/* Macros can take no arguments */
+		$$ = 0;
+	}
+|	T_ARG
+	{
+		$$ = 1;
+		add_macro_arg($1, 0);
+	}
+|	macro_arglist ',' T_ARG
+	{
+		if ($1 == 0) {
+			stop("Comma without preceding argument in arg list",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		$$ = $1 + 1;
+		add_macro_arg($3, $1);
+	}
+;
+
+scratch_ram:
+	T_SRAM '{'
+		{
+			snprintf(errbuf, sizeof(errbuf), "%s%d", SRAM_SYMNAME,
+				 num_srams);
+			cur_symbol = symtable_get(SRAM_SYMNAME);
+			cur_symtype = SRAMLOC;
+			cur_symbol->type = SRAMLOC;
+			initialize_symbol(cur_symbol);
+		}
+		reg_address
+		{
+			sram_or_scb_offset = cur_symbol->info.rinfo->address;
+		}
+		size
+		{
+			scb_or_sram_symbol = cur_symbol;
+		}
+		scb_or_sram_attributes
+	'}'
+		{
+			cur_symbol = NULL;
+			scb_or_sram_symbol = NULL;
+		}
+;
+
+scb:
+	T_SCB '{'
+		{
+			cur_symbol = symtable_get(SCB_SYMNAME);
+			cur_symtype = SCBLOC;
+			if (cur_symbol->type != UNINITIALIZED) {
+				stop("Only one SCB definition allowed",
+				     EX_SOFTWARE);
+				/* NOTREACHED */
+			}
+			cur_symbol->type = SCBLOC;
+			initialize_symbol(cur_symbol);
+			/* 64 bytes of SCB space */
+			cur_symbol->info.rinfo->size = 64;
+		}
+		reg_address
+		{
+			sram_or_scb_offset = cur_symbol->info.rinfo->address;
+		}
+		size
+		{
+			scb_or_sram_symbol = cur_symbol;
+		}
+		scb_or_sram_attributes
+	'}'
+		{
+			cur_symbol = NULL;
+			scb_or_sram_symbol = NULL;
+		}
+;
+
+scb_or_sram_attributes:
+	/* NULL definition is okay */
+|	modes
+|	scb_or_sram_reg_list
+|	modes scb_or_sram_reg_list
+;
+
+scb_or_sram_reg_list:
+	reg_definition
+|	scb_or_sram_reg_list reg_definition
+;
+
+reg_symbol:
+	T_SYMBOL
+	{
+		process_register(&$1);
+		$$.symbol = $1;
+		$$.offset = 0;
+	}
+|	T_SYMBOL '[' T_SYMBOL ']'
+	{
+		process_register(&$1);
+		if ($3->type != CONST) {
+			stop("register offset must be a constant", EX_DATAERR);
+			/* NOTREACHED */
+		}
+		if (($3->info.cinfo->value + 1) > $1->info.rinfo->size) {
+			stop("Accessing offset beyond range of register",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		$$.symbol = $1;
+		$$.offset = $3->info.cinfo->value;
+	}
+|	T_SYMBOL '[' T_NUMBER ']'
+	{
+		process_register(&$1);
+		if (($3 + 1) > $1->info.rinfo->size) {
+			stop("Accessing offset beyond range of register",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		$$.symbol = $1;
+		$$.offset = $3;
+	}
+|	T_A
+	{
+		if (accumulator.symbol == NULL) {
+			stop("No accumulator has been defined", EX_DATAERR);
+			/* NOTREACHED */
+		}
+		$$.symbol = accumulator.symbol;
+		$$.offset = 0;
+	}
+;
+
+destination:
+	reg_symbol
+	{
+		test_writable_symbol($1.symbol);
+		$$ = $1;
+	}
+;
+
+immediate:
+	expression
+	{ $$ = $1; }
+;
+
+immediate_or_a:
+	expression
+	{
+		if ($1.value == 0 && is_download_const(&$1) == 0) {
+			snprintf(errbuf, sizeof(errbuf),
+				 "\nExpression evaluates to 0 and thus "
+				 "references the accumulator.\n "
+				 "If this is the desired effect, use 'A' "
+				 "instead.\n");
+			stop(errbuf, EX_DATAERR);
+		}
+		$$ = $1;
+	}
+|	T_A
+	{
+		SLIST_INIT(&$$.referenced_syms);
+		symlist_add(&$$.referenced_syms, accumulator.symbol,
+			    SYMLIST_INSERT_HEAD);
+		$$.value = 0;
+	}
+;
+
+source:
+	reg_symbol
+	{
+		test_readable_symbol($1.symbol);
+		$$ = $1;
+	}
+;
+
+opt_source:
+	{
+		$$.symbol = NULL;
+		$$.offset = 0;
+	}
+|	',' source
+	{ $$ = $2; }
+;
+
+ret:
+	{ $$ = 0; }
+|	T_RET
+	{ $$ = 1; }
+;
+
+set_src_mode:
+	T_SET_SRC_MODE T_NUMBER ';'
+	{
+		src_mode = $2;
+	}
+;
+
+set_dst_mode:
+	T_SET_DST_MODE T_NUMBER ';'
+	{
+		dst_mode = $2;
+	}
+;
+
+critical_section_start:
+	T_BEGIN_CS ';'
+	{
+		critical_section_t *cs;
+
+		if (in_critical_section != FALSE) {
+			stop("Critical Section within Critical Section",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		cs = cs_alloc();
+		cs->begin_addr = instruction_ptr;
+		in_critical_section = TRUE;
+	}
+;
+
+critical_section_end:
+	T_END_CS ';'
+	{
+		critical_section_t *cs;
+
+		if (in_critical_section == FALSE) {
+			stop("Unbalanced 'end_cs'", EX_DATAERR);
+			/* NOTREACHED */
+		}
+		cs = TAILQ_LAST(&cs_tailq, cs_tailq);
+		cs->end_addr = instruction_ptr;
+		in_critical_section = FALSE;
+	}
+;
+
+export:
+	{ $$ = 0; }
+|	T_EXPORT
+	{ $$ = 1; }
+;
+
+label:
+	export T_SYMBOL ':'
+	{
+		if ($2->type != UNINITIALIZED) {
+			stop("Program label multiply defined", EX_DATAERR);
+			/* NOTREACHED */
+		}
+		$2->type = LABEL;
+		initialize_symbol($2);
+		$2->info.linfo->address = instruction_ptr;
+		$2->info.linfo->exported = $1;
+	}
+;
+
+address:
+	T_SYMBOL
+	{
+		$$.symbol = $1;
+		$$.offset = 0;
+	}
+|	T_SYMBOL '+' T_NUMBER
+	{
+		$$.symbol = $1;
+		$$.offset = $3;
+	}
+|	T_SYMBOL '-' T_NUMBER
+	{
+		$$.symbol = $1;
+		$$.offset = -$3;
+	}
+|	'.'
+	{
+		$$.symbol = NULL;
+		$$.offset = 0;
+	}
+|	'.' '+' T_NUMBER
+	{
+		$$.symbol = NULL;
+		$$.offset = $3;
+	}
+|	'.' '-' T_NUMBER
+	{
+		$$.symbol = NULL;
+		$$.offset = -$3;
+	}
+;
+
+conditional:
+	T_IF T_CEXPR '{'
+	{
+		scope_t *new_scope;
+
+		add_conditional($2);
+		new_scope = scope_alloc();
+		new_scope->type = SCOPE_IF;
+		new_scope->begin_addr = instruction_ptr;
+		new_scope->func_num = $2->info.condinfo->func_num;
+	}
+|	T_ELSE T_IF T_CEXPR '{'
+	{
+		scope_t *new_scope;
+		scope_t *scope_context;
+		scope_t *last_scope;
+
+		/*
+		 * Ensure that the previous scope is either an
+		 * if or an else if.
+		 */
+		scope_context = SLIST_FIRST(&scope_stack);
+		last_scope = TAILQ_LAST(&scope_context->inner_scope,
+					scope_tailq);
+		if (last_scope == NULL
+		 || last_scope->type == SCOPE_ELSE) {
+
+			stop("'else if' without leading 'if'", EX_DATAERR);
+			/* NOTREACHED */
+		}
+		add_conditional($3);
+		new_scope = scope_alloc();
+		new_scope->type = SCOPE_ELSE_IF;
+		new_scope->begin_addr = instruction_ptr;
+		new_scope->func_num = $3->info.condinfo->func_num;
+	}
+|	T_ELSE '{'
+	{
+		scope_t *new_scope;
+		scope_t *scope_context;
+		scope_t *last_scope;
+
+		/*
+		 * Ensure that the previous scope is either an
+		 * if or an else if.
+		 */
+		scope_context = SLIST_FIRST(&scope_stack);
+		last_scope = TAILQ_LAST(&scope_context->inner_scope,
+					scope_tailq);
+		if (last_scope == NULL
+		 || last_scope->type == SCOPE_ELSE) {
+
+			stop("'else' without leading 'if'", EX_DATAERR);
+			/* NOTREACHED */
+		}
+		new_scope = scope_alloc();
+		new_scope->type = SCOPE_ELSE;
+		new_scope->begin_addr = instruction_ptr;
+	}
+;
+
+conditional:
+	'}'
+	{
+		scope_t *scope_context;
+
+		scope_context = SLIST_FIRST(&scope_stack);
+		if (scope_context->type == SCOPE_ROOT) {
+			stop("Unexpected '}' encountered", EX_DATAERR);
+			/* NOTREACHED */
+		}
+
+		scope_context->end_addr = instruction_ptr;
+
+		/* Pop the scope */
+		SLIST_REMOVE_HEAD(&scope_stack, scope_stack_links);
+
+		process_scope(scope_context);
+
+		if (SLIST_FIRST(&scope_stack) == NULL) {
+			stop("Unexpected '}' encountered", EX_DATAERR);
+			/* NOTREACHED */
+		}
+	}
+;
+
+f1_opcode:
+	T_AND { $$ = AIC_OP_AND; }
+|	T_XOR { $$ = AIC_OP_XOR; }
+|	T_ADD { $$ = AIC_OP_ADD; }
+|	T_ADC { $$ = AIC_OP_ADC; }
+;
+
+code:
+	f1_opcode destination ',' immediate_or_a opt_source ret ';'
+	{
+		format_1_instr($1, &$2, &$4, &$5, $6);
+	}
+;
+
+code:
+	T_OR reg_symbol ',' immediate_or_a opt_source ret ';'
+	{
+		format_1_instr(AIC_OP_OR, &$2, &$4, &$5, $6);
+	}
+;
+
+code:
+	T_INC destination opt_source ret ';'
+	{
+		expression_t immed;
+
+		make_expression(&immed, 1);
+		format_1_instr(AIC_OP_ADD, &$2, &immed, &$3, $4);
+	}
+;
+
+code:
+	T_DEC destination opt_source ret ';'
+	{
+		expression_t immed;
+
+		make_expression(&immed, -1);
+		format_1_instr(AIC_OP_ADD, &$2, &immed, &$3, $4);
+	}
+;
+
+code:
+	T_CLC ret ';'
+	{
+		expression_t immed;
+
+		make_expression(&immed, -1);
+		format_1_instr(AIC_OP_ADD, &none, &immed, &allzeros, $2);
+	}
+|	T_CLC T_MVI destination ',' immediate_or_a ret ';'
+	{
+		format_1_instr(AIC_OP_ADD, &$3, &$5, &allzeros, $6);
+	}
+;
+
+code:
+	T_STC ret ';'
+	{
+		expression_t immed;
+
+		make_expression(&immed, 1);
+		format_1_instr(AIC_OP_ADD, &none, &immed, &allones, $2);
+	}
+|	T_STC destination ret ';'
+	{
+		expression_t immed;
+
+		make_expression(&immed, 1);
+		format_1_instr(AIC_OP_ADD, &$2, &immed, &allones, $3);
+	}
+;
+
+code:
+	T_BMOV destination ',' source ',' immediate ret ';'
+	{
+		format_1_instr(AIC_OP_BMOV, &$2, &$6, &$4, $7);
+	}
+;
+
+code:
+	T_MOV destination ',' source ret ';'
+	{
+		expression_t immed;
+
+		make_expression(&immed, 1);
+		format_1_instr(AIC_OP_BMOV, &$2, &immed, &$4, $5);
+	}
+;
+
+code:
+	T_MVI destination ',' immediate ret ';'
+	{
+		if ($4.value == 0
+		 && is_download_const(&$4) == 0) {
+			expression_t immed;
+
+			/*
+			 * Allow move immediates of 0 so that macros,
+			 * that can't know the immediate's value and
+			 * otherwise compensate, still work.
+			 */
+			make_expression(&immed, 1);
+			format_1_instr(AIC_OP_BMOV, &$2, &immed, &allzeros, $5);
+		} else {
+			format_1_instr(AIC_OP_OR, &$2, &$4, &allzeros, $5);
+		}
+	}
+;
+
+code:
+	T_NOT destination opt_source ret ';'
+	{
+		expression_t immed;
+
+		make_expression(&immed, 0xff);
+		format_1_instr(AIC_OP_XOR, &$2, &immed, &$3, $4);
+	}
+;
+
+code:
+	T_CLR destination ret ';'
+	{
+		expression_t immed;
+
+		make_expression(&immed, 0xff);
+		format_1_instr(AIC_OP_AND, &$2, &immed, &allzeros, $3);
+	}
+;
+
+code:
+	T_NOP ret ';'
+	{
+		expression_t immed;
+
+		make_expression(&immed, 0xff);
+		format_1_instr(AIC_OP_AND, &none, &immed, &allzeros, $2);
+	}
+;
+
+code:
+	T_RET ';'
+	{
+		expression_t immed;
+
+		make_expression(&immed, 0xff);
+		format_1_instr(AIC_OP_AND, &none, &immed, &allzeros, TRUE);
+	}
+;
+
+	/*
+	 * This grammar differs from the one in the aic7xxx
+	 * reference manual since the grammar listed there is
+	 * ambiguous and causes a shift/reduce conflict.
+	 * It also seems more logical as the "immediate"
+	 * argument is listed as the second arg like the
+	 * other formats.
+	 */
+
+f2_opcode:
+	T_SHL { $$ = AIC_OP_SHL; }
+|	T_SHR { $$ = AIC_OP_SHR; }
+|	T_ROL { $$ = AIC_OP_ROL; }
+|	T_ROR { $$ = AIC_OP_ROR; }
+;
+
+code:
+	f2_opcode destination ',' expression opt_source ret ';'
+	{
+		format_2_instr($1, &$2, &$4, &$5, $6);
+	}
+;
+
+jmp_jc_jnc_call:
+	T_JMP	{ $$ = AIC_OP_JMP; }
+|	T_JC	{ $$ = AIC_OP_JC; }
+|	T_JNC	{ $$ = AIC_OP_JNC; }
+|	T_CALL	{ $$ = AIC_OP_CALL; }
+;
+
+jz_jnz:
+	T_JZ	{ $$ = AIC_OP_JZ; }
+|	T_JNZ	{ $$ = AIC_OP_JNZ; }
+;
+
+je_jne:
+	T_JE	{ $$ = AIC_OP_JE; }
+|	T_JNE	{ $$ = AIC_OP_JNE; }
+;
+
+code:
+	jmp_jc_jnc_call address ';'
+	{
+		expression_t immed;
+
+		make_expression(&immed, 0);
+		format_3_instr($1, &sindex, &immed, &$2);
+	}
+;
+
+code:
+	T_OR reg_symbol ',' immediate jmp_jc_jnc_call address ';'
+	{
+		format_3_instr($5, &$2, &$4, &$6);
+	}
+;
+
+code:
+	T_TEST source ',' immediate_or_a jz_jnz address ';'
+	{
+		format_3_instr($5, &$2, &$4, &$6);
+	}
+;
+
+code:
+	T_CMP source ',' immediate_or_a je_jne address ';'
+	{
+		format_3_instr($5, &$2, &$4, &$6);
+	}
+;
+
+code:
+	T_MOV source jmp_jc_jnc_call address ';'
+	{
+		expression_t immed;
+
+		make_expression(&immed, 0);
+		format_3_instr($3, &$2, &immed, &$4);
+	}
+;
+
+code:
+	T_MVI immediate jmp_jc_jnc_call address ';'
+	{
+		format_3_instr($3, &allzeros, &$2, &$4);
+	}
+;
+
+%%
+
+static void
+process_field(int field_type, symbol_t *sym, int value)
+{
+	/*
+	 * Record this field, mask, or enum symbol against the current
+	 * register: define the symbol if it is new, error out if it was
+	 * previously defined with a different type or value, and OR its
+	 * mask into the register's "allowed bits".
+	 */
+	if (sym->type == UNINITIALIZED) {
+		sym->type = field_type;
+		initialize_symbol(sym);
+		sym->info.finfo->value = value;
+		if (field_type != ENUM_ENTRY) {
+			if (field_type != MASK && value == 0) {
+				stop("Empty Field, or Enum", EX_DATAERR);
+				/* NOTREACHED */
+			}
+			sym->info.finfo->value = value;
+			sym->info.finfo->mask = value;
+		} else if (field_symbol != NULL) {
+			sym->info.finfo->mask = field_symbol->info.finfo->value;
+		} else {
+			sym->info.finfo->mask = 0xFF;
+		}
+	} else if (sym->type != field_type) {
+		stop("Field definition mirrors a definition of the same "
+		     "name, but a different type", EX_DATAERR);
+		/* NOTREACHED */
+	} else if (value != sym->info.finfo->value) {
+		stop("Field redefined with a conflicting value", EX_DATAERR);
+		/* NOTREACHED */
+	}
+	/* Fail if this symbol is already listed */
+	if (symlist_search(&(sym->info.finfo->symrefs),
+			   cur_symbol->name) != NULL) {
+		stop("Field defined multiple times for register", EX_DATAERR);
+		/* NOTREACHED */
+	}
+	symlist_add(&(sym->info.finfo->symrefs), cur_symbol,
+		    SYMLIST_INSERT_HEAD);
+	cur_symbol->info.rinfo->valid_bitmask |= sym->info.finfo->mask;
+	cur_symbol->info.rinfo->typecheck_masks = TRUE;
+	symlist_add(&(cur_symbol->info.rinfo->fields), sym, SYMLIST_SORT);
+}
+
+static void
+initialize_symbol(symbol_t *symbol)
+{
+	switch (symbol->type) {
+	case UNINITIALIZED:
+		stop("Call to initialize_symbol with type field unset",
+		     EX_SOFTWARE);
+		/* NOTREACHED */
+		break;
+	case REGISTER:
+	case SRAMLOC:
+	case SCBLOC:
+		symbol->info.rinfo =
+		    (struct reg_info *)malloc(sizeof(struct reg_info));
+		if (symbol->info.rinfo == NULL) {
+			stop("Can't create register info", EX_SOFTWARE);
+			/* NOTREACHED */
+		}
+		memset(symbol->info.rinfo, 0,
+		       sizeof(struct reg_info));
+		SLIST_INIT(&(symbol->info.rinfo->fields));
+		/*
+		 * Default to allowing access in all register modes
+		 * or to the mode specified by the SCB or SRAM space
+		 * we are in.
+		 */
+		if (scb_or_sram_symbol != NULL)
+			symbol->info.rinfo->modes =
+			    scb_or_sram_symbol->info.rinfo->modes;
+		else
+			symbol->info.rinfo->modes = ~0;
+		break;
+	case ALIAS:
+		symbol->info.ainfo =
+		    (struct alias_info *)malloc(sizeof(struct alias_info));
+		if (symbol->info.ainfo == NULL) {
+			stop("Can't create alias info", EX_SOFTWARE);
+			/* NOTREACHED */
+		}
+		memset(symbol->info.ainfo, 0,
+		       sizeof(struct alias_info));
+		break;
+	case MASK:
+	case FIELD:
+	case ENUM:
+	case ENUM_ENTRY:
+		symbol->info.finfo =
+		    (struct field_info *)malloc(sizeof(struct field_info));
+		if (symbol->info.finfo == NULL) {
+			stop("Can't create field info", EX_SOFTWARE);
+			/* NOTREACHED */
+		}
+		memset(symbol->info.finfo, 0, sizeof(struct field_info));
+		SLIST_INIT(&(symbol->info.finfo->symrefs));
+		break;
+	case CONST:
+	case DOWNLOAD_CONST:
+		symbol->info.cinfo =
+		    (struct const_info *)malloc(sizeof(struct const_info));
+		if (symbol->info.cinfo == NULL) {
+			stop("Can't create constant info", EX_SOFTWARE);
+			/* NOTREACHED */
+		}
+		memset(symbol->info.cinfo, 0,
+		       sizeof(struct const_info));
+		break;
+	case LABEL:
+		symbol->info.linfo =
+		    (struct label_info *)malloc(sizeof(struct label_info));
+		if (symbol->info.linfo == NULL) {
+			stop("Can't create label info", EX_SOFTWARE);
+			/* NOTREACHED */
+		}
+		memset(symbol->info.linfo, 0,
+		       sizeof(struct label_info));
+		break;
+	case CONDITIONAL:
+		symbol->info.condinfo =
+		    (struct cond_info *)malloc(sizeof(struct cond_info));
+		if (symbol->info.condinfo == NULL) {
+			stop("Can't create conditional info", EX_SOFTWARE);
+			/* NOTREACHED */
+		}
+		memset(symbol->info.condinfo, 0,
+		       sizeof(struct cond_info));
+		break;
+	case MACRO:
+		symbol->info.macroinfo = 
+		    (struct macro_info *)malloc(sizeof(struct macro_info));
+		if (symbol->info.macroinfo == NULL) {
+			stop("Can't create macro info", EX_SOFTWARE);
+			/* NOTREACHED */
+		}
+		memset(symbol->info.macroinfo, 0,
+		       sizeof(struct macro_info));
+		STAILQ_INIT(&symbol->info.macroinfo->args);
+		break;
+	default:
+		stop("Call to initialize_symbol with invalid symbol type",
+		     EX_SOFTWARE);
+		/* NOTREACHED */
+		break;
+	}
+}
+
+static void
+add_macro_arg(const char *argtext, int argnum)
+{
+	struct macro_arg *marg;
+	int i;
+	int retval;
+		
+
+	if (cur_symbol == NULL || cur_symbol->type != MACRO) {
+		stop("Invalid current symbol for adding macro arg",
+		     EX_SOFTWARE);
+		/* NOTREACHED */
+	}
+
+	marg = (struct macro_arg *)malloc(sizeof(*marg));
+	if (marg == NULL) {
+		stop("Can't create macro_arg structure", EX_SOFTWARE);
+		/* NOTREACHED */
+	}
+	marg->replacement_text = NULL;
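+	/*
+	 * Build a regular expression that matches the argument name only
+	 * when it appears as a complete token in the macro body (bounded
+	 * by characters that cannot occur in an identifier), so one
+	 * argument name cannot match inside a longer identifier.
+	 */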
+	retval = snprintf(regex_pattern, sizeof(regex_pattern),
+			  "[^-/A-Za-z0-9_](%s)([^-/A-Za-z0-9_]|$)",
+			  argtext);
+	if (retval >= sizeof(regex_pattern)) {
+		stop("Regex text buffer too small for arg",
+		     EX_SOFTWARE);
+		/* NOTREACHED */
+	}
+	retval = regcomp(&marg->arg_regex, regex_pattern, REG_EXTENDED);
+	if (retval != 0) {
+		stop("Regex compilation failed", EX_SOFTWARE);
+		/* NOTREACHED */
+	}
+	STAILQ_INSERT_TAIL(&cur_symbol->info.macroinfo->args, marg, links);
+}
+
+static void
+add_macro_body(const char *bodytext)
+{
+	if (cur_symbol == NULL || cur_symbol->type != MACRO) {
+		stop("Invalid current symbol for adding macro arg",
+		     EX_SOFTWARE);
+		/* NOTREACHED */
+	}
+	cur_symbol->info.macroinfo->body = strdup(bodytext);
+	if (cur_symbol->info.macroinfo->body == NULL) {
+		stop("Can't duplicate macro body text", EX_SOFTWARE);
+		/* NOTREACHED */
+	}
+}
+
+static void
+process_register(symbol_t **p_symbol)
+{
+	symbol_t *symbol = *p_symbol;
+
+	if (symbol->type == UNINITIALIZED) {
+		snprintf(errbuf, sizeof(errbuf), "Undefined register %s",
+			 symbol->name);
+		stop(errbuf, EX_DATAERR);
+		/* NOTREACHED */
+	} else if (symbol->type == ALIAS) {
+		*p_symbol = symbol->info.ainfo->parent;
+	} else if ((symbol->type != REGISTER)
+		&& (symbol->type != SCBLOC)
+		&& (symbol->type != SRAMLOC)) {
+		snprintf(errbuf, sizeof(errbuf),
+			 "Specified symbol %s is not a register",
+			 symbol->name);
+		stop(errbuf, EX_DATAERR);
+	}
+}
+
+static void
+format_1_instr(int opcode, symbol_ref_t *dest, expression_t *immed,
+	       symbol_ref_t *src, int ret)
+{
+	struct instruction *instr;
+	struct ins_format1 *f1_instr;
+
+	if (src->symbol == NULL)
+		src = dest;
+
+	/* Test register permissions */
+	test_writable_symbol(dest->symbol);
+	test_readable_symbol(src->symbol);
+
+	/* Ensure that immediate makes sense for this destination */
+	type_check(dest->symbol, immed, opcode);
+
+	/* Allocate sequencer space for the instruction and fill it out */
+	instr = seq_alloc();
+	f1_instr = &instr->format.format1;
+	f1_instr->ret = ret ? 1 : 0;
+	f1_instr->opcode = opcode;
+	f1_instr->destination = dest->symbol->info.rinfo->address
+			      + dest->offset;
+	f1_instr->source = src->symbol->info.rinfo->address
+			 + src->offset;
+	f1_instr->immediate = immed->value;
+
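+	/*
+	 * Instructions whose immediate value comes from a downloaded
+	 * constant are flagged via the parity field.
+	 */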
+	if (is_download_const(immed))
+		f1_instr->parity = 1;
+	else if (dest->symbol == mode_ptr.symbol) {
+		u_int src_value;
+		u_int dst_value;
+
+		/*
+		 * Attempt to update mode information if
+		 * we are operating on the mode register.
+		 */
+		if (src->symbol == allones.symbol)
+			src_value = 0xFF;
+		else if (src->symbol == allzeros.symbol)
+			src_value = 0;
+		else if (src->symbol == mode_ptr.symbol)
+			src_value = (dst_mode << 4) | src_mode;
+		else
+			goto cant_update;
+
+		switch (opcode) {
+		case AIC_OP_AND:
+			dst_value = src_value & immed->value;
+			break;
+		case AIC_OP_XOR:
+			dst_value = src_value ^ immed->value;
+			break;
+		case AIC_OP_ADD:
+			dst_value = (src_value + immed->value) & 0xFF;
+			break;
+		case AIC_OP_OR:
+			dst_value = src_value | immed->value;
+			break;
+		case AIC_OP_BMOV:
+			dst_value = src_value;
+			break;
+		default:
+			goto cant_update;
+		}
+		src_mode = dst_value & 0xF;
+		dst_mode = (dst_value >> 4) & 0xF;
+	}
+
+cant_update:
+	symlist_free(&immed->referenced_syms);
+	instruction_ptr++;
+}
+
+static void
+format_2_instr(int opcode, symbol_ref_t *dest, expression_t *places,
+	       symbol_ref_t *src, int ret)
+{
+	struct instruction *instr;
+	struct ins_format2 *f2_instr;
+	uint8_t shift_control;
+
+	if (src->symbol == NULL)
+		src = dest;
+
+	/* Test register permissions */
+	test_writable_symbol(dest->symbol);
+	test_readable_symbol(src->symbol);
+
+	/* Allocate sequencer space for the instruction and fill it out */
+	instr = seq_alloc();
+	f2_instr = &instr->format.format2;
+	f2_instr->ret = ret ? 1 : 0;
+	f2_instr->opcode = AIC_OP_ROL;
+	f2_instr->destination = dest->symbol->info.rinfo->address
+			      + dest->offset;
+	f2_instr->source = src->symbol->info.rinfo->address
+			 + src->offset;
+	if (places->value > 8 || places->value <= 0) {
+		stop("illegal shift value", EX_DATAERR);
+		/* NOTREACHED */
+	}
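+	/*
+	 * The sequencer exposes a single rotate instruction (AIC_OP_ROL,
+	 * set above); which of shl/shr/rol/ror was requested is conveyed
+	 * entirely through the shift_control encoding chosen below.
+	 */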
+	switch (opcode) {
+	case AIC_OP_SHL:
+		if (places->value == 8)
+			shift_control = 0xf0;
+		else
+			shift_control = (places->value << 4) | places->value;
+		break;
+	case AIC_OP_SHR:
+		if (places->value == 8) {
+			shift_control = 0xf8;
+		} else {
+			shift_control = (places->value << 4)
+				      | (8 - places->value)
+				      | 0x08;
+		}
+		break;
+	case AIC_OP_ROL:
+		shift_control = places->value & 0x7;
+		break;
+	case AIC_OP_ROR:
+		shift_control = (8 - places->value) | 0x08;
+		break;
+	default:
+		shift_control = 0; /* Quiet Compiler */
+		stop("Invalid shift operation specified", EX_SOFTWARE);
+		/* NOTREACHED */
+		break;
+	}
+	f2_instr->shift_control = shift_control;
+	symlist_free(&places->referenced_syms);
+	instruction_ptr++;
+}
+
+static void
+format_3_instr(int opcode, symbol_ref_t *src,
+	       expression_t *immed, symbol_ref_t *address)
+{
+	struct instruction *instr;
+	struct ins_format3 *f3_instr;
+	int addr;
+
+	/* Test register permissions */
+	test_readable_symbol(src->symbol);
+
+	/* Ensure that immediate makes sense for this source */
+	type_check(src->symbol, immed, opcode);
+
+	/* Allocate sequencer space for the instruction and fill it out */
+	instr = seq_alloc();
+	f3_instr = &instr->format.format3;
+	if (address->symbol == NULL) {
+		/* 'dot' reference.  Use the current instruction pointer */
+		addr = instruction_ptr + address->offset;
+	} else if (address->symbol->type == UNINITIALIZED) {
+		/* forward reference */
+		addr = address->offset;
+		instr->patch_label = address->symbol;
+	} else
+		addr = address->symbol->info.linfo->address + address->offset;
+	f3_instr->opcode = opcode;
+	f3_instr->address = addr;
+	f3_instr->source = src->symbol->info.rinfo->address
+			 + src->offset;
+	f3_instr->immediate = immed->value;
+
+	if (is_download_const(immed))
+		f3_instr->parity = 1;
+
+	symlist_free(&immed->referenced_syms);
+	instruction_ptr++;
+}
+
+static void
+test_readable_symbol(symbol_t *symbol)
+{
+	
+	if ((symbol->info.rinfo->modes & (0x1 << src_mode)) == 0) {
+		snprintf(errbuf, sizeof(errbuf),
+			"Register %s unavailable in source reg mode %d",
+			symbol->name, src_mode);
+		stop(errbuf, EX_DATAERR);
+	}
+
+	if (symbol->info.rinfo->mode == WO) {
+		stop("Write Only register specified as source",
+		     EX_DATAERR);
+		/* NOTREACHED */
+	}
+}
+
+static void
+test_writable_symbol(symbol_t *symbol)
+{
+	
+	if ((symbol->info.rinfo->modes & (0x1 << dst_mode)) == 0) {
+		snprintf(errbuf, sizeof(errbuf),
+			"Register %s unavailable in destination reg mode %d",
+			symbol->name, dst_mode);
+		stop(errbuf, EX_DATAERR);
+	}
+
+	if (symbol->info.rinfo->mode == RO) {
+		stop("Read Only register specified as destination",
+		     EX_DATAERR);
+		/* NOTREACHED */
+	}
+}
+
+static void
+type_check(symbol_t *symbol, expression_t *expression, int opcode)
+{
+	symbol_node_t *node;
+	int and_op;
+
+	and_op = FALSE;
+	if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ
+	 || opcode == AIC_OP_JZ)
+		and_op = TRUE;
+
+	/*
+	 * Make sure that we aren't attempting to write something
+	 * that hasn't been defined.  If this is an and operation,
+	 * this is a mask, so "undefined" bits are okay.
+	 */
+	if (and_op == FALSE
+	 && (expression->value & ~symbol->info.rinfo->valid_bitmask) != 0) {
+		snprintf(errbuf, sizeof(errbuf),
+			 "Invalid bit(s) 0x%x in immediate written to %s",
+			 expression->value & ~symbol->info.rinfo->valid_bitmask,
+			 symbol->name);
+		stop(errbuf, EX_DATAERR);
+		/* NOTREACHED */
+	}
+
+	/*
+	 * Now make sure that all of the symbols referenced by the
+	 * expression are defined for this register.
+	 */
+	if (symbol->info.rinfo->typecheck_masks != FALSE) {
+		for(node = expression->referenced_syms.slh_first;
+		    node != NULL;
+		    node = node->links.sle_next) {
+			if ((node->symbol->type == MASK
+			  || node->symbol->type == FIELD
+			  || node->symbol->type == ENUM
+			  || node->symbol->type == ENUM_ENTRY)
+			 && symlist_search(&node->symbol->info.finfo->symrefs,
+					   symbol->name) == NULL) {
+				snprintf(errbuf, sizeof(errbuf),
+					 "Invalid field or mask %s "
+					 "for register %s",
+					 node->symbol->name, symbol->name);
+				stop(errbuf, EX_DATAERR);
+				/* NOTREACHED */
+			}
+		}
+	}
+}
+
+static void
+make_expression(expression_t *immed, int value)
+{
+	SLIST_INIT(&immed->referenced_syms);
+	immed->value = value & 0xff;
+}
+
+static void
+add_conditional(symbol_t *symbol)
+{
+	static int numfuncs;
+
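+	/*
+	 * The first conditional registered receives patch function
+	 * number 0, so reserve it for the special conditional "0".
+	 */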
+	if (numfuncs == 0) {
+		/* add a special conditional, "0" */
+		symbol_t *false_func;
+
+		false_func = symtable_get("0");
+		if (false_func->type != UNINITIALIZED) {
+			stop("Conditional expression '0' "
+			     "conflicts with a symbol", EX_DATAERR);
+			/* NOTREACHED */
+		}
+		false_func->type = CONDITIONAL;
+		initialize_symbol(false_func);
+		false_func->info.condinfo->func_num = numfuncs++;
+		symlist_add(&patch_functions, false_func, SYMLIST_INSERT_HEAD);
+	}
+
+	/* This condition has occurred before */
+	if (symbol->type == CONDITIONAL)
+		return;
+
+	if (symbol->type != UNINITIALIZED) {
+		stop("Conditional expression conflicts with a symbol",
+		     EX_DATAERR);
+		/* NOTREACHED */
+	}
+
+	symbol->type = CONDITIONAL;
+	initialize_symbol(symbol);
+	symbol->info.condinfo->func_num = numfuncs++;
+	symlist_add(&patch_functions, symbol, SYMLIST_INSERT_HEAD);
+}
+
+static void
+add_version(const char *verstring)
+{
+	const char prefix[] = " * ";
+	int newlen;
+	int oldlen;
+
+	newlen = strlen(verstring) + strlen(prefix);
+	oldlen = 0;
+	if (versions != NULL)
+		oldlen = strlen(versions);
+	versions = realloc(versions, newlen + oldlen + 2);
+	if (versions == NULL)
+		stop("Can't allocate version string", EX_SOFTWARE);
+	strcpy(&versions[oldlen], prefix);
+	strcpy(&versions[oldlen + strlen(prefix)], verstring);
+	versions[newlen + oldlen] = '\n';
+	versions[newlen + oldlen + 1] = '\0';
+}
+
+void
+yyerror(const char *string)
+{
+	stop(string, EX_DATAERR);
+}
+
+static int
+is_download_const(expression_t *immed)
+{
+	if ((immed->referenced_syms.slh_first != NULL)
+	 && (immed->referenced_syms.slh_first->symbol->type == DOWNLOAD_CONST))
+		return (TRUE);
+
+	return (FALSE);
+}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
new file mode 100644
index 0000000..3e80f07
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
@@ -0,0 +1,131 @@
+/*
+ * Instruction formats for the sequencer program downloaded to
+ * Aic7xxx SCSI host adapters
+ *
+ * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_insformat.h#11 $
+ *
+ * $FreeBSD$
+ */
+
+struct ins_format1 {
+#if BYTE_ORDER == LITTLE_ENDIAN
+	uint32_t	immediate	: 8,
+			source		: 9,
+			destination	: 9,
+			ret		: 1,
+			opcode		: 4,
+			parity		: 1;
+#else
+	uint32_t	parity		: 1,
+			opcode		: 4,
+			ret		: 1,
+			destination	: 9,
+			source		: 9,
+			immediate	: 8;
+#endif
+};
+
+struct ins_format2 {
+#if BYTE_ORDER == LITTLE_ENDIAN
+	uint32_t	shift_control	: 8,
+			source		: 9,
+			destination	: 9,
+			ret		: 1,
+			opcode		: 4,
+			parity		: 1;
+#else
+	uint32_t	parity		: 1,
+			opcode		: 4,
+			ret		: 1,
+			destination	: 9,
+			source		: 9,
+			shift_control	: 8;
+#endif
+};
+
+struct ins_format3 {
+#if BYTE_ORDER == LITTLE_ENDIAN
+	uint32_t	immediate	: 8,
+			source		: 9,
+			address		: 10,
+			opcode		: 4,
+			parity		: 1;
+#else
+	uint32_t	parity		: 1,
+			opcode		: 4,
+			address		: 10,
+			source		: 9,
+			immediate	: 8;
+#endif
+};
+
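+/*
+ * Format 1 carries ALU operations (immediate/source/destination),
+ * format 2 carries shifts and rotates (shift_control in place of the
+ * immediate), and format 3 carries branch and test operations with a
+ * 10-bit instruction address.
+ */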
+union ins_formats {
+		struct ins_format1 format1;
+		struct ins_format2 format2;
+		struct ins_format3 format3;
+		uint8_t		   bytes[4];
+		uint32_t	   integer;
+};
+struct instruction {
+	union	ins_formats format;
+	u_int	srcline;
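+	/* Forward-referenced branch target awaiting resolution */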
+	struct symbol *patch_label;
+	STAILQ_ENTRY(instruction) links;
+};
+
+#define	AIC_OP_OR	0x0
+#define	AIC_OP_AND	0x1
+#define AIC_OP_XOR	0x2
+#define	AIC_OP_ADD	0x3
+#define	AIC_OP_ADC	0x4
+#define	AIC_OP_ROL	0x5
+#define	AIC_OP_BMOV	0x6
+
+#define	AIC_OP_JMP	0x8
+#define AIC_OP_JC	0x9
+#define AIC_OP_JNC	0xa
+#define AIC_OP_CALL	0xb
+#define	AIC_OP_JNE	0xc
+#define	AIC_OP_JNZ	0xd
+#define	AIC_OP_JE	0xe
+#define	AIC_OP_JZ	0xf
+
+/* Pseudo Ops */
+#define	AIC_OP_SHL	0x10
+#define	AIC_OP_SHR	0x20
+#define	AIC_OP_ROR	0x30
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
new file mode 100644
index 0000000..439f760
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
@@ -0,0 +1,164 @@
+%{
+/*
+ * Sub-parser for macro invocation in the Aic7xxx SCSI
+ * Host adapter sequencer assembler.
+ *
+ * Copyright (c) 2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_macro_gram.y#5 $
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+
+#include <inttypes.h>
+#include <regex.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+
+#ifdef __linux__
+#include "../queue.h"
+#else
+#include <sys/queue.h>
+#endif
+
+#include "aicasm.h"
+#include "aicasm_symbol.h"
+#include "aicasm_insformat.h"
+
+static symbol_t *macro_symbol;
+
+static void add_macro_arg(const char *argtext, int position);
+
+%}
+
+%union {
+	int		value;
+	char		*str;
+	symbol_t	*sym;
+}
+
+
+%token <str> T_ARG
+
+%token <sym> T_SYMBOL
+
+%type <value> macro_arglist
+
+%%
+
+macrocall:
+	T_SYMBOL '('
+	{
+		macro_symbol = $1;
+	}
+	macro_arglist ')'
+	{
+		if (macro_symbol->info.macroinfo->narg != $4) {
+			printf("Narg == %d", macro_symbol->info.macroinfo->narg);
+			stop("Too few arguments for macro invocation",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		macro_symbol = NULL;
+		YYACCEPT;
+	}
+;
+
+macro_arglist:
+	{
+		/* Macros can take 0 arguments */
+		$$ = 0;
+	}
+|	T_ARG
+	{
+		$$ = 1;
+		add_macro_arg($1, 1);
+	}
+|	macro_arglist ',' T_ARG
+	{
+		if ($1 == 0) {
+			stop("Comma without preceding argument in arg list",
+			     EX_DATAERR);
+			/* NOTREACHED */
+		}
+		$$ = $1 + 1;
+		add_macro_arg($3, $$);
+	}
+;
+
+%%
+
+static void
+add_macro_arg(const char *argtext, int argnum)
+{
+	struct macro_arg *marg;
+	int i;
+
+	if (macro_symbol == NULL || macro_symbol->type != MACRO) {
+		stop("Invalid current symbol for adding macro arg",
+		     EX_SOFTWARE);
+		/* NOTREACHED */
+	}
+	/*
+	 * Macro Invocation.  Find the appropriate argument and fill
+	 * in the replacement text for this call.
+	 */
+	i = 0;
+	STAILQ_FOREACH(marg, &macro_symbol->info.macroinfo->args, links) {
+		i++;
+		if (i == argnum)
+			break;
+	}
+	if (marg == NULL) {
+		stop("Too many arguments for macro invocation", EX_DATAERR);
+		/* NOTREACHED */
+	}
+	marg->replacement_text = strdup(argtext);
+	if (marg->replacement_text == NULL) {
+		stop("Unable to replicate replacement text", EX_SOFTWARE);
+		/* NOTREACHED */
+	}
+}
+
+void
+mmerror(const char *string)
+{
+	stop(string, EX_DATAERR);
+}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
new file mode 100644
index 0000000..f06e703
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
@@ -0,0 +1,156 @@
+%{
+/*
+ * Sub-Lexical Analyzer for macro invocation in
+ * the Aic7xxx SCSI Host adapter sequencer assembler.
+ *
+ * Copyright (c) 2001 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_macro_scan.l#8 $
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+
+#include <inttypes.h>
+#include <limits.h>
+#include <regex.h>
+#include <stdio.h>
+#include <string.h>
+#include <sysexits.h>
+#ifdef __linux__
+#include "../queue.h"
+#else
+#include <sys/queue.h>
+#endif
+
+#include "aicasm.h"
+#include "aicasm_symbol.h"
+#include "aicasm_macro_gram.h"
+
+#define MAX_STR_CONST 4096
+static char string_buf[MAX_STR_CONST];
+static char *string_buf_ptr;
+static int  parren_count;
+static char buf[255];
+%}
+
+WORD		[A-Za-z_][-A-Za-z_0-9]*
+SPACE		[ \t]+
+MCARG		[^(), \t]+
+
+%x ARGLIST
+
+%%
+\n			{
+				++yylineno;
+			}
+\r			;
+<ARGLIST>{SPACE}	;
+<ARGLIST>\(		{
+				parren_count++;
+				if (parren_count == 1) {
+					string_buf_ptr = string_buf;
+					return ('(');
+				}
+				*string_buf_ptr++ = '(';
+			}
+<ARGLIST>\)		{
+				if (parren_count == 1) {
+					if (string_buf_ptr != string_buf) {
+						/*
+						 * Return an argument and
+						 * rescan this paren so we
+						 * can return it as well.
+						 */
+						*string_buf_ptr = '\0';
+						mmlval.str = string_buf;
+						string_buf_ptr = string_buf;
+						unput(')');
+						return T_ARG;
+					}
+					BEGIN INITIAL;
+					return (')');
+				}
+				parren_count--;
+				*string_buf_ptr++ = ')';
+			}
+<ARGLIST>{MCARG}	{
+				char *yptr;
+
+				yptr = mmtext;
+				while (*yptr)
+					*string_buf_ptr++ = *yptr++;
+			}
+<ARGLIST>\,		{
+				if (string_buf_ptr != string_buf) {
+					/*
+					 * Return an argument and
+					 * rescan this comma so we
+					 * can return it as well.
+					 */
+					*string_buf_ptr = '\0';
+					mmlval.str = string_buf;
+					string_buf_ptr = string_buf;
+					unput(',');
+					return T_ARG;
+				}
+				return ',';
+			}
+{WORD}[(]		{
+				/* May be a symbol or a macro invocation. */
+				mmlval.sym = symtable_get(mmtext);
+				if (mmlval.sym->type != MACRO) {
+					stop("Expecting Macro Name",
+					     EX_DATAERR);
+				}
+				unput('(');
+				parren_count = 0;
+				BEGIN ARGLIST;
+				return T_SYMBOL;
+			}
+.			{ 
+				snprintf(buf, sizeof(buf), "Invalid character "
+					 "'%c'", mmtext[0]);
+				stop(buf, EX_DATAERR);
+			}
+%%
+
+int
+mmwrap()
+{
+	stop("EOF encountered in macro call", EX_DATAERR);
+}
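
For reference: the ARGLIST rules above gather an argument's text until a top-level comma or the closing parenthesis, using parren_count to keep nested parentheses (and the commas inside them) attached to their argument. A standalone sketch of that depth-tracking split, independent of flex (the function name and sample input are illustrative only):

	#include <stdio.h>
	#include <string.h>

	/* Print each top-level argument of a call such as "m(a, (b, c), d)". */
	static void split_args(const char *call)
	{
		const char *p = strchr(call, '(');
		char buf[128];
		int depth = 1, len = 0;

		if (p == NULL)
			return;
		for (p++; *p != '\0'; p++) {
			if (*p == '(')
				depth++;
			else if (*p == ')' && --depth == 0)
				break;		/* closing paren of the call */
			if (*p == ',' && depth == 1) {
				buf[len] = '\0';
				printf("arg: %s\n", buf);
				len = 0;
				continue;
			}
			if (*p != ' ' && *p != '\t' && len < (int)sizeof(buf) - 1)
				buf[len++] = *p;
		}
		buf[len] = '\0';
		if (len != 0)
			printf("arg: %s\n", buf);
	}

	int main(void)
	{
		split_args("SET_FIELD(SCSISEQ, (ENSELO|ENAUTOATNIO), 1)");
		return 0;
	}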
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
new file mode 100644
index 0000000..45c0b23
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -0,0 +1,607 @@
+%{
+/*
+ * Lexical Analyzer for the Aic7xxx SCSI Host adapter sequencer assembler.
+ *
+ * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs.
+ * Copyright (c) 2001, 2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_scan.l#19 $
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+
+#include <inttypes.h>
+#include <limits.h>
+#include <regex.h>
+#include <stdio.h>
+#include <string.h>
+#include <sysexits.h>
+#ifdef __linux__
+#include "../queue.h"
+#else
+#include <sys/queue.h>
+#endif
+
+#include "aicasm.h"
+#include "aicasm_symbol.h"
+#include "aicasm_gram.h"
+
+/* This is used for macro body capture too, so err on the large size. */
+#define MAX_STR_CONST 4096
+static char string_buf[MAX_STR_CONST];
+static char *string_buf_ptr;
+static int  parren_count;
+static int  quote_count;
+static char buf[255];
+%}
+
+PATH		([/]*[-A-Za-z0-9_.])+
+WORD		[A-Za-z_][-A-Za-z_0-9]*
+SPACE		[ \t]+
+MCARG		[^(), \t]+
+MBODY		((\\[^\n])*[^\n\\]*)+
+
+%x COMMENT
+%x CEXPR
+%x INCLUDE
+%x STRING
+%x MACRODEF
+%x MACROARGLIST
+%x MACROCALLARGS
+%x MACROBODY
+
+%%
+\n			{ ++yylineno; }
+\r			;
+"/*"			{ BEGIN COMMENT;  /* Enter comment eating state */ }
+<COMMENT>"/*"		{ fprintf(stderr, "Warning! Comment within comment.\n"); }
+<COMMENT>\n		{ ++yylineno; }
+<COMMENT>[^*/\n]*	;
+<COMMENT>"*"+[^*/\n]*	;
+<COMMENT>"/"+[^*/\n]*	;
+<COMMENT>"*"+"/"	{ BEGIN INITIAL; }
+if[ \t]*\(		{
+				string_buf_ptr = string_buf;
+				parren_count = 1;
+				BEGIN CEXPR;
+				return T_IF;
+			}
+<CEXPR>\(		{	*string_buf_ptr++ = '('; parren_count++; }
+<CEXPR>\)		{
+				parren_count--;
+				if (parren_count == 0) {
+					/* All done */
+					BEGIN INITIAL;
+					*string_buf_ptr = '\0';
+					yylval.sym = symtable_get(string_buf);
+					return T_CEXPR;
+				} else {
+					*string_buf_ptr++ = ')';
+				}
+			}
+<CEXPR>\n		{ ++yylineno; }
+<CEXPR>\r		;
+<CEXPR>[^()\n]+	{
+				char *yptr;
+
+				yptr = yytext;
+				while (*yptr != '\0') {
+					/* Remove duplicate spaces */
+					if (*yptr == '\t')
+						*yptr = ' ';
+					if (*yptr == ' '
+					 && string_buf_ptr != string_buf
+					 && string_buf_ptr[-1] == ' ')
+						yptr++;
+					else 
+						*string_buf_ptr++ = *yptr++;
+				}
+			}
+
+VERSION			{ return T_VERSION; }
+PREFIX			{ return T_PREFIX; }
+PATCH_ARG_LIST		{ return T_PATCH_ARG_LIST; }
+\"			{
+				string_buf_ptr = string_buf;
+				BEGIN STRING;
+			}
+<STRING>[^"]+		{
+				char *yptr;
+
+				yptr = yytext;
+				while (*yptr)
+					*string_buf_ptr++ = *yptr++;
+			}
+<STRING>\"		{
+				/* All done */
+				BEGIN INITIAL;
+				*string_buf_ptr = '\0';
+				yylval.str = string_buf;
+				return T_STRING;
+			}
+{SPACE}			 ;
+
+	/* Register/SCB/SRAM definition keywords */
+export			{ return T_EXPORT; }
+register		{ return T_REGISTER; }
+const			{ yylval.value = FALSE; return T_CONST; }
+download		{ return T_DOWNLOAD; }
+address			{ return T_ADDRESS; }
+access_mode		{ return T_ACCESS_MODE; }
+modes			{ return T_MODES; }
+RW|RO|WO		{
+				 if (strcmp(yytext, "RW") == 0)
+					yylval.value = RW;
+				 else if (strcmp(yytext, "RO") == 0)
+					yylval.value = RO;
+				 else
+					yylval.value = WO;
+				 return T_MODE;
+			}
+BEGIN_CRITICAL		{ return T_BEGIN_CS; }
+END_CRITICAL		{ return T_END_CS; }
+SET_SRC_MODE		{ return T_SET_SRC_MODE; }
+SET_DST_MODE		{ return T_SET_DST_MODE; }
+field			{ return T_FIELD; }
+enum			{ return T_ENUM; }
+mask			{ return T_MASK; }
+alias			{ return T_ALIAS; }
+size			{ return T_SIZE; }
+scb			{ return T_SCB; }
+scratch_ram		{ return T_SRAM; }
+accumulator		{ return T_ACCUM; }
+mode_pointer		{ return T_MODE_PTR; }
+allones			{ return T_ALLONES; }
+allzeros		{ return T_ALLZEROS; }
+none			{ return T_NONE; }
+sindex			{ return T_SINDEX; }
+A			{ return T_A; }
+
+	/* Opcodes */
+shl			{ return T_SHL; }
+shr			{ return T_SHR; }
+ror			{ return T_ROR; }
+rol			{ return T_ROL; }
+mvi			{ return T_MVI; }
+mov			{ return T_MOV; }
+clr			{ return T_CLR; }
+jmp			{ return T_JMP; }
+jc			{ return T_JC;	}
+jnc			{ return T_JNC;	}
+je			{ return T_JE;	}
+jne			{ return T_JNE;	}
+jz			{ return T_JZ;	}
+jnz			{ return T_JNZ;	}
+call			{ return T_CALL; }
+add			{ return T_ADD; }
+adc			{ return T_ADC; }
+bmov			{ return T_BMOV; }
+inc			{ return T_INC; }
+dec			{ return T_DEC; }
+stc			{ return T_STC;	}
+clc			{ return T_CLC; }
+cmp			{ return T_CMP;	}
+not			{ return T_NOT;	}
+xor			{ return T_XOR;	}
+test			{ return T_TEST;}
+and			{ return T_AND;	}
+or			{ return T_OR;	}
+ret			{ return T_RET; }
+nop			{ return T_NOP; }
+else			{ return T_ELSE; }
+
+	/* Allowed Symbols */
+\<\<			{ return T_EXPR_LSHIFT; }
+\>\>			{ return T_EXPR_RSHIFT; }
+[-+,:()~|&."{};<>[\]/*!=] { return yytext[0]; }
+
+	/* Number processing */
+0[0-7]*			{
+				yylval.value = strtol(yytext, NULL, 8);
+				return T_NUMBER;
+			}
+
+0[xX][0-9a-fA-F]+	{
+				yylval.value = strtoul(yytext + 2, NULL, 16);
+				return T_NUMBER;
+			}
+
+[1-9][0-9]*		{
+				yylval.value = strtol(yytext, NULL, 10);
+				return T_NUMBER;
+			}
+	/* Include Files */
+#include{SPACE}		{
+				BEGIN INCLUDE;
+				quote_count = 0;
+				return T_INCLUDE;
+			}
+<INCLUDE>[<]		{ return yytext[0]; }
+<INCLUDE>[>]		{ BEGIN INITIAL; return yytext[0]; }
+<INCLUDE>[\"]		{
+				if (quote_count != 0)
+					BEGIN INITIAL;
+				quote_count++;
+				return yytext[0];
+			}
+<INCLUDE>{PATH}		{
+				char *yptr;
+
+				yptr = yytext;
+				string_buf_ptr = string_buf;
+				while (*yptr)
+					*string_buf_ptr++ = *yptr++;
+				yylval.str = string_buf;
+				*string_buf_ptr = '\0';
+				return T_PATH;
+			}
+<INCLUDE>.		{ stop("Invalid include line", EX_DATAERR); }
+#define{SPACE}		{
+				BEGIN MACRODEF;
+				return T_DEFINE;
+			}
+<MACRODEF>{WORD}{SPACE}	{ 
+				char *yptr;
+
+				/* Strip space and return as a normal symbol */
+				yptr = yytext;
+				while (*yptr != ' ' && *yptr != '\t')
+					yptr++;
+				*yptr = '\0';
+				yylval.sym = symtable_get(yytext);
+				string_buf_ptr = string_buf;
+				BEGIN MACROBODY;
+				return T_SYMBOL;
+			}
+<MACRODEF>{WORD}\(	{
+				/*
+				 * We store the symbol with its opening
+				 * paren so we can differentiate macros
+				 * that take args from macros with the
+				 * same name that do not take args as
+				 * is allowed in C.
+				 */
+				BEGIN MACROARGLIST;
+				yylval.sym = symtable_get(yytext);
+				unput('(');
+				return T_SYMBOL;
+			}
+<MACROARGLIST>{WORD}	{
+				yylval.str = yytext;
+				return T_ARG;
+			}
+<MACROARGLIST>{SPACE}   ;
+<MACROARGLIST>[(,]	{
+				return yytext[0];
+			}
+<MACROARGLIST>[)]	{
+				string_buf_ptr = string_buf;
+				BEGIN MACROBODY;
+				return ')';
+			}
+<MACROARGLIST>.		{
+				snprintf(buf, sizeof(buf), "Invalid character "
+					 "'%c' in macro argument list",
+					 yytext[0]);
+				stop(buf, EX_DATAERR);
+			}
+<MACROCALLARGS>{SPACE}  ;
+<MACROCALLARGS>\(	{
+				parren_count++;
+				if (parren_count == 1)
+					return ('(');
+				*string_buf_ptr++ = '(';
+			}
+<MACROCALLARGS>\)	{
+				parren_count--;
+				if (parren_count == 0) {
+					BEGIN INITIAL;
+					return (')');
+				}
+				*string_buf_ptr++ = ')';
+			}
+<MACROCALLARGS>{MCARG}	{
+				char *yptr;
+
+				yptr = yytext;
+				while (*yptr)
+					*string_buf_ptr++ = *yptr++;
+			}
+<MACROCALLARGS>\,	{
+				if (string_buf_ptr != string_buf) {
+					/*
+					 * Return an argument and
+					 * rescan this comma so we
+					 * can return it as well.
+					 */
+					*string_buf_ptr = '\0';
+					yylval.str = string_buf;
+					string_buf_ptr = string_buf;
+					unput(',');
+					return T_ARG;
+				}
+				return ',';
+			}
+<MACROBODY>\\\n		{
+				/* Eat escaped newlines. */
+				++yylineno;
+			}
+<MACROBODY>\r		;
+<MACROBODY>\n		{
+				/* Macros end on the first unescaped newline. */
+				BEGIN INITIAL;
+				*string_buf_ptr = '\0';
+				yylval.str = string_buf;
+				++yylineno;
+				return T_MACROBODY;
+			}
+<MACROBODY>{MBODY}	{
+				char *yptr;
+				char c;
+
+				yptr = yytext;
+				while ((c = *yptr++) != '\0') {
+					/*
+					 * Strip carriage returns.
+					 */
+					if (c == '\r')
+						continue;
+					*string_buf_ptr++ = c;
+				}
+			}
+{WORD}\(		{
+				char *yptr;
+				char *ycopy;
+
+				/* May be a symbol or a macro invocation. */
+				yylval.sym = symtable_get(yytext);
+				if (yylval.sym->type == MACRO) {
+					YY_BUFFER_STATE old_state;
+					YY_BUFFER_STATE temp_state;
+
+					ycopy = strdup(yytext);
+					yptr = ycopy + yyleng;
+					while (yptr > ycopy)
+						unput(*--yptr);
+					old_state = YY_CURRENT_BUFFER;
+					temp_state =
+					    yy_create_buffer(stdin,
+							     YY_BUF_SIZE);
+					yy_switch_to_buffer(temp_state);
+					mm_switch_to_buffer(old_state);
+					mmparse();
+					mm_switch_to_buffer(temp_state);
+					yy_switch_to_buffer(old_state);
+					mm_delete_buffer(temp_state);
+					expand_macro(yylval.sym);
+				} else {
+					if (yylval.sym->type == UNINITIALIZED) {
+						/* Try without the '(' */
+						symbol_delete(yylval.sym);
+						yytext[yyleng-1] = '\0';
+						yylval.sym =
+						    symtable_get(yytext);
+					}
+					unput('(');
+					return T_SYMBOL;
+				}
+			}
+{WORD}			{
+				yylval.sym = symtable_get(yytext);
+				if (yylval.sym->type == MACRO) {
+					expand_macro(yylval.sym);
+				} else {
+					return T_SYMBOL;
+				}
+			}
+.			{ 
+				snprintf(buf, sizeof(buf), "Invalid character "
+					 "'%c'", yytext[0]);
+				stop(buf, EX_DATAERR);
+			}
+%%
+
+typedef struct include {
+        YY_BUFFER_STATE  buffer;
+        int              lineno;
+        char            *filename;
+	SLIST_ENTRY(include) links;
+} include_t;
+
+SLIST_HEAD(, include) include_stack;
+
+void
+include_file(char *file_name, include_type type)
+{
+	FILE *newfile;
+	include_t *include;
+
+	newfile = NULL;
+	/* Try the current directory first */
+	if (includes_search_curdir != 0 || type == SOURCE_FILE)
+		newfile = fopen(file_name, "r");
+
+	if (newfile == NULL && type != SOURCE_FILE) {
+                path_entry_t include_dir;
+                for (include_dir = search_path.slh_first;
+                     include_dir != NULL;                
+                     include_dir = include_dir->links.sle_next) {
+			char fullname[PATH_MAX];
+
+			if ((include_dir->quoted_includes_only == TRUE)
+			 && (type != QUOTED_INCLUDE))
+				continue;
+
+			snprintf(fullname, sizeof(fullname),
+				 "%s/%s", include_dir->directory, file_name);
+
+			if ((newfile = fopen(fullname, "r")) != NULL)
+				break;
+                }
+        }
+
+	if (newfile == NULL) {
+		perror(file_name);
+		stop("Unable to open input file", EX_SOFTWARE);
+		/* NOTREACHED */
+	}
+
+	if (type != SOURCE_FILE) {
+		include = (include_t *)malloc(sizeof(include_t));
+		if (include == NULL) {
+			stop("Unable to allocate include stack entry",
+			     EX_SOFTWARE);
+			/* NOTREACHED */
+		}
+		include->buffer = YY_CURRENT_BUFFER;
+		include->lineno = yylineno;
+		include->filename = yyfilename;
+		SLIST_INSERT_HEAD(&include_stack, include, links);
+	}
+	yy_switch_to_buffer(yy_create_buffer(newfile, YY_BUF_SIZE));
+	yylineno = 1;
+	yyfilename = strdup(file_name);
+}
+
+static void next_substitution(struct symbol *mac_symbol, const char *body_pos,
+			      const char **next_match,
+			      struct macro_arg **match_marg, regmatch_t *match);
+
+void
+expand_macro(struct symbol *macro_symbol)
+{
+	struct macro_arg *marg;
+	struct macro_arg *match_marg;
+	const char *body_head;
+	const char *body_pos;
+	const char *next_match;
+
+	/*
+	 * Due to the nature of unput, we must work
+	 * backwards through the macro body performing
+	 * any expansions.
+	 */
+	body_head = macro_symbol->info.macroinfo->body;
+	body_pos = body_head + strlen(body_head);
+	while (body_pos > body_head) {
+		regmatch_t match;
+
+		next_match = body_head;
+		match_marg = NULL;
+		next_substitution(macro_symbol, body_pos, &next_match,
+				  &match_marg, &match);
+
+		/* Put back everything up until the replacement. */
+		while (body_pos > next_match)
+			unput(*--body_pos);
+
+		/* Perform the replacement. */
+		if (match_marg != NULL) {
+			const char *strp;
+
+			next_match = match_marg->replacement_text;
+			strp = next_match + strlen(next_match);
+			while (strp > next_match)
+				unput(*--strp);
+
+			/* Skip past the unexpanded macro arg. */
+			body_pos -= match.rm_eo - match.rm_so;
+		}
+	}
+
+	/* Cleanup replacement text. */
+	STAILQ_FOREACH(marg, &macro_symbol->info.macroinfo->args, links) {
+		free(marg->replacement_text);
+	}
+}
+
+/*
+ * Find the next substitution in the macro working backwards from
+ * body_pos until the beginning of the macro buffer.  next_match
+ * should be initialized to the beginning of the macro buffer prior
+ * to calling this routine.
+ */
+static void
+next_substitution(struct symbol *mac_symbol, const char *body_pos,
+		  const char **next_match, struct macro_arg **match_marg,
+		  regmatch_t *match)
+{
+	regmatch_t	  matches[2];
+	struct macro_arg *marg;
+	const char	 *search_pos;
+	int		  retval;
+
+	do {
+		search_pos = *next_match;
+
+		STAILQ_FOREACH(marg, &mac_symbol->info.macroinfo->args, links) {
+
+			retval = regexec(&marg->arg_regex, search_pos, 2,
+					 matches, 0);
+			if (retval == 0
+			 && (matches[1].rm_eo + search_pos) <= body_pos
+			 && (matches[1].rm_eo + search_pos) > *next_match) {
+				*match = matches[1];
+				*next_match = match->rm_eo + search_pos;
+				*match_marg = marg;
+			}
+		}
+	} while (search_pos != *next_match);
+}
+
+int
+yywrap()
+{
+	include_t *include;
+
+	yy_delete_buffer(YY_CURRENT_BUFFER);
+	(void)fclose(yyin);
+	if (yyfilename != NULL)
+		free(yyfilename);
+	yyfilename = NULL;
+	include = include_stack.slh_first;
+	if (include != NULL) {
+		yy_switch_to_buffer(include->buffer);
+		yylineno = include->lineno;
+		yyfilename = include->filename;
+		SLIST_REMOVE_HEAD(&include_stack, links);
+		free(include);
+		return (0);
+	}
+	return (1);
+}
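
For reference: as the comment in expand_macro() above notes, unput() pushes characters back onto the input one at a time, so the expansion walks the stored macro body from its end toward its start, pushing back literal text and splicing in each argument's replacement_text wherever next_substitution() finds a match via the per-argument regex (arg_regex). The net result is ordinary textual substitution; below is a forward-reading sketch of that result, with no flex involved (the names and the one-parameter limit are illustrative only):

	#include <stdio.h>
	#include <string.h>

	/*
	 * Expand a one-parameter macro body by plain textual substitution.
	 * Unlike the assembler, this uses strstr(), so it would also match
	 * the parameter inside longer words; the real code avoids that by
	 * matching each argument with its own regex.
	 */
	static void expand(const char *body, const char *param, const char *arg)
	{
		const char *p = body, *hit;
		size_t plen = strlen(param);

		while ((hit = strstr(p, param)) != NULL) {
			printf("%.*s%s", (int)(hit - p), p, arg);
			p = hit + plen;
		}
		printf("%s\n", p);
	}

	int main(void)
	{
		/* e.g. a body "mvi reg, 0x5a" invoked with reg = SCSISEQ */
		expand("mvi reg, 0x5a", "reg", "SCSISEQ");
		return 0;
	}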
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
new file mode 100644
index 0000000..f1f448d
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -0,0 +1,677 @@
+/*
+ * Aic7xxx SCSI host adapter firmware assembler symbol table implementation
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * Copyright (c) 2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_symbol.c#24 $
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+
+#ifdef __linux__
+#include "aicdb.h"
+#else
+#include <db.h>
+#endif
+#include <fcntl.h>
+#include <inttypes.h>
+#include <regex.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+
+#include "aicasm_symbol.h"
+#include "aicasm.h"
+
+static DB *symtable;
+
+symbol_t *
+symbol_create(char *name)
+{
+	symbol_t *new_symbol;
+
+	new_symbol = (symbol_t *)malloc(sizeof(symbol_t));
+	if (new_symbol == NULL) {
+		perror("Unable to create new symbol");
+		exit(EX_SOFTWARE);
+	}
+	memset(new_symbol, 0, sizeof(*new_symbol));
+	new_symbol->name = strdup(name);
+	if (new_symbol->name == NULL)
+		 stop("Unable to strdup symbol name", EX_SOFTWARE);
+	new_symbol->type = UNINITIALIZED;
+	return (new_symbol);
+}
+
+void
+symbol_delete(symbol_t *symbol)
+{
+	if (symtable != NULL) {
+		DBT	 key;
+
+		key.data = symbol->name;
+		key.size = strlen(symbol->name);
+		symtable->del(symtable, &key, /*flags*/0);
+	}
+	switch(symbol->type) {
+	case SCBLOC:
+	case SRAMLOC:
+	case REGISTER:
+		if (symbol->info.rinfo != NULL)
+			free(symbol->info.rinfo);
+		break;
+	case ALIAS:
+		if (symbol->info.ainfo != NULL)
+			free(symbol->info.ainfo);
+		break;
+	case MASK:
+	case FIELD:
+	case ENUM:
+	case ENUM_ENTRY:
+		if (symbol->info.finfo != NULL) {
+			symlist_free(&symbol->info.finfo->symrefs);
+			free(symbol->info.finfo);
+		}
+		break;
+	case DOWNLOAD_CONST:
+	case CONST:
+		if (symbol->info.cinfo != NULL)
+			free(symbol->info.cinfo);
+		break;
+	case LABEL:
+		if (symbol->info.linfo != NULL)
+			free(symbol->info.linfo);
+		break;
+	case UNINITIALIZED:
+	default:
+		break;
+	}
+	free(symbol->name);
+	free(symbol);
+}
+
+void
+symtable_open()
+{
+	symtable = dbopen(/*filename*/NULL,
+			  O_CREAT | O_NONBLOCK | O_RDWR, /*mode*/0, DB_HASH,
+			  /*openinfo*/NULL);
+
+	if (symtable == NULL) {
+		perror("Symbol table creation failed");
+		exit(EX_SOFTWARE);
+		/* NOTREACHED */
+	}
+}
+
+void
+symtable_close()
+{
+	if (symtable != NULL) {
+		DBT	 key;
+		DBT	 data;
+
+		while (symtable->seq(symtable, &key, &data, R_FIRST) == 0) {
+			symbol_t *stored_ptr;
+
+			memcpy(&stored_ptr, data.data, sizeof(stored_ptr));
+			symbol_delete(stored_ptr);
+		}
+		symtable->close(symtable);
+	}
+}
+
+/*
+ * The semantics of get is to return an uninitialized symbol entry
+ * if a lookup fails.
+ */
+symbol_t *
+symtable_get(char *name)
+{
+	symbol_t *stored_ptr;
+	DBT	  key;
+	DBT	  data;
+	int	  retval;
+
+	key.data = (void *)name;
+	key.size = strlen(name);
+
+	if ((retval = symtable->get(symtable, &key, &data, /*flags*/0)) != 0) {
+		if (retval == -1) {
+			perror("Symbol table get operation failed");
+			exit(EX_SOFTWARE);
+			/* NOTREACHED */
+		} else if (retval == 1) {
+			/* Symbol wasn't found, so create a new one */
+			symbol_t *new_symbol;
+
+			new_symbol = symbol_create(name);
+			data.data = &new_symbol;
+			data.size = sizeof(new_symbol);
+			if (symtable->put(symtable, &key, &data,
+					  /*flags*/0) !=0) {
+				perror("Symtable put failed");
+				exit(EX_SOFTWARE);
+			}
+			return (new_symbol);
+		} else {
+			perror("Unexpected return value from db get routine");
+			exit(EX_SOFTWARE);
+			/* NOTREACHED */
+		}
+	}
+	memcpy(&stored_ptr, data.data, sizeof(stored_ptr));
+	return (stored_ptr);
+}
+
+symbol_node_t *
+symlist_search(symlist_t *symlist, char *symname)
+{
+	symbol_node_t *curnode;
+
+	curnode = SLIST_FIRST(symlist);
+	while(curnode != NULL) {
+		if (strcmp(symname, curnode->symbol->name) == 0)
+			break;
+		curnode = SLIST_NEXT(curnode, links);
+	}
+	return (curnode);
+}
+
+void
+symlist_add(symlist_t *symlist, symbol_t *symbol, int how)
+{
+	symbol_node_t *newnode;
+
+	newnode = (symbol_node_t *)malloc(sizeof(symbol_node_t));
+	if (newnode == NULL) {
+		stop("symlist_add: Unable to malloc symbol_node", EX_SOFTWARE);
+		/* NOTREACHED */
+	}
+	newnode->symbol = symbol;
+	if (how == SYMLIST_SORT) {
+		symbol_node_t *curnode;
+		int field;
+
+		field = FALSE;
+		switch(symbol->type) {
+		case REGISTER:
+		case SCBLOC:
+		case SRAMLOC:
+			break;
+		case FIELD:
+		case MASK:
+		case ENUM:
+		case ENUM_ENTRY:
+			field = TRUE;
+			break;
+		default:
+			stop("symlist_add: Invalid symbol type for sorting",
+			     EX_SOFTWARE);
+			/* NOTREACHED */
+		}
+
+		curnode = SLIST_FIRST(symlist);
+		if (curnode == NULL
+		 || (field
+		  && (curnode->symbol->type > newnode->symbol->type
+		   || (curnode->symbol->type == newnode->symbol->type
+		    && (curnode->symbol->info.finfo->value >
+			newnode->symbol->info.finfo->value))))
+		 || (!field && (curnode->symbol->info.rinfo->address >
+		               newnode->symbol->info.rinfo->address))) {
+			SLIST_INSERT_HEAD(symlist, newnode, links);
+			return;
+		}
+
+		while (1) {
+			if (SLIST_NEXT(curnode, links) == NULL) {
+				SLIST_INSERT_AFTER(curnode, newnode,
+						   links);
+				break;
+			} else {
+				symbol_t *cursymbol;
+
+				cursymbol = SLIST_NEXT(curnode, links)->symbol;
+				if ((field
+		  		  && (cursymbol->type > symbol->type
+				   || (cursymbol->type == symbol->type
+				    && (cursymbol->info.finfo->value >
+					symbol->info.finfo->value))))
+				 || (!field
+				   && (cursymbol->info.rinfo->address >
+				       symbol->info.rinfo->address))) {
+					SLIST_INSERT_AFTER(curnode, newnode,
+							   links);
+					break;
+				}
+			}
+			curnode = SLIST_NEXT(curnode, links);
+		}
+	} else {
+		SLIST_INSERT_HEAD(symlist, newnode, links);
+	}
+}
+
+void
+symlist_free(symlist_t *symlist)
+{
+	symbol_node_t *node1, *node2;
+
+	node1 = SLIST_FIRST(symlist);
+	while (node1 != NULL) {
+		node2 = SLIST_NEXT(node1, links);
+		free(node1);
+		node1 = node2;
+	}
+	SLIST_INIT(symlist);
+}
+
+void
+symlist_merge(symlist_t *symlist_dest, symlist_t *symlist_src1,
+	      symlist_t *symlist_src2)
+{
+	symbol_node_t *node;
+
+	*symlist_dest = *symlist_src1;
+	while((node = SLIST_FIRST(symlist_src2)) != NULL) {
+		SLIST_REMOVE_HEAD(symlist_src2, links);
+		SLIST_INSERT_HEAD(symlist_dest, node, links);
+	}
+
+	/* These are now empty */
+	SLIST_INIT(symlist_src1);
+	SLIST_INIT(symlist_src2);
+}
+
+void
+aic_print_file_prologue(FILE *ofile)
+{
+
+	if (ofile == NULL)
+		return;
+
+	fprintf(ofile,
+"/*\n"
+" * DO NOT EDIT - This file is automatically generated\n"
+" *		 from the following source files:\n"
+" *\n"
+"%s */\n",
+		versions);
+}
+
+void
+aic_print_include(FILE *dfile, char *include_file)
+{
+
+	if (dfile == NULL)
+		return;
+	fprintf(dfile, "\n#include \"%s\"\n\n", include_file);
+}
+
+void
+aic_print_reg_dump_types(FILE *ofile)
+{
+	if (ofile == NULL)
+		return;
+		
+	fprintf(ofile,
+"typedef int (%sreg_print_t)(u_int, u_int *, u_int);\n"
+"typedef struct %sreg_parse_entry {\n"
+"	char	*name;\n"
+"	uint8_t	 value;\n"
+"	uint8_t	 mask;\n"
+"} %sreg_parse_entry_t;\n"
+"\n",
+		prefix, prefix, prefix);
+}
+
+static void
+aic_print_reg_dump_start(FILE *dfile, symbol_node_t *regnode)
+{
+	if (dfile == NULL)
+		return;
+
+	fprintf(dfile,
+"static %sreg_parse_entry_t %s_parse_table[] = {\n",
+		prefix,
+		regnode->symbol->name);
+}
+
+static void
+aic_print_reg_dump_end(FILE *ofile, FILE *dfile,
+		       symbol_node_t *regnode, u_int num_entries)
+{
+	char *lower_name;
+	char *letter;
+
+	lower_name = strdup(regnode->symbol->name);
+	if (lower_name == NULL)
+		 stop("Unable to strdup symbol name", EX_SOFTWARE);
+	
+	for (letter = lower_name; *letter != '\0'; letter++)
+		*letter = tolower(*letter);
+
+	if (dfile != NULL) {
+		if (num_entries != 0)
+			fprintf(dfile,
+"\n"
+"};\n"
+"\n");
+
+		fprintf(dfile,
+"int\n"
+"%s%s_print(u_int regvalue, u_int *cur_col, u_int wrap)\n"
+"{\n"
+"	return (%sprint_register(%s%s, %d, \"%s\",\n"
+"	    0x%02x, regvalue, cur_col, wrap));\n"
+"}\n"
+"\n",
+			prefix,
+			lower_name,
+			prefix,
+			num_entries != 0 ? regnode->symbol->name : "NULL",
+			num_entries != 0 ? "_parse_table" : "",
+			num_entries,
+			regnode->symbol->name,
+			regnode->symbol->info.rinfo->address);
+	}
+
+	fprintf(ofile,
+"#if AIC_DEBUG_REGISTERS\n"
+"%sreg_print_t %s%s_print;\n"
+"#else\n"
+"#define %s%s_print(regvalue, cur_col, wrap) \\\n"
+"    %sprint_register(NULL, 0, \"%s\", 0x%02x, regvalue, cur_col, wrap)\n"
+"#endif\n"
+"\n",
+		prefix,
+		prefix,
+		lower_name,
+		prefix,
+		lower_name,
+		prefix,
+		regnode->symbol->name,
+		regnode->symbol->info.rinfo->address);
+}
+
+static void
+aic_print_reg_dump_entry(FILE *dfile, symbol_node_t *curnode)
+{
+	int num_tabs;
+
+	if (dfile == NULL)
+		return;
+
+	fprintf(dfile,
+"	{ \"%s\",",
+		curnode->symbol->name);
+
+	num_tabs = 3 - (strlen(curnode->symbol->name) + 5) / 8;
+
+	while (num_tabs-- > 0)
+		fputc('\t', dfile);
+	fprintf(dfile, "0x%02x, 0x%02x }",
+		curnode->symbol->info.finfo->value,
+		curnode->symbol->info.finfo->mask);
+}
+
+void
+symtable_dump(FILE *ofile, FILE *dfile)
+{
+	/*
+	 * Sort the registers by address with a simple insertion sort.
+	 * Put bitmasks next to the first register that defines them.
+	 * Put constants at the end.
+	 */
+	symlist_t	 registers;
+	symlist_t	 masks;
+	symlist_t	 constants;
+	symlist_t	 download_constants;
+	symlist_t	 aliases;
+	symlist_t	 exported_labels;
+	symbol_node_t	*curnode;
+	symbol_node_t	*regnode;
+	DBT		 key;
+	DBT		 data;
+	int		 flag;
+	u_int		 i;
+
+	if (symtable == NULL)
+		return;
+
+	SLIST_INIT(&registers);
+	SLIST_INIT(&masks);
+	SLIST_INIT(&constants);
+	SLIST_INIT(&download_constants);
+	SLIST_INIT(&aliases);
+	SLIST_INIT(&exported_labels);
+	flag = R_FIRST;
+	while (symtable->seq(symtable, &key, &data, flag) == 0) {
+		symbol_t *cursym;
+
+		memcpy(&cursym, data.data, sizeof(cursym));
+		switch(cursym->type) {
+		case REGISTER:
+		case SCBLOC:
+		case SRAMLOC:
+			symlist_add(&registers, cursym, SYMLIST_SORT);
+			break;
+		case MASK:
+		case FIELD:
+		case ENUM:
+		case ENUM_ENTRY:
+			symlist_add(&masks, cursym, SYMLIST_SORT);
+			break;
+		case CONST:
+			symlist_add(&constants, cursym,
+				    SYMLIST_INSERT_HEAD);
+			break;
+		case DOWNLOAD_CONST:
+			symlist_add(&download_constants, cursym,
+				    SYMLIST_INSERT_HEAD);
+			break;
+		case ALIAS:
+			symlist_add(&aliases, cursym,
+				    SYMLIST_INSERT_HEAD);
+			break;
+		case LABEL:
+			if (cursym->info.linfo->exported == 0)
+				break;
+			symlist_add(&exported_labels, cursym,
+				    SYMLIST_INSERT_HEAD);
+			break;
+		default:
+			break;
+		}
+		flag = R_NEXT;
+	}
+
+	/* Register diagnostic functions/declarations first. */
+	aic_print_file_prologue(ofile);
+	aic_print_reg_dump_types(ofile);
+	aic_print_file_prologue(dfile);
+	aic_print_include(dfile, stock_include_file);
+	SLIST_FOREACH(curnode, &registers, links) {
+
+		switch(curnode->symbol->type) {
+		case REGISTER:
+		case SCBLOC:
+		case SRAMLOC:
+		{
+			symlist_t	*fields;
+			symbol_node_t	*fieldnode;
+			int		 num_entries;
+
+			num_entries = 0;
+			fields = &curnode->symbol->info.rinfo->fields;
+			SLIST_FOREACH(fieldnode, fields, links) {
+				if (num_entries == 0)
+					aic_print_reg_dump_start(dfile,
+								 curnode);
+				else if (dfile != NULL)
+					fputs(",\n", dfile);
+				num_entries++;
+				aic_print_reg_dump_entry(dfile, fieldnode);
+			}
+			aic_print_reg_dump_end(ofile, dfile,
+					       curnode, num_entries);
+		}
+		default:
+			break;
+		}
+	}
+
+	/* Fold in the masks and bits */
+	while (SLIST_FIRST(&masks) != NULL) {
+		char *regname;
+
+		curnode = SLIST_FIRST(&masks);
+		SLIST_REMOVE_HEAD(&masks, links);
+
+		regnode = SLIST_FIRST(&curnode->symbol->info.finfo->symrefs);
+		regname = regnode->symbol->name;
+		regnode = symlist_search(&registers, regname);
+		SLIST_INSERT_AFTER(regnode, curnode, links);
+	}
+
+	/* Add the aliases */
+	while (SLIST_FIRST(&aliases) != NULL) {
+		char *regname;
+
+		curnode = SLIST_FIRST(&aliases);
+		SLIST_REMOVE_HEAD(&aliases, links);
+
+		regname = curnode->symbol->info.ainfo->parent->name;
+		regnode = symlist_search(&registers, regname);
+		SLIST_INSERT_AFTER(regnode, curnode, links);
+	}
+
+	/* Output generated #defines. */
+	while (SLIST_FIRST(&registers) != NULL) {
+		symbol_node_t *curnode;
+		u_int value;
+		char *tab_str;
+		char *tab_str2;
+
+		curnode = SLIST_FIRST(&registers);
+		SLIST_REMOVE_HEAD(&registers, links);
+		switch(curnode->symbol->type) {
+		case REGISTER:
+		case SCBLOC:
+		case SRAMLOC:
+			fprintf(ofile, "\n");
+			value = curnode->symbol->info.rinfo->address;
+			tab_str = "\t";
+			tab_str2 = "\t\t";
+			break;
+		case ALIAS:
+		{
+			symbol_t *parent;
+
+			parent = curnode->symbol->info.ainfo->parent;
+			value = parent->info.rinfo->address;
+			tab_str = "\t";
+			tab_str2 = "\t\t";
+			break;
+		}
+		case MASK:
+		case FIELD:
+		case ENUM:
+		case ENUM_ENTRY:
+			value = curnode->symbol->info.finfo->value;
+			tab_str = "\t\t";
+			tab_str2 = "\t";
+			break;
+		default:
+			value = 0; /* Quiet compiler */
+			tab_str = NULL;
+			tab_str2 = NULL;
+			stop("symtable_dump: Invalid symbol type "
+			     "encountered", EX_SOFTWARE);
+			break;
+		}
+		fprintf(ofile, "#define%s%-16s%s0x%02x\n",
+			tab_str, curnode->symbol->name, tab_str2,
+			value);
+		free(curnode);
+	}
+	fprintf(ofile, "\n\n");
+
+	while (SLIST_FIRST(&constants) != NULL) {
+		symbol_node_t *curnode;
+
+		curnode = SLIST_FIRST(&constants);
+		SLIST_REMOVE_HEAD(&constants, links);
+		fprintf(ofile, "#define\t%-8s\t0x%02x\n",
+			curnode->symbol->name,
+			curnode->symbol->info.cinfo->value);
+		free(curnode);
+	}
+
+	
+	fprintf(ofile, "\n\n/* Downloaded Constant Definitions */\n");
+
+	for (i = 0; SLIST_FIRST(&download_constants) != NULL; i++) {
+		symbol_node_t *curnode;
+
+		curnode = SLIST_FIRST(&download_constants);
+		SLIST_REMOVE_HEAD(&download_constants, links);
+		fprintf(ofile, "#define\t%-8s\t0x%02x\n",
+			curnode->symbol->name,
+			curnode->symbol->info.cinfo->value);
+		free(curnode);
+	}
+	fprintf(ofile, "#define\tDOWNLOAD_CONST_COUNT\t0x%02x\n", i);
+
+	fprintf(ofile, "\n\n/* Exported Labels */\n");
+
+	while (SLIST_FIRST(&exported_labels) != NULL) {
+		symbol_node_t *curnode;
+
+		curnode = SLIST_FIRST(&exported_labels);
+		SLIST_REMOVE_HEAD(&exported_labels, links);
+		fprintf(ofile, "#define\tLABEL_%-8s\t0x%02x\n",
+			curnode->symbol->name,
+			curnode->symbol->info.linfo->address);
+		free(curnode);
+	}
+}
+
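
For reference: as the comment above symtable_get() says, a lookup miss is not an error: the routine allocates a fresh UNINITIALIZED symbol, stores a pointer to it in the db(3) hash table, and returns it, so grammar actions can fill in the type later. A standalone sketch of that create-on-miss pattern, using a trivial list in place of the hash table (all names illustrative):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	enum symtype { UNINITIALIZED, REGISTER, LABEL };

	struct sym {
		struct sym  *next;
		char        *name;
		enum symtype type;
	};

	static struct sym *symbols;

	/* Return the existing entry for name, or create an UNINITIALIZED one. */
	static struct sym *sym_get(const char *name)
	{
		struct sym *s;

		for (s = symbols; s != NULL; s = s->next)
			if (strcmp(s->name, name) == 0)
				return s;

		s = calloc(1, sizeof(*s));
		if (s == NULL || (s->name = strdup(name)) == NULL) {
			perror("sym_get");
			exit(1);
		}
		s->type = UNINITIALIZED;
		s->next = symbols;
		symbols = s;
		return s;
	}

	int main(void)
	{
		sym_get("SCSISEQ")->type = REGISTER;	/* a later action assigns the type */
		printf("%s\n", sym_get("SCSISEQ")->type == REGISTER ?
		       "found as REGISTER" : "missing");
		return 0;
	}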
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
new file mode 100644
index 0000000..afc22e8
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -0,0 +1,207 @@
+/*
+ * Aic7xxx SCSI host adapter firmware assembler symbol table definitions
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * Copyright (c) 2002 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_symbol.h#17 $
+ *
+ * $FreeBSD$
+ */
+
+#ifdef __linux__
+#include "../queue.h"
+#else
+#include <sys/queue.h>
+#endif
+
+typedef enum {
+	UNINITIALIZED,
+	REGISTER,
+	ALIAS,
+	SCBLOC,
+	SRAMLOC,
+	ENUM_ENTRY,
+	FIELD,
+	MASK,
+	ENUM,
+	CONST,
+	DOWNLOAD_CONST,
+	LABEL,
+	CONDITIONAL,
+	MACRO
+} symtype;
+
+typedef enum {
+	RO = 0x01,
+	WO = 0x02,
+	RW = 0x03
+} amode_t;
+
+typedef SLIST_HEAD(symlist, symbol_node) symlist_t;
+
+struct reg_info {
+	u_int	  address;
+	int	  size;
+	amode_t	  mode;
+	symlist_t fields;
+	uint8_t	  valid_bitmask;
+	uint8_t	  modes;
+	int	  typecheck_masks;
+};
+
+struct field_info {
+	symlist_t symrefs;
+	uint8_t	  value;
+	uint8_t	  mask;
+};
+
+struct const_info {
+	u_int	value;
+	int	define;
+};
+
+struct alias_info {
+	struct symbol *parent;
+};
+
+struct label_info {
+	int	address;
+	int	exported;
+};
+
+struct cond_info {
+	int	func_num;
+};
+
+struct macro_arg {
+	STAILQ_ENTRY(macro_arg)	links;
+	regex_t	arg_regex;
+	char   *replacement_text;
+};
+STAILQ_HEAD(macro_arg_list, macro_arg);
+
+struct macro_info {
+	struct macro_arg_list args;
+	int   narg;
+	const char* body;
+};
+
+typedef struct expression_info {
+        symlist_t       referenced_syms;
+        int             value;
+} expression_t;
+
+typedef struct symbol {
+	char	*name;
+	symtype	type;
+	union	{
+		struct reg_info	  *rinfo;
+		struct field_info *finfo;
+		struct const_info *cinfo;
+		struct alias_info *ainfo;
+		struct label_info *linfo;
+		struct cond_info  *condinfo;
+		struct macro_info *macroinfo;
+	} info;
+} symbol_t;
+
+typedef struct symbol_ref {
+	symbol_t *symbol;
+	int	 offset;
+} symbol_ref_t;
+
+typedef struct symbol_node {
+	SLIST_ENTRY(symbol_node) links;
+	symbol_t *symbol;
+} symbol_node_t;
+
+typedef struct critical_section {
+	TAILQ_ENTRY(critical_section) links;
+	int begin_addr;
+	int end_addr;
+} critical_section_t;
+
+typedef enum {
+	SCOPE_ROOT,
+	SCOPE_IF,
+	SCOPE_ELSE_IF,
+	SCOPE_ELSE
+} scope_type;
+
+typedef struct patch_info {
+	int skip_patch;
+	int skip_instr;
+} patch_info_t;
+
+typedef struct scope {
+	SLIST_ENTRY(scope) scope_stack_links;
+	TAILQ_ENTRY(scope) scope_links;
+	TAILQ_HEAD(, scope) inner_scope;
+	scope_type type;
+	int inner_scope_patches;
+	int begin_addr;
+        int end_addr;
+	patch_info_t patches[2];
+	int func_num;
+} scope_t;
+
+TAILQ_HEAD(cs_tailq, critical_section);
+SLIST_HEAD(scope_list, scope);
+TAILQ_HEAD(scope_tailq, scope);
+
+void	symbol_delete(symbol_t *symbol);
+
+void	symtable_open(void);
+
+void	symtable_close(void);
+
+symbol_t *
+	symtable_get(char *name);
+
+symbol_node_t *
+	symlist_search(symlist_t *symlist, char *symname);
+
+void
+	symlist_add(symlist_t *symlist, symbol_t *symbol, int how);
+#define SYMLIST_INSERT_HEAD	0x00
+#define SYMLIST_SORT		0x01
+
+void	symlist_free(symlist_t *symlist);
+
+void	symlist_merge(symlist_t *symlist_dest, symlist_t *symlist_src1,
+		      symlist_t *symlist_src2);
+void	symtable_dump(FILE *ofile, FILE *dfile);
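
For reference: symlist_add() with SYMLIST_SORT (declared above, implemented in aicasm_symbol.c) keeps a list ordered by inserting each new node in front of the first existing node with a larger key, which is the simple insertion sort that symtable_dump() relies on to emit registers in address order. A standalone sketch of that ordered insert on a plain singly linked list (names illustrative):

	#include <stdio.h>

	struct node {
		struct node *next;
		unsigned     address;	/* sort key, e.g. a register address */
	};

	/* Insert n so the list stays sorted by ascending address. */
	static void sorted_insert(struct node **head, struct node *n)
	{
		struct node **pp = head;

		while (*pp != NULL && (*pp)->address <= n->address)
			pp = &(*pp)->next;
		n->next = *pp;
		*pp = n;
	}

	int main(void)
	{
		struct node a = { NULL, 0x65 }, b = { NULL, 0x00 }, c = { NULL, 0x03 };
		struct node *head = NULL, *p;

		sorted_insert(&head, &a);
		sorted_insert(&head, &b);
		sorted_insert(&head, &c);
		for (p = head; p != NULL; p = p->next)
			printf("0x%02x\n", p->address);
		return 0;
	}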
diff --git a/drivers/scsi/aic7xxx/aiclib.c b/drivers/scsi/aic7xxx/aiclib.c
new file mode 100644
index 0000000..79bfd9e
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aiclib.c
@@ -0,0 +1,1412 @@
+/*
+ * Implementation of Utility functions for all SCSI device types.
+ *
+ * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
+ * Copyright (c) 1997, 1998 Kenneth D. Merry.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/cam/scsi/scsi_all.c,v 1.38 2002/09/23 04:56:35 mjacob Exp $
+ * $Id$
+ */
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+
+/* Core SCSI definitions */
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include "aiclib.h"
+#include "cam.h"
+
+#ifndef FALSE
+#define FALSE   0
+#endif /* FALSE */
+#ifndef TRUE
+#define TRUE    1
+#endif /* TRUE */
+#ifndef ERESTART
+#define ERESTART        -1              /* restart syscall */
+#endif
+#ifndef EJUSTRETURN
+#define EJUSTRETURN     -2              /* don't modify regs, just return */
+#endif
+
+static int	ascentrycomp(const void *key, const void *member);
+static int	senseentrycomp(const void *key, const void *member);
+static void	fetchtableentries(int sense_key, int asc, int ascq,
+				  struct scsi_inquiry_data *,
+				  const struct sense_key_table_entry **,
+				  const struct asc_table_entry **);
+static void *	scsibsearch(const void *key, const void *base, size_t nmemb,
+			    size_t size,
+			    int (*compar)(const void *, const void *));
+typedef int (cam_quirkmatch_t)(caddr_t, caddr_t);
+static int	cam_strmatch(const u_int8_t *str, const u_int8_t *pattern,
+			     int str_len);
+static caddr_t	cam_quirkmatch(caddr_t target, caddr_t quirk_table,
+			       int num_entries, int entry_size,
+			       cam_quirkmatch_t *comp_func);
+
+#define SCSI_NO_SENSE_STRINGS 1
+#if !defined(SCSI_NO_SENSE_STRINGS)
+#define SST(asc, ascq, action, desc) \
+	asc, ascq, action, desc
+#else 
+static const char empty_string[] = "";
+
+#define SST(asc, ascq, action, desc) \
+	asc, ascq, action, empty_string
+#endif 
+
+static const struct sense_key_table_entry sense_key_table[] = 
+{
+	{ SSD_KEY_NO_SENSE, SS_NOP, "NO SENSE" },
+	{ SSD_KEY_RECOVERED_ERROR, SS_NOP|SSQ_PRINT_SENSE, "RECOVERED ERROR" },
+	{
+	  SSD_KEY_NOT_READY, SS_TUR|SSQ_MANY|SSQ_DECREMENT_COUNT|EBUSY,
+	  "NOT READY"
+	},
+	{ SSD_KEY_MEDIUM_ERROR, SS_RDEF, "MEDIUM ERROR" },
+	{ SSD_KEY_HARDWARE_ERROR, SS_RDEF, "HARDWARE FAILURE" },
+	{ SSD_KEY_ILLEGAL_REQUEST, SS_FATAL|EINVAL, "ILLEGAL REQUEST" },
+	{ SSD_KEY_UNIT_ATTENTION, SS_FATAL|ENXIO, "UNIT ATTENTION" },
+	{ SSD_KEY_DATA_PROTECT, SS_FATAL|EACCES, "DATA PROTECT" },
+	{ SSD_KEY_BLANK_CHECK, SS_FATAL|ENOSPC, "BLANK CHECK" },
+	{ SSD_KEY_Vendor_Specific, SS_FATAL|EIO, "Vendor Specific" },
+	{ SSD_KEY_COPY_ABORTED, SS_FATAL|EIO, "COPY ABORTED" },
+	{ SSD_KEY_ABORTED_COMMAND, SS_RDEF, "ABORTED COMMAND" },
+	{ SSD_KEY_EQUAL, SS_NOP, "EQUAL" },
+	{ SSD_KEY_VOLUME_OVERFLOW, SS_FATAL|EIO, "VOLUME OVERFLOW" },
+	{ SSD_KEY_MISCOMPARE, SS_NOP, "MISCOMPARE" },
+	{ SSD_KEY_RESERVED, SS_FATAL|EIO, "RESERVED" }
+};
+
+static const int sense_key_table_size =
+    sizeof(sense_key_table)/sizeof(sense_key_table[0]);
+
+static struct asc_table_entry quantum_fireball_entries[] = {
+	{SST(0x04, 0x0b, SS_START|SSQ_DECREMENT_COUNT|ENXIO, 
+	     "Logical unit not ready, initializing cmd. required")}
+};
+
+static struct asc_table_entry sony_mo_entries[] = {
+	{SST(0x04, 0x00, SS_START|SSQ_DECREMENT_COUNT|ENXIO,
+	     "Logical unit not ready, cause not reportable")}
+};
+
+static struct scsi_sense_quirk_entry sense_quirk_table[] = {
+	{
+		/*
+		 * The Quantum Fireball ST and SE like to return 0x04 0x0b when
+		 * they really should return 0x04 0x02.  0x04,0x0b isn't
+		 * defined in any SCSI spec, and it isn't mentioned in the
+		 * hardware manual for these drives.
+		 */
+		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "FIREBALL S*", "*"},
+		/*num_sense_keys*/0,
+		sizeof(quantum_fireball_entries)/sizeof(struct asc_table_entry),
+		/*sense key entries*/NULL,
+		quantum_fireball_entries
+	},
+	{
+		/*
+		 * This Sony MO drive likes to return 0x04, 0x00 when it
+		 * isn't spun up.
+		 */
+		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SONY", "SMO-*", "*"},
+		/*num_sense_keys*/0,
+		sizeof(sony_mo_entries)/sizeof(struct asc_table_entry),
+		/*sense key entries*/NULL,
+		sony_mo_entries
+	}
+};
+
+static const int sense_quirk_table_size =
+    sizeof(sense_quirk_table)/sizeof(sense_quirk_table[0]);
+
+static struct asc_table_entry asc_table[] = {
+/*
+ * From File: ASC-NUM.TXT
+ * SCSI ASC/ASCQ Assignments
+ * Numeric Sorted Listing
+ * as of  5/12/97
+ *
+ * D - DIRECT ACCESS DEVICE (SBC)                     device column key
+ * .T - SEQUENTIAL ACCESS DEVICE (SSC)               -------------------
+ * . L - PRINTER DEVICE (SSC)                           blank = reserved
+ * .  P - PROCESSOR DEVICE (SPC)                     not blank = allowed
+ * .  .W - WRITE ONCE READ MULTIPLE DEVICE (SBC)
+ * .  . R - CD DEVICE (MMC)
+ * .  .  S - SCANNER DEVICE (SGC)
+ * .  .  .O - OPTICAL MEMORY DEVICE (SBC)
+ * .  .  . M - MEDIA CHANGER DEVICE (SMC)
+ * .  .  .  C - COMMUNICATION DEVICE (SSC)
+ * .  .  .  .A - STORAGE ARRAY DEVICE (SCC)
+ * .  .  .  . E - ENCLOSURE SERVICES DEVICE (SES)
+ * DTLPWRSOMCAE        ASC   ASCQ  Action  Description
+ * ------------        ----  ----  ------  -----------------------------------*/
+/* DTLPWRSOMCAE */{SST(0x00, 0x00, SS_NOP,
+			"No additional sense information") },
+/*  T    S      */{SST(0x00, 0x01, SS_RDEF,
+			"Filemark detected") },
+/*  T    S      */{SST(0x00, 0x02, SS_RDEF,
+			"End-of-partition/medium detected") },
+/*  T           */{SST(0x00, 0x03, SS_RDEF,
+			"Setmark detected") },
+/*  T    S      */{SST(0x00, 0x04, SS_RDEF,
+			"Beginning-of-partition/medium detected") },
+/*  T    S      */{SST(0x00, 0x05, SS_RDEF,
+			"End-of-data detected") },
+/* DTLPWRSOMCAE */{SST(0x00, 0x06, SS_RDEF,
+			"I/O process terminated") },
+/*      R       */{SST(0x00, 0x11, SS_FATAL|EBUSY,
+			"Audio play operation in progress") },
+/*      R       */{SST(0x00, 0x12, SS_NOP,
+			"Audio play operation paused") },
+/*      R       */{SST(0x00, 0x13, SS_NOP,
+			"Audio play operation successfully completed") },
+/*      R       */{SST(0x00, 0x14, SS_RDEF,
+			"Audio play operation stopped due to error") },
+/*      R       */{SST(0x00, 0x15, SS_NOP,
+			"No current audio status to return") },
+/* DTLPWRSOMCAE */{SST(0x00, 0x16, SS_FATAL|EBUSY,
+			"Operation in progress") },
+/* DTL WRSOM AE */{SST(0x00, 0x17, SS_RDEF,
+			"Cleaning requested") },
+/* D   W  O     */{SST(0x01, 0x00, SS_RDEF,
+			"No index/sector signal") },
+/* D   WR OM    */{SST(0x02, 0x00, SS_RDEF,
+			"No seek complete") },
+/* DTL W SO     */{SST(0x03, 0x00, SS_RDEF,
+			"Peripheral device write fault") },
+/*  T           */{SST(0x03, 0x01, SS_RDEF,
+			"No write current") },
+/*  T           */{SST(0x03, 0x02, SS_RDEF,
+			"Excessive write errors") },
+/* DTLPWRSOMCAE */{SST(0x04, 0x00,
+			SS_TUR|SSQ_DELAY|SSQ_MANY|SSQ_DECREMENT_COUNT|EIO,
+			"Logical unit not ready, cause not reportable") },
+/* DTLPWRSOMCAE */{SST(0x04, 0x01,
+			SS_TUR|SSQ_DELAY|SSQ_MANY|SSQ_DECREMENT_COUNT|EBUSY,
+			"Logical unit is in process of becoming ready") },
+/* DTLPWRSOMCAE */{SST(0x04, 0x02, SS_START|SSQ_DECREMENT_COUNT|ENXIO,
+			"Logical unit not ready, initializing cmd. required") },
+/* DTLPWRSOMCAE */{SST(0x04, 0x03, SS_FATAL|ENXIO,
+			"Logical unit not ready, manual intervention required")},
+/* DTL    O     */{SST(0x04, 0x04, SS_FATAL|EBUSY,
+			"Logical unit not ready, format in progress") },
+/* DT  W  OMCA  */{SST(0x04, 0x05, SS_FATAL|EBUSY,
+			"Logical unit not ready, rebuild in progress") },
+/* DT  W  OMCA  */{SST(0x04, 0x06, SS_FATAL|EBUSY,
+			"Logical unit not ready, recalculation in progress") },
+/* DTLPWRSOMCAE */{SST(0x04, 0x07, SS_FATAL|EBUSY,
+			"Logical unit not ready, operation in progress") },
+/*      R       */{SST(0x04, 0x08, SS_FATAL|EBUSY,
+			"Logical unit not ready, long write in progress") },
+/* DTL WRSOMCAE */{SST(0x05, 0x00, SS_RDEF,
+			"Logical unit does not respond to selection") },
+/* D   WR OM    */{SST(0x06, 0x00, SS_RDEF,
+			"No reference position found") },
+/* DTL WRSOM    */{SST(0x07, 0x00, SS_RDEF,
+			"Multiple peripheral devices selected") },
+/* DTL WRSOMCAE */{SST(0x08, 0x00, SS_RDEF,
+			"Logical unit communication failure") },
+/* DTL WRSOMCAE */{SST(0x08, 0x01, SS_RDEF,
+			"Logical unit communication time-out") },
+/* DTL WRSOMCAE */{SST(0x08, 0x02, SS_RDEF,
+			"Logical unit communication parity error") },
+/* DT   R OM    */{SST(0x08, 0x03, SS_RDEF,
+			"Logical unit communication crc error (ultra-dma/32)")},
+/* DT  WR O     */{SST(0x09, 0x00, SS_RDEF,
+			"Track following error") },
+/*     WR O     */{SST(0x09, 0x01, SS_RDEF,
+			"Tracking servo failure") },
+/*     WR O     */{SST(0x09, 0x02, SS_RDEF,
+			"Focus servo failure") },
+/*     WR O     */{SST(0x09, 0x03, SS_RDEF,
+			"Spindle servo failure") },
+/* DT  WR O     */{SST(0x09, 0x04, SS_RDEF,
+			"Head select fault") },
+/* DTLPWRSOMCAE */{SST(0x0A, 0x00, SS_FATAL|ENOSPC,
+			"Error log overflow") },
+/* DTLPWRSOMCAE */{SST(0x0B, 0x00, SS_RDEF,
+			"Warning") },
+/* DTLPWRSOMCAE */{SST(0x0B, 0x01, SS_RDEF,
+			"Specified temperature exceeded") },
+/* DTLPWRSOMCAE */{SST(0x0B, 0x02, SS_RDEF,
+			"Enclosure degraded") },
+/*  T   RS      */{SST(0x0C, 0x00, SS_RDEF,
+			"Write error") },
+/* D   W  O     */{SST(0x0C, 0x01, SS_NOP|SSQ_PRINT_SENSE,
+			"Write error - recovered with auto reallocation") },
+/* D   W  O     */{SST(0x0C, 0x02, SS_RDEF,
+			"Write error - auto reallocation failed") },
+/* D   W  O     */{SST(0x0C, 0x03, SS_RDEF,
+			"Write error - recommend reassignment") },
+/* DT  W  O     */{SST(0x0C, 0x04, SS_RDEF,
+			"Compression check miscompare error") },
+/* DT  W  O     */{SST(0x0C, 0x05, SS_RDEF,
+			"Data expansion occurred during compression") },
+/* DT  W  O     */{SST(0x0C, 0x06, SS_RDEF,
+			"Block not compressible") },
+/*      R       */{SST(0x0C, 0x07, SS_RDEF,
+			"Write error - recovery needed") },
+/*      R       */{SST(0x0C, 0x08, SS_RDEF,
+			"Write error - recovery failed") },
+/*      R       */{SST(0x0C, 0x09, SS_RDEF,
+			"Write error - loss of streaming") },
+/*      R       */{SST(0x0C, 0x0A, SS_RDEF,
+			"Write error - padding blocks added") },
+/* D   W  O     */{SST(0x10, 0x00, SS_RDEF,
+			"ID CRC or ECC error") },
+/* DT  WRSO     */{SST(0x11, 0x00, SS_RDEF,
+			"Unrecovered read error") },
+/* DT  W SO     */{SST(0x11, 0x01, SS_RDEF,
+			"Read retries exhausted") },
+/* DT  W SO     */{SST(0x11, 0x02, SS_RDEF,
+			"Error too long to correct") },
+/* DT  W SO     */{SST(0x11, 0x03, SS_RDEF,
+			"Multiple read errors") },
+/* D   W  O     */{SST(0x11, 0x04, SS_RDEF,
+			"Unrecovered read error - auto reallocate failed") },
+/*     WR O     */{SST(0x11, 0x05, SS_RDEF,
+			"L-EC uncorrectable error") },
+/*     WR O     */{SST(0x11, 0x06, SS_RDEF,
+			"CIRC unrecovered error") },
+/*     W  O     */{SST(0x11, 0x07, SS_RDEF,
+			"Data re-synchronization error") },
+/*  T           */{SST(0x11, 0x08, SS_RDEF,
+			"Incomplete block read") },
+/*  T           */{SST(0x11, 0x09, SS_RDEF,
+			"No gap found") },
+/* DT     O     */{SST(0x11, 0x0A, SS_RDEF,
+			"Miscorrected error") },
+/* D   W  O     */{SST(0x11, 0x0B, SS_RDEF,
+			"Unrecovered read error - recommend reassignment") },
+/* D   W  O     */{SST(0x11, 0x0C, SS_RDEF,
+			"Unrecovered read error - recommend rewrite the data")},
+/* DT  WR O     */{SST(0x11, 0x0D, SS_RDEF,
+			"De-compression CRC error") },
+/* DT  WR O     */{SST(0x11, 0x0E, SS_RDEF,
+			"Cannot decompress using declared algorithm") },
+/*      R       */{SST(0x11, 0x0F, SS_RDEF,
+			"Error reading UPC/EAN number") },
+/*      R       */{SST(0x11, 0x10, SS_RDEF,
+			"Error reading ISRC number") },
+/*      R       */{SST(0x11, 0x11, SS_RDEF,
+			"Read error - loss of streaming") },
+/* D   W  O     */{SST(0x12, 0x00, SS_RDEF,
+			"Address mark not found for id field") },
+/* D   W  O     */{SST(0x13, 0x00, SS_RDEF,
+			"Address mark not found for data field") },
+/* DTL WRSO     */{SST(0x14, 0x00, SS_RDEF,
+			"Recorded entity not found") },
+/* DT  WR O     */{SST(0x14, 0x01, SS_RDEF,
+			"Record not found") },
+/*  T           */{SST(0x14, 0x02, SS_RDEF,
+			"Filemark or setmark not found") },
+/*  T           */{SST(0x14, 0x03, SS_RDEF,
+			"End-of-data not found") },
+/*  T           */{SST(0x14, 0x04, SS_RDEF,
+			"Block sequence error") },
+/* DT  W  O     */{SST(0x14, 0x05, SS_RDEF,
+			"Record not found - recommend reassignment") },
+/* DT  W  O     */{SST(0x14, 0x06, SS_RDEF,
+			"Record not found - data auto-reallocated") },
+/* DTL WRSOM    */{SST(0x15, 0x00, SS_RDEF,
+			"Random positioning error") },
+/* DTL WRSOM    */{SST(0x15, 0x01, SS_RDEF,
+			"Mechanical positioning error") },
+/* DT  WR O     */{SST(0x15, 0x02, SS_RDEF,
+			"Positioning error detected by read of medium") },
+/* D   W  O     */{SST(0x16, 0x00, SS_RDEF,
+			"Data synchronization mark error") },
+/* D   W  O     */{SST(0x16, 0x01, SS_RDEF,
+			"Data sync error - data rewritten") },
+/* D   W  O     */{SST(0x16, 0x02, SS_RDEF,
+			"Data sync error - recommend rewrite") },
+/* D   W  O     */{SST(0x16, 0x03, SS_NOP|SSQ_PRINT_SENSE,
+			"Data sync error - data auto-reallocated") },
+/* D   W  O     */{SST(0x16, 0x04, SS_RDEF,
+			"Data sync error - recommend reassignment") },
+/* DT  WRSO     */{SST(0x17, 0x00, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data with no error correction applied") },
+/* DT  WRSO     */{SST(0x17, 0x01, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data with retries") },
+/* DT  WR O     */{SST(0x17, 0x02, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data with positive head offset") },
+/* DT  WR O     */{SST(0x17, 0x03, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data with negative head offset") },
+/*     WR O     */{SST(0x17, 0x04, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data with retries and/or CIRC applied") },
+/* D   WR O     */{SST(0x17, 0x05, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data using previous sector id") },
+/* D   W  O     */{SST(0x17, 0x06, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data without ECC - data auto-reallocated") },
+/* D   W  O     */{SST(0x17, 0x07, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data without ECC - recommend reassignment")},
+/* D   W  O     */{SST(0x17, 0x08, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data without ECC - recommend rewrite") },
+/* D   W  O     */{SST(0x17, 0x09, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data without ECC - data rewritten") },
+/* D   W  O     */{SST(0x18, 0x00, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data with error correction applied") },
+/* D   WR O     */{SST(0x18, 0x01, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data with error corr. & retries applied") },
+/* D   WR O     */{SST(0x18, 0x02, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data - data auto-reallocated") },
+/*      R       */{SST(0x18, 0x03, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data with CIRC") },
+/*      R       */{SST(0x18, 0x04, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data with L-EC") },
+/* D   WR O     */{SST(0x18, 0x05, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data - recommend reassignment") },
+/* D   WR O     */{SST(0x18, 0x06, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data - recommend rewrite") },
+/* D   W  O     */{SST(0x18, 0x07, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered data with ECC - data rewritten") },
+/* D      O     */{SST(0x19, 0x00, SS_RDEF,
+			"Defect list error") },
+/* D      O     */{SST(0x19, 0x01, SS_RDEF,
+			"Defect list not available") },
+/* D      O     */{SST(0x19, 0x02, SS_RDEF,
+			"Defect list error in primary list") },
+/* D      O     */{SST(0x19, 0x03, SS_RDEF,
+			"Defect list error in grown list") },
+/* DTLPWRSOMCAE */{SST(0x1A, 0x00, SS_RDEF,
+			"Parameter list length error") },
+/* DTLPWRSOMCAE */{SST(0x1B, 0x00, SS_RDEF,
+			"Synchronous data transfer error") },
+/* D      O     */{SST(0x1C, 0x00, SS_RDEF,
+			"Defect list not found") },
+/* D      O     */{SST(0x1C, 0x01, SS_RDEF,
+			"Primary defect list not found") },
+/* D      O     */{SST(0x1C, 0x02, SS_RDEF,
+			"Grown defect list not found") },
+/* D   W  O     */{SST(0x1D, 0x00, SS_FATAL,
+			"Miscompare during verify operation" )},
+/* D   W  O     */{SST(0x1E, 0x00, SS_NOP|SSQ_PRINT_SENSE,
+			"Recovered ID with ECC correction") },
+/* D      O     */{SST(0x1F, 0x00, SS_RDEF,
+			"Partial defect list transfer") },
+/* DTLPWRSOMCAE */{SST(0x20, 0x00, SS_FATAL|EINVAL,
+			"Invalid command operation code") },
+/* DT  WR OM    */{SST(0x21, 0x00, SS_FATAL|EINVAL,
+			"Logical block address out of range" )},
+/* DT  WR OM    */{SST(0x21, 0x01, SS_FATAL|EINVAL,
+			"Invalid element address") },
+/* D            */{SST(0x22, 0x00, SS_FATAL|EINVAL,
+			"Illegal function") }, /* Deprecated. Use 20 00, 24 00, or 26 00 instead */
+/* DTLPWRSOMCAE */{SST(0x24, 0x00, SS_FATAL|EINVAL,
+			"Invalid field in CDB") },
+/* DTLPWRSOMCAE */{SST(0x25, 0x00, SS_FATAL|ENXIO,
+			"Logical unit not supported") },
+/* DTLPWRSOMCAE */{SST(0x26, 0x00, SS_FATAL|EINVAL,
+			"Invalid field in parameter list") },
+/* DTLPWRSOMCAE */{SST(0x26, 0x01, SS_FATAL|EINVAL,
+			"Parameter not supported") },
+/* DTLPWRSOMCAE */{SST(0x26, 0x02, SS_FATAL|EINVAL,
+			"Parameter value invalid") },
+/* DTLPWRSOMCAE */{SST(0x26, 0x03, SS_FATAL|EINVAL,
+			"Threshold parameters not supported") },
+/* DTLPWRSOMCAE */{SST(0x26, 0x04, SS_FATAL|EINVAL,
+			"Invalid release of active persistent reservation") },
+/* DT  W  O     */{SST(0x27, 0x00, SS_FATAL|EACCES,
+			"Write protected") },
+/* DT  W  O     */{SST(0x27, 0x01, SS_FATAL|EACCES,
+			"Hardware write protected") },
+/* DT  W  O     */{SST(0x27, 0x02, SS_FATAL|EACCES,
+			"Logical unit software write protected") },
+/*  T           */{SST(0x27, 0x03, SS_FATAL|EACCES,
+			"Associated write protect") },
+/*  T           */{SST(0x27, 0x04, SS_FATAL|EACCES,
+			"Persistent write protect") },
+/*  T           */{SST(0x27, 0x05, SS_FATAL|EACCES,
+			"Permanent write protect") },
+/* DTLPWRSOMCAE */{SST(0x28, 0x00, SS_RDEF,
+			"Not ready to ready change, medium may have changed") },
+/* DTLPWRSOMCAE */{SST(0x28, 0x01, SS_FATAL|ENXIO,
+			"Import or export element accessed") },
+/*
+ * XXX JGibbs - All of these should use the same errno, but I don't think
+ * ENXIO is the correct choice.  Should we borrow from the networking
+ * errnos?  ECONNRESET anyone?
+ */
+/* DTLPWRSOMCAE */{SST(0x29, 0x00, SS_RDEF,
+			"Power on, reset, or bus device reset occurred") },
+/* DTLPWRSOMCAE */{SST(0x29, 0x01, SS_RDEF,
+			"Power on occurred") },
+/* DTLPWRSOMCAE */{SST(0x29, 0x02, SS_RDEF,
+			"SCSI bus reset occurred") },
+/* DTLPWRSOMCAE */{SST(0x29, 0x03, SS_RDEF,
+			"Bus device reset function occurred") },
+/* DTLPWRSOMCAE */{SST(0x29, 0x04, SS_RDEF,
+			"Device internal reset") },
+/* DTLPWRSOMCAE */{SST(0x29, 0x05, SS_RDEF,
+			"Transceiver mode changed to single-ended") },
+/* DTLPWRSOMCAE */{SST(0x29, 0x06, SS_RDEF,
+			"Transceiver mode changed to LVD") },
+/* DTL WRSOMCAE */{SST(0x2A, 0x00, SS_RDEF,
+			"Parameters changed") },
+/* DTL WRSOMCAE */{SST(0x2A, 0x01, SS_RDEF,
+			"Mode parameters changed") },
+/* DTL WRSOMCAE */{SST(0x2A, 0x02, SS_RDEF,
+			"Log parameters changed") },
+/* DTLPWRSOMCAE */{SST(0x2A, 0x03, SS_RDEF,
+			"Reservations preempted") },
+/* DTLPWRSO C   */{SST(0x2B, 0x00, SS_RDEF,
+			"Copy cannot execute since host cannot disconnect") },
+/* DTLPWRSOMCAE */{SST(0x2C, 0x00, SS_RDEF,
+			"Command sequence error") },
+/*       S      */{SST(0x2C, 0x01, SS_RDEF,
+			"Too many windows specified") },
+/*       S      */{SST(0x2C, 0x02, SS_RDEF,
+			"Invalid combination of windows specified") },
+/*      R       */{SST(0x2C, 0x03, SS_RDEF,
+			"Current program area is not empty") },
+/*      R       */{SST(0x2C, 0x04, SS_RDEF,
+			"Current program area is empty") },
+/*  T           */{SST(0x2D, 0x00, SS_RDEF,
+			"Overwrite error on update in place") },
+/* DTLPWRSOMCAE */{SST(0x2F, 0x00, SS_RDEF,
+			"Commands cleared by another initiator") },
+/* DT  WR OM    */{SST(0x30, 0x00, SS_RDEF,
+			"Incompatible medium installed") },
+/* DT  WR O     */{SST(0x30, 0x01, SS_RDEF,
+			"Cannot read medium - unknown format") },
+/* DT  WR O     */{SST(0x30, 0x02, SS_RDEF,
+			"Cannot read medium - incompatible format") },
+/* DT           */{SST(0x30, 0x03, SS_RDEF,
+			"Cleaning cartridge installed") },
+/* DT  WR O     */{SST(0x30, 0x04, SS_RDEF,
+			"Cannot write medium - unknown format") },
+/* DT  WR O     */{SST(0x30, 0x05, SS_RDEF,
+			"Cannot write medium - incompatible format") },
+/* DT  W  O     */{SST(0x30, 0x06, SS_RDEF,
+			"Cannot format medium - incompatible medium") },
+/* DTL WRSOM AE */{SST(0x30, 0x07, SS_RDEF,
+			"Cleaning failure") },
+/*      R       */{SST(0x30, 0x08, SS_RDEF,
+			"Cannot write - application code mismatch") },
+/*      R       */{SST(0x30, 0x09, SS_RDEF,
+			"Current session not fixated for append") },
+/* DT  WR O     */{SST(0x31, 0x00, SS_RDEF,
+			"Medium format corrupted") },
+/* D L  R O     */{SST(0x31, 0x01, SS_RDEF,
+			"Format command failed") },
+/* D   W  O     */{SST(0x32, 0x00, SS_RDEF,
+			"No defect spare location available") },
+/* D   W  O     */{SST(0x32, 0x01, SS_RDEF,
+			"Defect list update failure") },
+/*  T           */{SST(0x33, 0x00, SS_RDEF,
+			"Tape length error") },
+/* DTLPWRSOMCAE */{SST(0x34, 0x00, SS_RDEF,
+			"Enclosure failure") },
+/* DTLPWRSOMCAE */{SST(0x35, 0x00, SS_RDEF,
+			"Enclosure services failure") },
+/* DTLPWRSOMCAE */{SST(0x35, 0x01, SS_RDEF,
+			"Unsupported enclosure function") },
+/* DTLPWRSOMCAE */{SST(0x35, 0x02, SS_RDEF,
+			"Enclosure services unavailable") },
+/* DTLPWRSOMCAE */{SST(0x35, 0x03, SS_RDEF,
+			"Enclosure services transfer failure") },
+/* DTLPWRSOMCAE */{SST(0x35, 0x04, SS_RDEF,
+			"Enclosure services transfer refused") },
+/*   L          */{SST(0x36, 0x00, SS_RDEF,
+			"Ribbon, ink, or toner failure") },
+/* DTL WRSOMCAE */{SST(0x37, 0x00, SS_RDEF,
+			"Rounded parameter") },
+/* DTL WRSOMCAE */{SST(0x39, 0x00, SS_RDEF,
+			"Saving parameters not supported") },
+/* DTL WRSOM    */{SST(0x3A, 0x00, SS_NOP,
+			"Medium not present") },
+/* DT  WR OM    */{SST(0x3A, 0x01, SS_NOP,
+			"Medium not present - tray closed") },
+/* DT  WR OM    */{SST(0x3A, 0x02, SS_NOP,
+			"Medium not present - tray open") },
+/* DT  WR OM    */{SST(0x3A, 0x03, SS_NOP,
+			"Medium not present - Loadable") },
+/* DT  WR OM    */{SST(0x3A, 0x04, SS_NOP,
+			"Medium not present - medium auxiliary "
+			"memory accessible") },
+/* DT  WR OM    */{SST(0x3A, 0xFF, SS_NOP, NULL) },/* Range 0x05->0xFF */
+/*  TL          */{SST(0x3B, 0x00, SS_RDEF,
+			"Sequential positioning error") },
+/*  T           */{SST(0x3B, 0x01, SS_RDEF,
+			"Tape position error at beginning-of-medium") },
+/*  T           */{SST(0x3B, 0x02, SS_RDEF,
+			"Tape position error at end-of-medium") },
+/*   L          */{SST(0x3B, 0x03, SS_RDEF,
+			"Tape or electronic vertical forms unit not ready") },
+/*   L          */{SST(0x3B, 0x04, SS_RDEF,
+			"Slew failure") },
+/*   L          */{SST(0x3B, 0x05, SS_RDEF,
+			"Paper jam") },
+/*   L          */{SST(0x3B, 0x06, SS_RDEF,
+			"Failed to sense top-of-form") },
+/*   L          */{SST(0x3B, 0x07, SS_RDEF,
+			"Failed to sense bottom-of-form") },
+/*  T           */{SST(0x3B, 0x08, SS_RDEF,
+			"Reposition error") },
+/*       S      */{SST(0x3B, 0x09, SS_RDEF,
+			"Read past end of medium") },
+/*       S      */{SST(0x3B, 0x0A, SS_RDEF,
+			"Read past beginning of medium") },
+/*       S      */{SST(0x3B, 0x0B, SS_RDEF,
+			"Position past end of medium") },
+/*  T    S      */{SST(0x3B, 0x0C, SS_RDEF,
+			"Position past beginning of medium") },
+/* DT  WR OM    */{SST(0x3B, 0x0D, SS_FATAL|ENOSPC,
+			"Medium destination element full") },
+/* DT  WR OM    */{SST(0x3B, 0x0E, SS_RDEF,
+			"Medium source element empty") },
+/*      R       */{SST(0x3B, 0x0F, SS_RDEF,
+			"End of medium reached") },
+/* DT  WR OM    */{SST(0x3B, 0x11, SS_RDEF,
+			"Medium magazine not accessible") },
+/* DT  WR OM    */{SST(0x3B, 0x12, SS_RDEF,
+			"Medium magazine removed") },
+/* DT  WR OM    */{SST(0x3B, 0x13, SS_RDEF,
+			"Medium magazine inserted") },
+/* DT  WR OM    */{SST(0x3B, 0x14, SS_RDEF,
+			"Medium magazine locked") },
+/* DT  WR OM    */{SST(0x3B, 0x15, SS_RDEF,
+			"Medium magazine unlocked") },
+/* DTLPWRSOMCAE */{SST(0x3D, 0x00, SS_RDEF,
+			"Invalid bits in identify message") },
+/* DTLPWRSOMCAE */{SST(0x3E, 0x00, SS_RDEF,
+			"Logical unit has not self-configured yet") },
+/* DTLPWRSOMCAE */{SST(0x3E, 0x01, SS_RDEF,
+			"Logical unit failure") },
+/* DTLPWRSOMCAE */{SST(0x3E, 0x02, SS_RDEF,
+			"Timeout on logical unit") },
+/* DTLPWRSOMCAE */{SST(0x3F, 0x00, SS_RDEF,
+			"Target operating conditions have changed") },
+/* DTLPWRSOMCAE */{SST(0x3F, 0x01, SS_RDEF,
+			"Microcode has been changed") },
+/* DTLPWRSOMC   */{SST(0x3F, 0x02, SS_RDEF,
+			"Changed operating definition") },
+/* DTLPWRSOMCAE */{SST(0x3F, 0x03, SS_INQ_REFRESH|SSQ_DECREMENT_COUNT,
+			"Inquiry data has changed") },
+/* DT  WR OMCAE */{SST(0x3F, 0x04, SS_RDEF,
+			"Component device attached") },
+/* DT  WR OMCAE */{SST(0x3F, 0x05, SS_RDEF,
+			"Device identifier changed") },
+/* DT  WR OMCAE */{SST(0x3F, 0x06, SS_RDEF,
+			"Redundancy group created or modified") },
+/* DT  WR OMCAE */{SST(0x3F, 0x07, SS_RDEF,
+			"Redundancy group deleted") },
+/* DT  WR OMCAE */{SST(0x3F, 0x08, SS_RDEF,
+			"Spare created or modified") },
+/* DT  WR OMCAE */{SST(0x3F, 0x09, SS_RDEF,
+			"Spare deleted") },
+/* DT  WR OMCAE */{SST(0x3F, 0x0A, SS_RDEF,
+			"Volume set created or modified") },
+/* DT  WR OMCAE */{SST(0x3F, 0x0B, SS_RDEF,
+			"Volume set deleted") },
+/* DT  WR OMCAE */{SST(0x3F, 0x0C, SS_RDEF,
+			"Volume set deassigned") },
+/* DT  WR OMCAE */{SST(0x3F, 0x0D, SS_RDEF,
+			"Volume set reassigned") },
+/* DTLPWRSOMCAE */{SST(0x3F, 0x0E, SS_RDEF,
+			"Reported luns data has changed") },
+/* DTLPWRSOMCAE */{SST(0x3F, 0x0F, SS_RETRY|SSQ_DECREMENT_COUNT
+				 | SSQ_DELAY_RANDOM|EBUSY,
+			"Echo buffer overwritten") },
+/* DT  WR OM   B*/{SST(0x3F, 0x10, SS_RDEF, "Medium Loadable") },
+/* DT  WR OM   B*/{SST(0x3F, 0x11, SS_RDEF,
+			"Medium auxiliary memory accessible") },
+/* D            */{SST(0x40, 0x00, SS_RDEF,
+			"RAM failure") }, /* deprecated - use 40 NN instead */
+/* DTLPWRSOMCAE */{SST(0x40, 0x80, SS_RDEF,
+			"Diagnostic failure: ASCQ = Component ID") },
+/* DTLPWRSOMCAE */{SST(0x40, 0xFF, SS_RDEF|SSQ_RANGE,
+			NULL) },/* Range 0x80->0xFF */
+/* D            */{SST(0x41, 0x00, SS_RDEF,
+			"Data path failure") }, /* deprecated - use 40 NN instead */
+/* D            */{SST(0x42, 0x00, SS_RDEF,
+			"Power-on or self-test failure") }, /* deprecated - use 40 NN instead */
+/* DTLPWRSOMCAE */{SST(0x43, 0x00, SS_RDEF,
+			"Message error") },
+/* DTLPWRSOMCAE */{SST(0x44, 0x00, SS_RDEF,
+			"Internal target failure") },
+/* DTLPWRSOMCAE */{SST(0x45, 0x00, SS_RDEF,
+			"Select or reselect failure") },
+/* DTLPWRSOMC   */{SST(0x46, 0x00, SS_RDEF,
+			"Unsuccessful soft reset") },
+/* DTLPWRSOMCAE */{SST(0x47, 0x00, SS_RDEF|SSQ_FALLBACK,
+			"SCSI parity error") },
+/* DTLPWRSOMCAE */{SST(0x47, 0x01, SS_RDEF|SSQ_FALLBACK,
+			"Data Phase CRC error detected") },
+/* DTLPWRSOMCAE */{SST(0x47, 0x02, SS_RDEF|SSQ_FALLBACK,
+			"SCSI parity error detected during ST data phase") },
+/* DTLPWRSOMCAE */{SST(0x47, 0x03, SS_RDEF|SSQ_FALLBACK,
+			"Information Unit iuCRC error") },
+/* DTLPWRSOMCAE */{SST(0x47, 0x04, SS_RDEF|SSQ_FALLBACK,
+			"Asynchronous information protection error detected") },
+/* DTLPWRSOMCAE */{SST(0x47, 0x05, SS_RDEF|SSQ_FALLBACK,
+			"Protocol server CRC error") },
+/* DTLPWRSOMCAE */{SST(0x48, 0x00, SS_RDEF|SSQ_FALLBACK,
+			"Initiator detected error message received") },
+/* DTLPWRSOMCAE */{SST(0x49, 0x00, SS_RDEF,
+			"Invalid message error") },
+/* DTLPWRSOMCAE */{SST(0x4A, 0x00, SS_RDEF,
+			"Command phase error") },
+/* DTLPWRSOMCAE */{SST(0x4B, 0x00, SS_RDEF,
+			"Data phase error") },
+/* DTLPWRSOMCAE */{SST(0x4C, 0x00, SS_RDEF,
+			"Logical unit failed self-configuration") },
+/* DTLPWRSOMCAE */{SST(0x4D, 0x00, SS_RDEF,
+			"Tagged overlapped commands: ASCQ = Queue tag ID") },
+/* DTLPWRSOMCAE */{SST(0x4D, 0xFF, SS_RDEF|SSQ_RANGE,
+			NULL)}, /* Range 0x00->0xFF */
+/* DTLPWRSOMCAE */{SST(0x4E, 0x00, SS_RDEF,
+			"Overlapped commands attempted") },
+/*  T           */{SST(0x50, 0x00, SS_RDEF,
+			"Write append error") },
+/*  T           */{SST(0x50, 0x01, SS_RDEF,
+			"Write append position error") },
+/*  T           */{SST(0x50, 0x02, SS_RDEF,
+			"Position error related to timing") },
+/*  T     O     */{SST(0x51, 0x00, SS_RDEF,
+			"Erase failure") },
+/*  T           */{SST(0x52, 0x00, SS_RDEF,
+			"Cartridge fault") },
+/* DTL WRSOM    */{SST(0x53, 0x00, SS_RDEF,
+			"Media load or eject failed") },
+/*  T           */{SST(0x53, 0x01, SS_RDEF,
+			"Unload tape failure") },
+/* DT  WR OM    */{SST(0x53, 0x02, SS_RDEF,
+			"Medium removal prevented") },
+/*    P         */{SST(0x54, 0x00, SS_RDEF,
+			"SCSI to host system interface failure") },
+/*    P         */{SST(0x55, 0x00, SS_RDEF,
+			"System resource failure") },
+/* D      O     */{SST(0x55, 0x01, SS_FATAL|ENOSPC,
+			"System buffer full") },
+/*      R       */{SST(0x57, 0x00, SS_RDEF,
+			"Unable to recover table-of-contents") },
+/*        O     */{SST(0x58, 0x00, SS_RDEF,
+			"Generation does not exist") },
+/*        O     */{SST(0x59, 0x00, SS_RDEF,
+			"Updated block read") },
+/* DTLPWRSOM    */{SST(0x5A, 0x00, SS_RDEF,
+			"Operator request or state change input") },
+/* DT  WR OM    */{SST(0x5A, 0x01, SS_RDEF,
+			"Operator medium removal request") },
+/* DT  W  O     */{SST(0x5A, 0x02, SS_RDEF,
+			"Operator selected write protect") },
+/* DT  W  O     */{SST(0x5A, 0x03, SS_RDEF,
+			"Operator selected write permit") },
+/* DTLPWRSOM    */{SST(0x5B, 0x00, SS_RDEF,
+			"Log exception") },
+/* DTLPWRSOM    */{SST(0x5B, 0x01, SS_RDEF,
+			"Threshold condition met") },
+/* DTLPWRSOM    */{SST(0x5B, 0x02, SS_RDEF,
+			"Log counter at maximum") },
+/* DTLPWRSOM    */{SST(0x5B, 0x03, SS_RDEF,
+			"Log list codes exhausted") },
+/* D      O     */{SST(0x5C, 0x00, SS_RDEF,
+			"RPL status change") },
+/* D      O     */{SST(0x5C, 0x01, SS_NOP|SSQ_PRINT_SENSE,
+			"Spindles synchronized") },
+/* D      O     */{SST(0x5C, 0x02, SS_RDEF,
+			"Spindles not synchronized") },
+/* DTLPWRSOMCAE */{SST(0x5D, 0x00, SS_RDEF,
+			"Failure prediction threshold exceeded") },
+/* DTLPWRSOMCAE */{SST(0x5D, 0xFF, SS_RDEF,
+			"Failure prediction threshold exceeded (false)") },
+/* DTLPWRSO CA  */{SST(0x5E, 0x00, SS_RDEF,
+			"Low power condition on") },
+/* DTLPWRSO CA  */{SST(0x5E, 0x01, SS_RDEF,
+			"Idle condition activated by timer") },
+/* DTLPWRSO CA  */{SST(0x5E, 0x02, SS_RDEF,
+			"Standby condition activated by timer") },
+/* DTLPWRSO CA  */{SST(0x5E, 0x03, SS_RDEF,
+			"Idle condition activated by command") },
+/* DTLPWRSO CA  */{SST(0x5E, 0x04, SS_RDEF,
+			"Standby condition activated by command") },
+/*       S      */{SST(0x60, 0x00, SS_RDEF,
+			"Lamp failure") },
+/*       S      */{SST(0x61, 0x00, SS_RDEF,
+			"Video acquisition error") },
+/*       S      */{SST(0x61, 0x01, SS_RDEF,
+			"Unable to acquire video") },
+/*       S      */{SST(0x61, 0x02, SS_RDEF,
+			"Out of focus") },
+/*       S      */{SST(0x62, 0x00, SS_RDEF,
+			"Scan head positioning error") },
+/*      R       */{SST(0x63, 0x00, SS_RDEF,
+			"End of user area encountered on this track") },
+/*      R       */{SST(0x63, 0x01, SS_FATAL|ENOSPC,
+			"Packet does not fit in available space") },
+/*      R       */{SST(0x64, 0x00, SS_RDEF,
+			"Illegal mode for this track") },
+/*      R       */{SST(0x64, 0x01, SS_RDEF,
+			"Invalid packet size") },
+/* DTLPWRSOMCAE */{SST(0x65, 0x00, SS_RDEF,
+			"Voltage fault") },
+/*       S      */{SST(0x66, 0x00, SS_RDEF,
+			"Automatic document feeder cover up") },
+/*       S      */{SST(0x66, 0x01, SS_RDEF,
+			"Automatic document feeder lift up") },
+/*       S      */{SST(0x66, 0x02, SS_RDEF,
+			"Document jam in automatic document feeder") },
+/*       S      */{SST(0x66, 0x03, SS_RDEF,
+			"Document miss feed in automatic document feeder") },
+/*           A  */{SST(0x67, 0x00, SS_RDEF,
+			"Configuration failure") },
+/*           A  */{SST(0x67, 0x01, SS_RDEF,
+			"Configuration of incapable logical units failed") },
+/*           A  */{SST(0x67, 0x02, SS_RDEF,
+			"Add logical unit failed") },
+/*           A  */{SST(0x67, 0x03, SS_RDEF,
+			"Modification of logical unit failed") },
+/*           A  */{SST(0x67, 0x04, SS_RDEF,
+			"Exchange of logical unit failed") },
+/*           A  */{SST(0x67, 0x05, SS_RDEF,
+			"Remove of logical unit failed") },
+/*           A  */{SST(0x67, 0x06, SS_RDEF,
+			"Attachment of logical unit failed") },
+/*           A  */{SST(0x67, 0x07, SS_RDEF,
+			"Creation of logical unit failed") },
+/*           A  */{SST(0x68, 0x00, SS_RDEF,
+			"Logical unit not configured") },
+/*           A  */{SST(0x69, 0x00, SS_RDEF,
+			"Data loss on logical unit") },
+/*           A  */{SST(0x69, 0x01, SS_RDEF,
+			"Multiple logical unit failures") },
+/*           A  */{SST(0x69, 0x02, SS_RDEF,
+			"Parity/data mismatch") },
+/*           A  */{SST(0x6A, 0x00, SS_RDEF,
+			"Informational, refer to log") },
+/*           A  */{SST(0x6B, 0x00, SS_RDEF,
+			"State change has occurred") },
+/*           A  */{SST(0x6B, 0x01, SS_RDEF,
+			"Redundancy level got better") },
+/*           A  */{SST(0x6B, 0x02, SS_RDEF,
+			"Redundancy level got worse") },
+/*           A  */{SST(0x6C, 0x00, SS_RDEF,
+			"Rebuild failure occurred") },
+/*           A  */{SST(0x6D, 0x00, SS_RDEF,
+			"Recalculate failure occurred") },
+/*           A  */{SST(0x6E, 0x00, SS_RDEF,
+			"Command to logical unit failed") },
+/*  T           */{SST(0x70, 0x00, SS_RDEF,
+			"Decompression exception short: ASCQ = Algorithm ID") },
+/*  T           */{SST(0x70, 0xFF, SS_RDEF|SSQ_RANGE,
+			NULL) }, /* Range 0x00 -> 0xFF */
+/*  T           */{SST(0x71, 0x00, SS_RDEF,
+			"Decompression exception long: ASCQ = Algorithm ID") },
+/*  T           */{SST(0x71, 0xFF, SS_RDEF|SSQ_RANGE,
+			NULL) }, /* Range 0x00 -> 0xFF */	
+/*      R       */{SST(0x72, 0x00, SS_RDEF,
+			"Session fixation error") },
+/*      R       */{SST(0x72, 0x01, SS_RDEF,
+			"Session fixation error writing lead-in") },
+/*      R       */{SST(0x72, 0x02, SS_RDEF,
+			"Session fixation error writing lead-out") },
+/*      R       */{SST(0x72, 0x03, SS_RDEF,
+			"Session fixation error - incomplete track in session") },
+/*      R       */{SST(0x72, 0x04, SS_RDEF,
+			"Empty or partially written reserved track") },
+/*      R       */{SST(0x73, 0x00, SS_RDEF,
+			"CD control error") },
+/*      R       */{SST(0x73, 0x01, SS_RDEF,
+			"Power calibration area almost full") },
+/*      R       */{SST(0x73, 0x02, SS_FATAL|ENOSPC,
+			"Power calibration area is full") },
+/*      R       */{SST(0x73, 0x03, SS_RDEF,
+			"Power calibration area error") },
+/*      R       */{SST(0x73, 0x04, SS_RDEF,
+			"Program memory area update failure") },
+/*      R       */{SST(0x73, 0x05, SS_RDEF,
+			"Program memory area is full") }
+};
+
+static const int asc_table_size = sizeof(asc_table)/sizeof(asc_table[0]);
+
+struct asc_key
+{
+	int asc;
+	int ascq;
+};
+
+static int
+ascentrycomp(const void *key, const void *member)
+{
+	int asc;
+	int ascq;
+	const struct asc_table_entry *table_entry;
+
+	asc = ((const struct asc_key *)key)->asc;
+	ascq = ((const struct asc_key *)key)->ascq;
+	table_entry = (const struct asc_table_entry *)member;
+
+	if (asc >= table_entry->asc) {
+
+		if (asc > table_entry->asc)
+			return (1);
+
+		if (ascq <= table_entry->ascq) {
+			/* Check for ranges */
+			if (ascq == table_entry->ascq
+		 	 || ((table_entry->action & SSQ_RANGE) != 0
+		  	   && ascq >= (table_entry - 1)->ascq))
+				return (0);
+			return (-1);
+		}
+		return (1);
+	}
+	return (-1);
+}
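+
+/*
+ * Illustrative note (not part of the lookup code): how SSQ_RANGE entries
+ * interact with ascentrycomp().  Take a hypothetical sense of asc 0x40,
+ * ascq 0x85.  The table holds
+ *
+ *	{ SST(0x40, 0x80, SS_RDEF, "Diagnostic failure: ASCQ = Component ID") }
+ *	{ SST(0x40, 0xFF, SS_RDEF|SSQ_RANGE, NULL) }
+ *
+ * When the binary search probes the 0x40/0xFF entry the ASCs match,
+ * 0x85 <= 0xFF, and because SSQ_RANGE is set the comparator also accepts
+ * any ASCQ at or above the previous entry's 0x80, so the SS_RDEF action
+ * covers the whole 0x80-0xFF range.
+ */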
+
+static int
+senseentrycomp(const void *key, const void *member)
+{
+	int sense_key;
+	const struct sense_key_table_entry *table_entry;
+
+	sense_key = *((const int *)key);
+	table_entry = (const struct sense_key_table_entry *)member;
+
+	if (sense_key >= table_entry->sense_key) {
+		if (sense_key == table_entry->sense_key)
+			return (0);
+		return (1);
+	}
+	return (-1);
+}
+
+static void
+fetchtableentries(int sense_key, int asc, int ascq,
+		  struct scsi_inquiry_data *inq_data,
+		  const struct sense_key_table_entry **sense_entry,
+		  const struct asc_table_entry **asc_entry)
+{
+	void *match;
+	const struct asc_table_entry *asc_tables[2];
+	const struct sense_key_table_entry *sense_tables[2];
+	struct asc_key asc_ascq;
+	size_t asc_tables_size[2];
+	size_t sense_tables_size[2];
+	int num_asc_tables;
+	int num_sense_tables;
+	int i;
+
+	/* Default to failure */
+	*sense_entry = NULL;
+	*asc_entry = NULL;
+	match = NULL;
+	if (inq_data != NULL)
+		match = cam_quirkmatch((void *)inq_data,
+				       (void *)sense_quirk_table,
+				       sense_quirk_table_size,
+				       sizeof(*sense_quirk_table),
+				       aic_inquiry_match);
+
+	if (match != NULL) {
+		struct scsi_sense_quirk_entry *quirk;
+
+		quirk = (struct scsi_sense_quirk_entry *)match;
+		asc_tables[0] = quirk->asc_info;
+		asc_tables_size[0] = quirk->num_ascs;
+		asc_tables[1] = asc_table;
+		asc_tables_size[1] = asc_table_size;
+		num_asc_tables = 2;
+		sense_tables[0] = quirk->sense_key_info;
+		sense_tables_size[0] = quirk->num_sense_keys;
+		sense_tables[1] = sense_key_table;
+		sense_tables_size[1] = sense_key_table_size;
+		num_sense_tables = 2;
+	} else {
+		asc_tables[0] = asc_table;
+		asc_tables_size[0] = asc_table_size;
+		num_asc_tables = 1;
+		sense_tables[0] = sense_key_table;
+		sense_tables_size[0] = sense_key_table_size;
+		num_sense_tables = 1;
+	}
+
+	asc_ascq.asc = asc;
+	asc_ascq.ascq = ascq;
+	for (i = 0; i < num_asc_tables; i++) {
+		void *found_entry;
+
+		found_entry = scsibsearch(&asc_ascq, asc_tables[i],
+					  asc_tables_size[i],
+					  sizeof(**asc_tables),
+					  ascentrycomp);
+
+		if (found_entry) {
+			*asc_entry = (struct asc_table_entry *)found_entry;
+			break;
+		}
+	}
+
+	for (i = 0; i < num_sense_tables; i++) {
+		void *found_entry;
+
+		found_entry = scsibsearch(&sense_key, sense_tables[i],
+					  sense_tables_size[i],
+					  sizeof(**sense_tables),
+					  senseentrycomp);
+
+		if (found_entry) {
+			*sense_entry =
+			    (struct sense_key_table_entry *)found_entry;
+			break;
+		}
+	}
+}
+
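+/*
+ * Binary search helper for the sense tables.  The lower bound starts one
+ * below index zero (relying on unsigned wrap-around), and an entry is
+ * only returned when the comparator confirms an exact match at the final
+ * upper bound.
+ */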
+static void *
+scsibsearch(const void *key, const void *base, size_t nmemb, size_t size,
+		 int (*compar)(const void *, const void *))
+{
+	const void *entry;
+	u_int l;
+	u_int u;
+	u_int m;
+
+	l = -1;
+	u = nmemb;
+	while (l + 1 != u) {
+		m = (l + u) / 2;
+		entry = base + m * size;
+		if (compar(key, entry) > 0)
+			l = m;
+		else
+			u = m;
+	}
+
+	entry = base + u * size;
+	if (u == nmemb
+	 || compar(key, entry) != 0)
+		return (NULL);
+
+	return ((void *)entry);
+}
+
+/*
+ * Compare string with pattern, returning 0 on match.
+ * Short pattern matches trailing blanks in name,
+ * wildcard '*' in pattern matches rest of name,
+ * wildcard '?' matches a single non-space character.
+ */
+static int
+cam_strmatch(const uint8_t *str, const uint8_t *pattern, int str_len)
+{
+
+	while (*pattern != '\0' && str_len > 0) {
+
+		if (*pattern == '*') {
+			return (0);
+		}
+		if ((*pattern != *str)
+		 && (*pattern != '?' || *str == ' ')) {
+			return (1);
+		}
+		pattern++;
+		str++;
+		str_len--;
+	}
+	while (str_len > 0 && *str++ == ' ')
+		str_len--;
+
+	return (str_len);
+}
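+
+/*
+ * Illustrative matches for cam_strmatch() (the strings are hypothetical):
+ *
+ *	cam_strmatch("SEAGATE ", "SEAGATE", 8)       == 0  trailing blanks ok
+ *	cam_strmatch("ST39102LW       ", "ST3*", 16) == 0  '*' matches rest
+ *	cam_strmatch("IBM     ", "I?M", 8)           == 0  '?' matches 'B'
+ *	cam_strmatch("QUANTUM ", "SEAGATE", 8)       != 0  mismatch
+ *
+ * The INQUIRY fields handed in by aic_inquiry_match() are fixed width and
+ * blank padded, which is why a short pattern still matches.
+ */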
+
+static caddr_t
+cam_quirkmatch(caddr_t target, caddr_t quirk_table, int num_entries,
+	       int entry_size, cam_quirkmatch_t *comp_func)
+{
+	for (; num_entries > 0; num_entries--, quirk_table += entry_size) {
+		if ((*comp_func)(target, quirk_table) == 0)
+			return (quirk_table);
+	}
+	return (NULL);
+}
+
+void
+aic_sense_desc(int sense_key, int asc, int ascq,
+	       struct scsi_inquiry_data *inq_data,
+	       const char **sense_key_desc, const char **asc_desc)
+{
+	const struct asc_table_entry *asc_entry;
+	const struct sense_key_table_entry *sense_entry;
+
+	fetchtableentries(sense_key, asc, ascq,
+			  inq_data,
+			  &sense_entry,
+			  &asc_entry);
+
+	*sense_key_desc = sense_entry->desc;
+
+	if (asc_entry != NULL)
+		*asc_desc = asc_entry->desc;
+	else if (asc >= 0x80 && asc <= 0xff)
+		*asc_desc = "Vendor Specific ASC";
+	else if (ascq >= 0x80 && ascq <= 0xff)
+		*asc_desc = "Vendor Specific ASCQ";
+	else
+		*asc_desc = "Reserved ASC/ASCQ pair";
+}
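+
+/*
+ * Example use (a sketch, not called from this file): turning a CHECK
+ * CONDITION into log text.  The sense values are hypothetical.
+ *
+ *	const char *key_desc, *asc_desc;
+ *
+ *	aic_sense_desc(SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00, inq_data,
+ *		       &key_desc, &asc_desc);
+ *
+ * Afterwards key_desc points at the ILLEGAL REQUEST sense key text and
+ * asc_desc points at "Invalid field in CDB" from asc_table above.
+ */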
+
+/*
+ * Given sense and device type information, return the appropriate action.
+ * If we do not understand the specific error as identified by the ASC/ASCQ
+ * pair, fall back on the more generic actions derived from the sense key.
+ */
+aic_sense_action
+aic_sense_error_action(struct scsi_sense_data *sense_data,
+		       struct scsi_inquiry_data *inq_data, uint32_t sense_flags)
+{
+	const struct asc_table_entry *asc_entry;
+	const struct sense_key_table_entry *sense_entry;
+	int error_code, sense_key, asc, ascq;
+	aic_sense_action action;
+
+	scsi_extract_sense(sense_data, &error_code, &sense_key, &asc, &ascq);
+
+	if (error_code == SSD_DEFERRED_ERROR) {
+		/*
+		 * XXX dufault@FreeBSD.org
+		 * This error doesn't relate to the command associated
+		 * with this request sense.  A deferred error is an error
+		 * for a command that has already returned GOOD status
+		 * (see SCSI2 8.2.14.2).
+		 *
+		 * By my reading of that section, it looks like the current
+		 * command has been cancelled, we should now clean things up
+		 * (hopefully recovering any lost data) and then retry the
+		 * current command.  There are two easy choices, both wrong:
+		 *
+		 * 1. Drop through (like we had been doing), thus treating
+		 *    this as if the error were for the current command and
+		 *    return and stop the current command.
+		 * 
+		 * 2. Issue a retry (like I made it do) thus hopefully
+		 *    recovering the current transfer, and ignoring the
+		 *    fact that we've dropped a command.
+		 *
+		 * These should probably be handled in a device specific
+		 * sense handler or punted back up to a user mode daemon
+		 */
+		action = SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE;
+	} else {
+		fetchtableentries(sense_key, asc, ascq,
+				  inq_data,
+				  &sense_entry,
+				  &asc_entry);
+
+		/*
+		 * Override the 'No additional Sense' entry (0,0)
+		 * with the error action of the sense key.
+		 */
+		if (asc_entry != NULL
+		 && (asc != 0 || ascq != 0))
+			action = asc_entry->action;
+		else
+			action = sense_entry->action;
+
+		if (sense_key == SSD_KEY_RECOVERED_ERROR) {
+			/*
+			 * The action succeeded but the device wants
+			 * the user to know that some recovery action
+			 * was required.
+			 */
+			action &= ~(SS_MASK|SSQ_MASK|SS_ERRMASK);
+			action |= SS_NOP|SSQ_PRINT_SENSE;
+		} else if (sense_key == SSD_KEY_ILLEGAL_REQUEST) {
+			if ((sense_flags & SF_QUIET_IR) != 0)
+				action &= ~SSQ_PRINT_SENSE;
+		} else if (sense_key == SSD_KEY_UNIT_ATTENTION) {
+			if ((sense_flags & SF_RETRY_UA) != 0
+			 && (action & SS_MASK) == SS_FAIL) {
+				action &= ~(SS_MASK|SSQ_MASK);
+				action |= SS_RETRY|SSQ_DECREMENT_COUNT|
+					  SSQ_PRINT_SENSE;
+			}
+		}
+	}
+
+	if ((sense_flags & SF_PRINT_ALWAYS) != 0)
+		action |= SSQ_PRINT_SENSE;
+	else if ((sense_flags & SF_NO_PRINT) != 0)
+		action &= ~SSQ_PRINT_SENSE;
+
+	return (action);
+}
+
+/*
+ * Try to make as good a match as possible with
+ * the available sub-drivers.
+ */
+int
+aic_inquiry_match(caddr_t inqbuffer, caddr_t table_entry)
+{
+	struct scsi_inquiry_pattern *entry;
+	struct scsi_inquiry_data *inq;
+ 
+	entry = (struct scsi_inquiry_pattern *)table_entry;
+	inq = (struct scsi_inquiry_data *)inqbuffer;
+
+	if (((SID_TYPE(inq) == entry->type)
+	  || (entry->type == T_ANY))
+	 && (SID_IS_REMOVABLE(inq) ? entry->media_type & SIP_MEDIA_REMOVABLE
+				   : entry->media_type & SIP_MEDIA_FIXED)
+	 && (cam_strmatch(inq->vendor, entry->vendor, sizeof(inq->vendor)) == 0)
+	 && (cam_strmatch(inq->product, entry->product,
+			  sizeof(inq->product)) == 0)
+	 && (cam_strmatch(inq->revision, entry->revision,
+			  sizeof(inq->revision)) == 0)) {
+		return (0);
+	}
+	return (-1);
+}
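+
+/*
+ * Sketch of a sense_quirk_table entry that aic_inquiry_match() could be
+ * asked to match; the vendor, product and ASC override are made up.
+ *
+ *	static struct asc_table_entry acme_ascs[] = {
+ *		{ SST(0x04, 0x00, SS_START|SSQ_DECREMENT_COUNT|ENXIO,
+ *			"Logical unit not ready, needs start unit") }
+ *	};
+ *	static struct scsi_sense_quirk_entry acme_quirk = {
+ *		{ T_DIRECT, SIP_MEDIA_FIXED, "ACME", "ROADRUNNER*", "*" },
+ *		0, 1, NULL, acme_ascs
+ *	};
+ *
+ * A '*' pattern matches the rest of the field, so every revision of the
+ * hypothetical ROADRUNNER disk would pick up the override.
+ */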
+
+/*
+ * Table of syncrates that don't follow the "divisible by 4"
+ * rule. This table will be expanded in future SCSI specs.
+ */
+static struct {
+	u_int period_factor;
+	u_int period;	/* in 100ths of ns */
+} scsi_syncrates[] = {
+	{ 0x08, 625 },	/* FAST-160 */
+	{ 0x09, 1250 },	/* FAST-80 */
+	{ 0x0a, 2500 },	/* FAST-40 40MHz */
+	{ 0x0b, 3030 },	/* FAST-40 33MHz */
+	{ 0x0c, 5000 }	/* FAST-20 */
+};
+
+/*
+ * Return the frequency in kHz corresponding to the given
+ * sync period factor.
+ */
+u_int
+aic_calc_syncsrate(u_int period_factor)
+{
+	int i;
+	int num_syncrates;
+
+	num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
+	/* See if the period is in the "exception" table */
+	for (i = 0; i < num_syncrates; i++) {
+
+		if (period_factor == scsi_syncrates[i].period_factor) {
+			/* Period in kHz */
+			return (100000000 / scsi_syncrates[i].period);
+		}
+	}
+
+	/*
+	 * Wasn't in the table, so use the standard
+	 * 4 times conversion.
+	 */
+	return (10000000 / (period_factor * 4 * 10));
+}
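+
+/*
+ * Worked examples (values follow directly from the table and formula):
+ *
+ *	factor 0x0a: table hit, period 2500 (25.0ns) -> 40000 kHz (FAST-40)
+ *	factor 0x09: table hit, period 1250 (12.5ns) -> 80000 kHz (FAST-80)
+ *	factor 0x19: no table hit -> 10000000 / (0x19 * 40) = 10000 kHz
+ *	             (25 * 4ns = 100ns period, i.e. FAST-10)
+ */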
+
+/*
+ * Return speed in KB/s.
+ */
+u_int
+aic_calc_speed(u_int width, u_int period, u_int offset, u_int min_rate)
+{
+	u_int freq;
+
+	if (offset != 0 && period < min_rate)
+		freq  = aic_calc_syncsrate(period);
+	else
+		/* Roughly 3.3MB/s for async */
+		freq  = 3300;
+	freq <<= width;
+	return (freq);
+}
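+
+/*
+ * Worked example: a wide (16-bit) target that negotiated period factor
+ * 0x08 with a non-zero offset (and a period below the adapter's min_rate)
+ * yields
+ *
+ *	aic_calc_syncsrate(0x08) = 160000 kHz
+ *	160000 << 1              = 320000 KB/s  (Ultra320)
+ *
+ * while a narrow asynchronous target reports 3300 KB/s.
+ */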
+
+uint32_t
+aic_error_action(struct scsi_cmnd *cmd, struct scsi_inquiry_data *inq_data,
+		 cam_status status, u_int scsi_status)
+{
+	aic_sense_action  err_action;
+	int		  sense;
+
+	sense  = (cmd->result >> 24) == DRIVER_SENSE;
+
+	switch (status) {
+	case CAM_REQ_CMP:
+		err_action = SS_NOP;
+		break;
+	case CAM_AUTOSENSE_FAIL:
+	case CAM_SCSI_STATUS_ERROR:
+
+		switch (scsi_status) {
+		case SCSI_STATUS_OK:
+		case SCSI_STATUS_COND_MET:
+		case SCSI_STATUS_INTERMED:
+		case SCSI_STATUS_INTERMED_COND_MET:
+			err_action = SS_NOP;
+			break;
+		case SCSI_STATUS_CMD_TERMINATED:
+		case SCSI_STATUS_CHECK_COND:
+			if (sense != 0) {
+				struct scsi_sense_data *sense;
+
+				sense = (struct scsi_sense_data *)
+				    &cmd->sense_buffer;
+				err_action =
+				    aic_sense_error_action(sense, inq_data, 0);
+
+			} else {
+				err_action = SS_RETRY|SSQ_FALLBACK
+					   | SSQ_DECREMENT_COUNT|EIO;
+			}
+			break;
+		case SCSI_STATUS_QUEUE_FULL:
+		case SCSI_STATUS_BUSY:
+			err_action = SS_RETRY|SSQ_DELAY|SSQ_MANY
+				   | SSQ_DECREMENT_COUNT|EBUSY;
+			break;
+		case SCSI_STATUS_RESERV_CONFLICT:
+		default:
+			err_action = SS_FAIL|EBUSY;
+			break;
+		}
+		break;
+	case CAM_CMD_TIMEOUT:
+	case CAM_REQ_CMP_ERR:
+	case CAM_UNEXP_BUSFREE:
+	case CAM_UNCOR_PARITY:
+	case CAM_DATA_RUN_ERR:
+		err_action = SS_RETRY|SSQ_FALLBACK|EIO;
+		break;
+	case CAM_UA_ABORT:
+	case CAM_UA_TERMIO:
+	case CAM_MSG_REJECT_REC:
+	case CAM_SEL_TIMEOUT:
+		err_action = SS_FAIL|EIO;
+		break;
+	case CAM_REQ_INVALID:
+	case CAM_PATH_INVALID:
+	case CAM_DEV_NOT_THERE:
+	case CAM_NO_HBA:
+	case CAM_PROVIDE_FAIL:
+	case CAM_REQ_TOO_BIG:		
+	case CAM_RESRC_UNAVAIL:
+	case CAM_BUSY:
+	default:
+		/* panic??  These should never occur in our application. */
+		err_action = SS_FAIL|EIO;
+		break;
+	case CAM_SCSI_BUS_RESET:
+	case CAM_BDR_SENT:		
+	case CAM_REQUEUE_REQ:
+		/* Unconditional requeue */
+		err_action = SS_RETRY;
+		break;
+	}
+
+	return (err_action);
+}
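+
+/*
+ * A minimal sketch of how a completion path might act on the value
+ * returned above; the helper names are hypothetical.
+ *
+ *	uint32_t action;
+ *
+ *	action = aic_error_action(cmd, inq_data, status, scsi_status);
+ *	switch (action & SS_MASK) {
+ *	case SS_NOP:
+ *		break;
+ *	case SS_RETRY:
+ *		if ((action & SSQ_DELAY) != 0)
+ *			delay_before_requeue(cmd);
+ *		requeue_command(cmd);
+ *		break;
+ *	case SS_FAIL:
+ *	default:
+ *		fail_command(cmd, action & SS_ERRMASK);
+ *		break;
+ *	}
+ *
+ * The low byte (SS_ERRMASK) carries the Linux errno selected by the table.
+ */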
+
+char *
+aic_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
+		       aic_option_callback_t *callback, u_long callback_arg)
+{
+	char	*tok_end;
+	char	*tok_end2;
+	int      i;
+	int      instance;
+	int	 targ;
+	int	 done;
+	char	 tok_list[] = {'.', ',', '{', '}', '\0'};
+
+	/* All options use a ':' name/arg separator */
+	if (*opt_arg != ':')
+		return (opt_arg);
+	opt_arg++;
+	instance = -1;
+	targ = -1;
+	done = FALSE;
+	/*
+	 * Restore separator that may be in
+	 * the middle of our option argument.
+	 */
+	tok_end = strchr(opt_arg, '\0');
+	if (tok_end < end)
+		*tok_end = ',';
+	while (!done) {
+		switch (*opt_arg) {
+		case '{':
+			if (instance == -1) {
+				instance = 0;
+			} else {
+				if (depth > 1) {
+					if (targ == -1)
+						targ = 0;
+				} else {
+					printf("Malformed Option %s\n",
+					       opt_name);
+					done = TRUE;
+				}
+			}
+			opt_arg++;
+			break;
+		case '}':
+			if (targ != -1)
+				targ = -1;
+			else if (instance != -1)
+				instance = -1;
+			opt_arg++;
+			break;
+		case ',':
+		case '.':
+			if (instance == -1)
+				done = TRUE;
+			else if (targ >= 0)
+				targ++;
+			else if (instance >= 0)
+				instance++;
+			opt_arg++;
+			break;
+		case '\0':
+			done = TRUE;
+			break;
+		default:
+			tok_end = end;
+			for (i = 0; tok_list[i]; i++) {
+				tok_end2 = strchr(opt_arg, tok_list[i]);
+				if ((tok_end2) && (tok_end2 < tok_end))
+					tok_end = tok_end2;
+			}
+			callback(callback_arg, instance, targ,
+				 simple_strtol(opt_arg, NULL, 0));
+			opt_arg = tok_end;
+			break;
+		}
+	}
+	return (opt_arg);
+}
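+
+/*
+ * Example of the brace syntax this routine walks, using a hypothetical
+ * command line option
+ *
+ *	tag_info:{{16,32,32},{64}}
+ *
+ * The callback fires once per value: instance 0, targets 0-2 with 16, 32
+ * and 32, then instance 1, target 0 with 64.  A value given without any
+ * braces reaches the callback with instance and target of -1.
+ */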
diff --git a/drivers/scsi/aic7xxx/aiclib.h b/drivers/scsi/aic7xxx/aiclib.h
new file mode 100644
index 0000000..bfe6f95
--- /dev/null
+++ b/drivers/scsi/aic7xxx/aiclib.h
@@ -0,0 +1,1085 @@
+/*
+ * Largely written by Julian Elischer (julian@tfs.com)
+ * for TRW Financial Systems.
+ *
+ * TRW Financial Systems, in accordance with their agreement with Carnegie
+ * Mellon University, makes this software available to CMU to distribute
+ * or use in any manner that they see fit as long as this message is kept with
+ * the software. For this reason TFS also grants any other persons or
+ * organisations permission to use or modify this software.
+ *
+ * TFS supplies this software to be publicly redistributed
+ * on the understanding that TFS is not responsible for the correct
+ * functioning of this software in any circumstances.
+ *
+ * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
+ *
+ * $FreeBSD: src/sys/cam/scsi/scsi_all.h,v 1.21 2002/10/08 17:12:44 ken Exp $
+ *
+ * Copyright (c) 2003 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id$
+ */
+
+#ifndef	_AICLIB_H
+#define _AICLIB_H
+
+/*
+ * Linux Interrupt Support.
+ */
+#ifndef IRQ_RETVAL
+typedef void irqreturn_t;
+#define	IRQ_RETVAL(x)
+#endif
+
+/*
+ * SCSI command format
+ */
+
+/*
+ * Define some bits that are in ALL (or a lot of) SCSI commands
+ */
+#define SCSI_CTL_LINK		0x01
+#define SCSI_CTL_FLAG		0x02
+#define SCSI_CTL_VENDOR		0xC0
+#define	SCSI_CMD_LUN		0xA0	/* these two should not be needed */
+#define	SCSI_CMD_LUN_SHIFT	5	/* LUN in the cmd is no longer SCSI */
+
+#define SCSI_MAX_CDBLEN		16	/* 
+					 * 16 byte commands are in the 
+					 * SCSI-3 spec 
+					 */
+/* 6byte CDBs special case 0 length to be 256 */
+#define SCSI_CDB6_LEN(len)	((len) == 0 ? 256 : len)
+
+/*
+ * This type defines actions to be taken when a particular sense code is
+ * received.  Right now, these flags are only defined to take up 16 bits,
+ * but can be expanded in the future if necessary.
+ */
+typedef enum {
+	SS_NOP		= 0x000000, /* Do nothing */
+	SS_RETRY	= 0x010000, /* Retry the command */
+	SS_FAIL		= 0x020000, /* Bail out */
+	SS_START	= 0x030000, /* Send a Start Unit command to the device,
+				     * then retry the original command.
+				     */
+	SS_TUR		= 0x040000, /* Send a Test Unit Ready command to the
+				     * device, then retry the original command.
+				     */
+	SS_REQSENSE	= 0x050000, /* Send a RequestSense command to the
+				     * device, then retry the original command.
+				     */
+	SS_INQ_REFRESH	= 0x060000,
+	SS_MASK		= 0xff0000
+} aic_sense_action;
+
+typedef enum {
+	SSQ_NONE		= 0x0000,
+	SSQ_DECREMENT_COUNT	= 0x0100,  /* Decrement the retry count */
+	SSQ_MANY		= 0x0200,  /* send lots of recovery commands */
+	SSQ_RANGE		= 0x0400,  /*
+					    * This table entry represents the
+					    * end of a range of ASCQs that
+					    * have identical error actions
+					    * and text.
+					    */
+	SSQ_PRINT_SENSE		= 0x0800,
+	SSQ_DELAY		= 0x1000,  /* Delay before retry. */
+	SSQ_DELAY_RANDOM	= 0x2000,  /* Randomized delay before retry. */
+	SSQ_FALLBACK		= 0x4000,  /* Do a speed fallback to recover */
+	SSQ_MASK		= 0xff00
+} aic_sense_action_qualifier;
+
+/* Mask for error status values */
+#define SS_ERRMASK	0xff
+
+/* The default retryable error action */
+#define SS_RDEF		SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE|EIO
+
+/* The retryable error action, with a table-specified error code */
+#define SS_RET		SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE
+
+/* Fatal error action, with a table-specified error code */
+#define SS_FATAL	SS_FAIL|SSQ_PRINT_SENSE
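+
+/*
+ * An error action is a single 32-bit word built from the groups above.
+ * The default SS_RDEF, for example, expands to
+ *
+ *	SS_RETRY | SSQ_DECREMENT_COUNT | SSQ_PRINT_SENSE | EIO
+ *
+ * so (action & SS_MASK) yields the base action, (action & SSQ_MASK) the
+ * qualifiers, and (action & SS_ERRMASK) the errno to report if the
+ * command is finally failed.
+ */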
+
+struct scsi_generic
+{
+	uint8_t opcode;
+	uint8_t bytes[11];
+};
+
+struct scsi_request_sense
+{
+	uint8_t opcode;
+	uint8_t byte2;
+	uint8_t unused[2];
+	uint8_t length;
+	uint8_t control;
+};
+
+struct scsi_test_unit_ready
+{
+	uint8_t opcode;
+	uint8_t byte2;
+	uint8_t unused[3];
+	uint8_t control;
+};
+
+struct scsi_send_diag
+{
+	uint8_t opcode;
+	uint8_t byte2;
+#define	SSD_UOL		0x01
+#define	SSD_DOL		0x02
+#define	SSD_SELFTEST	0x04
+#define	SSD_PF		0x10
+	uint8_t unused[1];
+	uint8_t paramlen[2];
+	uint8_t control;
+};
+
+struct scsi_sense
+{
+	uint8_t opcode;
+	uint8_t byte2;
+	uint8_t unused[2];
+	uint8_t length;
+	uint8_t control;
+};
+
+struct scsi_inquiry
+{
+	uint8_t opcode;
+	uint8_t byte2;
+#define	SI_EVPD 0x01
+	uint8_t page_code;
+	uint8_t reserved;
+	uint8_t length;
+	uint8_t control;
+};
+
+struct scsi_mode_sense_6
+{
+	uint8_t opcode;
+	uint8_t byte2;
+#define	SMS_DBD				0x08
+	uint8_t page;
+#define	SMS_PAGE_CODE 			0x3F
+#define SMS_VENDOR_SPECIFIC_PAGE	0x00
+#define SMS_DISCONNECT_RECONNECT_PAGE	0x02
+#define SMS_PERIPHERAL_DEVICE_PAGE	0x09
+#define SMS_CONTROL_MODE_PAGE		0x0A
+#define SMS_ALL_PAGES_PAGE		0x3F
+#define	SMS_PAGE_CTRL_MASK		0xC0
+#define	SMS_PAGE_CTRL_CURRENT 		0x00
+#define	SMS_PAGE_CTRL_CHANGEABLE 	0x40
+#define	SMS_PAGE_CTRL_DEFAULT 		0x80
+#define	SMS_PAGE_CTRL_SAVED 		0xC0
+	uint8_t unused;
+	uint8_t length;
+	uint8_t control;
+};
+
+struct scsi_mode_sense_10
+{
+	uint8_t opcode;
+	uint8_t byte2;		/* same bits as small version */
+	uint8_t page; 		/* same bits as small version */
+	uint8_t unused[4];
+	uint8_t length[2];
+	uint8_t control;
+};
+
+struct scsi_mode_select_6
+{
+	uint8_t opcode;
+	uint8_t byte2;
+#define	SMS_SP	0x01
+#define	SMS_PF	0x10
+	uint8_t unused[2];
+	uint8_t length;
+	uint8_t control;
+};
+
+struct scsi_mode_select_10
+{
+	uint8_t opcode;
+	uint8_t byte2;		/* same bits as small version */
+	uint8_t unused[5];
+	uint8_t length[2];
+	uint8_t control;
+};
+
+/*
+ * When sending a mode select to a tape drive, the medium type must be 0.
+ */
+struct scsi_mode_hdr_6
+{
+	uint8_t datalen;
+	uint8_t medium_type;
+	uint8_t dev_specific;
+	uint8_t block_descr_len;
+};
+
+struct scsi_mode_hdr_10
+{
+	uint8_t datalen[2];
+	uint8_t medium_type;
+	uint8_t dev_specific;
+	uint8_t reserved[2];
+	uint8_t block_descr_len[2];
+};
+
+struct scsi_mode_block_descr
+{
+	uint8_t density_code;
+	uint8_t num_blocks[3];
+	uint8_t reserved;
+	uint8_t block_len[3];
+};
+
+struct scsi_log_sense
+{
+	uint8_t opcode;
+	uint8_t byte2;
+#define	SLS_SP				0x01
+#define	SLS_PPC				0x02
+	uint8_t page;
+#define	SLS_PAGE_CODE 			0x3F
+#define	SLS_ALL_PAGES_PAGE		0x00
+#define	SLS_OVERRUN_PAGE		0x01
+#define	SLS_ERROR_WRITE_PAGE		0x02
+#define	SLS_ERROR_READ_PAGE		0x03
+#define	SLS_ERROR_READREVERSE_PAGE	0x04
+#define	SLS_ERROR_VERIFY_PAGE		0x05
+#define	SLS_ERROR_NONMEDIUM_PAGE	0x06
+#define	SLS_ERROR_LASTN_PAGE		0x07
+#define	SLS_PAGE_CTRL_MASK		0xC0
+#define	SLS_PAGE_CTRL_THRESHOLD		0x00
+#define	SLS_PAGE_CTRL_CUMULATIVE	0x40
+#define	SLS_PAGE_CTRL_THRESH_DEFAULT	0x80
+#define	SLS_PAGE_CTRL_CUMUL_DEFAULT	0xC0
+	uint8_t reserved[2];
+	uint8_t paramptr[2];
+	uint8_t length[2];
+	uint8_t control;
+};
+
+struct scsi_log_select
+{
+	uint8_t opcode;
+	uint8_t byte2;
+/*	SLS_SP				0x01 */
+#define	SLS_PCR				0x02
+	uint8_t page;
+/*	SLS_PAGE_CTRL_MASK		0xC0 */
+/*	SLS_PAGE_CTRL_THRESHOLD		0x00 */
+/*	SLS_PAGE_CTRL_CUMULATIVE	0x40 */
+/*	SLS_PAGE_CTRL_THRESH_DEFAULT	0x80 */
+/*	SLS_PAGE_CTRL_CUMUL_DEFAULT	0xC0 */
+	uint8_t reserved[4];
+	uint8_t length[2];
+	uint8_t control;
+};
+
+struct scsi_log_header
+{
+	uint8_t page;
+	uint8_t reserved;
+	uint8_t datalen[2];
+};
+
+struct scsi_log_param_header {
+	uint8_t param_code[2];
+	uint8_t param_control;
+#define	SLP_LP				0x01
+#define	SLP_LBIN			0x02
+#define	SLP_TMC_MASK			0x0C
+#define	SLP_TMC_ALWAYS			0x00
+#define	SLP_TMC_EQUAL			0x04
+#define	SLP_TMC_NOTEQUAL		0x08
+#define	SLP_TMC_GREATER			0x0C
+#define	SLP_ETC				0x10
+#define	SLP_TSD				0x20
+#define	SLP_DS				0x40
+#define	SLP_DU				0x80
+	uint8_t param_len;
+};
+
+struct scsi_control_page {
+	uint8_t page_code;
+	uint8_t page_length;
+	uint8_t rlec;
+#define SCB_RLEC			0x01	/*Report Log Exception Cond*/
+	uint8_t queue_flags;
+#define SCP_QUEUE_ALG_MASK		0xF0
+#define SCP_QUEUE_ALG_RESTRICTED	0x00
+#define SCP_QUEUE_ALG_UNRESTRICTED	0x10
+#define SCP_QUEUE_ERR			0x02	/*Queued I/O aborted for CACs*/
+#define SCP_QUEUE_DQUE			0x01	/*Queued I/O disabled*/
+	uint8_t eca_and_aen;
+#define SCP_EECA			0x80	/*Enable Extended CA*/
+#define SCP_RAENP			0x04	/*Ready AEN Permission*/
+#define SCP_UAAENP			0x02	/*UA AEN Permission*/
+#define SCP_EAENP			0x01	/*Error AEN Permission*/
+	uint8_t reserved;
+	uint8_t aen_holdoff_period[2];
+};
+
+struct scsi_reserve
+{
+	uint8_t opcode;
+	uint8_t byte2;
+	uint8_t unused[2];
+	uint8_t length;
+	uint8_t control;
+};
+
+struct scsi_release
+{
+	uint8_t opcode;
+	uint8_t byte2;
+	uint8_t unused[2];
+	uint8_t length;
+	uint8_t control;
+};
+
+struct scsi_prevent
+{
+	uint8_t opcode;
+	uint8_t byte2;
+	uint8_t unused[2];
+	uint8_t how;
+	uint8_t control;
+};
+#define	PR_PREVENT 0x01
+#define PR_ALLOW   0x00
+
+struct scsi_sync_cache
+{
+	uint8_t opcode;
+	uint8_t byte2;
+	uint8_t begin_lba[4];
+	uint8_t reserved;
+	uint8_t lb_count[2];
+	uint8_t control;	
+};
+
+
+struct scsi_changedef
+{
+	uint8_t opcode;
+	uint8_t byte2;
+	uint8_t unused1;
+	uint8_t how;
+	uint8_t unused[4];
+	uint8_t datalen;
+	uint8_t control;
+};
+
+struct scsi_read_buffer
+{
+	uint8_t opcode;
+	uint8_t byte2;
+#define	RWB_MODE		0x07
+#define	RWB_MODE_HDR_DATA	0x00
+#define	RWB_MODE_DATA		0x02
+#define	RWB_MODE_DOWNLOAD	0x04
+#define	RWB_MODE_DOWNLOAD_SAVE	0x05
+	uint8_t buffer_id;
+	uint8_t offset[3];
+	uint8_t length[3];
+	uint8_t control;
+};
+
+struct scsi_write_buffer
+{
+	uint8_t opcode;
+	uint8_t byte2;
+	uint8_t buffer_id;
+	uint8_t offset[3];
+	uint8_t length[3];
+	uint8_t control;
+};
+
+struct scsi_rw_6
+{
+	uint8_t opcode;
+	uint8_t addr[3];
+/* only 5 bits are valid in the MSB address byte */
+#define	SRW_TOPADDR	0x1F
+	uint8_t length;
+	uint8_t control;
+};
+
+struct scsi_rw_10
+{
+	uint8_t opcode;
+#define	SRW10_RELADDR	0x01
+#define SRW10_FUA	0x08
+#define	SRW10_DPO	0x10
+	uint8_t byte2;
+	uint8_t addr[4];
+	uint8_t reserved;
+	uint8_t length[2];
+	uint8_t control;
+};
+
+struct scsi_rw_12
+{
+	uint8_t opcode;
+#define	SRW12_RELADDR	0x01
+#define SRW12_FUA	0x08
+#define	SRW12_DPO	0x10
+	uint8_t byte2;
+	uint8_t addr[4];
+	uint8_t length[4];
+	uint8_t reserved;
+	uint8_t control;
+};
+
+struct scsi_start_stop_unit
+{
+	uint8_t opcode;
+	uint8_t byte2;
+#define	SSS_IMMED		0x01
+	uint8_t reserved[2];
+	uint8_t how;
+#define	SSS_START		0x01
+#define	SSS_LOEJ		0x02
+	uint8_t control;
+};
+
+#define SC_SCSI_1 0x01
+#define SC_SCSI_2 0x03
+
+/*
+ * Opcodes
+ */
+
+#define	TEST_UNIT_READY		0x00
+#define REQUEST_SENSE		0x03
+#define	READ_6			0x08
+#define WRITE_6			0x0a
+#define INQUIRY			0x12
+#define MODE_SELECT_6		0x15
+#define MODE_SENSE_6		0x1a
+#define START_STOP_UNIT		0x1b
+#define START_STOP		0x1b
+#define RESERVE      		0x16
+#define RELEASE      		0x17
+#define	RECEIVE_DIAGNOSTIC	0x1c
+#define	SEND_DIAGNOSTIC		0x1d
+#define PREVENT_ALLOW		0x1e
+#define	READ_CAPACITY		0x25
+#define	READ_10			0x28
+#define WRITE_10		0x2a
+#define POSITION_TO_ELEMENT	0x2b
+#define	SYNCHRONIZE_CACHE	0x35
+#define	WRITE_BUFFER            0x3b
+#define	READ_BUFFER             0x3c
+#define	CHANGE_DEFINITION	0x40
+#define	LOG_SELECT		0x4c
+#define	LOG_SENSE		0x4d
+#ifdef XXXCAM
+#define	MODE_SENSE_10		0x5A
+#endif
+#define	MODE_SELECT_10		0x55
+#define MOVE_MEDIUM     	0xa5
+#define READ_12			0xa8
+#define WRITE_12		0xaa
+#define READ_ELEMENT_STATUS	0xb8
+
+
+/*
+ * Device Types
+ */
+#define T_DIRECT	0x00
+#define T_SEQUENTIAL	0x01
+#define T_PRINTER	0x02
+#define T_PROCESSOR	0x03
+#define T_WORM		0x04
+#define T_CDROM		0x05
+#define T_SCANNER 	0x06
+#define T_OPTICAL 	0x07
+#define T_CHANGER	0x08
+#define T_COMM		0x09
+#define T_ASC0		0x0a
+#define T_ASC1		0x0b
+#define	T_STORARRAY	0x0c
+#define	T_ENCLOSURE	0x0d
+#define	T_RBC		0x0e
+#define	T_OCRW		0x0f
+#define T_NODEVICE	0x1F
+#define	T_ANY		0xFF	/* Used in Quirk table matches */
+
+#define T_REMOV		1
+#define	T_FIXED		0
+
+/*
+ * This length is the initial inquiry length used by the probe code, as    
+ * well as the length necessary for aic_print_inquiry() to function
+ * correctly.  If either use requires a different length in the future, 
+ * the two values should be de-coupled.
+ */
+#define	SHORT_INQUIRY_LENGTH	36
+
+struct scsi_inquiry_data
+{
+	uint8_t device;
+#define	SID_TYPE(inq_data) ((inq_data)->device & 0x1f)
+#define	SID_QUAL(inq_data) (((inq_data)->device & 0xE0) >> 5)
+#define	SID_QUAL_LU_CONNECTED	0x00	/*
+					 * The specified peripheral device
+					 * type is currently connected to
+					 * logical unit.  If the target cannot
+					 * determine whether or not a physical
+					 * device is currently connected, it
+					 * shall also use this peripheral
+					 * qualifier when returning the INQUIRY
+					 * data.  This peripheral qualifier
+					 * does not mean that the device is
+					 * ready for access by the initiator.
+					 */
+#define	SID_QUAL_LU_OFFLINE	0x01	/*
+					 * The target is capable of supporting
+					 * the specified peripheral device type
+					 * on this logical unit; however, the
+					 * physical device is not currently
+					 * connected to this logical unit.
+					 */
+#define SID_QUAL_RSVD		0x02
+#define	SID_QUAL_BAD_LU		0x03	/*
+					 * The target is not capable of
+					 * supporting a physical device on
+					 * this logical unit. For this
+					 * peripheral qualifier the peripheral
+					 * device type shall be set to 1Fh to
+					 * provide compatibility with previous
+					 * versions of SCSI. All other
+					 * peripheral device type values are
+					 * reserved for this peripheral
+					 * qualifier.
+					 */
+#define	SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ((SID_QUAL(inq_data) & 0x08) != 0)
+	uint8_t dev_qual2;
+#define	SID_QUAL2	0x7F
+#define	SID_IS_REMOVABLE(inq_data) (((inq_data)->dev_qual2 & 0x80) != 0)
+	uint8_t version;
+#define SID_ANSI_REV(inq_data) ((inq_data)->version & 0x07)
+#define		SCSI_REV_0		0
+#define		SCSI_REV_CCS		1
+#define		SCSI_REV_2		2
+#define		SCSI_REV_SPC		3
+#define		SCSI_REV_SPC2		4
+
+#define SID_ECMA	0x38
+#define SID_ISO		0xC0
+	uint8_t response_format;
+#define SID_AENC	0x80
+#define SID_TrmIOP	0x40
+	uint8_t additional_length;
+	uint8_t reserved[2];
+	uint8_t flags;
+#define	SID_SftRe	0x01
+#define	SID_CmdQue	0x02
+#define	SID_Linked	0x08
+#define	SID_Sync	0x10
+#define	SID_WBus16	0x20
+#define	SID_WBus32	0x40
+#define	SID_RelAdr	0x80
+#define SID_VENDOR_SIZE   8
+	char	 vendor[SID_VENDOR_SIZE];
+#define SID_PRODUCT_SIZE  16
+	char	 product[SID_PRODUCT_SIZE];
+#define SID_REVISION_SIZE 4
+	char	 revision[SID_REVISION_SIZE];
+	/*
+	 * The following fields were taken from SCSI Primary Commands - 2
+	 * (SPC-2) Revision 14, Dated 11 November 1999
+	 */
+#define	SID_VENDOR_SPECIFIC_0_SIZE	20
+	uint8_t vendor_specific0[SID_VENDOR_SPECIFIC_0_SIZE];
+	/*
+	 * An extension of SCSI Parallel Specific Values
+	 */
+#define	SID_SPI_IUS		0x01
+#define	SID_SPI_QAS		0x02
+#define	SID_SPI_CLOCK_ST	0x00
+#define	SID_SPI_CLOCK_DT	0x04
+#define	SID_SPI_CLOCK_DT_ST	0x0C
+#define	SID_SPI_MASK		0x0F
+	uint8_t spi3data;
+	uint8_t reserved2;
+	/*
+	 * Version Descriptors, stored 2 byte values.
+	 */
+	uint8_t version1[2];
+	uint8_t version2[2];
+	uint8_t version3[2];
+	uint8_t version4[2];
+	uint8_t version5[2];
+	uint8_t version6[2];
+	uint8_t version7[2];
+	uint8_t version8[2];
+
+	uint8_t reserved3[22];
+
+#define	SID_VENDOR_SPECIFIC_1_SIZE	160
+	uint8_t vendor_specific1[SID_VENDOR_SPECIFIC_1_SIZE];
+};
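+
+/*
+ * Example (a sketch): classifying a fetched INQUIRY buffer with the
+ * accessor macros above.
+ *
+ *	if (SID_QUAL(inq) == SID_QUAL_LU_CONNECTED
+ *	 && SID_TYPE(inq) == T_CDROM
+ *	 && SID_IS_REMOVABLE(inq)) {
+ *		treat the unit as a CD-ROM with removable media
+ *	}
+ */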
+
+struct scsi_vpd_unit_serial_number
+{
+	uint8_t device;
+	uint8_t page_code;
+#define SVPD_UNIT_SERIAL_NUMBER	0x80
+	uint8_t reserved;
+	uint8_t length; /* serial number length */
+#define SVPD_SERIAL_NUM_SIZE 251
+	uint8_t serial_num[SVPD_SERIAL_NUM_SIZE];
+};
+
+struct scsi_read_capacity
+{
+	uint8_t opcode;
+	uint8_t byte2;
+	uint8_t addr[4];
+	uint8_t unused[3];
+	uint8_t control;
+};
+
+struct scsi_read_capacity_data
+{
+	uint8_t addr[4];
+	uint8_t length[4];
+};
+
+struct scsi_report_luns
+{
+	uint8_t opcode;
+	uint8_t byte2;
+	uint8_t unused[3];
+	uint8_t addr[4];
+	uint8_t control;
+};
+
+struct scsi_report_luns_data {
+	uint8_t length[4];	/* length of LUN inventory, in bytes */
+	uint8_t reserved[4];	/* unused */
+	/*
+	 * LUN inventory- we only support the type zero form for now.
+	 */
+	struct {
+		uint8_t lundata[8];
+	} luns[1];
+};
+#define	RPL_LUNDATA_ATYP_MASK	0xc0	/* MBZ for type 0 lun */
+#define	RPL_LUNDATA_T0LUN	1	/* @ lundata[1] */
+
+
+struct scsi_sense_data
+{
+	uint8_t error_code;
+#define	SSD_ERRCODE			0x7F
+#define		SSD_CURRENT_ERROR	0x70
+#define		SSD_DEFERRED_ERROR	0x71
+#define	SSD_ERRCODE_VALID	0x80	
+	uint8_t segment;
+	uint8_t flags;
+#define	SSD_KEY				0x0F
+#define		SSD_KEY_NO_SENSE	0x00
+#define		SSD_KEY_RECOVERED_ERROR	0x01
+#define		SSD_KEY_NOT_READY	0x02
+#define		SSD_KEY_MEDIUM_ERROR	0x03
+#define		SSD_KEY_HARDWARE_ERROR	0x04
+#define		SSD_KEY_ILLEGAL_REQUEST	0x05
+#define		SSD_KEY_UNIT_ATTENTION	0x06
+#define		SSD_KEY_DATA_PROTECT	0x07
+#define		SSD_KEY_BLANK_CHECK	0x08
+#define		SSD_KEY_Vendor_Specific	0x09
+#define		SSD_KEY_COPY_ABORTED	0x0a
+#define		SSD_KEY_ABORTED_COMMAND	0x0b		
+#define		SSD_KEY_EQUAL		0x0c
+#define		SSD_KEY_VOLUME_OVERFLOW	0x0d
+#define		SSD_KEY_MISCOMPARE	0x0e
+#define		SSD_KEY_RESERVED	0x0f			
+#define	SSD_ILI		0x20
+#define	SSD_EOM		0x40
+#define	SSD_FILEMARK	0x80
+	uint8_t info[4];
+	uint8_t extra_len;
+	uint8_t cmd_spec_info[4];
+	uint8_t add_sense_code;
+	uint8_t add_sense_code_qual;
+	uint8_t fru;
+	uint8_t sense_key_spec[3];
+#define	SSD_SCS_VALID		0x80
+#define SSD_FIELDPTR_CMD	0x40
+#define SSD_BITPTR_VALID	0x08
+#define SSD_BITPTR_VALUE	0x07
+#define SSD_MIN_SIZE 18
+	uint8_t extra_bytes[14];
+#define SSD_FULL_SIZE sizeof(struct scsi_sense_data)
+};
+
+struct scsi_mode_header_6
+{
+	uint8_t data_length;	/* Sense data length */
+	uint8_t medium_type;
+	uint8_t dev_spec;
+	uint8_t blk_desc_len;
+};
+
+struct scsi_mode_header_10
+{
+	uint8_t data_length[2];/* Sense data length */
+	uint8_t medium_type;
+	uint8_t dev_spec;
+	uint8_t unused[2];
+	uint8_t blk_desc_len[2];
+};
+
+struct scsi_mode_page_header
+{
+	uint8_t page_code;
+	uint8_t page_length;
+};
+
+struct scsi_mode_blk_desc
+{
+	uint8_t density;
+	uint8_t nblocks[3];
+	uint8_t reserved;
+	uint8_t blklen[3];
+};
+
+#define	SCSI_DEFAULT_DENSITY	0x00	/* use 'default' density */
+#define	SCSI_SAME_DENSITY	0x7f	/* use 'same' density- >= SCSI-2 only */
+
+
+/*
+ * Status Byte
+ */
+#define	SCSI_STATUS_OK			0x00
+#define	SCSI_STATUS_CHECK_COND		0x02
+#define	SCSI_STATUS_COND_MET		0x04
+#define	SCSI_STATUS_BUSY		0x08
+#define SCSI_STATUS_INTERMED		0x10
+#define SCSI_STATUS_INTERMED_COND_MET	0x14
+#define SCSI_STATUS_RESERV_CONFLICT	0x18
+#define SCSI_STATUS_CMD_TERMINATED	0x22	/* Obsolete in SAM-2 */
+#define SCSI_STATUS_QUEUE_FULL		0x28
+#define SCSI_STATUS_ACA_ACTIVE		0x30
+#define SCSI_STATUS_TASK_ABORTED	0x40
+
+struct scsi_inquiry_pattern {
+	uint8_t   type;
+	uint8_t   media_type;
+#define	SIP_MEDIA_REMOVABLE	0x01
+#define	SIP_MEDIA_FIXED		0x02
+	const char *vendor;
+	const char *product;
+	const char *revision;
+}; 
+
+struct scsi_static_inquiry_pattern {
+	uint8_t   type;
+	uint8_t   media_type;
+	char       vendor[SID_VENDOR_SIZE+1];
+	char       product[SID_PRODUCT_SIZE+1];
+	char       revision[SID_REVISION_SIZE+1];
+};
+
+struct scsi_sense_quirk_entry {
+	struct scsi_inquiry_pattern	inq_pat;
+	int				num_sense_keys;
+	int				num_ascs;
+	struct sense_key_table_entry	*sense_key_info;
+	struct asc_table_entry		*asc_info;
+};
+
+struct sense_key_table_entry {
+	uint8_t    sense_key;
+	uint32_t   action;
+	const char *desc;
+};
+
+struct asc_table_entry {
+	uint8_t    asc;
+	uint8_t    ascq;
+	uint32_t   action;
+	const char *desc;
+};
+
+struct op_table_entry {
+	uint8_t    opcode;
+	uint16_t   opmask;
+	const char  *desc;
+};
+
+struct scsi_op_quirk_entry {
+	struct scsi_inquiry_pattern	inq_pat;
+	int				num_ops;
+	struct op_table_entry		*op_table;
+};
+
+typedef enum {
+	SSS_FLAG_NONE		= 0x00,
+	SSS_FLAG_PRINT_COMMAND	= 0x01
+} scsi_sense_string_flags;
+
+extern const char *scsi_sense_key_text[];
+
+/************************* Large Disk Handling ********************************/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+static __inline int aic_sector_div(u_long capacity, int heads, int sectors);
+
+static __inline int
+aic_sector_div(u_long capacity, int heads, int sectors)
+{
+	return (capacity / (heads * sectors));
+}
+#else
+static __inline int aic_sector_div(sector_t capacity, int heads, int sectors);
+
+static __inline int
+aic_sector_div(sector_t capacity, int heads, int sectors)
+{
+	/* ugly, ugly sector_div calling convention.. */
+	sector_div(capacity, (heads * sectors));
+	return (int)capacity;
+}
+#endif
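+
+/*
+ * Typical use (a sketch with made-up geometry): deriving the cylinder
+ * count for a translated BIOS geometry from the disk's total sector
+ * count, independent of whether sector_t is 32 or 64 bits.
+ *
+ *	int heads = 255, sectors = 63;
+ *	int cylinders = aic_sector_div(capacity, heads, sectors);
+ */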
+
+/**************************** Module Library Hack *****************************/
+/*
+ * What we'd like to do is have a single "scsi library" module that both the
+ * aic7xxx and aic79xx drivers could load and depend on.  A cursory examination
+ * of implementing module dependencies in Linux (handling the install and
+ * initrd cases) does not look promising.  For now, we just duplicate this
+ * code in both drivers using a simple symbol renaming scheme that hides this
+ * hack from the drivers.
+ */
+#define AIC_LIB_ENTRY_CONCAT(x, prefix)	prefix ## x
+#define	AIC_LIB_ENTRY_EXPAND(x, prefix) AIC_LIB_ENTRY_CONCAT(x, prefix)
+#define AIC_LIB_ENTRY(x)		AIC_LIB_ENTRY_EXPAND(x, AIC_LIB_PREFIX)
+
+#define	aic_sense_desc			AIC_LIB_ENTRY(_sense_desc)
+#define	aic_sense_error_action		AIC_LIB_ENTRY(_sense_error_action)
+#define	aic_error_action		AIC_LIB_ENTRY(_error_action)
+#define	aic_op_desc			AIC_LIB_ENTRY(_op_desc)
+#define	aic_cdb_string			AIC_LIB_ENTRY(_cdb_string)
+#define aic_print_inquiry		AIC_LIB_ENTRY(_print_inquiry)
+#define aic_calc_syncsrate		AIC_LIB_ENTRY(_calc_syncrate)
+#define	aic_calc_syncparam		AIC_LIB_ENTRY(_calc_syncparam)
+#define	aic_calc_speed			AIC_LIB_ENTRY(_calc_speed)
+#define	aic_inquiry_match		AIC_LIB_ENTRY(_inquiry_match)
+#define	aic_static_inquiry_match	AIC_LIB_ENTRY(_static_inquiry_match)
+#define	aic_parse_brace_option		AIC_LIB_ENTRY(_parse_brace_option)
+
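+/*
+ * Expansion sketch (illustrative): each driver defines AIC_LIB_PREFIX before
+ * including this header (e.g. "ahd" for aic79xx), so every shared entry point
+ * gets a driver-unique linkage name.  With that prefix,
+ *
+ *	aic_print_inquiry(&inq_data);
+ *
+ * preprocesses to ahd_print_inquiry(&inq_data), while the aic7xxx build of
+ * the same library code produces its own differently named copy.
+ */
+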
+/******************************************************************************/
+
+void			aic_sense_desc(int /*sense_key*/, int /*asc*/,
+				       int /*ascq*/, struct scsi_inquiry_data*,
+				       const char** /*sense_key_desc*/,
+				       const char** /*asc_desc*/);
+aic_sense_action	aic_sense_error_action(struct scsi_sense_data*,
+					       struct scsi_inquiry_data*,
+					       uint32_t /*sense_flags*/);
+uint32_t		aic_error_action(struct scsi_cmnd *,
+					 struct scsi_inquiry_data *,
+					 cam_status, u_int);
+
+#define	SF_RETRY_UA	0x01
+#define SF_NO_PRINT	0x02
+#define SF_QUIET_IR	0x04	/* Be quiet about Illegal Request responses */
+#define SF_PRINT_ALWAYS	0x08
+
+
+const char *	aic_op_desc(uint16_t /*opcode*/, struct scsi_inquiry_data*);
+char *		aic_cdb_string(uint8_t* /*cdb_ptr*/, char* /*cdb_string*/,
+			       size_t /*len*/);
+void		aic_print_inquiry(struct scsi_inquiry_data*);
+
+u_int		aic_calc_syncsrate(u_int /*period_factor*/);
+u_int		aic_calc_syncparam(u_int /*period*/);
+u_int		aic_calc_speed(u_int width, u_int period, u_int offset,
+			       u_int min_rate);
+	
+int		aic_inquiry_match(caddr_t /*inqbuffer*/,
+				  caddr_t /*table_entry*/);
+int		aic_static_inquiry_match(caddr_t /*inqbuffer*/,
+					 caddr_t /*table_entry*/);
+
+typedef void aic_option_callback_t(u_long, int, int, int32_t);
+char *		aic_parse_brace_option(char *opt_name, char *opt_arg,
+				       char *end, int depth,
+				       aic_option_callback_t *, u_long);
+
+static __inline void	 scsi_extract_sense(struct scsi_sense_data *sense,
+					    int *error_code, int *sense_key,
+					    int *asc, int *ascq);
+static __inline void	 scsi_ulto2b(uint32_t val, uint8_t *bytes);
+static __inline void	 scsi_ulto3b(uint32_t val, uint8_t *bytes);
+static __inline void	 scsi_ulto4b(uint32_t val, uint8_t *bytes);
+static __inline uint32_t scsi_2btoul(uint8_t *bytes);
+static __inline uint32_t scsi_3btoul(uint8_t *bytes);
+static __inline int32_t	 scsi_3btol(uint8_t *bytes);
+static __inline uint32_t scsi_4btoul(uint8_t *bytes);
+
+static __inline void scsi_extract_sense(struct scsi_sense_data *sense,
+				       int *error_code, int *sense_key,
+				       int *asc, int *ascq)
+{
+	*error_code = sense->error_code & SSD_ERRCODE;
+	*sense_key = sense->flags & SSD_KEY;
+	*asc = (sense->extra_len >= 5) ? sense->add_sense_code : 0;
+	*ascq = (sense->extra_len >= 6) ? sense->add_sense_code_qual : 0;
+}
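+
+/*
+ * Usage sketch (illustrative, hypothetical variables): decoding a returned
+ * sense buffer with the helper above.
+ *
+ *	struct scsi_sense_data *sense = ...;	sense bytes from the target
+ *	int error_code, sense_key, asc, ascq;
+ *
+ *	scsi_extract_sense(sense, &error_code, &sense_key, &asc, &ascq);
+ *	if (sense_key == 0x06)			UNIT ATTENTION, for example
+ *		...retry the command...
+ */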
+
+static __inline void
+scsi_ulto2b(uint32_t val, uint8_t *bytes)
+{
+
+	bytes[0] = (val >> 8) & 0xff;
+	bytes[1] = val & 0xff;
+}
+
+static __inline void
+scsi_ulto3b(uint32_t val, uint8_t *bytes)
+{
+
+	bytes[0] = (val >> 16) & 0xff;
+	bytes[1] = (val >> 8) & 0xff;
+	bytes[2] = val & 0xff;
+}
+
+static __inline void
+scsi_ulto4b(uint32_t val, uint8_t *bytes)
+{
+
+	bytes[0] = (val >> 24) & 0xff;
+	bytes[1] = (val >> 16) & 0xff;
+	bytes[2] = (val >> 8) & 0xff;
+	bytes[3] = val & 0xff;
+}
+
+static __inline uint32_t
+scsi_2btoul(uint8_t *bytes)
+{
+	uint32_t rv;
+
+	rv = (bytes[0] << 8) |
+	     bytes[1];
+	return (rv);
+}
+
+static __inline uint32_t
+scsi_3btoul(uint8_t *bytes)
+{
+	uint32_t rv;
+
+	rv = (bytes[0] << 16) |
+	     (bytes[1] << 8) |
+	     bytes[2];
+	return (rv);
+}
+
+static __inline int32_t 
+scsi_3btol(uint8_t *bytes)
+{
+	uint32_t rc = scsi_3btoul(bytes);
+ 
+	if (rc & 0x00800000)
+		rc |= 0xff000000;
+
+	return (int32_t) rc;
+}
+
+static __inline uint32_t
+scsi_4btoul(uint8_t *bytes)
+{
+	uint32_t rv;
+
+	rv = (bytes[0] << 24) |
+	     (bytes[1] << 16) |
+	     (bytes[2] << 8) |
+	     bytes[3];
+	return (rv);
+}
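+
+/*
+ * Illustrative round trip (hypothetical values): the helpers above pack and
+ * unpack the big-endian multi-byte fields used in CDBs and mode pages.
+ *
+ *	uint8_t buf[4];
+ *
+ *	scsi_ulto4b(0x00123456, buf);		buf[] = 00 12 34 56
+ *	...scsi_4btoul(buf) returns 0x00123456 again...
+ */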
+
+/* Macros for generating the elements of the PCI ID tables. */
+
+#define GETID(v, s) (unsigned)(((v) >> (s)) & 0xFFFF ?: PCI_ANY_ID)
+
+#define ID_C(x, c)						\
+{								\
+	GETID(x,32), GETID(x,48), GETID(x,0), GETID(x,16),	\
+	(c) << 8, 0xFFFF00, 0					\
+}
+
+#define ID2C(x)                          \
+	ID_C(x, PCI_CLASS_STORAGE_SCSI), \
+	ID_C(x, PCI_CLASS_STORAGE_RAID)
+
+#define IDIROC(x)  ((x) | ~ID_ALL_IROC_MASK)
+
+/*
+ * Generate IDs for all 16 possibilities.  The caller has already masked out
+ * the 4 least significant bits of the device id (e.g., with
+ * ID_9005_GENERIC_MASK).
+ */
+#define ID16(x)                          \
+	ID(x),                           \
+	ID((x) | 0x0001000000000000ull), \
+	ID((x) | 0x0002000000000000ull), \
+	ID((x) | 0x0003000000000000ull), \
+	ID((x) | 0x0004000000000000ull), \
+	ID((x) | 0x0005000000000000ull), \
+	ID((x) | 0x0006000000000000ull), \
+	ID((x) | 0x0007000000000000ull), \
+	ID((x) | 0x0008000000000000ull), \
+	ID((x) | 0x0009000000000000ull), \
+	ID((x) | 0x000A000000000000ull), \
+	ID((x) | 0x000B000000000000ull), \
+	ID((x) | 0x000C000000000000ull), \
+	ID((x) | 0x000D000000000000ull), \
+	ID((x) | 0x000E000000000000ull), \
+	ID((x) | 0x000F000000000000ull)
+
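+/*
+ * Usage sketch (illustrative only): ID() is supplied by the per-driver PCI
+ * glue and turns a 64-bit encoding of the vendor, device, subvendor and
+ * subdevice IDs into one struct pci_device_id initializer, so a table entry
+ * looks roughly like this (names and hex values are placeholders):
+ *
+ *	static struct pci_device_id ahd_pci_ids[] = {
+ *		ID16(0x801d900500000000ull & ID_9005_GENERIC_MASK),
+ *		ID2C(0x8000900500000000ull),
+ *		{ 0 }
+ *	};
+ */
+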
+#endif /*_AICLIB_H */
diff --git a/drivers/scsi/aic7xxx/cam.h b/drivers/scsi/aic7xxx/cam.h
new file mode 100644
index 0000000..d40ba07
--- /dev/null
+++ b/drivers/scsi/aic7xxx/cam.h
@@ -0,0 +1,111 @@
+/*
+ * Data structures and definitions for the CAM system.
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * Copyright (c) 2000 Adaptec Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/cam.h#15 $
+ */
+
+#ifndef _AIC7XXX_CAM_H
+#define _AIC7XXX_CAM_H 1
+
+#include <linux/types.h>
+
+#define	CAM_BUS_WILDCARD ((u_int)~0)
+#define	CAM_TARGET_WILDCARD ((u_int)~0)
+#define	CAM_LUN_WILDCARD ((u_int)~0)
+
+/* CAM Status field values */
+typedef enum {
+	CAM_REQ_INPROG,		/* CCB request is in progress */
+	CAM_REQ_CMP,		/* CCB request completed without error */
+	CAM_REQ_ABORTED,	/* CCB request aborted by the host */
+	CAM_UA_ABORT,		/* Unable to abort CCB request */
+	CAM_REQ_CMP_ERR,	/* CCB request completed with an error */
+	CAM_BUSY,		/* CAM subsystem is busy */
+	CAM_REQ_INVALID,	/* CCB request was invalid */
+	CAM_PATH_INVALID,	/* Supplied Path ID is invalid */
+	CAM_SEL_TIMEOUT,	/* Target Selection Timeout */
+	CAM_CMD_TIMEOUT,	/* Command timeout */
+	CAM_SCSI_STATUS_ERROR,	/* SCSI error, look at error code in CCB */
+	CAM_SCSI_BUS_RESET,	/* SCSI Bus Reset Sent/Received */
+	CAM_UNCOR_PARITY,	/* Uncorrectable parity error occurred */
+	CAM_AUTOSENSE_FAIL,	/* Autosense: request sense cmd fail */
+	CAM_NO_HBA,		/* No HBA Detected Error */
+	CAM_DATA_RUN_ERR,	/* Data Overrun error */
+	CAM_UNEXP_BUSFREE,	/* Unexpected Bus Free */
+	CAM_SEQUENCE_FAIL,	/* Protocol Violation */
+	CAM_CCB_LEN_ERR,	/* CCB length supplied is inadequate */
+	CAM_PROVIDE_FAIL,	/* Unable to provide requested capability */
+	CAM_BDR_SENT,		/* A SCSI BDR msg was sent to target */
+	CAM_REQ_TERMIO,		/* CCB request terminated by the host */
+	CAM_UNREC_HBA_ERROR,	/* Unrecoverable Host Bus Adapter Error */
+	CAM_REQ_TOO_BIG,	/* The request was too large for this host */
+	CAM_UA_TERMIO,		/* Unable to terminate I/O CCB request */
+	CAM_MSG_REJECT_REC,	/* Message Reject Received */
+	CAM_DEV_NOT_THERE,	/* SCSI Device Not Installed/there */
+	CAM_RESRC_UNAVAIL,	/* Resource Unavailable */
+	/*
+	 * This request should be requeued to preserve
+	 * transaction ordering.  This typically occurs
+	 * when the SIM recognizes an error that should
+	 * freeze the queue and must place additional
+	 * requests for the target at the sim level
+	 * back into the XPT queue.
+	 */
+	CAM_REQUEUE_REQ,
+	CAM_DEV_QFRZN		= 0x40,
+
+	CAM_STATUS_MASK		= 0x3F
+} cam_status;
+
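+/*
+ * Illustrative use (hypothetical variable names): CAM_DEV_QFRZN is a flag
+ * OR'd into the status value, so callers mask with CAM_STATUS_MASK before
+ * comparing against the enumerated codes.
+ *
+ *	cam_status status = ...;
+ *
+ *	if ((status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR)
+ *		...inspect the SCSI status byte in the CCB...
+ *	if (status & CAM_DEV_QFRZN)
+ *		...release the frozen device queue...
+ */
+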
+/*
+ * Definitions for the asynchronous callback CCB fields.
+ */
+typedef enum {
+	AC_GETDEV_CHANGED	= 0x800,/* Getdev info might have changed */
+	AC_INQ_CHANGED		= 0x400,/* Inquiry info might have changed */
+	AC_TRANSFER_NEG		= 0x200,/* New transfer settings in effect */
+	AC_LOST_DEVICE		= 0x100,/* A device went away */
+	AC_FOUND_DEVICE		= 0x080,/* A new device was found */
+	AC_PATH_DEREGISTERED	= 0x040,/* A path has de-registered */
+	AC_PATH_REGISTERED	= 0x020,/* A new path has been registered */
+	AC_SENT_BDR		= 0x010,/* A BDR message was sent to target */
+	AC_SCSI_AEN		= 0x008,/* A SCSI AEN has been received */
+	AC_UNSOL_RESEL		= 0x002,/* Unsolicited reselection occurred */
+	AC_BUS_RESET		= 0x001 /* A SCSI bus reset occurred */
+} ac_code;
+
+typedef enum {
+	CAM_DIR_IN		= SCSI_DATA_READ,
+	CAM_DIR_OUT		= SCSI_DATA_WRITE,
+	CAM_DIR_NONE		= SCSI_DATA_NONE
+} ccb_flags;
+
+#endif /* _AIC7XXX_CAM_H */
diff --git a/drivers/scsi/aic7xxx/queue.h b/drivers/scsi/aic7xxx/queue.h
new file mode 100644
index 0000000..8adf800
--- /dev/null
+++ b/drivers/scsi/aic7xxx/queue.h
@@ -0,0 +1,501 @@
+/*
+ * Copyright (c) 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)queue.h	8.5 (Berkeley) 8/20/94
+ * $FreeBSD: src/sys/sys/queue.h,v 1.38 2000/05/26 02:06:56 jake Exp $
+ */
+
+#ifndef _SYS_QUEUE_H_
+#define	_SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * singly-linked tail queues, lists, tail queues, and circular queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction.  Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A singly-linked tail queue is headed by a pair of pointers, one to the
+ * head of the list and the other to the tail of the list. The elements are
+ * singly linked for minimum space and pointer manipulation overhead at the
+ * expense of O(n) removal for arbitrary elements. New elements can be added
+ * to the list after an existing element, at the head of the list, or at the
+ * end of the list. Elements being removed from the head of the tail queue
+ * should use the explicit macro for this purpose for optimum efficiency.
+ * A singly-linked tail queue may only be traversed in the forward direction.
+ * Singly-linked tail queues are ideal for applications with large datasets
+ * and few or no removals or for implementing a FIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end-of-list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ *
+ *
+ *			SLIST	LIST	STAILQ	TAILQ	CIRCLEQ
+ * _HEAD		+	+	+	+	+
+ * _HEAD_INITIALIZER	+	+	+	+	+
+ * _ENTRY		+	+	+	+	+
+ * _INIT		+	+	+	+	+
+ * _EMPTY		+	+	+	+	+
+ * _FIRST		+	+	+	+	+
+ * _NEXT		+	+	+	+	+
+ * _PREV		-	-	-	+	+
+ * _LAST		-	-	+	+	+
+ * _FOREACH		+	+	+	+	+
+ * _FOREACH_REVERSE	-	-	-	+	+
+ * _INSERT_HEAD		+	+	+	+	+
+ * _INSERT_BEFORE	-	+	-	+	+
+ * _INSERT_AFTER	+	+	+	+	+
+ * _INSERT_TAIL		-	-	+	+	+
+ * _REMOVE_HEAD		+	-	+	-	-
+ * _REMOVE		+	+	+	+	+
+ *
+ */
+
+/*
+ * Singly-linked List declarations.
+ */
+#define	SLIST_HEAD(name, type)						\
+struct name {								\
+	struct type *slh_first;	/* first element */			\
+}
+
+#define	SLIST_HEAD_INITIALIZER(head)					\
+	{ NULL }
+ 
+#define	SLIST_ENTRY(type)						\
+struct {								\
+	struct type *sle_next;	/* next element */			\
+}
+ 
+/*
+ * Singly-linked List functions.
+ */
+#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
+
+#define	SLIST_FIRST(head)	((head)->slh_first)
+
+#define	SLIST_FOREACH(var, head, field)					\
+	for ((var) = SLIST_FIRST((head));				\
+	    (var);							\
+	    (var) = SLIST_NEXT((var), field))
+
+#define	SLIST_INIT(head) do {						\
+	SLIST_FIRST((head)) = NULL;					\
+} while (0)
+
+#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
+	SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field);	\
+	SLIST_NEXT((slistelm), field) = (elm);				\
+} while (0)
+
+#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
+	SLIST_NEXT((elm), field) = SLIST_FIRST((head));			\
+	SLIST_FIRST((head)) = (elm);					\
+} while (0)
+
+#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
+
+#define	SLIST_REMOVE(head, elm, type, field) do {			\
+	if (SLIST_FIRST((head)) == (elm)) {				\
+		SLIST_REMOVE_HEAD((head), field);			\
+	}								\
+	else {								\
+		struct type *curelm = SLIST_FIRST((head));		\
+		while (SLIST_NEXT(curelm, field) != (elm))		\
+			curelm = SLIST_NEXT(curelm, field);		\
+		SLIST_NEXT(curelm, field) =				\
+		    SLIST_NEXT(SLIST_NEXT(curelm, field), field);	\
+	}								\
+} while (0)
+
+#define	SLIST_REMOVE_HEAD(head, field) do {				\
+	SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field);	\
+} while (0)
+
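+/*
+ * Usage sketch (illustrative, hypothetical types): a LIFO built from the
+ * singly-linked list macros above.
+ *
+ *	struct node {
+ *		int value;
+ *		SLIST_ENTRY(node) links;
+ *	};
+ *	SLIST_HEAD(node_list, node) stack = SLIST_HEAD_INITIALIZER(stack);
+ *
+ *	SLIST_INSERT_HEAD(&stack, new_node, links);	push
+ *	top = SLIST_FIRST(&stack);			peek
+ *	SLIST_REMOVE_HEAD(&stack, links);		pop
+ */
+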
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define	STAILQ_HEAD(name, type)						\
+struct name {								\
+	struct type *stqh_first;/* first element */			\
+	struct type **stqh_last;/* addr of last next element */		\
+}
+
+#define	STAILQ_HEAD_INITIALIZER(head)					\
+	{ NULL, &(head).stqh_first }
+
+#define	STAILQ_ENTRY(type)						\
+struct {								\
+	struct type *stqe_next;	/* next element */			\
+}
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define	STAILQ_EMPTY(head)	((head)->stqh_first == NULL)
+
+#define	STAILQ_FIRST(head)	((head)->stqh_first)
+
+#define	STAILQ_FOREACH(var, head, field)				\
+	for((var) = STAILQ_FIRST((head));				\
+	   (var);							\
+	   (var) = STAILQ_NEXT((var), field))
+
+#define	STAILQ_INIT(head) do {						\
+	STAILQ_FIRST((head)) = NULL;					\
+	(head)->stqh_last = &STAILQ_FIRST((head));			\
+} while (0)
+
+#define	STAILQ_INSERT_AFTER(head, tqelm, elm, field) do {		\
+	if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
+		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
+	STAILQ_NEXT((tqelm), field) = (elm);				\
+} while (0)
+
+#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
+	if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL)	\
+		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
+	STAILQ_FIRST((head)) = (elm);					\
+} while (0)
+
+#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
+	STAILQ_NEXT((elm), field) = NULL;				\
+	STAILQ_LAST((head)) = (elm);					\
+	(head)->stqh_last = &STAILQ_NEXT((elm), field);			\
+} while (0)
+
+#define	STAILQ_LAST(head)	(*(head)->stqh_last)
+
+#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
+
+#define	STAILQ_REMOVE(head, elm, type, field) do {			\
+	if (STAILQ_FIRST((head)) == (elm)) {				\
+		STAILQ_REMOVE_HEAD(head, field);			\
+	}								\
+	else {								\
+		struct type *curelm = STAILQ_FIRST((head));		\
+		while (STAILQ_NEXT(curelm, field) != (elm))		\
+			curelm = STAILQ_NEXT(curelm, field);		\
+		if ((STAILQ_NEXT(curelm, field) =			\
+		     STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
+			(head)->stqh_last = &STAILQ_NEXT((curelm), field);\
+	}								\
+} while (0)
+
+#define	STAILQ_REMOVE_HEAD(head, field) do {				\
+	if ((STAILQ_FIRST((head)) =					\
+	     STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL)		\
+		(head)->stqh_last = &STAILQ_FIRST((head));		\
+} while (0)
+
+#define	STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do {			\
+	if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL)	\
+		(head)->stqh_last = &STAILQ_FIRST((head));		\
+} while (0)
+
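+/*
+ * Usage sketch (illustrative, hypothetical types): a FIFO built from the
+ * singly-linked tail queue macros above.
+ *
+ *	struct cmd {
+ *		int tag;
+ *		STAILQ_ENTRY(cmd) links;
+ *	};
+ *	STAILQ_HEAD(cmd_queue, cmd) pending = STAILQ_HEAD_INITIALIZER(pending);
+ *
+ *	STAILQ_INSERT_TAIL(&pending, new_cmd, links);	enqueue at the tail
+ *	oldest = STAILQ_FIRST(&pending);		oldest entry
+ *	STAILQ_REMOVE_HEAD(&pending, links);		dequeue it
+ */
+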
+/*
+ * List declarations.
+ */
+#define	LIST_HEAD(name, type)						\
+struct name {								\
+	struct type *lh_first;	/* first element */			\
+}
+
+#define	LIST_HEAD_INITIALIZER(head)					\
+	{ NULL }
+
+#define	LIST_ENTRY(type)						\
+struct {								\
+	struct type *le_next;	/* next element */			\
+	struct type **le_prev;	/* address of previous next element */	\
+}
+
+/*
+ * List functions.
+ */
+
+#define	LIST_EMPTY(head)	((head)->lh_first == NULL)
+
+#define	LIST_FIRST(head)	((head)->lh_first)
+
+#define	LIST_FOREACH(var, head, field)					\
+	for ((var) = LIST_FIRST((head));				\
+	    (var);							\
+	    (var) = LIST_NEXT((var), field))
+
+#define	LIST_INIT(head) do {						\
+	LIST_FIRST((head)) = NULL;					\
+} while (0)
+
+#define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
+	if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
+		LIST_NEXT((listelm), field)->field.le_prev =		\
+		    &LIST_NEXT((elm), field);				\
+	LIST_NEXT((listelm), field) = (elm);				\
+	(elm)->field.le_prev = &LIST_NEXT((listelm), field);		\
+} while (0)
+
+#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
+	(elm)->field.le_prev = (listelm)->field.le_prev;		\
+	LIST_NEXT((elm), field) = (listelm);				\
+	*(listelm)->field.le_prev = (elm);				\
+	(listelm)->field.le_prev = &LIST_NEXT((elm), field);		\
+} while (0)
+
+#define	LIST_INSERT_HEAD(head, elm, field) do {				\
+	if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL)	\
+		LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
+	LIST_FIRST((head)) = (elm);					\
+	(elm)->field.le_prev = &LIST_FIRST((head));			\
+} while (0)
+
+#define	LIST_NEXT(elm, field)	((elm)->field.le_next)
+
+#define	LIST_REMOVE(elm, field) do {					\
+	if (LIST_NEXT((elm), field) != NULL)				\
+		LIST_NEXT((elm), field)->field.le_prev = 		\
+		    (elm)->field.le_prev;				\
+	*(elm)->field.le_prev = LIST_NEXT((elm), field);		\
+} while (0)
+
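+/*
+ * Usage sketch (illustrative, hypothetical types): doubly-linked entries let
+ * an arbitrary element be removed in O(1) without walking the list.
+ *
+ *	struct waiter {
+ *		int id;
+ *		LIST_ENTRY(waiter) links;
+ *	};
+ *	LIST_HEAD(waiter_list, waiter) waiters = LIST_HEAD_INITIALIZER(waiters);
+ *
+ *	LIST_INSERT_HEAD(&waiters, w, links);
+ *	LIST_FOREACH(w, &waiters, links)
+ *		...inspect w...
+ *	LIST_REMOVE(w, links);		no head pointer needed for removal
+ */
+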
+/*
+ * Tail queue declarations.
+ */
+#define	TAILQ_HEAD(name, type)						\
+struct name {								\
+	struct type *tqh_first;	/* first element */			\
+	struct type **tqh_last;	/* addr of last next element */		\
+}
+
+#define	TAILQ_HEAD_INITIALIZER(head)					\
+	{ NULL, &(head).tqh_first }
+
+#define	TAILQ_ENTRY(type)						\
+struct {								\
+	struct type *tqe_next;	/* next element */			\
+	struct type **tqe_prev;	/* address of previous next element */	\
+}
+
+/*
+ * Tail queue functions.
+ */
+#define	TAILQ_EMPTY(head)	((head)->tqh_first == NULL)
+
+#define	TAILQ_FIRST(head)	((head)->tqh_first)
+
+#define	TAILQ_FOREACH(var, head, field)					\
+	for ((var) = TAILQ_FIRST((head));				\
+	    (var);							\
+	    (var) = TAILQ_NEXT((var), field))
+
+#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
+	for ((var) = TAILQ_LAST((head), headname);			\
+	    (var);							\
+	    (var) = TAILQ_PREV((var), headname, field))
+
+#define	TAILQ_INIT(head) do {						\
+	TAILQ_FIRST((head)) = NULL;					\
+	(head)->tqh_last = &TAILQ_FIRST((head));			\
+} while (0)
+
+#define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
+		TAILQ_NEXT((elm), field)->field.tqe_prev = 		\
+		    &TAILQ_NEXT((elm), field);				\
+	else								\
+		(head)->tqh_last = &TAILQ_NEXT((elm), field);		\
+	TAILQ_NEXT((listelm), field) = (elm);				\
+	(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field);		\
+} while (0)
+
+#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
+	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
+	TAILQ_NEXT((elm), field) = (listelm);				\
+	*(listelm)->field.tqe_prev = (elm);				\
+	(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field);		\
+} while (0)
+
+#define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
+	if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL)	\
+		TAILQ_FIRST((head))->field.tqe_prev =			\
+		    &TAILQ_NEXT((elm), field);				\
+	else								\
+		(head)->tqh_last = &TAILQ_NEXT((elm), field);		\
+	TAILQ_FIRST((head)) = (elm);					\
+	(elm)->field.tqe_prev = &TAILQ_FIRST((head));			\
+} while (0)
+
+#define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
+	TAILQ_NEXT((elm), field) = NULL;				\
+	(elm)->field.tqe_prev = (head)->tqh_last;			\
+	*(head)->tqh_last = (elm);					\
+	(head)->tqh_last = &TAILQ_NEXT((elm), field);			\
+} while (0)
+
+#define	TAILQ_LAST(head, headname)					\
+	(*(((struct headname *)((head)->tqh_last))->tqh_last))
+
+#define	TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+#define	TAILQ_PREV(elm, headname, field)				\
+	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+
+#define	TAILQ_REMOVE(head, elm, field) do {				\
+	if ((TAILQ_NEXT((elm), field)) != NULL)				\
+		TAILQ_NEXT((elm), field)->field.tqe_prev = 		\
+		    (elm)->field.tqe_prev;				\
+	else								\
+		(head)->tqh_last = (elm)->field.tqe_prev;		\
+	*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field);		\
+} while (0)
+
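+/*
+ * Usage sketch (illustrative, hypothetical types): a tail queue supports
+ * insertion at both ends and traversal in either direction.
+ *
+ *	struct scb {
+ *		int tag;
+ *		TAILQ_ENTRY(scb) links;
+ *	};
+ *	TAILQ_HEAD(scb_tailq, scb) free_scbs = TAILQ_HEAD_INITIALIZER(free_scbs);
+ *
+ *	TAILQ_INSERT_TAIL(&free_scbs, scb, links);
+ *	TAILQ_FOREACH(scb, &free_scbs, links)
+ *		...forward walk...
+ *	TAILQ_FOREACH_REVERSE(scb, &free_scbs, scb_tailq, links)
+ *		...backward walk...
+ *	TAILQ_REMOVE(&free_scbs, scb, links);
+ */
+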
+/*
+ * Circular queue declarations.
+ */
+#define	CIRCLEQ_HEAD(name, type)					\
+struct name {								\
+	struct type *cqh_first;		/* first element */		\
+	struct type *cqh_last;		/* last element */		\
+}
+
+#define	CIRCLEQ_HEAD_INITIALIZER(head)					\
+	{ (void *)&(head), (void *)&(head) }
+
+#define	CIRCLEQ_ENTRY(type)						\
+struct {								\
+	struct type *cqe_next;		/* next element */		\
+	struct type *cqe_prev;		/* previous element */		\
+}
+
+/*
+ * Circular queue functions.
+ */
+#define	CIRCLEQ_EMPTY(head)	((head)->cqh_first == (void *)(head))
+
+#define	CIRCLEQ_FIRST(head)	((head)->cqh_first)
+
+#define	CIRCLEQ_FOREACH(var, head, field)				\
+	for ((var) = CIRCLEQ_FIRST((head));				\
+	    (var) != (void *)(head);					\
+	    (var) = CIRCLEQ_NEXT((var), field))
+
+#define	CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
+	for ((var) = CIRCLEQ_LAST((head));				\
+	    (var) != (void *)(head);					\
+	    (var) = CIRCLEQ_PREV((var), field))
+
+#define	CIRCLEQ_INIT(head) do {						\
+	CIRCLEQ_FIRST((head)) = (void *)(head);				\
+	CIRCLEQ_LAST((head)) = (void *)(head);				\
+} while (0)
+
+#define	CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	CIRCLEQ_NEXT((elm), field) = CIRCLEQ_NEXT((listelm), field);	\
+	CIRCLEQ_PREV((elm), field) = (listelm);				\
+	if (CIRCLEQ_NEXT((listelm), field) == (void *)(head))		\
+		CIRCLEQ_LAST((head)) = (elm);				\
+	else								\
+		CIRCLEQ_PREV(CIRCLEQ_NEXT((listelm), field), field) = (elm);\
+	CIRCLEQ_NEXT((listelm), field) = (elm);				\
+} while (0)
+
+#define	CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
+	CIRCLEQ_NEXT((elm), field) = (listelm);				\
+	CIRCLEQ_PREV((elm), field) = CIRCLEQ_PREV((listelm), field);	\
+	if (CIRCLEQ_PREV((listelm), field) == (void *)(head))		\
+		CIRCLEQ_FIRST((head)) = (elm);				\
+	else								\
+		CIRCLEQ_NEXT(CIRCLEQ_PREV((listelm), field), field) = (elm);\
+	CIRCLEQ_PREV((listelm), field) = (elm);				\
+} while (0)
+
+#define	CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
+	CIRCLEQ_NEXT((elm), field) = CIRCLEQ_FIRST((head));		\
+	CIRCLEQ_PREV((elm), field) = (void *)(head);			\
+	if (CIRCLEQ_LAST((head)) == (void *)(head))			\
+		CIRCLEQ_LAST((head)) = (elm);				\
+	else								\
+		CIRCLEQ_PREV(CIRCLEQ_FIRST((head)), field) = (elm);	\
+	CIRCLEQ_FIRST((head)) = (elm);					\
+} while (0)
+
+#define	CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
+	CIRCLEQ_NEXT((elm), field) = (void *)(head);			\
+	CIRCLEQ_PREV((elm), field) = CIRCLEQ_LAST((head));		\
+	if (CIRCLEQ_FIRST((head)) == (void *)(head))			\
+		CIRCLEQ_FIRST((head)) = (elm);				\
+	else								\
+		CIRCLEQ_NEXT(CIRCLEQ_LAST((head)), field) = (elm);	\
+	CIRCLEQ_LAST((head)) = (elm);					\
+} while (0)
+
+#define	CIRCLEQ_LAST(head)	((head)->cqh_last)
+
+#define	CIRCLEQ_NEXT(elm,field)	((elm)->field.cqe_next)
+
+#define	CIRCLEQ_PREV(elm,field)	((elm)->field.cqe_prev)
+
+#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
+	if (CIRCLEQ_NEXT((elm), field) == (void *)(head))		\
+		CIRCLEQ_LAST((head)) = CIRCLEQ_PREV((elm), field);	\
+	else								\
+		CIRCLEQ_PREV(CIRCLEQ_NEXT((elm), field), field) =	\
+		    CIRCLEQ_PREV((elm), field);				\
+	if (CIRCLEQ_PREV((elm), field) == (void *)(head))		\
+		CIRCLEQ_FIRST((head)) = CIRCLEQ_NEXT((elm), field);	\
+	else								\
+		CIRCLEQ_NEXT(CIRCLEQ_PREV((elm), field), field) =	\
+		    CIRCLEQ_NEXT((elm), field);				\
+} while (0)
+
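+/*
+ * Usage sketch (illustrative, hypothetical types): a circular queue's
+ * end-of-list sentinel is the head itself, which is why traversal compares
+ * against (void *)(head) rather than NULL.
+ *
+ *	struct job {
+ *		int id;
+ *		CIRCLEQ_ENTRY(job) links;
+ *	};
+ *	CIRCLEQ_HEAD(job_ring, job) ring = CIRCLEQ_HEAD_INITIALIZER(ring);
+ *
+ *	CIRCLEQ_INSERT_TAIL(&ring, job, links);
+ *	CIRCLEQ_FOREACH(job, &ring, links)
+ *		...visit each job...
+ */
+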
+#endif /* !_SYS_QUEUE_H_ */
diff --git a/drivers/scsi/aic7xxx/scsi_iu.h b/drivers/scsi/aic7xxx/scsi_iu.h
new file mode 100644
index 0000000..0eafd3c
--- /dev/null
+++ b/drivers/scsi/aic7xxx/scsi_iu.h
@@ -0,0 +1,39 @@
+/*
+ * This file is in the public domain.
+ */
+#ifndef	_SCSI_SCSI_IU_H
+#define _SCSI_SCSI_IU_H 1
+
+struct scsi_status_iu_header
+{
+	u_int8_t reserved[2];
+	u_int8_t flags;
+#define	SIU_SNSVALID 0x2
+#define	SIU_RSPVALID 0x1
+	u_int8_t status;
+	u_int8_t sense_length[4];
+	u_int8_t pkt_failures_length[4];
+	u_int8_t pkt_failures[1];
+};
+
+#define SIU_PKTFAIL_OFFSET(siu) 12
+#define SIU_PKTFAIL_CODE(siu) (scsi_4btoul((siu)->pkt_failures) & 0xFF)
+#define		SIU_PFC_NONE			0
+#define		SIU_PFC_CIU_FIELDS_INVALID	2
+#define		SIU_PFC_TMF_NOT_SUPPORTED	4
+#define		SIU_PFC_TMF_FAILED		5
+#define		SIU_PFC_INVALID_TYPE_CODE	6
+#define		SIU_PFC_ILLEGAL_REQUEST		7
+#define SIU_SENSE_OFFSET(siu)				\
+    (12 + (((siu)->flags & SIU_RSPVALID)		\
+	? scsi_4btoul((siu)->pkt_failures_length)	\
+	: 0))
+
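+/*
+ * Usage sketch (illustrative, hypothetical variables): locating the sense
+ * bytes inside a received status IU with the macros above.
+ *
+ *	struct scsi_status_iu_header *siu = (void *)iu_buffer;
+ *
+ *	if (siu->flags & SIU_SNSVALID) {
+ *		uint8_t *sense = (uint8_t *)siu + SIU_SENSE_OFFSET(siu);
+ *		uint32_t sense_len = scsi_4btoul(siu->sense_length);
+ *		...copy sense_len bytes of sense data...
+ *	}
+ */
+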
+#define	SIU_TASKMGMT_NONE		0x00
+#define	SIU_TASKMGMT_ABORT_TASK		0x01
+#define	SIU_TASKMGMT_ABORT_TASK_SET	0x02
+#define	SIU_TASKMGMT_CLEAR_TASK_SET	0x04
+#define	SIU_TASKMGMT_LUN_RESET		0x08
+#define	SIU_TASKMGMT_TARGET_RESET	0x20
+#define	SIU_TASKMGMT_CLEAR_ACA		0x40
+#endif /*_SCSI_SCSI_IU_H*/
diff --git a/drivers/scsi/aic7xxx/scsi_message.h b/drivers/scsi/aic7xxx/scsi_message.h
new file mode 100644
index 0000000..75811e2
--- /dev/null
+++ b/drivers/scsi/aic7xxx/scsi_message.h
@@ -0,0 +1,70 @@
+/*
+ * This file is in the public domain.
+ * $FreeBSD: src/sys/cam/scsi/scsi_message.h,v 1.2 2000/05/01 20:21:29 peter Exp $
+ */
+
+/* Messages (1 byte) */		     /* I/T (M)andatory or (O)ptional */
+#define MSG_CMDCOMPLETE		0x00 /* M/M */
+#define MSG_TASK_COMPLETE	0x00 /* M/M */ /* SPI3 Terminology */
+#define MSG_EXTENDED		0x01 /* O/O */
+#define MSG_SAVEDATAPOINTER	0x02 /* O/O */
+#define MSG_RESTOREPOINTERS	0x03 /* O/O */
+#define MSG_DISCONNECT		0x04 /* O/O */
+#define MSG_INITIATOR_DET_ERR	0x05 /* M/M */
+#define MSG_ABORT		0x06 /* O/M */
+#define MSG_ABORT_TASK_SET	0x06 /* O/M */ /* SPI3 Terminology */
+#define MSG_MESSAGE_REJECT	0x07 /* M/M */
+#define MSG_NOOP		0x08 /* M/M */
+#define MSG_PARITY_ERROR	0x09 /* M/M */
+#define MSG_LINK_CMD_COMPLETE	0x0a /* O/O */
+#define MSG_LINK_CMD_COMPLETEF	0x0b /* O/O */
+#define MSG_BUS_DEV_RESET	0x0c /* O/M */
+#define MSG_TARGET_RESET	0x0c /* O/M */ /* SPI3 Terminology */
+#define MSG_ABORT_TAG		0x0d /* O/O */
+#define MSG_ABORT_TASK		0x0d /* O/O */ /* SPI3 Terminology */
+#define MSG_CLEAR_QUEUE		0x0e /* O/O */
+#define MSG_CLEAR_TASK_SET	0x0e /* O/O */ /* SPI3 Terminology */
+#define MSG_INIT_RECOVERY	0x0f /* O/O */ /* Deprecated in SPI3 */
+#define MSG_REL_RECOVERY	0x10 /* O/O */ /* Deprecated in SPI3 */
+#define MSG_TERM_IO_PROC	0x11 /* O/O */ /* Deprecated in SPI3 */
+#define MSG_CLEAR_ACA		0x16 /* O/O */ /* SPI3 */
+#define MSG_LOGICAL_UNIT_RESET	0x17 /* O/O */ /* SPI3 */
+#define MSG_QAS_REQUEST		0x55 /* O/O */ /* SPI3 */
+
+/* Messages (2 byte) */
+#define MSG_SIMPLE_Q_TAG	0x20 /* O/O */
+#define MSG_SIMPLE_TASK		0x20 /* O/O */ /* SPI3 Terminology */
+#define MSG_HEAD_OF_Q_TAG	0x21 /* O/O */
+#define MSG_HEAD_OF_QUEUE_TASK	0x21 /* O/O */ /* SPI3 Terminology */
+#define MSG_ORDERED_Q_TAG	0x22 /* O/O */
+#define MSG_ORDERED_TASK	0x22 /* O/O */ /* SPI3 Terminology */
+#define MSG_IGN_WIDE_RESIDUE	0x23 /* O/O */
+#define MSG_ACA_TASK		0x24 /* O/O */ /* SPI3 */
+
+/* Identify message */		     /* M/M */	
+#define MSG_IDENTIFYFLAG	0x80 
+#define MSG_IDENTIFY_DISCFLAG	0x40 
+#define MSG_IDENTIFY(lun, disc)	(((disc) ? 0xc0 : MSG_IDENTIFYFLAG) | (lun))
+#define MSG_ISIDENTIFY(m)	((m) & MSG_IDENTIFYFLAG)
+#define MSG_IDENTIFY_LUNMASK	0x3F 
+
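+/*
+ * Illustrative example: building and decoding the identify byte for LUN 3
+ * with disconnection permitted.
+ *
+ *	uint8_t msg = MSG_IDENTIFY(3, 1);	yields 0xc3
+ *
+ *	...MSG_ISIDENTIFY(msg) is non-zero...
+ *	...(msg & MSG_IDENTIFY_LUNMASK) == 3...
+ */
+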
+/* Extended messages (opcode and length) */
+#define MSG_EXT_SDTR		0x01
+#define MSG_EXT_SDTR_LEN	0x03
+
+#define MSG_EXT_WDTR		0x03
+#define MSG_EXT_WDTR_LEN	0x02
+#define MSG_EXT_WDTR_BUS_8_BIT	0x00
+#define MSG_EXT_WDTR_BUS_16_BIT	0x01
+#define MSG_EXT_WDTR_BUS_32_BIT	0x02 /* Deprecated in SPI3 */
+
+#define MSG_EXT_PPR		0x04 /* SPI3 */
+#define MSG_EXT_PPR_LEN		0x06
+#define	MSG_EXT_PPR_PCOMP_EN	0x80
+#define	MSG_EXT_PPR_RTI		0x40
+#define	MSG_EXT_PPR_RD_STRM	0x20
+#define	MSG_EXT_PPR_WR_FLOW	0x10
+#define	MSG_EXT_PPR_HOLD_MCS	0x08
+#define	MSG_EXT_PPR_QAS_REQ	0x04
+#define	MSG_EXT_PPR_DT_REQ	0x02
+#define MSG_EXT_PPR_IU_REQ	0x01