Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/drivers/scsi/sym53c8xx_2/Makefile b/drivers/scsi/sym53c8xx_2/Makefile
new file mode 100644
index 0000000..873e8ce
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/Makefile
@@ -0,0 +1,4 @@
+# Makefile for the NCR/SYMBIOS/LSI 53C8XX PCI SCSI controllers driver.
+
+sym53c8xx-objs := sym_fw.o sym_glue.o sym_hipd.o sym_malloc.o sym_nvram.o
+obj-$(CONFIG_SCSI_SYM53C8XX_2) := sym53c8xx.o
diff --git a/drivers/scsi/sym53c8xx_2/sym53c8xx.h b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
new file mode 100644
index 0000000..4811037
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
@@ -0,0 +1,217 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef SYM53C8XX_H
+#define SYM53C8XX_H
+
+#include <linux/config.h>
+
+/*
+ *  DMA addressing mode.
+ *
+ *  0 : 32 bit addressing for all chips.
+ *  1 : 40 bit addressing when supported by chip.
+ *  2 : 64 bit addressing when supported by chip,
+ *      limited to 16 segments of 4 GB -> 64 GB max.
+ */
+#define	SYM_CONF_DMA_ADDRESSING_MODE CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE
+
+/*
+ *  NVRAM support.
+ */
+#if 1
+#define SYM_CONF_NVRAM_SUPPORT		(1)
+#endif
+
+/*
+ *  These options are not tunable from 'make config'
+ */
+#if 1
+#define	SYM_LINUX_PROC_INFO_SUPPORT
+#define SYM_LINUX_USER_COMMAND_SUPPORT
+#define SYM_LINUX_USER_INFO_SUPPORT
+#define SYM_LINUX_DEBUG_CONTROL_SUPPORT
+#endif
+
+/*
+ *  Also handle old NCR chips if this is not (0).
+ */
+#define SYM_CONF_GENERIC_SUPPORT	(1)
+
+/*
+ *  Allow tags from 2 to 256, default 8
+ */
+#ifndef CONFIG_SCSI_SYM53C8XX_MAX_TAGS
+#define CONFIG_SCSI_SYM53C8XX_MAX_TAGS	(8)
+#endif
+
+#if	CONFIG_SCSI_SYM53C8XX_MAX_TAGS < 2
+#define SYM_CONF_MAX_TAG	(2)
+#elif	CONFIG_SCSI_SYM53C8XX_MAX_TAGS > 256
+#define SYM_CONF_MAX_TAG	(256)
+#else
+#define	SYM_CONF_MAX_TAG	CONFIG_SCSI_SYM53C8XX_MAX_TAGS
+#endif
+
+#ifndef	CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS
+#define	CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS	SYM_CONF_MAX_TAG
+#endif
+
+/*
+ *  Anyway, we configure the driver for at least 64 tags per LUN. :)
+ */
+#if	SYM_CONF_MAX_TAG <= 64
+#define SYM_CONF_MAX_TAG_ORDER	(6)
+#elif	SYM_CONF_MAX_TAG <= 128
+#define SYM_CONF_MAX_TAG_ORDER	(7)
+#else
+#define SYM_CONF_MAX_TAG_ORDER	(8)
+#endif
+
+/*
+ *  Max number of SG entries.
+ */
+#define SYM_CONF_MAX_SG		(96)
+
+/*
+ *  Driver setup structure.
+ *
+ *  This structure is initialized from linux config options.
+ *  It can be overridden at boot-up by the boot command line.
+ */
+struct sym_driver_setup {
+	u_short	max_tag;
+	u_char	burst_order;
+	u_char	scsi_led;
+	u_char	scsi_diff;
+	u_char	irq_mode;
+	u_char	scsi_bus_check;
+	u_char	host_id;
+
+	u_char	verbose;
+	u_char	settle_delay;
+	u_char	use_nvram;
+	u_long	excludes[8];
+	char	tag_ctrl[100];
+};
+
+#define SYM_SETUP_MAX_TAG		sym_driver_setup.max_tag
+#define SYM_SETUP_BURST_ORDER		sym_driver_setup.burst_order
+#define SYM_SETUP_SCSI_LED		sym_driver_setup.scsi_led
+#define SYM_SETUP_SCSI_DIFF		sym_driver_setup.scsi_diff
+#define SYM_SETUP_IRQ_MODE		sym_driver_setup.irq_mode
+#define SYM_SETUP_SCSI_BUS_CHECK	sym_driver_setup.scsi_bus_check
+#define SYM_SETUP_HOST_ID		sym_driver_setup.host_id
+#define boot_verbose			sym_driver_setup.verbose
+
+/*
+ *  Initial setup.
+ *
+ *  Can be overridden at startup by a command line.
+ */
+#define SYM_LINUX_DRIVER_SETUP	{				\
+	.max_tag	= CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS,	\
+	.burst_order	= 7,					\
+	.scsi_led	= 1,					\
+	.scsi_diff	= 1,					\
+	.irq_mode	= 0,					\
+	.scsi_bus_check	= 1,					\
+	.host_id	= 7,					\
+	.verbose	= 0,					\
+	.settle_delay	= 3,					\
+	.use_nvram	= 1,					\
+}
+
+extern struct sym_driver_setup sym_driver_setup;
+extern unsigned int sym_debug_flags;
+#define DEBUG_FLAGS	sym_debug_flags
+
+/*
+ *  Max number of targets.
+ *  Maximum is 16 and you are advised not to change this value.
+ */
+#ifndef SYM_CONF_MAX_TARGET
+#define SYM_CONF_MAX_TARGET	(16)
+#endif
+
+/*
+ *  Max number of logical units.
+ *  SPI-2 allows up to 64 logical units, but in real life, targets
+ *  that implement more than 7 logical units are pretty rare.
+ *  Anyway, the cost of accepting up to 64 logical units is low in
+ *  this driver, thus going with the maximum is acceptable.
+ */
+#ifndef SYM_CONF_MAX_LUN
+#define SYM_CONF_MAX_LUN	(64)
+#endif
+
+/*
+ *  Max number of IO control blocks queued to the controller.
+ *  Each entry needs 8 bytes and the queues are allocated contiguously.
+ *  Since we do not want to allocate more than a page, the theoretical
+ *  maximum is PAGE_SIZE/8. For safety, we announce a bit less to the
+ *  access method. :)
+ *  When not supplied, as is suggested, the driver computes a
+ *  good value for this parameter.
+ */
+/* #define SYM_CONF_MAX_START	(PAGE_SIZE/8 - 16) */
+
+/*
+ *  Support for Immediate Arbitration.
+ *  Not advised.
+ */
+/* #define SYM_CONF_IARB_SUPPORT */
+
+/*
+ *  Only relevant if IARB support is configured.
+ *  - Max number of successive settings of IARB hints.
+ *  - Set IARB on arbitration lost.
+ */
+#define SYM_CONF_IARB_MAX 3
+#define SYM_CONF_SET_IARB_ON_ARB_LOST 1
+
+/*
+ *  Returning wrong residuals may cause problems.
+ *  When zero, this define tells the driver to
+ *  always return 0 as the transfer residual.
+ *  Btw, all my tests of residuals have succeeded.
+ */
+#define SYM_SETUP_RESIDUAL_SUPPORT 1
+
+#endif /* SYM53C8XX_H */
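
For illustration, the preprocessor ladder above first clamps the configured tag count into the 2..256 range and then derives a queue-sizing order that never drops below 6, i.e. room for at least 64 tags per LUN. A minimal user-space sketch of that same logic (the helper name sym_clamp_tags is hypothetical, not part of the driver):

#include <stdio.h>

/* Hypothetical sketch mirroring the SYM_CONF_MAX_TAG and
 * SYM_CONF_MAX_TAG_ORDER preprocessor logic from sym53c8xx.h. */
static unsigned int sym_clamp_tags(unsigned int requested, unsigned int *order)
{
	/* Clamp the requested tag count into the supported 2..256 range. */
	unsigned int tags = requested < 2 ? 2 : (requested > 256 ? 256 : requested);

	/* The queue order never drops below 6: at least 64 tags per LUN. */
	*order = tags <= 64 ? 6 : (tags <= 128 ? 7 : 8);
	return tags;
}

int main(void)
{
	unsigned int order;
	unsigned int tags = sym_clamp_tags(8, &order);	/* the default of 8 tags */

	printf("tags=%u, order=%u\n", tags, order);	/* prints: tags=8, order=6 */
	return 0;
}
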
diff --git a/drivers/scsi/sym53c8xx_2/sym_defs.h b/drivers/scsi/sym53c8xx_2/sym_defs.h
new file mode 100644
index 0000000..15bb891
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_defs.h
@@ -0,0 +1,792 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef SYM_DEFS_H
+#define SYM_DEFS_H
+
+#define SYM_VERSION "2.2.0"
+#define SYM_DRIVER_NAME	"sym-" SYM_VERSION
+
+/*
+ *	SYM53C8XX device features descriptor.
+ */
+struct sym_chip {
+	u_short	device_id;
+	u_short	revision_id;
+	char	*name;
+	u_char	burst_max;	/* log-base-2 of max burst */
+	u_char	offset_max;
+	u_char	nr_divisor;
+	u_char	lp_probe_bit;
+	u_int	features;
+#define FE_LED0		(1<<0)
+#define FE_WIDE		(1<<1)    /* Wide data transfers */
+#define FE_ULTRA	(1<<2)	  /* Ultra speed 20Mtrans/sec */
+#define FE_ULTRA2	(1<<3)	  /* Ultra 2 - 40 Mtrans/sec */
+#define FE_DBLR		(1<<4)	  /* Clock doubler present */
+#define FE_QUAD		(1<<5)	  /* Clock quadrupler present */
+#define FE_ERL		(1<<6)    /* Enable read line */
+#define FE_CLSE		(1<<7)    /* Cache line size enable */
+#define FE_WRIE		(1<<8)    /* Write & Invalidate enable */
+#define FE_ERMP		(1<<9)    /* Enable read multiple */
+#define FE_BOF		(1<<10)   /* Burst opcode fetch */
+#define FE_DFS		(1<<11)   /* DMA fifo size */
+#define FE_PFEN		(1<<12)   /* Prefetch enable */
+#define FE_LDSTR	(1<<13)   /* Load/Store supported */
+#define FE_RAM		(1<<14)   /* On chip RAM present */
+#define FE_VARCLK	(1<<15)   /* Clock frequency may vary */
+#define FE_RAM8K	(1<<16)   /* On chip RAM sized 8Kb */
+#define FE_64BIT	(1<<17)   /* 64-bit PCI BUS interface */
+#define FE_IO256	(1<<18)   /* Requires full 256 bytes in PCI space */
+#define FE_NOPM		(1<<19)   /* Scripts handles phase mismatch */
+#define FE_LEDC		(1<<20)   /* Hardware control of LED */
+#define FE_ULTRA3	(1<<21)	  /* Ultra 3 - 80 Mtrans/sec DT */
+#define FE_66MHZ	(1<<22)	  /* 66MHz PCI support */
+#define FE_CRC		(1<<23)	  /* CRC support */
+#define FE_DIFF		(1<<24)	  /* SCSI HVD support */
+#define FE_DFBC		(1<<25)	  /* Have DFBC register */
+#define FE_LCKFRQ	(1<<26)	  /* Have LCKFRQ */
+#define FE_C10		(1<<27)	  /* Various C10 core (mis)features */
+#define FE_U3EN		(1<<28)	  /* U3EN bit usable */
+#define FE_DAC		(1<<29)	  /* Support PCI DAC (64 bit addressing) */
+#define FE_ISTAT1 	(1<<30)   /* Have ISTAT1, MBOX0, MBOX1 registers */
+
+#define FE_CACHE_SET	(FE_ERL|FE_CLSE|FE_WRIE|FE_ERMP)
+#define FE_CACHE0_SET	(FE_CACHE_SET & ~FE_ERL)
+};
+
+/*
+ *	SYM53C8XX IO register data structure.
+ */
+struct sym_reg {
+/*00*/  u8	nc_scntl0;	/* full arb., ena parity, par->ATN  */
+
+/*01*/  u8	nc_scntl1;	/* no reset                         */
+        #define   ISCON   0x10  /* connected to scsi		    */
+        #define   CRST    0x08  /* force reset                      */
+        #define   IARB    0x02  /* immediate arbitration            */
+
+/*02*/  u8	nc_scntl2;	/* no disconnect expected           */
+	#define   SDU     0x80  /* cmd: disconnect will raise error */
+	#define   CHM     0x40  /* sta: chained mode                */
+	#define   WSS     0x08  /* sta: wide scsi send           [W]*/
+	#define   WSR     0x01  /* sta: wide scsi received       [W]*/
+
+/*03*/  u8	nc_scntl3;	/* cnf system clock dependent       */
+	#define   EWS     0x08  /* cmd: enable wide scsi         [W]*/
+	#define   ULTRA   0x80  /* cmd: ULTRA enable                */
+				/* bits 0-2, 7 rsvd for C1010       */
+
+/*04*/  u8	nc_scid;	/* cnf host adapter scsi address    */
+	#define   RRE     0x40  /* r/w:e enable response to resel.  */
+	#define   SRE     0x20  /* r/w:e enable response to select  */
+
+/*05*/  u8	nc_sxfer;	/* ### Sync speed and count         */
+				/* bits 6-7 rsvd for C1010          */
+
+/*06*/  u8	nc_sdid;	/* ### Destination-ID               */
+
+/*07*/  u8	nc_gpreg;	/* ??? IO-Pins                      */
+
+/*08*/  u8	nc_sfbr;	/* ### First byte received          */
+
+/*09*/  u8	nc_socl;
+	#define   CREQ	  0x80	/* r/w: SCSI-REQ                    */
+	#define   CACK	  0x40	/* r/w: SCSI-ACK                    */
+	#define   CBSY	  0x20	/* r/w: SCSI-BSY                    */
+	#define   CSEL	  0x10	/* r/w: SCSI-SEL                    */
+	#define   CATN	  0x08	/* r/w: SCSI-ATN                    */
+	#define   CMSG	  0x04	/* r/w: SCSI-MSG                    */
+	#define   CC_D	  0x02	/* r/w: SCSI-C_D                    */
+	#define   CI_O	  0x01	/* r/w: SCSI-I_O                    */
+
+/*0a*/  u8	nc_ssid;
+
+/*0b*/  u8	nc_sbcl;
+
+/*0c*/  u8	nc_dstat;
+        #define   DFE     0x80  /* sta: dma fifo empty              */
+        #define   MDPE    0x40  /* int: master data parity error    */
+        #define   BF      0x20  /* int: script: bus fault           */
+        #define   ABRT    0x10  /* int: script: command aborted     */
+        #define   SSI     0x08  /* int: script: single step         */
+        #define   SIR     0x04  /* int: script: interrupt instruct. */
+        #define   IID     0x01  /* int: script: illegal instruct.   */
+
+/*0d*/  u8	nc_sstat0;
+        #define   ILF     0x80  /* sta: data in SIDL register lsb   */
+        #define   ORF     0x40  /* sta: data in SODR register lsb   */
+        #define   OLF     0x20  /* sta: data in SODL register lsb   */
+        #define   AIP     0x10  /* sta: arbitration in progress     */
+        #define   LOA     0x08  /* sta: arbitration lost            */
+        #define   WOA     0x04  /* sta: arbitration won             */
+        #define   IRST    0x02  /* sta: scsi reset signal           */
+        #define   SDP     0x01  /* sta: scsi parity signal          */
+
+/*0e*/  u8	nc_sstat1;
+	#define   FF3210  0xf0	/* sta: bytes in the scsi fifo      */
+
+/*0f*/  u8	nc_sstat2;
+        #define   ILF1    0x80  /* sta: data in SIDL register msb[W]*/
+        #define   ORF1    0x40  /* sta: data in SODR register msb[W]*/
+        #define   OLF1    0x20  /* sta: data in SODL register msb[W]*/
+        #define   DM      0x04  /* sta: DIFFSENS mismatch (895/6 only) */
+        #define   LDSC    0x02  /* sta: disconnect & reconnect      */
+
+/*10*/  u8	nc_dsa;		/* --> Base page                    */
+/*11*/  u8	nc_dsa1;
+/*12*/  u8	nc_dsa2;
+/*13*/  u8	nc_dsa3;
+
+/*14*/  u8	nc_istat;	/* --> Main Command and status      */
+        #define   CABRT   0x80  /* cmd: abort current operation     */
+        #define   SRST    0x40  /* mod: reset chip                  */
+        #define   SIGP    0x20  /* r/w: message from host to script */
+        #define   SEM     0x10  /* r/w: message between host + script  */
+        #define   CON     0x08  /* sta: connected to scsi           */
+        #define   INTF    0x04  /* sta: int on the fly (reset by wr)*/
+        #define   SIP     0x02  /* sta: scsi-interrupt              */
+        #define   DIP     0x01  /* sta: host/script interrupt       */
+
+/*15*/  u8	nc_istat1;	/* 896 only */
+        #define   FLSH    0x04  /* sta: chip is flushing            */
+        #define   SCRUN   0x02  /* sta: scripts are running         */
+        #define   SIRQD   0x01  /* r/w: disable INT pin             */
+
+/*16*/  u8	nc_mbox0;	/* 896 only */
+/*17*/  u8	nc_mbox1;	/* 896 only */
+
+/*18*/	u8	nc_ctest0;
+/*19*/  u8	nc_ctest1;
+
+/*1a*/  u8	nc_ctest2;
+	#define   CSIGP   0x40
+				/* bits 0-2,7 rsvd for C1010        */
+
+/*1b*/  u8	nc_ctest3;
+	#define   FLF     0x08  /* cmd: flush dma fifo              */
+	#define   CLF	  0x04	/* cmd: clear dma fifo		    */
+	#define   FM      0x02  /* mod: fetch pin mode              */
+	#define   WRIE    0x01  /* mod: write and invalidate enable */
+				/* bits 4-7 rsvd for C1010          */
+
+/*1c*/  u32	nc_temp;	/* ### Temporary stack              */
+
+/*20*/	u8	nc_dfifo;
+/*21*/  u8	nc_ctest4;
+	#define   BDIS    0x80  /* mod: burst disable               */
+	#define   MPEE    0x08  /* mod: master parity error enable  */
+
+/*22*/  u8	nc_ctest5;
+	#define   DFS     0x20  /* mod: dma fifo size               */
+				/* bits 0-1, 3-7 rsvd for C1010     */
+
+/*23*/  u8	nc_ctest6;
+
+/*24*/  u32	nc_dbc;		/* ### Byte count and command       */
+/*28*/  u32	nc_dnad;	/* ### Next command register        */
+/*2c*/  u32	nc_dsp;		/* --> Script Pointer               */
+/*30*/  u32	nc_dsps;	/* --> Script pointer save/opcode#2 */
+
+/*34*/  u8	nc_scratcha;	/* Temporary register a            */
+/*35*/  u8	nc_scratcha1;
+/*36*/  u8	nc_scratcha2;
+/*37*/  u8	nc_scratcha3;
+
+/*38*/  u8	nc_dmode;
+	#define   BL_2    0x80  /* mod: burst length shift value +2 */
+	#define   BL_1    0x40  /* mod: burst length shift value +1 */
+	#define   ERL     0x08  /* mod: enable read line            */
+	#define   ERMP    0x04  /* mod: enable read multiple        */
+	#define   BOF     0x02  /* mod: burst op code fetch         */
+
+/*39*/  u8	nc_dien;
+/*3a*/  u8	nc_sbr;
+
+/*3b*/  u8	nc_dcntl;	/* --> Script execution control     */
+	#define   CLSE    0x80  /* mod: cache line size enable      */
+	#define   PFF     0x40  /* cmd: pre-fetch flush             */
+	#define   PFEN    0x20  /* mod: pre-fetch enable            */
+	#define   SSM     0x10  /* mod: single step mode            */
+	#define   IRQM    0x08  /* mod: irq mode (1 = totem pole !) */
+	#define   STD     0x04  /* cmd: start dma mode              */
+	#define   IRQD    0x02  /* mod: irq disable                 */
+ 	#define	  NOCOM   0x01	/* cmd: protect sfbr while reselect */
+				/* bits 0-1 rsvd for C1010          */
+
+/*3c*/  u32	nc_adder;
+
+/*40*/  u16	nc_sien;	/* -->: interrupt enable            */
+/*42*/  u16	nc_sist;	/* <--: interrupt status            */
+        #define   SBMC    0x1000/* sta: SCSI Bus Mode Change (895/6 only) */
+        #define   STO     0x0400/* sta: timeout (select)            */
+        #define   GEN     0x0200/* sta: timeout (general)           */
+        #define   HTH     0x0100/* sta: timeout (handshake)         */
+        #define   MA      0x80  /* sta: phase mismatch              */
+        #define   CMP     0x40  /* sta: arbitration complete        */
+        #define   SEL     0x20  /* sta: selected by another device  */
+        #define   RSL     0x10  /* sta: reselected by another device*/
+        #define   SGE     0x08  /* sta: gross error (over/underflow)*/
+        #define   UDC     0x04  /* sta: unexpected disconnect       */
+        #define   RST     0x02  /* sta: scsi bus reset detected     */
+        #define   PAR     0x01  /* sta: scsi parity error           */
+
+/*44*/  u8	nc_slpar;
+/*45*/  u8	nc_swide;
+/*46*/  u8	nc_macntl;
+/*47*/  u8	nc_gpcntl;
+/*48*/  u8	nc_stime0;	/* cmd: timeout for select&handshake*/
+/*49*/  u8	nc_stime1;	/* cmd: timeout user defined        */
+/*4a*/  u16	nc_respid;	/* sta: Reselect-IDs                */
+
+/*4c*/  u8	nc_stest0;
+
+/*4d*/  u8	nc_stest1;
+	#define   SCLK    0x80	/* Use the PCI clock as SCSI clock	*/
+	#define   DBLEN   0x08	/* clock doubler running		*/
+	#define   DBLSEL  0x04	/* clock doubler selected		*/
+  
+
+/*4e*/  u8	nc_stest2;
+	#define   ROF     0x40	/* reset scsi offset (after gross error!) */
+	#define   EXT     0x02  /* extended filtering                     */
+
+/*4f*/  u8	nc_stest3;
+	#define   TE     0x80	/* c: tolerAnt enable */
+	#define   HSC    0x20	/* c: Halt SCSI Clock */
+	#define   CSF    0x02	/* c: clear scsi fifo */
+
+/*50*/  u16	nc_sidl;	/* Lowlevel: latched from scsi data */
+/*52*/  u8	nc_stest4;
+	#define   SMODE  0xc0	/* SCSI bus mode      (895/6 only) */
+	#define    SMODE_HVD 0x40	/* High Voltage Differential       */
+	#define    SMODE_SE  0x80	/* Single Ended                    */
+	#define    SMODE_LVD 0xc0	/* Low Voltage Differential        */
+	#define   LCKFRQ 0x20	/* Frequency Lock (895/6 only)     */
+				/* bits 0-5 rsvd for C1010         */
+
+/*53*/  u8	nc_53_;
+/*54*/  u16	nc_sodl;	/* Lowlevel: data out to scsi data  */
+/*56*/	u8	nc_ccntl0;	/* Chip Control 0 (896)             */
+	#define   ENPMJ  0x80	/* Enable Phase Mismatch Jump       */
+	#define   PMJCTL 0x40	/* Phase Mismatch Jump Control      */
+	#define   ENNDJ  0x20	/* Enable Non Data PM Jump          */
+	#define   DISFC  0x10	/* Disable Auto FIFO Clear          */
+	#define   DILS   0x02	/* Disable Internal Load/Store      */
+	#define   DPR    0x01	/* Disable Pipe Req                 */
+
+/*57*/	u8	nc_ccntl1;	/* Chip Control 1 (896)             */
+	#define   ZMOD   0x80	/* High Impedance Mode              */
+	#define   DDAC   0x08	/* Disable Dual Address Cycle       */
+	#define   XTIMOD 0x04	/* 64-bit Table Ind. Indexing Mode  */
+	#define   EXTIBMV 0x02	/* Enable 64-bit Table Ind. BMOV    */
+	#define   EXDBMV 0x01	/* Enable 64-bit Direct BMOV        */
+
+/*58*/  u16	nc_sbdl;	/* Lowlevel: data from scsi data    */
+/*5a*/  u16	nc_5a_;
+
+/*5c*/  u8	nc_scr0;	/* Working register B               */
+/*5d*/  u8	nc_scr1;
+/*5e*/  u8	nc_scr2;
+/*5f*/  u8	nc_scr3;
+
+/*60*/  u8	nc_scrx[64];	/* Working register C-R             */
+/*a0*/	u32	nc_mmrs;	/* Memory Move Read Selector        */
+/*a4*/	u32	nc_mmws;	/* Memory Move Write Selector       */
+/*a8*/	u32	nc_sfs;		/* Script Fetch Selector            */
+/*ac*/	u32	nc_drs;		/* DSA Relative Selector            */
+/*b0*/	u32	nc_sbms;	/* Static Block Move Selector       */
+/*b4*/	u32	nc_dbms;	/* Dynamic Block Move Selector      */
+/*b8*/	u32	nc_dnad64;	/* DMA Next Address 64              */
+/*bc*/	u16	nc_scntl4;	/* C1010 only                       */
+	#define   U3EN    0x80	/* Enable Ultra 3                   */
+	#define   AIPCKEN 0x40  /* AIP checking enable              */
+				/* Also enable AIP generation on C10-33*/
+	#define   XCLKH_DT 0x08 /* Extra clock of data hold on DT edge */
+	#define   XCLKH_ST 0x04 /* Extra clock of data hold on ST edge */
+	#define   XCLKS_DT 0x02 /* Extra clock of data set  on DT edge */
+	#define   XCLKS_ST 0x01 /* Extra clock of data set  on ST edge */
+/*be*/	u8	nc_aipcntl0;	/* AIP Control 0 C1010 only         */
+/*bf*/	u8	nc_aipcntl1;	/* AIP Control 1 C1010 only         */
+	#define DISAIP  0x08	/* Disable AIP generation C10-66 only  */
+/*c0*/	u32	nc_pmjad1;	/* Phase Mismatch Jump Address 1    */
+/*c4*/	u32	nc_pmjad2;	/* Phase Mismatch Jump Address 2    */
+/*c8*/	u8	nc_rbc;		/* Remaining Byte Count             */
+/*c9*/	u8	nc_rbc1;
+/*ca*/	u8	nc_rbc2;
+/*cb*/	u8	nc_rbc3;
+
+/*cc*/	u8	nc_ua;		/* Updated Address                  */
+/*cd*/	u8	nc_ua1;
+/*ce*/	u8	nc_ua2;
+/*cf*/	u8	nc_ua3;
+/*d0*/	u32	nc_esa;		/* Entry Storage Address            */
+/*d4*/	u8	nc_ia;		/* Instruction Address              */
+/*d5*/	u8	nc_ia1;
+/*d6*/	u8	nc_ia2;
+/*d7*/	u8	nc_ia3;
+/*d8*/	u32	nc_sbc;		/* SCSI Byte Count (3 bytes only)   */
+/*dc*/	u32	nc_csbc;	/* Cumulative SCSI Byte Count       */
+                                /* Following for C1010 only         */
+/*e0*/	u16    nc_crcpad;	/* CRC Value                        */
+/*e2*/	u8     nc_crccntl0;	/* CRC control register             */
+	#define   SNDCRC  0x10	/* Send CRC Request                 */
+/*e3*/	u8     nc_crccntl1;	/* CRC control register             */
+/*e4*/	u32    nc_crcdata;	/* CRC data register                */
+/*e8*/	u32    nc_e8_;
+/*ec*/	u32    nc_ec_;
+/*f0*/	u16    nc_dfbc;		/* DMA FIFO byte count              */ 
+};
+
+/*-----------------------------------------------------------
+ *
+ *	Utility macros for the script.
+ *
+ *-----------------------------------------------------------
+ */
+
+#define REGJ(p,r) (offsetof(struct sym_reg, p ## r))
+#define REG(r) REGJ (nc_, r)
+
+/*-----------------------------------------------------------
+ *
+ *	SCSI phases
+ *
+ *-----------------------------------------------------------
+ */
+
+#define	SCR_DATA_OUT	0x00000000
+#define	SCR_DATA_IN	0x01000000
+#define	SCR_COMMAND	0x02000000
+#define	SCR_STATUS	0x03000000
+#define	SCR_DT_DATA_OUT	0x04000000
+#define	SCR_DT_DATA_IN	0x05000000
+#define SCR_MSG_OUT	0x06000000
+#define SCR_MSG_IN      0x07000000
+/* DT phases are illegal for non-Ultra3 mode */
+#define SCR_ILG_OUT	0x04000000
+#define SCR_ILG_IN	0x05000000
+
+/*-----------------------------------------------------------
+ *
+ *	Data transfer via SCSI.
+ *
+ *-----------------------------------------------------------
+ *
+ *	MOVE_ABS (LEN)
+ *	<<start address>>
+ *
+ *	MOVE_IND (LEN)
+ *	<<dnad_offset>>
+ *
+ *	MOVE_TBL
+ *	<<dnad_offset>>
+ *
+ *-----------------------------------------------------------
+ */
+
+#define OPC_MOVE          0x08000000
+
+#define SCR_MOVE_ABS(l) ((0x00000000 | OPC_MOVE) | (l))
+/* #define SCR_MOVE_IND(l) ((0x20000000 | OPC_MOVE) | (l)) */
+#define SCR_MOVE_TBL     (0x10000000 | OPC_MOVE)
+
+#define SCR_CHMOV_ABS(l) ((0x00000000) | (l))
+/* #define SCR_CHMOV_IND(l) ((0x20000000) | (l)) */
+#define SCR_CHMOV_TBL     (0x10000000)
+
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+/* We steal the `indirect addressing' flag for target mode MOVE in scripts */
+
+#define OPC_TCHMOVE        0x08000000
+
+#define SCR_TCHMOVE_ABS(l) ((0x20000000 | OPC_TCHMOVE) | (l))
+#define SCR_TCHMOVE_TBL     (0x30000000 | OPC_TCHMOVE)
+
+#define SCR_TMOV_ABS(l)    ((0x20000000) | (l))
+#define SCR_TMOV_TBL        (0x30000000)
+#endif
+
+struct sym_tblmove {
+        u32  size;
+        u32  addr;
+};
+
+/*-----------------------------------------------------------
+ *
+ *	Selection
+ *
+ *-----------------------------------------------------------
+ *
+ *	SEL_ABS | SCR_ID (0..15)    [ | REL_JMP]
+ *	<<alternate_address>>
+ *
+ *	SEL_TBL | << dnad_offset>>  [ | REL_JMP]
+ *	<<alternate_address>>
+ *
+ *-----------------------------------------------------------
+ */
+
+#define	SCR_SEL_ABS	0x40000000
+#define	SCR_SEL_ABS_ATN	0x41000000
+#define	SCR_SEL_TBL	0x42000000
+#define	SCR_SEL_TBL_ATN	0x43000000
+
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+#define	SCR_RESEL_ABS     0x40000000
+#define	SCR_RESEL_ABS_ATN 0x41000000
+#define	SCR_RESEL_TBL     0x42000000
+#define	SCR_RESEL_TBL_ATN 0x43000000
+#endif
+
+struct sym_tblsel {
+        u_char  sel_scntl4;	/* C1010 only */
+        u_char  sel_sxfer;
+        u_char  sel_id;
+        u_char  sel_scntl3;
+};
+
+#define SCR_JMP_REL     0x04000000
+#define SCR_ID(id)	(((u32)(id)) << 16)
+
+/*-----------------------------------------------------------
+ *
+ *	Waiting for Disconnect or Reselect
+ *
+ *-----------------------------------------------------------
+ *
+ *	WAIT_DISC
+ *	dummy: <<alternate_address>>
+ *
+ *	WAIT_RESEL
+ *	<<alternate_address>>
+ *
+ *-----------------------------------------------------------
+ */
+
+#define	SCR_WAIT_DISC	0x48000000
+#define SCR_WAIT_RESEL  0x50000000
+
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+#define	SCR_DISCONNECT	0x48000000
+#endif
+
+/*-----------------------------------------------------------
+ *
+ *	Bit Set / Reset
+ *
+ *-----------------------------------------------------------
+ *
+ *	SET (flags {|.. })
+ *
+ *	CLR (flags {|.. })
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_SET(f)     (0x58000000 | (f))
+#define SCR_CLR(f)     (0x60000000 | (f))
+
+#define	SCR_CARRY	0x00000400
+#define	SCR_TRG		0x00000200
+#define	SCR_ACK		0x00000040
+#define	SCR_ATN		0x00000008
+
+
+/*-----------------------------------------------------------
+ *
+ *	Memory to memory move
+ *
+ *-----------------------------------------------------------
+ *
+ *	COPY (bytecount)
+ *	<< source_address >>
+ *	<< destination_address >>
+ *
+ *	SCR_COPY   sets the NO FLUSH option by default.
+ *	SCR_COPY_F does not set this option.
+ *
+ *	For chips which do not support this option,
+ *	sym_fw_bind_script() will remove this bit.
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_NO_FLUSH 0x01000000
+
+#define SCR_COPY(n) (0xc0000000 | SCR_NO_FLUSH | (n))
+#define SCR_COPY_F(n) (0xc0000000 | (n))
+
+/*-----------------------------------------------------------
+ *
+ *	Register move and binary operations
+ *
+ *-----------------------------------------------------------
+ *
+ *	SFBR_REG (reg, op, data)        reg  = SFBR op data
+ *	<< 0 >>
+ *
+ *	REG_SFBR (reg, op, data)        SFBR = reg op data
+ *	<< 0 >>
+ *
+ *	REG_REG  (reg, op, data)        reg  = reg op data
+ *	<< 0 >>
+ *
+ *-----------------------------------------------------------
+ *
+ *	On 825A, 875, 895 and 896 chips the content
+ *	of the SFBR register can be used as data (SCR_SFBR_DATA).
+ *	The 896 has additional IO registers starting at
+ *	offset 0x80. Bit 7 of the register offset is stored in
+ *	bit 7 of the first DWORD of the SCRIPTS instruction.
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_REG_OFS(ofs) ((((ofs) & 0x7f) << 16ul) + ((ofs) & 0x80)) 
+
+#define SCR_SFBR_REG(reg,op,data) \
+        (0x68000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+#define SCR_REG_SFBR(reg,op,data) \
+        (0x70000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+#define SCR_REG_REG(reg,op,data) \
+        (0x78000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+
+#define      SCR_LOAD   0x00000000
+#define      SCR_SHL    0x01000000
+#define      SCR_OR     0x02000000
+#define      SCR_XOR    0x03000000
+#define      SCR_AND    0x04000000
+#define      SCR_SHR    0x05000000
+#define      SCR_ADD    0x06000000
+#define      SCR_ADDC   0x07000000
+
+#define      SCR_SFBR_DATA   (0x00800000>>8ul)	/* Use SFBR as data */
+
+/*-----------------------------------------------------------
+ *
+ *	FROM_REG (reg)		  SFBR = reg
+ *	<< 0 >>
+ *
+ *	TO_REG	 (reg)		  reg  = SFBR
+ *	<< 0 >>
+ *
+ *	LOAD_REG (reg, data)	  reg  = <data>
+ *	<< 0 >>
+ *
+ *	LOAD_SFBR(data) 	  SFBR = <data>
+ *	<< 0 >>
+ *
+ *-----------------------------------------------------------
+ */
+
+#define	SCR_FROM_REG(reg) \
+	SCR_REG_SFBR(reg,SCR_OR,0)
+
+#define	SCR_TO_REG(reg) \
+	SCR_SFBR_REG(reg,SCR_OR,0)
+
+#define	SCR_LOAD_REG(reg,data) \
+	SCR_REG_REG(reg,SCR_LOAD,data)
+
+#define SCR_LOAD_SFBR(data) \
+        (SCR_REG_SFBR (gpreg, SCR_LOAD, data))
+
+/*-----------------------------------------------------------
+ *
+ *	LOAD  from memory   to register.
+ *	STORE from register to memory.
+ *
+ *	Only supported by 810A, 860, 825A, 875, 895 and 896.
+ *
+ *-----------------------------------------------------------
+ *
+ *	LOAD_ABS (LEN)
+ *	<<start address>>
+ *
+ *	LOAD_REL (LEN)        (DSA relative)
+ *	<<dsa_offset>>
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_REG_OFS2(ofs) (((ofs) & 0xff) << 16ul)
+#define SCR_NO_FLUSH2	0x02000000
+#define SCR_DSA_REL2	0x10000000
+
+#define SCR_LOAD_R(reg, how, n) \
+        (0xe1000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
+
+#define SCR_STORE_R(reg, how, n) \
+        (0xe0000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
+
+#define SCR_LOAD_ABS(reg, n)	SCR_LOAD_R(reg, SCR_NO_FLUSH2, n)
+#define SCR_LOAD_REL(reg, n)	SCR_LOAD_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2, n)
+#define SCR_LOAD_ABS_F(reg, n)	SCR_LOAD_R(reg, 0, n)
+#define SCR_LOAD_REL_F(reg, n)	SCR_LOAD_R(reg, SCR_DSA_REL2, n)
+
+#define SCR_STORE_ABS(reg, n)	SCR_STORE_R(reg, SCR_NO_FLUSH2, n)
+#define SCR_STORE_REL(reg, n)	SCR_STORE_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2,n)
+#define SCR_STORE_ABS_F(reg, n)	SCR_STORE_R(reg, 0, n)
+#define SCR_STORE_REL_F(reg, n)	SCR_STORE_R(reg, SCR_DSA_REL2, n)
+
+
+/*-----------------------------------------------------------
+ *
+ *	Jump / Call / Return / Interrupt
+ *
+ *-----------------------------------------------------------
+ *
+ *	JUMP            [ | IFTRUE/IFFALSE ( ... ) ]
+ *	<<address>>
+ *
+ *	JUMPR           [ | IFTRUE/IFFALSE ( ... ) ]
+ *	<<distance>>
+ *
+ *	CALL            [ | IFTRUE/IFFALSE ( ... ) ]
+ *	<<address>>
+ *
+ *	CALLR           [ | IFTRUE/IFFALSE ( ... ) ]
+ *	<<distance>>
+ *
+ *	RETURN          [ | IFTRUE/IFFALSE ( ... ) ]
+ *	<<dummy>>
+ *
+ *	INT             [ | IFTRUE/IFFALSE ( ... ) ]
+ *	<<ident>>
+ *
+ *	INT_FLY         [ | IFTRUE/IFFALSE ( ... ) ]
+ *	<<ident>>
+ *
+ *	Conditions:
+ *	     WHEN (phase)
+ *	     IF   (phase)
+ *	     CARRYSET
+ *	     DATA (data, mask)
+ *
+ *-----------------------------------------------------------
+ */
+
+#define SCR_NO_OP       0x80000000
+#define SCR_JUMP        0x80080000
+#define SCR_JUMP64      0x80480000
+#define SCR_JUMPR       0x80880000
+#define SCR_CALL        0x88080000
+#define SCR_CALLR       0x88880000
+#define SCR_RETURN      0x90080000
+#define SCR_INT         0x98080000
+#define SCR_INT_FLY     0x98180000
+
+#define IFFALSE(arg)   (0x00080000 | (arg))
+#define IFTRUE(arg)    (0x00000000 | (arg))
+
+#define WHEN(phase)    (0x00030000 | (phase))
+#define IF(phase)      (0x00020000 | (phase))
+
+#define DATA(D)        (0x00040000 | ((D) & 0xff))
+#define MASK(D,M)      (0x00040000 | (((M ^ 0xff) & 0xff) << 8ul)|((D) & 0xff))
+
+#define CARRYSET       (0x00200000)
+
+/*-----------------------------------------------------------
+ *
+ *	SCSI  constants.
+ *
+ *-----------------------------------------------------------
+ */
+
+/*
+ *	Messages
+ */
+
+#define	M_COMPLETE	COMMAND_COMPLETE
+#define	M_EXTENDED	EXTENDED_MESSAGE
+#define	M_SAVE_DP	SAVE_POINTERS
+#define	M_RESTORE_DP	RESTORE_POINTERS
+#define	M_DISCONNECT	DISCONNECT
+#define	M_ID_ERROR	INITIATOR_ERROR
+#define	M_ABORT		ABORT_TASK_SET
+#define	M_REJECT	MESSAGE_REJECT
+#define	M_NOOP		NOP
+#define	M_PARITY	MSG_PARITY_ERROR
+#define	M_LCOMPLETE	LINKED_CMD_COMPLETE
+#define	M_FCOMPLETE	LINKED_FLG_CMD_COMPLETE
+#define	M_RESET		TARGET_RESET
+#define	M_ABORT_TAG	ABORT_TASK
+#define	M_CLEAR_QUEUE	CLEAR_TASK_SET
+#define	M_INIT_REC	INITIATE_RECOVERY
+#define	M_REL_REC	RELEASE_RECOVERY
+#define	M_TERMINATE	(0x11)
+#define	M_SIMPLE_TAG	SIMPLE_QUEUE_TAG
+#define	M_HEAD_TAG	HEAD_OF_QUEUE_TAG
+#define	M_ORDERED_TAG	ORDERED_QUEUE_TAG
+#define	M_IGN_RESIDUE	IGNORE_WIDE_RESIDUE
+
+#define	M_X_MODIFY_DP	EXTENDED_MODIFY_DATA_POINTER
+#define	M_X_SYNC_REQ	EXTENDED_SDTR
+#define	M_X_WIDE_REQ	EXTENDED_WDTR
+#define	M_X_PPR_REQ	EXTENDED_PPR
+
+/*
+ *	PPR protocol options
+ */
+#define	PPR_OPT_IU	(0x01)
+#define	PPR_OPT_DT	(0x02)
+#define	PPR_OPT_QAS	(0x04)
+#define PPR_OPT_MASK	(0x07)
+
+/*
+ *	Status
+ */
+
+#define	S_GOOD		SAM_STAT_GOOD
+#define	S_CHECK_COND	SAM_STAT_CHECK_CONDITION
+#define	S_COND_MET	SAM_STAT_CONDITION_MET
+#define	S_BUSY		SAM_STAT_BUSY
+#define	S_INT		SAM_STAT_INTERMEDIATE
+#define	S_INT_COND_MET	SAM_STAT_INTERMEDIATE_CONDITION_MET
+#define	S_CONFLICT	SAM_STAT_RESERVATION_CONFLICT
+#define	S_TERMINATED	SAM_STAT_COMMAND_TERMINATED
+#define	S_QUEUE_FULL	SAM_STAT_TASK_SET_FULL
+#define	S_ILLEGAL	(0xff)
+
+#endif /* defined SYM_DEFS_H */
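
The SCR_* opcode, phase and condition macros above are meant to be XOR-combined into single 32-bit SCRIPTS instruction words, as sym_fw.c further down does with SCR_CHMOV_TBL ^ SCR_DATA_IN. A small stand-alone sketch of the idea (the instruction contents and printed values are illustrative only, and the typedefs stand in for the kernel integer types that sym_defs.h assumes):

#include <stdio.h>
#include <sys/types.h>		/* u_char, u_short, u_int used by sym_defs.h */

typedef unsigned char  u8;	/* stand-ins for the kernel fixed-size types */
typedef unsigned short u16;
typedef unsigned int   u32;

#include "sym_defs.h"		/* the SCR_* macros defined above */

int main(void)
{
	/* A SCRIPTS instruction is an opcode DWORD followed by argument DWORD(s). */
	u32 chmov_data_in = SCR_CHMOV_TBL ^ SCR_DATA_IN;	  /* table-indirect CHMOV, DATA IN phase */
	u32 jump_on_msgin = SCR_JUMP ^ IFTRUE(WHEN(SCR_MSG_IN)); /* jump when the next phase is MSG IN */

	printf("0x%08x 0x%08x\n", chmov_data_in, jump_on_msgin);  /* 0x11000000 0x870b0000 */
	return 0;
}
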
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c
new file mode 100644
index 0000000..fd36cf9
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.c
@@ -0,0 +1,568 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifdef __FreeBSD__
+#include <dev/sym/sym_glue.h>
+#else
+#include "sym_glue.h"
+#endif
+
+/*
+ *  Macros used for all firmwares.
+ */
+#define	SYM_GEN_A(s, label)	((short) offsetof(s, label)),
+#define	SYM_GEN_B(s, label)	((short) offsetof(s, label)),
+#define	SYM_GEN_Z(s, label)	((short) offsetof(s, label)),
+#define	PADDR_A(label)		SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label)
+#define	PADDR_B(label)		SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label)
+
+
+#if	SYM_CONF_GENERIC_SUPPORT
+/*
+ *  Allocate firmware #1 script area.
+ */
+#define	SYM_FWA_SCR		sym_fw1a_scr
+#define	SYM_FWB_SCR		sym_fw1b_scr
+#define	SYM_FWZ_SCR		sym_fw1z_scr
+#ifdef __FreeBSD__
+#include <dev/sym/sym_fw1.h>
+#else
+#include "sym_fw1.h"
+#endif
+static struct sym_fwa_ofs sym_fw1a_ofs = {
+	SYM_GEN_FW_A(struct SYM_FWA_SCR)
+};
+static struct sym_fwb_ofs sym_fw1b_ofs = {
+	SYM_GEN_FW_B(struct SYM_FWB_SCR)
+#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
+	SYM_GEN_B(struct SYM_FWB_SCR, data_io)
+#endif
+};
+static struct sym_fwz_ofs sym_fw1z_ofs = {
+	SYM_GEN_FW_Z(struct SYM_FWZ_SCR)
+};
+#undef	SYM_FWA_SCR
+#undef	SYM_FWB_SCR
+#undef	SYM_FWZ_SCR
+#endif	/* SYM_CONF_GENERIC_SUPPORT */
+
+/*
+ *  Allocate firmware #2 script area.
+ */
+#define	SYM_FWA_SCR		sym_fw2a_scr
+#define	SYM_FWB_SCR		sym_fw2b_scr
+#define	SYM_FWZ_SCR		sym_fw2z_scr
+#ifdef __FreeBSD__
+#include <dev/sym/sym_fw2.h>
+#else
+#include "sym_fw2.h"
+#endif
+static struct sym_fwa_ofs sym_fw2a_ofs = {
+	SYM_GEN_FW_A(struct SYM_FWA_SCR)
+};
+static struct sym_fwb_ofs sym_fw2b_ofs = {
+	SYM_GEN_FW_B(struct SYM_FWB_SCR)
+#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
+	SYM_GEN_B(struct SYM_FWB_SCR, data_io)
+#endif
+	SYM_GEN_B(struct SYM_FWB_SCR, start64)
+	SYM_GEN_B(struct SYM_FWB_SCR, pm_handle)
+};
+static struct sym_fwz_ofs sym_fw2z_ofs = {
+	SYM_GEN_FW_Z(struct SYM_FWZ_SCR)
+};
+#undef	SYM_FWA_SCR
+#undef	SYM_FWB_SCR
+#undef	SYM_FWZ_SCR
+
+#undef	SYM_GEN_A
+#undef	SYM_GEN_B
+#undef	SYM_GEN_Z
+#undef	PADDR_A
+#undef	PADDR_B
+
+#if	SYM_CONF_GENERIC_SUPPORT
+/*
+ *  Patch routine for firmware #1.
+ */
+static void
+sym_fw1_patch(struct sym_hcb *np)
+{
+	struct sym_fw1a_scr *scripta0;
+	struct sym_fw1b_scr *scriptb0;
+
+	scripta0 = (struct sym_fw1a_scr *) np->scripta0;
+	scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;
+
+	/*
+	 *  Remove LED support if not needed.
+	 */
+	if (!(np->features & FE_LED0)) {
+		scripta0->idle[0]	= cpu_to_scr(SCR_NO_OP);
+		scripta0->reselected[0]	= cpu_to_scr(SCR_NO_OP);
+		scripta0->start[0]	= cpu_to_scr(SCR_NO_OP);
+	}
+
+#ifdef SYM_CONF_IARB_SUPPORT
+	/*
+	 *    If the user does not want to use IMMEDIATE ARBITRATION
+	 *    when we are reselected while attempting to arbitrate,
+	 *    patch the SCRIPTS accordingly with a SCRIPT NO_OP.
+	 */
+	if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
+		scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
+#endif
+	/*
+	 *  Patch some data in SCRIPTS.
+	 *  - start and done queue initial bus address.
+	 *  - target bus address table bus address.
+	 */
+	scriptb0->startpos[0]	= cpu_to_scr(np->squeue_ba);
+	scriptb0->done_pos[0]	= cpu_to_scr(np->dqueue_ba);
+	scriptb0->targtbl[0]	= cpu_to_scr(np->targtbl_ba);
+}
+#endif	/* SYM_CONF_GENERIC_SUPPORT */
+
+/*
+ *  Patch routine for firmware #2.
+ */
+static void
+sym_fw2_patch(struct sym_hcb *np)
+{
+	struct sym_fw2a_scr *scripta0;
+	struct sym_fw2b_scr *scriptb0;
+
+	scripta0 = (struct sym_fw2a_scr *) np->scripta0;
+	scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;
+
+	/*
+	 *  Remove LED support if not needed.
+	 */
+	if (!(np->features & FE_LED0)) {
+		scripta0->idle[0]	= cpu_to_scr(SCR_NO_OP);
+		scripta0->reselected[0]	= cpu_to_scr(SCR_NO_OP);
+		scripta0->start[0]	= cpu_to_scr(SCR_NO_OP);
+	}
+
+#if   SYM_CONF_DMA_ADDRESSING_MODE == 2
+	/*
+	 *  Remove useless 64 bit DMA specific SCRIPTS
+	 *  when this feature is not available.
+	 */
+	if (!np->use_dac) {
+		scripta0->is_dmap_dirty[0] = cpu_to_scr(SCR_NO_OP);
+		scripta0->is_dmap_dirty[1] = 0;
+		scripta0->is_dmap_dirty[2] = cpu_to_scr(SCR_NO_OP);
+		scripta0->is_dmap_dirty[3] = 0;
+	}
+#endif
+
+#ifdef SYM_CONF_IARB_SUPPORT
+	/*
+	 *    If the user does not want to use IMMEDIATE ARBITRATION
+	 *    when we are reselected while attempting to arbitrate,
+	 *    patch the SCRIPTS accordingly with a SCRIPT NO_OP.
+	 */
+	if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
+		scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
+#endif
+	/*
+	 *  Patch some variables in SCRIPTS.
+	 *  - start and done queue initial bus address.
+	 *  - target bus address table bus address.
+	 */
+	scriptb0->startpos[0]	= cpu_to_scr(np->squeue_ba);
+	scriptb0->done_pos[0]	= cpu_to_scr(np->dqueue_ba);
+	scriptb0->targtbl[0]	= cpu_to_scr(np->targtbl_ba);
+
+	/*
+	 *  Remove the load of SCNTL4 on reselection if not a C10.
+	 */
+	if (!(np->features & FE_C10)) {
+		scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP);
+		scripta0->resel_scntl4[1] = cpu_to_scr(0);
+	}
+
+	/*
+	 *  Remove a couple of work-arounds specific to C1010 if 
+	 *  they are not desirable. See `sym_fw2.h' for more details.
+	 */
+	if (!(np->device_id == PCI_DEVICE_ID_LSI_53C1010_66 &&
+	      np->revision_id < 0x1 &&
+	      np->pciclk_khz < 60000)) {
+		scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP);
+		scripta0->datao_phase[1] = cpu_to_scr(0);
+	}
+	if (!(np->device_id == PCI_DEVICE_ID_LSI_53C1010_33 &&
+	      /* np->revision_id < 0xff */ 1)) {
+		scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP);
+		scripta0->sel_done[1] = cpu_to_scr(0);
+	}
+
+	/*
+	 *  Patch some other variables in SCRIPTS.
+	 *  These ones are loaded by the SCRIPTS processor.
+	 */
+	scriptb0->pm0_data_addr[0] =
+		cpu_to_scr(np->scripta_ba + 
+			   offsetof(struct sym_fw2a_scr, pm0_data));
+	scriptb0->pm1_data_addr[0] =
+		cpu_to_scr(np->scripta_ba + 
+			   offsetof(struct sym_fw2a_scr, pm1_data));
+}
+
+/*
+ *  Fill the data area in scripts.
+ *  To be done for all firmwares.
+ */
+static void
+sym_fw_fill_data (u32 *in, u32 *out)
+{
+	int	i;
+
+	for (i = 0; i < SYM_CONF_MAX_SG; i++) {
+		*in++  = SCR_CHMOV_TBL ^ SCR_DATA_IN;
+		*in++  = offsetof (struct sym_dsb, data[i]);
+		*out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT;
+		*out++ = offsetof (struct sym_dsb, data[i]);
+	}
+}
+
+/*
+ *  Set up useful script bus addresses.
+ *  To be done for all firmwares.
+ */
+static void 
+sym_fw_setup_bus_addresses(struct sym_hcb *np, struct sym_fw *fw)
+{
+	u32 *pa;
+	u_short *po;
+	int i;
+
+	/*
+	 *  Build the bus address table for script A 
+	 *  from the script A offset table.
+	 */
+	po = (u_short *) fw->a_ofs;
+	pa = (u32 *) &np->fwa_bas;
+	for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++)
+		pa[i] = np->scripta_ba + po[i];
+
+	/*
+	 *  Same for script B.
+	 */
+	po = (u_short *) fw->b_ofs;
+	pa = (u32 *) &np->fwb_bas;
+	for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++)
+		pa[i] = np->scriptb_ba + po[i];
+
+	/*
+	 *  Same for script Z.
+	 */
+	po = (u_short *) fw->z_ofs;
+	pa = (u32 *) &np->fwz_bas;
+	for (i = 0 ; i < sizeof(np->fwz_bas)/sizeof(u32) ; i++)
+		pa[i] = np->scriptz_ba + po[i];
+}
+
+#if	SYM_CONF_GENERIC_SUPPORT
+/*
+ *  Setup routine for firmware #1.
+ */
+static void 
+sym_fw1_setup(struct sym_hcb *np, struct sym_fw *fw)
+{
+	struct sym_fw1a_scr *scripta0;
+	struct sym_fw1b_scr *scriptb0;
+
+	scripta0 = (struct sym_fw1a_scr *) np->scripta0;
+	scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;
+
+	/*
+	 *  Fill variable parts in scripts.
+	 */
+	sym_fw_fill_data(scripta0->data_in, scripta0->data_out);
+
+	/*
+	 *  Set up bus addresses used from the C code.
+	 */
+	sym_fw_setup_bus_addresses(np, fw);
+}
+#endif	/* SYM_CONF_GENERIC_SUPPORT */
+
+/*
+ *  Setup routine for firmware #2.
+ */
+static void 
+sym_fw2_setup(struct sym_hcb *np, struct sym_fw *fw)
+{
+	struct sym_fw2a_scr *scripta0;
+	struct sym_fw2b_scr *scriptb0;
+
+	scripta0 = (struct sym_fw2a_scr *) np->scripta0;
+	scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;
+
+	/*
+	 *  Fill variable parts in scripts.
+	 */
+	sym_fw_fill_data(scripta0->data_in, scripta0->data_out);
+
+	/*
+	 *  Set up bus addresses used from the C code.
+	 */
+	sym_fw_setup_bus_addresses(np, fw);
+}
+
+/*
+ *  Allocate firmware descriptors.
+ */
+#if	SYM_CONF_GENERIC_SUPPORT
+static struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic");
+#endif	/* SYM_CONF_GENERIC_SUPPORT */
+static struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based");
+
+/*
+ *  Find the most appropriate firmware for a chip.
+ */
+struct sym_fw * 
+sym_find_firmware(struct sym_chip *chip)
+{
+	if (chip->features & FE_LDSTR)
+		return &sym_fw2;
+#if	SYM_CONF_GENERIC_SUPPORT
+	else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_DAC)))
+		return &sym_fw1;
+#endif
+	else
+		return NULL;
+}
+
+/*
+ *  Bind a script to physical addresses.
+ */
+void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
+{
+	u32 opcode, new, old, tmp1, tmp2;
+	u32 *end, *cur;
+	int relocs;
+
+	cur = start;
+	end = start + len/4;
+
+	while (cur < end) {
+
+		opcode = *cur;
+
+		/*
+		 *  If we forget to change the length
+		 *  in scripts, a field will be
+		 *  padded with 0. This is an illegal
+		 *  command.
+		 */
+		if (opcode == 0) {
+			printf ("%s: ERROR0 IN SCRIPT at %d.\n",
+				sym_name(np), (int) (cur-start));
+			++cur;
+			continue;
+		}
+
+		/*
+		 *  We use the bogus value 0xf00ff00f ;-)
+		 *  to reserve data area in SCRIPTS.
+		 */
+		if (opcode == SCR_DATA_ZERO) {
+			*cur++ = 0;
+			continue;
+		}
+
+		if (DEBUG_FLAGS & DEBUG_SCRIPT)
+			printf ("%d:  <%x>\n", (int) (cur-start),
+				(unsigned)opcode);
+
+		/*
+		 *  We don't have to decode ALL commands
+		 */
+		switch (opcode >> 28) {
+		case 0xf:
+			/*
+			 *  LOAD / STORE DSA relative, don't relocate.
+			 */
+			relocs = 0;
+			break;
+		case 0xe:
+			/*
+			 *  LOAD / STORE absolute.
+			 */
+			relocs = 1;
+			break;
+		case 0xc:
+			/*
+			 *  COPY has TWO arguments.
+			 */
+			relocs = 2;
+			tmp1 = cur[1];
+			tmp2 = cur[2];
+			if ((tmp1 ^ tmp2) & 3) {
+				printf ("%s: ERROR1 IN SCRIPT at %d.\n",
+					sym_name(np), (int) (cur-start));
+			}
+			/*
+			 *  If PREFETCH feature not enabled, remove 
+			 *  the NO FLUSH bit if present.
+			 */
+			if ((opcode & SCR_NO_FLUSH) &&
+			    !(np->features & FE_PFEN)) {
+				opcode = (opcode & ~SCR_NO_FLUSH);
+			}
+			break;
+		case 0x0:
+			/*
+			 *  MOVE/CHMOV (absolute address)
+			 */
+			if (!(np->features & FE_WIDE))
+				opcode = (opcode | OPC_MOVE);
+			relocs = 1;
+			break;
+		case 0x1:
+			/*
+			 *  MOVE/CHMOV (table indirect)
+			 */
+			if (!(np->features & FE_WIDE))
+				opcode = (opcode | OPC_MOVE);
+			relocs = 0;
+			break;
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+		case 0x2:
+			/*
+			 *  MOVE/CHMOV in target role (absolute address)
+			 */
+			opcode &= ~0x20000000;
+			if (!(np->features & FE_WIDE))
+				opcode = (opcode & ~OPC_TCHMOVE);
+			relocs = 1;
+			break;
+		case 0x3:
+			/*
+			 *  MOVE/CHMOV in target role (table indirect)
+			 */
+			opcode &= ~0x20000000;
+			if (!(np->features & FE_WIDE))
+				opcode = (opcode & ~OPC_TCHMOVE);
+			relocs = 0;
+			break;
+#endif
+		case 0x8:
+			/*
+			 *  JUMP / CALL
+			 *  don't relocate if relative :-)
+			 */
+			if (opcode & 0x00800000)
+				relocs = 0;
+			else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/
+				relocs = 2;
+			else
+				relocs = 1;
+			break;
+		case 0x4:
+		case 0x5:
+		case 0x6:
+		case 0x7:
+			relocs = 1;
+			break;
+		default:
+			relocs = 0;
+			break;
+		}
+
+		/*
+		 *  Scriptify:) the opcode.
+		 */
+		*cur++ = cpu_to_scr(opcode);
+
+		/*
+		 *  If no relocation, assume 1 argument 
+		 *  and just scriptize:) it.
+		 */
+		if (!relocs) {
+			*cur = cpu_to_scr(*cur);
+			++cur;
+			continue;
+		}
+
+		/*
+		 *  Otherwise perform all needed relocations.
+		 */
+		while (relocs--) {
+			old = *cur;
+
+			switch (old & RELOC_MASK) {
+			case RELOC_REGISTER:
+				new = (old & ~RELOC_MASK) + np->mmio_ba;
+				break;
+			case RELOC_LABEL_A:
+				new = (old & ~RELOC_MASK) + np->scripta_ba;
+				break;
+			case RELOC_LABEL_B:
+				new = (old & ~RELOC_MASK) + np->scriptb_ba;
+				break;
+			case RELOC_SOFTC:
+				new = (old & ~RELOC_MASK) + np->hcb_ba;
+				break;
+			case 0:
+				/*
+				 *  Don't relocate a 0 address.
+				 *  These are mostly used for patched or
+				 *  self-modified script areas.
+				 */
+				if (old == 0) {
+					new = old;
+					break;
+				}
+				/* fall through */
+			default:
+				new = 0;
+				panic("sym_fw_bind_script: "
+				      "weird relocation %x\n", old);
+				break;
+			}
+
+			*cur++ = cpu_to_scr(new);
+		}
+	}
+}
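
The relocation loop at the end of sym_fw_bind_script() is the core of the binding step: the upper nibble of each script argument selects which bus address to add, and the remaining bits are an offset from that base. A condensed sketch of just that step (the helper name sym_bind_one_arg is illustrative, not a function of the driver):

/* Condensed form of the relocation switch above.  The real routine also
 * panics on an unknown relocation tag; this sketch simply keeps the value. */
static u32 sym_bind_one_arg(struct sym_hcb *np, u32 old)
{
	switch (old & RELOC_MASK) {
	case RELOC_REGISTER:		/* chip register: add the MMIO base */
		return (old & ~RELOC_MASK) + np->mmio_ba;
	case RELOC_LABEL_A:		/* label within script A */
		return (old & ~RELOC_MASK) + np->scripta_ba;
	case RELOC_LABEL_B:		/* label within script B */
		return (old & ~RELOC_MASK) + np->scriptb_ba;
	case RELOC_SOFTC:		/* field of the host control block */
		return (old & ~RELOC_MASK) + np->hcb_ba;
	default:			/* 0: patched/self-modified area, left as-is */
		return old;
	}
}

The bound value is then stored through cpu_to_scr() so that it ends up in the byte order the SCRIPTS processor expects.
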
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.h b/drivers/scsi/sym53c8xx_2/sym_fw.h
new file mode 100644
index 0000000..43f6810
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.h
@@ -0,0 +1,211 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef	SYM_FW_H
+#define	SYM_FW_H
+/*
+ *  Macro used to generate interfaces for script A.
+ */
+#define SYM_GEN_FW_A(s)							\
+	SYM_GEN_A(s, start)		SYM_GEN_A(s, getjob_begin)	\
+	SYM_GEN_A(s, getjob_end)					\
+	SYM_GEN_A(s, select)		SYM_GEN_A(s, wf_sel_done)	\
+	SYM_GEN_A(s, send_ident)					\
+	SYM_GEN_A(s, dispatch)		SYM_GEN_A(s, init)		\
+	SYM_GEN_A(s, clrack)		SYM_GEN_A(s, complete_error)	\
+	SYM_GEN_A(s, done)		SYM_GEN_A(s, done_end)		\
+	SYM_GEN_A(s, idle)		SYM_GEN_A(s, ungetjob)		\
+	SYM_GEN_A(s, reselect)						\
+	SYM_GEN_A(s, resel_tag)		SYM_GEN_A(s, resel_dsa)		\
+	SYM_GEN_A(s, resel_no_tag)					\
+	SYM_GEN_A(s, data_in)		SYM_GEN_A(s, data_in2)		\
+	SYM_GEN_A(s, data_out)		SYM_GEN_A(s, data_out2)		\
+	SYM_GEN_A(s, pm0_data)		SYM_GEN_A(s, pm1_data)
+
+/*
+ *  Macro used to generate interfaces for script B.
+ */
+#define SYM_GEN_FW_B(s)							\
+	SYM_GEN_B(s, no_data)						\
+	SYM_GEN_B(s, sel_for_abort)	SYM_GEN_B(s, sel_for_abort_1)	\
+	SYM_GEN_B(s, msg_bad)		SYM_GEN_B(s, msg_weird)		\
+	SYM_GEN_B(s, wdtr_resp)		SYM_GEN_B(s, send_wdtr)		\
+	SYM_GEN_B(s, sdtr_resp)		SYM_GEN_B(s, send_sdtr)		\
+	SYM_GEN_B(s, ppr_resp)		SYM_GEN_B(s, send_ppr)		\
+	SYM_GEN_B(s, nego_bad_phase)					\
+	SYM_GEN_B(s, ident_break) 	SYM_GEN_B(s, ident_break_atn)	\
+	SYM_GEN_B(s, sdata_in)		SYM_GEN_B(s, resel_bad_lun)	\
+	SYM_GEN_B(s, bad_i_t_l)		SYM_GEN_B(s, bad_i_t_l_q)	\
+	SYM_GEN_B(s, wsr_ma_helper)
+
+/*
+ *  Macro used to generate interfaces for script Z.
+ */
+#define SYM_GEN_FW_Z(s)							\
+	SYM_GEN_Z(s, snooptest)		SYM_GEN_Z(s, snoopend)
+
+/*
+ *  Generates structure interface that contains 
+ *  offsets within script A, B and Z.
+ */
+#define	SYM_GEN_A(s, label)	s label;
+#define	SYM_GEN_B(s, label)	s label;
+#define	SYM_GEN_Z(s, label)	s label;
+struct sym_fwa_ofs {
+	SYM_GEN_FW_A(u_short)
+};
+struct sym_fwb_ofs {
+	SYM_GEN_FW_B(u_short)
+#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
+	SYM_GEN_B(u_short, data_io)
+#endif
+	SYM_GEN_B(u_short, start64)
+	SYM_GEN_B(u_short, pm_handle)
+};
+struct sym_fwz_ofs {
+	SYM_GEN_FW_Z(u_short)
+};
+
+/*
+ *  Generates structure interface that contains 
+ *  bus addresses within script A, B and Z.
+ */
+struct sym_fwa_ba {
+	SYM_GEN_FW_A(u32)
+};
+struct sym_fwb_ba {
+	SYM_GEN_FW_B(u32)
+#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
+	SYM_GEN_B(u32, data_io)
+#endif
+	SYM_GEN_B(u32, start64);
+	SYM_GEN_B(u32, pm_handle);
+};
+struct sym_fwz_ba {
+	SYM_GEN_FW_Z(u32)
+};
+#undef	SYM_GEN_A
+#undef	SYM_GEN_B
+#undef	SYM_GEN_Z
+
+/*
+ *  Let cc know about the name of the controller data structure.
+ *  We need this for function prototype declarations just below.
+ */
+struct sym_hcb;
+
+/*
+ *  Generic structure that defines a firmware.
+ */ 
+struct sym_fw {
+	char	*name;		/* Name we want to print out	*/
+	u32	*a_base;	/* Pointer to script A template	*/
+	int	a_size;		/* Size of script A		*/
+	struct	sym_fwa_ofs
+		*a_ofs;		/* Useful offsets in script A	*/
+	u32	*b_base;	/* Pointer to script B template	*/
+	int	b_size;		/* Size of script B		*/
+	struct	sym_fwb_ofs
+		*b_ofs;		/* Useful offsets in script B	*/
+	u32	*z_base;	/* Pointer to script Z template	*/
+	int	z_size;		/* Size of script Z		*/
+	struct	sym_fwz_ofs
+		*z_ofs;		/* Useful offsets in script Z	*/
+	/* Setup and patch methods for this firmware */
+	void	(*setup)(struct sym_hcb *, struct sym_fw *);
+	void	(*patch)(struct sym_hcb *);
+};
+
+/*
+ *  Macro used to declare a firmware.
+ */
+#define SYM_FW_ENTRY(fw, name)					\
+{								\
+	name,							\
+	(u32 *) &fw##a_scr, sizeof(fw##a_scr), &fw##a_ofs,	\
+	(u32 *) &fw##b_scr, sizeof(fw##b_scr), &fw##b_ofs,	\
+	(u32 *) &fw##z_scr, sizeof(fw##z_scr), &fw##z_ofs,	\
+	fw##_setup, fw##_patch					\
+}
+
+/*
+ *  Macros used from the C code to get useful
+ *  SCRIPTS bus addresses.
+ */
+#define SCRIPTA_BA(np, label)	(np->fwa_bas.label)
+#define SCRIPTB_BA(np, label)	(np->fwb_bas.label)
+#define SCRIPTZ_BA(np, label)	(np->fwz_bas.label)
+
+/*
+ *  Macros used by scripts definitions.
+ *
+ *  HADDR_1 generates a reference to a field of the controller data.
+ *  HADDR_2 generates a reference to a field of the controller data
+ *          with offset.
+ *  RADDR_1 generates a reference to a script processor register.
+ *  RADDR_2 generates a reference to a script processor register
+ *          with offset.
+ *  PADDR_A generates a reference to another part of script A.
+ *  PADDR_B generates a reference to another part of script B.
+ *
+ *  SYM_GEN_PADDR_A and SYM_GEN_PADDR_B are used to define respectively 
+ *  the PADDR_A and PADDR_B macros for each firmware by setting argument 
+ *  `s' to the name of the corresponding structure.
+ *
+ *  SCR_DATA_ZERO is used to allocate a DWORD of data in scripts areas.
+ */
+
+#define	RELOC_SOFTC	0x40000000
+#define	RELOC_LABEL_A	0x50000000
+#define	RELOC_REGISTER	0x60000000
+#define	RELOC_LABEL_B	0x80000000
+#define	RELOC_MASK	0xf0000000
+
+#define	HADDR_1(label)	   (RELOC_SOFTC    | offsetof(struct sym_hcb, label))
+#define	HADDR_2(label,ofs) (RELOC_SOFTC    | \
+				(offsetof(struct sym_hcb, label)+(ofs)))
+#define	RADDR_1(label)	   (RELOC_REGISTER | REG(label))
+#define	RADDR_2(label,ofs) (RELOC_REGISTER | ((REG(label))+(ofs)))
+
+#define SYM_GEN_PADDR_A(s, label) (RELOC_LABEL_A  | offsetof(s, label))
+#define SYM_GEN_PADDR_B(s, label) (RELOC_LABEL_B  | offsetof(s, label))
+
+#define SCR_DATA_ZERO	0xf00ff00f
+
+#endif	/* SYM_FW_H */
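
As a reading aid, the SYM_FW_ENTRY() macro above expands, for the LOAD/STORE firmware declared in sym_fw.c, to roughly the following initializer; the token pasting wires the script templates, their sizes, their offset tables and the setup/patch methods into one struct sym_fw:

static struct sym_fw sym_fw2 = {
	"LOAD/STORE-based",						/* name */
	(u32 *) &sym_fw2a_scr, sizeof(sym_fw2a_scr), &sym_fw2a_ofs,	/* script A */
	(u32 *) &sym_fw2b_scr, sizeof(sym_fw2b_scr), &sym_fw2b_ofs,	/* script B */
	(u32 *) &sym_fw2z_scr, sizeof(sym_fw2z_scr), &sym_fw2z_ofs,	/* script Z */
	sym_fw2_setup, sym_fw2_patch					/* methods */
};

sym_find_firmware() then simply returns a pointer to one of these descriptors, and the setup/patch methods do the per-chip work.
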
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw1.h b/drivers/scsi/sym53c8xx_2/sym_fw1.h
new file mode 100644
index 0000000..cdd92d8
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_fw1.h
@@ -0,0 +1,1838 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ *  Scripts for SYMBIOS-Processor
+ *
+ *  We have to know the offsets of all labels before we reach 
+ *  them (for forward jumps). Therefore we declare a struct 
+ *  here. If you make changes inside the script,
+ *
+ *  DON'T FORGET TO CHANGE THE LENGTHS HERE!
+ */
+
+/*
+ *  Script fragments which are loaded into the on-chip RAM 
+ *  of 825A, 875, 876, 895, 895A, 896 and 1010 chips.
+ *  Must not exceed 4K bytes.
+ */
+struct SYM_FWA_SCR {
+	u32 start		[ 11];
+	u32 getjob_begin	[  4];
+	u32 _sms_a10		[  5];
+	u32 getjob_end		[  4];
+	u32 _sms_a20		[  4];
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+	u32 select		[  8];
+#else
+	u32 select		[  6];
+#endif
+	u32 _sms_a30		[  5];
+	u32 wf_sel_done		[  2];
+	u32 send_ident		[  2];
+#ifdef SYM_CONF_IARB_SUPPORT
+	u32 select2		[  8];
+#else
+	u32 select2		[  2];
+#endif
+	u32 command		[  2];
+	u32 dispatch		[ 28];
+	u32 sel_no_cmd		[ 10];
+	u32 init		[  6];
+	u32 clrack		[  4];
+	u32 datai_done		[ 11];
+	u32 datai_done_wsr	[ 20];
+	u32 datao_done		[ 11];
+	u32 datao_done_wss	[  6];
+	u32 datai_phase		[  5];
+	u32 datao_phase		[  5];
+	u32 msg_in		[  2];
+	u32 msg_in2		[ 10];
+#ifdef SYM_CONF_IARB_SUPPORT
+	u32 status		[ 14];
+#else
+	u32 status		[ 10];
+#endif
+	u32 complete		[  6];
+	u32 complete2		[  8];
+	u32 _sms_a40		[ 12];
+	u32 done		[  5];
+	u32 _sms_a50		[  5];
+	u32 _sms_a60		[  2];
+	u32 done_end		[  4];
+	u32 complete_error	[  5];
+	u32 save_dp		[ 11];
+	u32 restore_dp		[  7];
+	u32 disconnect		[ 11];
+	u32 disconnect2		[  5];
+	u32 _sms_a65		[  3];
+#ifdef SYM_CONF_IARB_SUPPORT
+	u32 idle		[  4];
+#else
+	u32 idle		[  2];
+#endif
+#ifdef SYM_CONF_IARB_SUPPORT
+	u32 ungetjob		[  7];
+#else
+	u32 ungetjob		[  5];
+#endif
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+	u32 reselect		[  4];
+#else
+	u32 reselect		[  2];
+#endif
+	u32 reselected		[ 19];
+	u32 _sms_a70		[  6];
+	u32 _sms_a80		[  4];
+	u32 reselected1		[ 25];
+	u32 _sms_a90		[  4];
+	u32 resel_lun0		[  7];
+	u32 _sms_a100		[  4];
+	u32 resel_tag		[  8];
+#if   SYM_CONF_MAX_TASK*4 > 512
+	u32 _sms_a110		[ 23];
+#elif SYM_CONF_MAX_TASK*4 > 256
+	u32 _sms_a110		[ 17];
+#else
+	u32 _sms_a110		[ 13];
+#endif
+	u32 _sms_a120		[  2];
+	u32 resel_go		[  4];
+	u32 _sms_a130		[  7];
+	u32 resel_dsa		[  2];
+	u32 resel_dsa1		[  4];
+	u32 _sms_a140		[  7];
+	u32 resel_no_tag	[  4];
+	u32 _sms_a145		[  7];
+	u32 data_in		[SYM_CONF_MAX_SG * 2];
+	u32 data_in2		[  4];
+	u32 data_out		[SYM_CONF_MAX_SG * 2];
+	u32 data_out2		[  4];
+	u32 pm0_data		[ 12];
+	u32 pm0_data_out	[  6];
+	u32 pm0_data_end	[  7];
+	u32 pm_data_end		[  4];
+	u32 _sms_a150		[  4];
+	u32 pm1_data		[ 12];
+	u32 pm1_data_out	[  6];
+	u32 pm1_data_end	[  9];
+};
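Declaring the script as a struct means the byte offset of each member doubles as the offset of the corresponding label, which is why the array lengths above must match the fragments exactly. The companion sym_fw.c is assumed to turn the same label list into a table of offsets roughly like this (sketch; names are illustrative):

/* Redefine the generator so that each label expands to its byte offset. */
#define SYM_GEN_A(s, label)	offsetof(s, label),
static struct sym_fwa_ofs sym_fw1a_ofs = {
	SYM_GEN_FW_A(struct SYM_FWA_SCR)
};
#undef SYM_GEN_A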
+
+/*
+ *  Script fragments which stay in main memory for all chips 
+ *  except for chips that support 8K on-chip RAM.
+ */
+struct SYM_FWB_SCR {
+	u32 no_data		[  2];
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+	u32 sel_for_abort	[ 18];
+#else
+	u32 sel_for_abort	[ 16];
+#endif
+	u32 sel_for_abort_1	[  2];
+	u32 msg_in_etc		[ 12];
+	u32 msg_received	[  5];
+	u32 msg_weird_seen	[  5];
+	u32 msg_extended	[ 17];
+	u32 _sms_b10		[  4];
+	u32 msg_bad		[  6];
+	u32 msg_weird		[  4];
+	u32 msg_weird1		[  8];
+	u32 wdtr_resp		[  6];
+	u32 send_wdtr		[  4];
+	u32 sdtr_resp		[  6];
+	u32 send_sdtr		[  4];
+	u32 ppr_resp		[  6];
+	u32 send_ppr		[  4];
+	u32 nego_bad_phase	[  4];
+	u32 msg_out		[  4];
+	u32 msg_out_done	[  4];
+	u32 data_ovrun		[  3];
+	u32 data_ovrun1		[ 22];
+	u32 data_ovrun2		[  8];
+	u32 abort_resel		[ 16];
+	u32 resend_ident	[  4];
+	u32 ident_break		[  4];
+	u32 ident_break_atn	[  4];
+	u32 sdata_in		[  6];
+	u32 resel_bad_lun	[  4];
+	u32 bad_i_t_l		[  4];
+	u32 bad_i_t_l_q		[  4];
+	u32 bad_status		[  7];
+	u32 wsr_ma_helper	[  4];
+
+#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
+	/* Unknown direction handling */
+	u32 data_io		[  2];
+	u32 data_io_com		[  8];
+	u32 data_io_out		[  7];
+#endif
+	/* Data area */
+	u32 zero		[  1];
+	u32 scratch		[  1];
+	u32 scratch1		[  1];
+	u32 prev_done		[  1];
+	u32 done_pos		[  1];
+	u32 nextjob		[  1];
+	u32 startpos		[  1];
+	u32 targtbl		[  1];
+};
+
+/*
+ *  Script fragments used at initialisations.
+ *  Only runs out of main memory.
+ */
+struct SYM_FWZ_SCR {
+	u32 snooptest		[  9];
+	u32 snoopend		[  2];
+};
+
+static struct SYM_FWA_SCR SYM_FWA_SCR = {
+/*--------------------------< START >----------------------------*/ {
+	/*
+	 *  Switch the LED on.
+	 *  Will be patched with a NO_OP if LED
+	 *  not needed or not desired.
+	 */
+	SCR_REG_REG (gpreg, SCR_AND, 0xfe),
+		0,
+	/*
+	 *      Clear SIGP.
+	 */
+	SCR_FROM_REG (ctest2),
+		0,
+	/*
+	 *  Stop here if the C code wants to perform 
+	 *  some error recovery procedure manually.
+	 *  (Indicate this by setting SEM in ISTAT)
+	 */
+	SCR_FROM_REG (istat),
+		0,
+	/*
+	 *  Report to the C code the next position in 
+	 *  the start queue the SCRIPTS will schedule.
+	 *  The C code must not change SCRATCHA.
+	 */
+	SCR_COPY (4),
+		PADDR_B (startpos),
+		RADDR_1 (scratcha),
+	SCR_INT ^ IFTRUE (MASK (SEM, SEM)),
+		SIR_SCRIPT_STOPPED,
+	/*
+	 *  Start the next job.
+	 *
+	 *  @DSA     = start point for this job.
+	 *  SCRATCHA = address of this job in the start queue.
+	 *
+	 *  We will restore startpos with SCRATCHA if we fail 
+	 *  arbitration or if it is the idle job.
+	 *
+	 *  The GETJOB_BEGIN to GETJOB_END section of SCRIPTS below 
+	 *  is a critical path. If it is only partially executed, the 
+	 *  job address may not yet be in the DSA while the next 
+	 *  queue position already points to the next JOB.
+	 */
+}/*-------------------------< GETJOB_BEGIN >---------------------*/,{
+	/*
+	 *  Copy to a fixed location both the next STARTPOS 
+	 *  and the current JOB address, using self modifying 
+	 *  SCRIPTS.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (scratcha),
+		PADDR_A (_sms_a10),
+	SCR_COPY (8),
+}/*-------------------------< _SMS_A10 >-------------------------*/,{
+		0,
+		PADDR_B (nextjob),
+	/*
+	 *  Move the start address to TEMP using self-
+	 *  modifying SCRIPTS and jump indirectly to 
+	 *  that address.
+	 */
+	SCR_COPY (4),
+		PADDR_B (nextjob),
+		RADDR_1 (dsa),
+}/*-------------------------< GETJOB_END >-----------------------*/,{
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		PADDR_A (_sms_a20),
+	SCR_COPY (4),
+}/*-------------------------< _SMS_A20 >-------------------------*/,{
+		0,
+		RADDR_1 (temp),
+	SCR_RETURN,
+		0,
+}/*-------------------------< SELECT >---------------------------*/,{
+	/*
+	 *  DSA	contains the address of a scheduled
+	 *  	data structure.
+	 *
+	 *  SCRATCHA contains the address of the start queue  
+	 *  	entry which points to the next job.
+	 *
+	 *  Set Initiator mode.
+	 *
+	 *  (Target mode is left as an exercise for the reader)
+	 */
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+	SCR_CLR (SCR_TRG),
+		0,
+#endif
+	/*
+	 *      And try to select this target.
+	 */
+	SCR_SEL_TBL_ATN ^ offsetof (struct sym_dsb, select),
+		PADDR_A (ungetjob),
+	/*
+	 *  Now there are 4 possibilities:
+	 *
+	 *  (1) The chip loses arbitration.
+	 *  This is ok, because it will try again,
+	 *  when the bus becomes idle.
+	 *  (But beware of the timeout function!)
+	 *
+	 *  (2) The chip is reselected.
+	 *  Then the script processor takes the jump
+	 *  to the RESELECT label.
+	 *
+	 *  (3) The chip wins arbitration.
+	 *  Then it will execute SCRIPTS instructions until 
+	 *  the next instruction that checks the SCSI phase.
+	 *  Then it will stop and wait for selection to be 
+	 *  complete or a selection time-out to occur.
+	 *
+	 *  After having won arbitration, the SCRIPTS  
+	 *  processor is able to execute instructions while 
+	 *  the SCSI core is performing SCSI selection.
+	 */
+
+	/*
+	 *  Copy the CCB header to a fixed location 
+	 *  in the HCB using self-modifying SCRIPTS.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		PADDR_A (_sms_a30),
+	SCR_COPY (sizeof(struct sym_ccbh)),
+}/*-------------------------< _SMS_A30 >-------------------------*/,{
+		0,
+		HADDR_1 (ccb_head),
+	/*
+	 *  Initialize the status register
+	 */
+	SCR_COPY (4),
+		HADDR_1 (ccb_head.status),
+		RADDR_1 (scr0),
+}/*-------------------------< WF_SEL_DONE >----------------------*/,{
+	SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+		SIR_SEL_ATN_NO_MSG_OUT,
+}/*-------------------------< SEND_IDENT >-----------------------*/,{
+	/*
+	 *  Selection complete.
+	 *  Send the IDENTIFY and possibly the TAG message 
+	 *  and negotiation message if present.
+	 */
+	SCR_MOVE_TBL ^ SCR_MSG_OUT,
+		offsetof (struct sym_dsb, smsg),
+}/*-------------------------< SELECT2 >--------------------------*/,{
+#ifdef SYM_CONF_IARB_SUPPORT
+	/*
+	 *  Set IMMEDIATE ARBITRATION if we have been given 
+	 *  a hint to do so. (Some job to do after this one).
+	 */
+	SCR_FROM_REG (HF_REG),
+		0,
+	SCR_JUMPR ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB)),
+		8,
+	SCR_REG_REG (scntl1, SCR_OR, IARB),
+		0,
+#endif
+	/*
+	 *  Anticipate the COMMAND phase.
+	 *  This is the PHASE we expect at this point.
+	 */
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)),
+		PADDR_A (sel_no_cmd),
+}/*-------------------------< COMMAND >--------------------------*/,{
+	/*
+	 *  ... and send the command
+	 */
+	SCR_MOVE_TBL ^ SCR_COMMAND,
+		offsetof (struct sym_dsb, cmd),
+}/*-------------------------< DISPATCH >-------------------------*/,{
+	/*
+	 *  MSG_IN is the only phase that shall be 
+	 *  entered at least once for each (re)selection.
+	 *  So we test it first.
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+		PADDR_A (msg_in),
+	SCR_JUMP ^ IFTRUE (IF (SCR_DATA_OUT)),
+		PADDR_A (datao_phase),
+	SCR_JUMP ^ IFTRUE (IF (SCR_DATA_IN)),
+		PADDR_A (datai_phase),
+	SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)),
+		PADDR_A (status),
+	SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)),
+		PADDR_A (command),
+	SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
+		PADDR_B (msg_out),
+	/*
+	 *  Discard as many illegal phases as 
+	 *  required and tell the C code about it.
+	 */
+	SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_OUT)),
+		16,
+	SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
+		HADDR_1 (scratch),
+	SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_OUT)),
+		-16,
+	SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_IN)),
+		16,
+	SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
+		HADDR_1 (scratch),
+	SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_IN)),
+		-16,
+	SCR_INT,
+		SIR_BAD_PHASE,
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< SEL_NO_CMD >-----------------------*/,{
+	/*
+	 *  The target does not switch to command 
+	 *  phase after IDENTIFY has been sent.
+	 *
+	 *  If it stays in MSG OUT phase send it 
+	 *  the IDENTIFY again.
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+		PADDR_B (resend_ident),
+	/*
+	 *  If target does not switch to MSG IN phase 
+	 *  and we sent a negotiation, assert the 
+	 *  failure immediately.
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+		PADDR_A (dispatch),
+	SCR_FROM_REG (HS_REG),
+		0,
+	SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
+		SIR_NEGO_FAILED,
+	/*
+	 *  Jump to dispatcher.
+	 */
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< INIT >-----------------------------*/,{
+	/*
+	 *  Wait for the SCSI RESET signal to be 
+	 *  inactive before restarting operations, 
+	 *  since the chip may hang on SEL_ATN 
+	 *  if SCSI RESET is active.
+	 */
+	SCR_FROM_REG (sstat0),
+		0,
+	SCR_JUMPR ^ IFTRUE (MASK (IRST, IRST)),
+		-16,
+	SCR_JUMP,
+		PADDR_A (start),
+}/*-------------------------< CLRACK >---------------------------*/,{
+	/*
+	 *  Terminate possible pending message phase.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DATAI_DONE >-----------------------*/,{
+	/*
+	 *  Save current pointer to LASTP.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (temp),
+		HADDR_1 (ccb_head.lastp),
+	/*
+	 *  If the SWIDE is not full, jump to dispatcher.
+	 *  We anticipate a STATUS phase.
+	 */
+	SCR_FROM_REG (scntl2),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (WSR, WSR)),
+		PADDR_A (datai_done_wsr),
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
+		PADDR_A (status),
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DATAI_DONE_WSR >-------------------*/,{
+	/*
+	 *  The SWIDE is full.
+	 *  Clear this condition.
+	 */
+	SCR_REG_REG (scntl2, SCR_OR, WSR),
+		0,
+	/*
+	 *  We are expecting an IGNORE RESIDUE message 
+	 *  from the device; otherwise we are in a data 
+	 *  overrun condition. Check against MSG_IN phase.
+	 */
+	SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		SIR_SWIDE_OVERRUN,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		PADDR_A (dispatch),
+	/*
+	 *  We are in MSG_IN phase,
+	 *  Read the first byte of the message.
+	 *  If it is not an IGNORE RESIDUE message,
+	 *  signal overrun and jump to message 
+	 *  processing.
+	 */
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (msgin[0]),
+	SCR_INT ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+		SIR_SWIDE_OVERRUN,
+	SCR_JUMP ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+		PADDR_A (msg_in2),
+	/*
+	 *  We got the message we expected.
+	 *  Read the 2nd byte, and jump to dispatcher.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (msgin[1]),
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DATAO_DONE >-----------------------*/,{
+	/*
+	 *  Save current pointer to LASTP.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (temp),
+		HADDR_1 (ccb_head.lastp),
+	/*
+	 *  If the SODL is not full jump to dispatcher.
+	 *  We anticipate a STATUS phase.
+	 */
+	SCR_FROM_REG (scntl2),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (WSS, WSS)),
+		PADDR_A (datao_done_wss),
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
+		PADDR_A (status),
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DATAO_DONE_WSS >-------------------*/,{
+	/*
+	 *  The SODL is full, clear this condition.
+	 */
+	SCR_REG_REG (scntl2, SCR_OR, WSS),
+		0,
+	/*
+	 *  And signal a DATA UNDERRUN condition 
+	 *  to the C code.
+	 */
+	SCR_INT,
+		SIR_SODL_UNDERRUN,
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DATAI_PHASE >----------------------*/,{
+	/*
+	 *  Jump to current pointer.
+	 */
+	SCR_COPY (4),
+		HADDR_1 (ccb_head.lastp),
+		RADDR_1 (temp),
+	SCR_RETURN,
+		0,
+}/*-------------------------< DATAO_PHASE >----------------------*/,{
+	/*
+	 *  Jump to current pointer.
+	 */
+	SCR_COPY (4),
+		HADDR_1 (ccb_head.lastp),
+		RADDR_1 (temp),
+	SCR_RETURN,
+		0,
+}/*-------------------------< MSG_IN >---------------------------*/,{
+	/*
+	 *  Get the first byte of the message.
+	 *
+	 *  The script processor doesn't negate the
+	 *  ACK signal after this transfer.
+	 */
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (msgin[0]),
+}/*-------------------------< MSG_IN2 >--------------------------*/,{
+	/*
+	 *  Check first against 1 byte messages 
+	 *  that we handle from SCRIPTS.
+	 */
+	SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)),
+		PADDR_A (complete),
+	SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)),
+		PADDR_A (disconnect),
+	SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)),
+		PADDR_A (save_dp),
+	SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)),
+		PADDR_A (restore_dp),
+	/*
+	 *  We handle all other messages from the 
+	 *  C code, so no need to waste on-chip RAM 
+	 *  for those ones.
+	 */
+	SCR_JUMP,
+		PADDR_B (msg_in_etc),
+}/*-------------------------< STATUS >---------------------------*/,{
+	/*
+	 *  get the status
+	 */
+	SCR_MOVE_ABS (1) ^ SCR_STATUS,
+		HADDR_1 (scratch),
+#ifdef SYM_CONF_IARB_SUPPORT
+	/*
+	 *  If STATUS is not GOOD, clear IMMEDIATE ARBITRATION, 
+	 *  since we may have to tamper with the start queue from 
+	 *  the C code.
+	 */
+	SCR_JUMPR ^ IFTRUE (DATA (S_GOOD)),
+		8,
+	SCR_REG_REG (scntl1, SCR_AND, ~IARB),
+		0,
+#endif
+	/*
+	 *  save status to scsi_status.
+	 *  mark as complete.
+	 */
+	SCR_TO_REG (SS_REG),
+		0,
+	SCR_LOAD_REG (HS_REG, HS_COMPLETE),
+		0,
+	/*
+	 *  Anticipate the MESSAGE PHASE for 
+	 *  the TASK COMPLETE message.
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+		PADDR_A (msg_in),
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< COMPLETE >-------------------------*/,{
+	/*
+	 *  Complete message.
+	 *
+	 *  When we terminate the cycle by clearing ACK,
+	 *  the target may disconnect immediately.
+	 *
+	 *  We don't want to be told of an "unexpected disconnect",
+	 *  so we disable this feature.
+	 */
+	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+		0,
+	/*
+	 *  Terminate cycle ...
+	 */
+	SCR_CLR (SCR_ACK|SCR_ATN),
+		0,
+	/*
+	 *  ... and wait for the disconnect.
+	 */
+	SCR_WAIT_DISC,
+		0,
+}/*-------------------------< COMPLETE2 >------------------------*/,{
+	/*
+	 *  Save host status.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (scr0),
+		HADDR_1 (ccb_head.status),
+	/*
+	 *  Move back the CCB header using self-modifying 
+	 *  SCRIPTS.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		PADDR_A (_sms_a40),
+	SCR_COPY (sizeof(struct sym_ccbh)),
+		HADDR_1 (ccb_head),
+}/*-------------------------< _SMS_A40 >-------------------------*/,{
+		0,
+	/*
+	 *  Some bridges may reorder DMA writes to memory.
+	 *  We do not want the CPU to deal with completions 
+	 *  before all the posted writes have been flushed 
+	 *  to memory. This DUMMY READ should flush posted 
+	 *  buffers before the CPU has to deal with 
+	 *  completions.
+	 */
+	SCR_COPY (4),			/* DUMMY READ */
+		HADDR_1 (ccb_head.status),
+		RADDR_1 (scr0),
+	/*
+	 *  If command resulted in not GOOD status,
+	 *  call the C code if needed.
+	 */
+	SCR_FROM_REG (SS_REG),
+		0,
+	SCR_CALL ^ IFFALSE (DATA (S_GOOD)),
+		PADDR_B (bad_status),
+	/*
+	 *  If we performed an auto-sense, call 
+	 *  the C code to synchronize task aborts 
+	 *  with UNIT ATTENTION conditions.
+	 */
+	SCR_FROM_REG (HF_REG),
+		0,
+	SCR_JUMP ^ IFFALSE (MASK (0 ,(HF_SENSE|HF_EXT_ERR))),
+		PADDR_A (complete_error),
+}/*-------------------------< DONE >-----------------------------*/,{
+	/*
+	 *  Copy the DSA to the DONE QUEUE and 
+	 *  signal completion to the host.
+	 *  If we are interrupted between DONE 
+	 *  and DONE_END, we must reset, otherwise 
+	 *  the completed CCB may be lost.
+	 */
+	SCR_COPY (4),
+		PADDR_B (done_pos),
+		PADDR_A (_sms_a50),
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+}/*-------------------------< _SMS_A50 >-------------------------*/,{
+		0,
+	SCR_COPY (4),
+		PADDR_B (done_pos),
+		PADDR_A (_sms_a60),
+	/*
+	 *  The instruction below reads the DONE QUEUE next 
+	 *  free position from memory.
+	 *  In addition it ensures that all PCI posted writes  
+	 *  CCB is visible to the CPU before INTFLY is raised.
+	 *  CCB is visible by the CPU before INTFLY is raised.
+	 */
+	SCR_COPY (8),
+}/*-------------------------< _SMS_A60 >-------------------------*/,{
+		0,
+		PADDR_B (prev_done),
+}/*-------------------------< DONE_END >-------------------------*/,{
+	SCR_INT_FLY,
+		0,
+	SCR_JUMP,
+		PADDR_A (start),
+}/*-------------------------< COMPLETE_ERROR >-------------------*/,{
+	SCR_COPY (4),
+		PADDR_B (startpos),
+		RADDR_1 (scratcha),
+	SCR_INT,
+		SIR_COMPLETE_ERROR,
+}/*-------------------------< SAVE_DP >--------------------------*/,{
+	/*
+	 *  Clear ACK immediately.
+	 *  No need to delay it.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+	/*
+	 *  Keep track that we received a SAVE DP, so 
+	 *  we will switch to the other PM context 
+	 *  on the next PM since the DP may point 
+	 *  to the current PM context.
+	 */
+	SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
+		0,
+	/*
+	 *  SAVE_DP message:
+	 *  Copy LASTP to SAVEP.
+	 */
+	SCR_COPY (4),
+		HADDR_1 (ccb_head.lastp),
+		HADDR_1 (ccb_head.savep),
+	/*
+	 *  Anticipate the MESSAGE PHASE for 
+	 *  the DISCONNECT message.
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+		PADDR_A (msg_in),
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< RESTORE_DP >-----------------------*/,{
+	/*
+	 *  Clear ACK immediately.
+	 *  No need to delay it.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+	/*
+	 *  Copy SAVEP to LASTP.
+	 */
+	SCR_COPY (4),
+		HADDR_1 (ccb_head.savep),
+		HADDR_1 (ccb_head.lastp),
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DISCONNECT >-----------------------*/,{
+	/*
+	 *  DISCONNECTing  ...
+	 *
+	 *  disable the "unexpected disconnect" feature,
+	 *  and remove the ACK signal.
+	 */
+	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+		0,
+	SCR_CLR (SCR_ACK|SCR_ATN),
+		0,
+	/*
+	 *  Wait for the disconnect.
+	 */
+	SCR_WAIT_DISC,
+		0,
+	/*
+	 *  Status is: DISCONNECTED.
+	 */
+	SCR_LOAD_REG (HS_REG, HS_DISCONNECT),
+		0,
+	/*
+	 *  Save host status.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (scr0),
+		HADDR_1 (ccb_head.status),
+}/*-------------------------< DISCONNECT2 >----------------------*/,{
+	/*
+	 *  Move back the CCB header using self-modifying 
+	 *  SCRIPTS.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		PADDR_A (_sms_a65),
+	SCR_COPY (sizeof(struct sym_ccbh)),
+		HADDR_1 (ccb_head),
+}/*-------------------------< _SMS_A65 >-------------------------*/,{
+		0,
+	SCR_JUMP,
+		PADDR_A (start),
+}/*-------------------------< IDLE >-----------------------------*/,{
+	/*
+	 *  Nothing to do?
+	 *  Switch the LED off and wait for reselect.
+	 *  Will be patched with a NO_OP if LED
+	 *  not needed or not desired.
+	 */
+	SCR_REG_REG (gpreg, SCR_OR, 0x01),
+		0,
+#ifdef SYM_CONF_IARB_SUPPORT
+	SCR_JUMPR,
+		8,
+#endif
+}/*-------------------------< UNGETJOB >-------------------------*/,{
+#ifdef SYM_CONF_IARB_SUPPORT
+	/*
+	 *  Set IMMEDIATE ARBITRATION, for the next time.
+	 *  This will give us better chance to win arbitration 
+	 *  for the job we just wanted to do.
+	 */
+	SCR_REG_REG (scntl1, SCR_OR, IARB),
+		0,
+#endif
+	/*
+	 *  We are not able to restart the SCRIPTS if we are 
+	 *  interrupted before these instructions have all 
+	 *  been executed. BTW, this is very unlikely to 
+	 *  happen, but we check for that from the C code.
+	 */
+	SCR_LOAD_REG (dsa, 0xff),
+		0,
+	SCR_COPY (4),
+		RADDR_1 (scratcha),
+		PADDR_B (startpos),
+}/*-------------------------< RESELECT >-------------------------*/,{
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+	/*
+	 *  Make sure we are in initiator mode.
+	 */
+	SCR_CLR (SCR_TRG),
+		0,
+#endif
+	/*
+	 *  Sleep waiting for a reselection.
+	 */
+	SCR_WAIT_RESEL,
+		PADDR_A(start),
+}/*-------------------------< RESELECTED >-----------------------*/,{
+	/*
+	 *  Switch the LED on.
+	 *  Will be patched with a NO_OP if LED
+	 *  not needed or not desired.
+	 */
+	SCR_REG_REG (gpreg, SCR_AND, 0xfe),
+		0,
+	/*
+	 *  load the target id into the sdid
+	 */
+	SCR_REG_SFBR (ssid, SCR_AND, 0x8F),
+		0,
+	SCR_TO_REG (sdid),
+		0,
+	/*
+	 *  Load the target control block address
+	 */
+	SCR_COPY (4),
+		PADDR_B (targtbl),
+		RADDR_1 (dsa),
+	SCR_SFBR_REG (dsa, SCR_SHL, 0),
+		0,
+	SCR_REG_REG (dsa, SCR_SHL, 0),
+		0,
+	SCR_REG_REG (dsa, SCR_AND, 0x3c),
+		0,
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		PADDR_A (_sms_a70),
+	SCR_COPY (4),
+}/*-------------------------< _SMS_A70 >-------------------------*/,{
+		0,
+		RADDR_1 (dsa),
+	/*
+	 *  Copy the TCB header to a fixed place in 
+	 *  the HCB.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		PADDR_A (_sms_a80),
+	SCR_COPY (sizeof(struct sym_tcbh)),
+}/*-------------------------< _SMS_A80 >-------------------------*/,{
+		0,
+		HADDR_1 (tcb_head),
+	/*
+	 *  We expect MESSAGE IN phase.
+	 *  If not, get help from the C code.
+	 */
+	SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		SIR_RESEL_NO_MSG_IN,
+}/*-------------------------< RESELECTED1 >----------------------*/,{
+	/*
+	 *  Load the synchronous transfer registers.
+	 */
+	SCR_COPY (1),
+		HADDR_1 (tcb_head.wval),
+		RADDR_1 (scntl3),
+	SCR_COPY (1),
+		HADDR_1 (tcb_head.sval),
+		RADDR_1 (sxfer),
+	/*
+	 *  Get the IDENTIFY message.
+	 */
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (msgin),
+	/*
+	 *  If IDENTIFY LUN #0, use a faster path 
+	 *  to find the LCB structure.
+	 */
+	SCR_JUMP ^ IFTRUE (MASK (0x80, 0xbf)),
+		PADDR_A (resel_lun0),
+	/*
+	 *  If the message isn't an IDENTIFY, 
+	 *  tell the C code about it.
+	 */
+	SCR_INT ^ IFFALSE (MASK (0x80, 0x80)),
+		SIR_RESEL_NO_IDENTIFY,
+	/*
+	 *  It is an IDENTIFY message,
+	 *  Load the LUN control block address.
+	 */
+	SCR_COPY (4),
+		HADDR_1 (tcb_head.luntbl_sa),
+		RADDR_1 (dsa),
+	SCR_SFBR_REG (dsa, SCR_SHL, 0),
+		0,
+	SCR_REG_REG (dsa, SCR_SHL, 0),
+		0,
+	SCR_REG_REG (dsa, SCR_AND, 0xfc),
+		0,
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		PADDR_A (_sms_a90),
+	SCR_COPY (4),
+}/*-------------------------< _SMS_A90 >-------------------------*/,{
+		0,
+		RADDR_1 (dsa),
+	SCR_JUMPR,
+		12,
+}/*-------------------------< RESEL_LUN0 >-----------------------*/,{
+	/*
+	 *  LUN 0 special case (but the usual one :))
+	 */
+	SCR_COPY (4),
+		HADDR_1 (tcb_head.lun0_sa),
+		RADDR_1 (dsa),
+	/*
+	 *  Jump indirectly to the reselect action for this LUN.
+	 *  (lcb.head.resel_sa assumed at offset zero of lcb).
+	 */
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		PADDR_A (_sms_a100),
+	SCR_COPY (4),
+}/*-------------------------< _SMS_A100 >------------------------*/,{
+		0,
+		RADDR_1 (temp),
+	SCR_RETURN,
+		0,
+	/* In normal situations, we jump to RESEL_TAG or RESEL_NO_TAG */
+}/*-------------------------< RESEL_TAG >------------------------*/,{
+	/*
+	 *  ACK the IDENTIFY previously received.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+	/*
+	 *  It shall be a tagged command.
+	 *  Read SIMPLE+TAG.
+	 *  The C code will deal with errors.
+	 *  Aggressive optimization, isn't it? :)
+	 */
+	SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
+		HADDR_1 (msgin),
+	/*
+	 *  Copy the LCB header to a fixed place in 
+	 *  the HCB using self-modifying SCRIPTS.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		PADDR_A (_sms_a110),
+	SCR_COPY (sizeof(struct sym_lcbh)),
+}/*-------------------------< _SMS_A110 >------------------------*/,{
+		0,
+		HADDR_1 (lcb_head),
+	/*
+	 *  Load the pointer to the tagged task 
+	 *  table for this LUN.
+	 */
+	SCR_COPY (4),
+		HADDR_1 (lcb_head.itlq_tbl_sa),
+		RADDR_1 (dsa),
+	/*
+	 *  The SIDL still contains the TAG value.
+	 *  Aggressive optimization, isn't it? :):)
+	 */
+	SCR_REG_SFBR (sidl, SCR_SHL, 0),
+		0,
+#if SYM_CONF_MAX_TASK*4 > 512
+	SCR_JUMPR ^ IFFALSE (CARRYSET),
+		8,
+	SCR_REG_REG (dsa1, SCR_OR, 2),
+		0,
+	SCR_REG_REG (sfbr, SCR_SHL, 0),
+		0,
+	SCR_JUMPR ^ IFFALSE (CARRYSET),
+		8,
+	SCR_REG_REG (dsa1, SCR_OR, 1),
+		0,
+#elif SYM_CONF_MAX_TASK*4 > 256
+	SCR_JUMPR ^ IFFALSE (CARRYSET),
+		8,
+	SCR_REG_REG (dsa1, SCR_OR, 1),
+		0,
+#endif
+	/*
+	 *  Retrieve the DSA of this task.
+	 *  JUMP indirectly to the restart point of the CCB.
+	 */
+	SCR_SFBR_REG (dsa, SCR_AND, 0xfc),
+		0,
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		PADDR_A (_sms_a120),
+	SCR_COPY (4),
+}/*-------------------------< _SMS_A120 >------------------------*/,{
+		0,
+		RADDR_1 (dsa),
+}/*-------------------------< RESEL_GO >-------------------------*/,{
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		PADDR_A (_sms_a130),
+	/*
+	 *  Move 'ccb.phys.head.go' action to 
+	 *  scratch/scratch1. So scratch1 will 
+	 *  contain the 'restart' field of the 
+	 *  'go' structure.
+	 */
+	SCR_COPY (8),
+}/*-------------------------< _SMS_A130 >------------------------*/,{
+		0,
+		PADDR_B (scratch),
+	SCR_COPY (4),
+		PADDR_B (scratch1), /* phys.head.go.restart */
+		RADDR_1 (temp),
+	SCR_RETURN,
+		0,
+	/* In normal situations we branch to RESEL_DSA */
+}/*-------------------------< RESEL_DSA >------------------------*/,{
+	/*
+	 *  ACK the IDENTIFY or TAG previously received.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+}/*-------------------------< RESEL_DSA1 >-----------------------*/,{
+	/*
+	 *  Copy the CCB header to a fixed location 
+	 *  in the HCB using self-modifying SCRIPTS.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		PADDR_A (_sms_a140),
+	SCR_COPY (sizeof(struct sym_ccbh)),
+}/*-------------------------< _SMS_A140 >------------------------*/,{
+		0,
+		HADDR_1 (ccb_head),
+	/*
+	 *  Initialize the status register
+	 */
+	SCR_COPY (4),
+		HADDR_1 (ccb_head.status),
+		RADDR_1 (scr0),
+	/*
+	 *  Jump to dispatcher.
+	 */
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< RESEL_NO_TAG >---------------------*/,{
+	/*
+	 *  Copy the LCB header to a fixed place in 
+	 *  the HCB using self-modifying SCRIPTS.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		PADDR_A (_sms_a145),
+	SCR_COPY (sizeof(struct sym_lcbh)),
+}/*-------------------------< _SMS_A145 >------------------------*/,{
+		0,
+		HADDR_1 (lcb_head),
+	/*
+	 *  Load the DSA with the unique ITL task.
+	 */
+	SCR_COPY (4),
+		HADDR_1 (lcb_head.itl_task_sa),
+		RADDR_1 (dsa),
+	SCR_JUMP,
+		PADDR_A (resel_go),
+}/*-------------------------< DATA_IN >--------------------------*/,{
+/*
+ *  Because the size depends on the
+ *  #define SYM_CONF_MAX_SG parameter,
+ *  it is filled in at runtime.
+ *
+ *  ##===========< i=0; i<SYM_CONF_MAX_SG >=========
+ *  ||	SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ *  ||		offsetof (struct sym_dsb, data[ i]),
+ *  ##==========================================
+ */
+0
+}/*-------------------------< DATA_IN2 >-------------------------*/,{
+	SCR_CALL,
+		PADDR_A (datai_done),
+	SCR_JUMP,
+		PADDR_B (data_ovrun),
+}/*-------------------------< DATA_OUT >-------------------------*/,{
+/*
+ *  Because the size depends on the
+ *  #define SYM_CONF_MAX_SG parameter,
+ *  it is filled in at runtime.
+ *
+ *  ##===========< i=0; i<SYM_CONF_MAX_SG >=========
+ *  ||	SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ *  ||		offsetof (struct sym_dsb, data[ i]),
+ *  ##==========================================
+ */
+0
+}/*-------------------------< DATA_OUT2 >------------------------*/,{
+	SCR_CALL,
+		PADDR_A (datao_done),
+	SCR_JUMP,
+		PADDR_B (data_ovrun),
+}/*-------------------------< PM0_DATA >-------------------------*/,{
+	/*
+	 *  Read our host flags to SFBR, so we will be able 
+	 *  to check against the data direction we expect.
+	 */
+	SCR_FROM_REG (HF_REG),
+		0,
+	/*
+	 *  Check against actual DATA PHASE.
+	 */
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+		PADDR_A (pm0_data_out),
+	/*
+	 *  Actual phase is DATA IN.
+	 *  Check against expected direction.
+	 */
+	SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+		PADDR_B (data_ovrun),
+	/*
+	 *  Keep track that we are moving data from the 
+	 *  PM0 DATA mini-script.
+	 */
+	SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+		0,
+	/*
+	 *  Move the data to memory.
+	 */
+	SCR_CHMOV_TBL ^ SCR_DATA_IN,
+		offsetof (struct sym_ccb, phys.pm0.sg),
+	SCR_JUMP,
+		PADDR_A (pm0_data_end),
+}/*-------------------------< PM0_DATA_OUT >---------------------*/,{
+	/*
+	 *  Actual phase is DATA OUT.
+	 *  Check against expected direction.
+	 */
+	SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+		PADDR_B (data_ovrun),
+	/*
+	 *  Keep track that we are moving data from the 
+	 *  PM0 DATA mini-script.
+	 */
+	SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+		0,
+	/*
+	 *  Move the data from memory.
+	 */
+	SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+		offsetof (struct sym_ccb, phys.pm0.sg),
+}/*-------------------------< PM0_DATA_END >---------------------*/,{
+	/*
+	 *  Clear the flag that told us we were moving 
+	 *  data from the PM0 DATA mini-script.
+	 */
+	SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0)),
+		0,
+	/*
+	 *  Return to the previous DATA script which 
+	 *  is guaranteed by design (if no bug) to be 
+	 *  the main DATA script for this transfer.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		RADDR_1 (scratcha),
+	SCR_REG_REG (scratcha, SCR_ADD, offsetof (struct sym_ccb,phys.pm0.ret)),
+		0,
+}/*-------------------------< PM_DATA_END >----------------------*/,{
+	SCR_COPY (4),
+		RADDR_1 (scratcha),
+		PADDR_A (_sms_a150),
+	SCR_COPY (4),
+}/*-------------------------< _SMS_A150 >------------------------*/,{
+		0,
+		RADDR_1 (temp),
+	SCR_RETURN,
+		0,
+}/*-------------------------< PM1_DATA >-------------------------*/,{
+	/*
+	 *  Read our host flags to SFBR, so we will be able 
+	 *  to check against the data direction we expect.
+	 */
+	SCR_FROM_REG (HF_REG),
+		0,
+	/*
+	 *  Check against actual DATA PHASE.
+	 */
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+		PADDR_A (pm1_data_out),
+	/*
+	 *  Actual phase is DATA IN.
+	 *  Check against expected direction.
+	 */
+	SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+		PADDR_B (data_ovrun),
+	/*
+	 *  Keep track that we are moving data from the 
+	 *  PM1 DATA mini-script.
+	 */
+	SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+		0,
+	/*
+	 *  Move the data to memory.
+	 */
+	SCR_CHMOV_TBL ^ SCR_DATA_IN,
+		offsetof (struct sym_ccb, phys.pm1.sg),
+	SCR_JUMP,
+		PADDR_A (pm1_data_end),
+}/*-------------------------< PM1_DATA_OUT >---------------------*/,{
+	/*
+	 *  Actual phase is DATA OUT.
+	 *  Check against expected direction.
+	 */
+	SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+		PADDR_B (data_ovrun),
+	/*
+	 *  Keep track that we are moving data from the 
+	 *  PM1 DATA mini-script.
+	 */
+	SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+		0,
+	/*
+	 *  Move the data from memory.
+	 */
+	SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+		offsetof (struct sym_ccb, phys.pm1.sg),
+}/*-------------------------< PM1_DATA_END >---------------------*/,{
+	/*
+	 *  Clear the flag that told us we were moving 
+	 *  data from the PM1 DATA mini-script.
+	 */
+	SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1)),
+		0,
+	/*
+	 *  Return to the previous DATA script which 
+	 *  is guaranteed by design (if no bug) to be 
+	 *  the main DATA script for this transfer.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (dsa),
+		RADDR_1 (scratcha),
+	SCR_REG_REG (scratcha, SCR_ADD, offsetof (struct sym_ccb,phys.pm1.ret)),
+		0,
+	SCR_JUMP,
+		PADDR_A (pm_data_end),
+}/*--------------------------<>----------------------------------*/
+};
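The DATA_IN and DATA_OUT areas above are left as zeroed placeholders; the C side is expected to fill them at run time with one CHMOV per scatter/gather slot, as the comment blocks inside them describe. A sketch of such a fill loop follows (assumed to live in the companion sym_fw.c; the helper name is illustrative):

static void sym_fw_fill_data_sketch(u32 *in, u32 *out)
{
	int i;

	for (i = 0; i < SYM_CONF_MAX_SG; i++) {
		*in++  = SCR_CHMOV_TBL ^ SCR_DATA_IN;
		*in++  = offsetof(struct sym_dsb, data[i]);
		*out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT;
		*out++ = offsetof(struct sym_dsb, data[i]);
	}
}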
+
+static struct SYM_FWB_SCR SYM_FWB_SCR = {
+/*-------------------------< NO_DATA >--------------------------*/ {
+	SCR_JUMP,
+		PADDR_B (data_ovrun),
+}/*-------------------------< SEL_FOR_ABORT >--------------------*/,{
+	/*
+	 *  We are jumped here by the C code, if we have 
+	 *  some target to reset or some disconnected 
+	 *  job to abort. Since error recovery is serious 
+	 *  business, we will really reset the SCSI BUS in 
+	 *  case a SCSI interrupt occurs in this path.
+	 */
+
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+	/*
+	 *  Set initiator mode.
+	 */
+	SCR_CLR (SCR_TRG),
+		0,
+#endif
+	/*
+	 *      And try to select this target.
+	 */
+	SCR_SEL_TBL_ATN ^ offsetof (struct sym_hcb, abrt_sel),
+		PADDR_A (reselect),
+	/*
+	 *  Wait for the selection to complete or 
+	 *  the selection to time out.
+	 */
+	SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+		-8,
+	/*
+	 *  Call the C code.
+	 */
+	SCR_INT,
+		SIR_TARGET_SELECTED,
+	/*
+	 *  The C code should let us continue here. 
+	 *  Send the 'kiss of death' message.
+	 *  We expect an immediate disconnect once 
+	 *  the target has eaten the message.
+	 */
+	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+		0,
+	SCR_MOVE_TBL ^ SCR_MSG_OUT,
+		offsetof (struct sym_hcb, abrt_tbl),
+	SCR_CLR (SCR_ACK|SCR_ATN),
+		0,
+	SCR_WAIT_DISC,
+		0,
+	/*
+	 *  Tell the C code that we are done.
+	 */
+	SCR_INT,
+		SIR_ABORT_SENT,
+}/*-------------------------< SEL_FOR_ABORT_1 >------------------*/,{
+	/*
+	 *  Jump to the scheduler.
+	 */
+	SCR_JUMP,
+		PADDR_A (start),
+}/*-------------------------< MSG_IN_ETC >-----------------------*/,{
+	/*
+	 *  If it is an EXTENDED message (variable size),
+	 *  handle it.
+	 */
+	SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
+		PADDR_B (msg_extended),
+	/*
+	 *  Let the C code handle any other 
+	 *  1 byte message.
+	 */
+	SCR_JUMP ^ IFTRUE (MASK (0x00, 0xf0)),
+		PADDR_B (msg_received),
+	SCR_JUMP ^ IFTRUE (MASK (0x10, 0xf0)),
+		PADDR_B (msg_received),
+	/*
+	 *  We do not handle 2-byte messages from SCRIPTS,
+	 *  so let the C code deal with these ones too.
+	 */
+	SCR_JUMP ^ IFFALSE (MASK (0x20, 0xf0)),
+		PADDR_B (msg_weird_seen),
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (msgin[1]),
+}/*-------------------------< MSG_RECEIVED >---------------------*/,{
+	SCR_COPY (4),			/* DUMMY READ */
+		HADDR_1 (scratch),
+		RADDR_1 (scratcha),
+	SCR_INT,
+		SIR_MSG_RECEIVED,
+}/*-------------------------< MSG_WEIRD_SEEN >-------------------*/,{
+	SCR_COPY (4),			/* DUMMY READ */
+		HADDR_1 (scratch),
+		RADDR_1 (scratcha),
+	SCR_INT,
+		SIR_MSG_WEIRD,
+}/*-------------------------< MSG_EXTENDED >---------------------*/,{
+	/*
+	 *  Clear ACK and get the next byte 
+	 *  assumed to be the message length.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (msgin[1]),
+	/*
+	 *  Try to catch some unlikely situations such as a 
+	 *  zero length or a length that is too large.
+	 */
+	SCR_JUMP ^ IFTRUE (DATA (0)),
+		PADDR_B (msg_weird_seen),
+	SCR_TO_REG (scratcha),
+		0,
+	SCR_REG_REG (sfbr, SCR_ADD, (256-8)),
+		0,
+	SCR_JUMP ^ IFTRUE (CARRYSET),
+		PADDR_B (msg_weird_seen),
+	/*
+	 *  We do not handle extended messages from SCRIPTS.
+	 *  Read the amount of data corresponding to the 
+	 *  message length and call the C code.
+	 */
+	SCR_COPY (1),
+		RADDR_1 (scratcha),
+		PADDR_B (_sms_b10),
+	SCR_CLR (SCR_ACK),
+		0,
+}/*-------------------------< _SMS_B10 >-------------------------*/,{
+	SCR_MOVE_ABS (0) ^ SCR_MSG_IN,
+		HADDR_1 (msgin[2]),
+	SCR_JUMP,
+		PADDR_B (msg_received),
+}/*-------------------------< MSG_BAD >--------------------------*/,{
+	/*
+	 *  unimplemented message - reject it.
+	 */
+	SCR_INT,
+		SIR_REJECT_TO_SEND,
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_JUMP,
+		PADDR_A (clrack),
+}/*-------------------------< MSG_WEIRD >------------------------*/,{
+	/*
+	 *  weird message received
+	 *  ignore all MSG IN phases and reject it.
+	 */
+	SCR_INT,
+		SIR_REJECT_TO_SEND,
+	SCR_SET (SCR_ATN),
+		0,
+}/*-------------------------< MSG_WEIRD1 >-----------------------*/,{
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		PADDR_A (dispatch),
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (scratch),
+	SCR_JUMP,
+		PADDR_B (msg_weird1),
+}/*-------------------------< WDTR_RESP >------------------------*/,{
+	/*
+	 *  let the target fetch our answer.
+	 */
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+		PADDR_B (nego_bad_phase),
+}/*-------------------------< SEND_WDTR >------------------------*/,{
+	/*
+	 *  Send the M_X_WIDE_REQ
+	 */
+	SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
+		HADDR_1 (msgout),
+	SCR_JUMP,
+		PADDR_B (msg_out_done),
+}/*-------------------------< SDTR_RESP >------------------------*/,{
+	/*
+	 *  let the target fetch our answer.
+	 */
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+		PADDR_B (nego_bad_phase),
+}/*-------------------------< SEND_SDTR >------------------------*/,{
+	/*
+	 *  Send the M_X_SYNC_REQ
+	 */
+	SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
+		HADDR_1 (msgout),
+	SCR_JUMP,
+		PADDR_B (msg_out_done),
+}/*-------------------------< PPR_RESP >-------------------------*/,{
+	/*
+	 *  let the target fetch our answer.
+	 */
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+		PADDR_B (nego_bad_phase),
+}/*-------------------------< SEND_PPR >-------------------------*/,{
+	/*
+	 *  Send the M_X_PPR_REQ
+	 */
+	SCR_MOVE_ABS (8) ^ SCR_MSG_OUT,
+		HADDR_1 (msgout),
+	SCR_JUMP,
+		PADDR_B (msg_out_done),
+}/*-------------------------< NEGO_BAD_PHASE >-------------------*/,{
+	SCR_INT,
+		SIR_NEGO_PROTO,
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< MSG_OUT >--------------------------*/,{
+	/*
+	 *  The target requests a message.
+	 *  We do not send messages that may 
+	 *  require the device to go to bus free.
+	 */
+	SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+		HADDR_1 (msgout),
+	/*
+	 *  ... wait for the next phase
+	 *  if it's a message out, send it again, ...
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+		PADDR_B (msg_out),
+}/*-------------------------< MSG_OUT_DONE >---------------------*/,{
+	/*
+	 *  Let the C code be aware of the 
+	 *  sent message and clear the message.
+	 */
+	SCR_INT,
+		SIR_MSG_OUT_DONE,
+	/*
+	 *  ... and process the next phase
+	 */
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DATA_OVRUN >-----------------------*/,{
+	/*
+	 *  Zero scratcha, which will count the 
+	 *  extra bytes.
+	 */
+	SCR_COPY (4),
+		PADDR_B (zero),
+		RADDR_1 (scratcha),
+}/*-------------------------< DATA_OVRUN1 >----------------------*/,{
+	/*
+	 *  The target may want to transfer too much data.
+	 *
+	 *  If phase is DATA OUT write 1 byte and count it.
+	 */
+	SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+		16,
+	SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT,
+		HADDR_1 (scratch),
+	SCR_JUMP,
+		PADDR_B (data_ovrun2),
+	/*
+	 *  If WSR is set, clear this condition, and 
+	 *  count this byte.
+	 */
+	SCR_FROM_REG (scntl2),
+		0,
+	SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
+		16,
+	SCR_REG_REG (scntl2, SCR_OR, WSR),
+		0,
+	SCR_JUMP,
+		PADDR_B (data_ovrun2),
+	/*
+	 *  Finally check against DATA IN phase.
+	 *  Signal data overrun to the C code 
+	 *  and jump to dispatcher if not so.
+	 *  Read 1 byte otherwise and count it.
+	 */
+	SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_IN)),
+		16,
+	SCR_INT,
+		SIR_DATA_OVERRUN,
+	SCR_JUMP,
+		PADDR_A (dispatch),
+	SCR_CHMOV_ABS (1) ^ SCR_DATA_IN,
+		HADDR_1 (scratch),
+}/*-------------------------< DATA_OVRUN2 >----------------------*/,{
+	/*
+	 *  Count this byte.
+	 *  This will allow us to return a negative 
+	 *  residual to the user.
+	 */
+	SCR_REG_REG (scratcha,  SCR_ADD,  0x01),
+		0,
+	SCR_REG_REG (scratcha1, SCR_ADDC, 0),
+		0,
+	SCR_REG_REG (scratcha2, SCR_ADDC, 0),
+		0,
+	/*
+	 *  .. and repeat as required.
+	 */
+	SCR_JUMP,
+		PADDR_B (data_ovrun1),
+}/*-------------------------< ABORT_RESEL >----------------------*/,{
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_CLR (SCR_ACK),
+		0,
+	/*
+	 *  send the abort/abortag/reset message
+	 *  we expect an immediate disconnect
+	 */
+	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+		0,
+	SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+		HADDR_1 (msgout),
+	SCR_CLR (SCR_ACK|SCR_ATN),
+		0,
+	SCR_WAIT_DISC,
+		0,
+	SCR_INT,
+		SIR_RESEL_ABORTED,
+	SCR_JUMP,
+		PADDR_A (start),
+}/*-------------------------< RESEND_IDENT >---------------------*/,{
+	/*
+	 *  The target stays in MSG OUT phase after having acked 
+	 *  Identify [+ Tag [+ Extended message ]]. Targets shall
+	 *  behave this way on parity error.
+	 *  We must send all the messages again.
+	 */
+	SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the  */
+		0,         /* first ACK = 90 ns. Hope the chip isn't too fast */
+	SCR_JUMP,
+		PADDR_A (send_ident),
+}/*-------------------------< IDENT_BREAK >----------------------*/,{
+	SCR_CLR (SCR_ATN),
+		0,
+	SCR_JUMP,
+		PADDR_A (select2),
+}/*-------------------------< IDENT_BREAK_ATN >------------------*/,{
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_JUMP,
+		PADDR_A (select2),
+}/*-------------------------< SDATA_IN >-------------------------*/,{
+	SCR_CHMOV_TBL ^ SCR_DATA_IN,
+		offsetof (struct sym_dsb, sense),
+	SCR_CALL,
+		PADDR_A (datai_done),
+	SCR_JUMP,
+		PADDR_B (data_ovrun),
+}/*-------------------------< RESEL_BAD_LUN >--------------------*/,{
+	/*
+	 *  The message is an IDENTIFY, but the LUN is unknown.
+	 *  Signal problem to C code for logging the event.
+	 *  Send a M_ABORT to clear all pending tasks.
+	 */
+	SCR_INT,
+		SIR_RESEL_BAD_LUN,
+	SCR_JUMP,
+		PADDR_B (abort_resel),
+}/*-------------------------< BAD_I_T_L >------------------------*/,{
+	/*
+	 *  We do not have a task for that I_T_L.
+	 *  Signal problem to C code for logging the event.
+	 *  Send a M_ABORT message.
+	 */
+	SCR_INT,
+		SIR_RESEL_BAD_I_T_L,
+	SCR_JUMP,
+		PADDR_B (abort_resel),
+}/*-------------------------< BAD_I_T_L_Q >----------------------*/,{
+	/*
+	 *  We do not have a task that matches the tag.
+	 *  Signal problem to C code for logging the event.
+	 *  Send a M_ABORTTAG message.
+	 */
+	SCR_INT,
+		SIR_RESEL_BAD_I_T_L_Q,
+	SCR_JUMP,
+		PADDR_B (abort_resel),
+}/*-------------------------< BAD_STATUS >-----------------------*/,{
+	/*
+	 *  Anything different from INTERMEDIATE 
+	 *  CONDITION MET should be a bad SCSI status, 
+	 *  given that GOOD status has already been tested.
+	 *  Call the C code.
+	 */
+	SCR_COPY (4),
+		PADDR_B (startpos),
+		RADDR_1 (scratcha),
+	SCR_INT ^ IFFALSE (DATA (S_COND_MET)),
+		SIR_BAD_SCSI_STATUS,
+	SCR_RETURN,
+		0,
+}/*-------------------------< WSR_MA_HELPER >--------------------*/,{
+	/*
+	 *  Helper for the C code when WSR bit is set.
+	 *  Perform the move of the residual byte.
+	 */
+	SCR_CHMOV_TBL ^ SCR_DATA_IN,
+		offsetof (struct sym_ccb, phys.wresid),
+	SCR_JUMP,
+		PADDR_A (dispatch),
+
+#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
+}/*-------------------------< DATA_IO >--------------------------*/,{
+	/*
+	 *  We jump here if the data direction was unknown at the 
+	 *  time we had to queue the command to the scripts processor.
+	 *  Pointers had been set as follows in this situation:
+	 *    savep   -->   DATA_IO
+	 *    lastp   -->   start pointer when DATA_IN
+	 *    wlastp  -->   start pointer when DATA_OUT
+	 *  This script sets savep and lastp according to the 
+	 *  direction chosen by the target.
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)),
+		PADDR_B (data_io_out),
+}/*-------------------------< DATA_IO_COM >----------------------*/,{
+	/*
+	 *  Direction is DATA IN.
+	 */
+	SCR_COPY  (4),
+		HADDR_1 (ccb_head.lastp),
+		HADDR_1 (ccb_head.savep),
+	/*
+	 *  Jump to the SCRIPTS according to actual direction.
+	 */
+	SCR_COPY  (4),
+		HADDR_1 (ccb_head.savep),
+		RADDR_1 (temp),
+	SCR_RETURN,
+		0,
+}/*-------------------------< DATA_IO_OUT >----------------------*/,{
+	/*
+	 *  Direction is DATA OUT.
+	 */
+	SCR_REG_REG (HF_REG, SCR_AND, (~HF_DATA_IN)),
+		0,
+	SCR_COPY  (4),
+		HADDR_1 (ccb_head.wlastp),
+		HADDR_1 (ccb_head.lastp),
+	SCR_JUMP,
+		PADDR_B(data_io_com),
+#endif /* SYM_OPT_HANDLE_DIR_UNKNOWN */
+
+}/*-------------------------< ZERO >-----------------------------*/,{
+	SCR_DATA_ZERO,
+}/*-------------------------< SCRATCH >--------------------------*/,{
+	SCR_DATA_ZERO, /* MUST BE BEFORE SCRATCH1 */
+}/*-------------------------< SCRATCH1 >-------------------------*/,{
+	SCR_DATA_ZERO,
+}/*-------------------------< PREV_DONE >------------------------*/,{
+	SCR_DATA_ZERO, /* MUST BE BEFORE DONE_POS ! */
+}/*-------------------------< DONE_POS >-------------------------*/,{
+	SCR_DATA_ZERO,
+}/*-------------------------< NEXTJOB >--------------------------*/,{
+	SCR_DATA_ZERO, /* MUST BE BEFORE STARTPOS ! */
+}/*-------------------------< STARTPOS >-------------------------*/,{
+	SCR_DATA_ZERO,
+}/*-------------------------< TARGTBL >--------------------------*/,{
+	SCR_DATA_ZERO,
+}/*--------------------------<>----------------------------------*/
+};
+
+static struct SYM_FWZ_SCR SYM_FWZ_SCR = {
+ /*-------------------------< SNOOPTEST >------------------------*/{
+	/*
+	 *  Read the variable.
+	 */
+	SCR_COPY (4),
+		HADDR_1 (scratch),
+		RADDR_1 (scratcha),
+	/*
+	 *  Write the variable.
+	 */
+	SCR_COPY (4),
+		RADDR_1 (temp),
+		HADDR_1 (scratch),
+	/*
+	 *  Read back the variable.
+	 */
+	SCR_COPY (4),
+		HADDR_1 (scratch),
+		RADDR_1 (temp),
+}/*-------------------------< SNOOPEND >-------------------------*/,{
+	/*
+	 *  And stop.
+	 */
+	SCR_INT,
+		99,
+}/*--------------------------<>----------------------------------*/
+};
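The SNOOPTEST fragment only makes sense together with its C counterpart: the CPU seeds one word in host memory and another in the chip TEMP register, starts SCRIPTS at 'snooptest', waits for the INT 99, and checks that both values crossed the bus in each direction. A rough sketch of that check follows; read_temp(), write_temp(), read_scratcha() and run_script_and_wait() are hypothetical stand-ins for the driver's real register accessors and start-up code, and cpu_to_scr()/scr_to_cpu() are assumed to be the driver's byte-order helpers.

static int sym_snooptest_sketch(struct sym_hcb *np)
{
	u32 host_wr = 1, chip_wr = 2;

	np->scratch = cpu_to_scr(host_wr);	/* word the chip will read  */
	write_temp(np, chip_wr);		/* word the chip will store */

	run_script_and_wait(np, SCRIPTZ_BA(np, snooptest));	/* ends on INT 99 */

	return	read_scratcha(np)       == host_wr &&	/* chip read host RAM  */
		scr_to_cpu(np->scratch) == chip_wr &&	/* chip wrote host RAM */
		read_temp(np)           == chip_wr;	/* read-back coherent  */
}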
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw2.h b/drivers/scsi/sym53c8xx_2/sym_fw2.h
new file mode 100644
index 0000000..7ea7151
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_fw2.h
@@ -0,0 +1,1927 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ *  Scripts for SYMBIOS-Processor
+ *
+ *  We have to know the offsets of all labels before we reach 
+ *  them (for forward jumps). Therefore we declare a struct 
+ *  here. If you make changes inside the script,
+ *
+ *  DON'T FORGET TO CHANGE THE LENGTHS HERE!
+ */
+
+/*
+ *  Script fragments which are loaded into the on-chip RAM 
+ *  of 825A, 875, 876, 895, 895A, 896 and 1010 chips.
+ *  Must not exceed 4K bytes.
+ */
+struct SYM_FWA_SCR {
+	u32 start		[ 14];
+	u32 getjob_begin	[  4];
+	u32 getjob_end		[  4];
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+	u32 select		[  6];
+#else
+	u32 select		[  4];
+#endif
+#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
+	u32 is_dmap_dirty	[  4];
+#endif
+	u32 wf_sel_done		[  2];
+	u32 sel_done		[  2];
+	u32 send_ident		[  2];
+#ifdef SYM_CONF_IARB_SUPPORT
+	u32 select2		[  8];
+#else
+	u32 select2		[  2];
+#endif
+	u32 command		[  2];
+	u32 dispatch		[ 28];
+	u32 sel_no_cmd		[ 10];
+	u32 init		[  6];
+	u32 clrack		[  4];
+	u32 datai_done		[ 10];
+	u32 datai_done_wsr	[ 20];
+	u32 datao_done		[ 10];
+	u32 datao_done_wss	[  6];
+	u32 datai_phase		[  4];
+	u32 datao_phase		[  6];
+	u32 msg_in		[  2];
+	u32 msg_in2		[ 10];
+#ifdef SYM_CONF_IARB_SUPPORT
+	u32 status		[ 14];
+#else
+	u32 status		[ 10];
+#endif
+	u32 complete		[  6];
+	u32 complete2		[ 12];
+	u32 done		[ 14];
+	u32 done_end		[  2];
+	u32 complete_error	[  4];
+	u32 save_dp		[ 12];
+	u32 restore_dp		[  8];
+	u32 disconnect		[ 12];
+#ifdef SYM_CONF_IARB_SUPPORT
+	u32 idle		[  4];
+#else
+	u32 idle		[  2];
+#endif
+#ifdef SYM_CONF_IARB_SUPPORT
+	u32 ungetjob		[  6];
+#else
+	u32 ungetjob		[  4];
+#endif
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+	u32 reselect		[  4];
+#else
+	u32 reselect		[  2];
+#endif
+	u32 reselected		[ 22];
+	u32 resel_scntl4	[ 20];
+	u32 resel_lun0		[  6];
+#if   SYM_CONF_MAX_TASK*4 > 512
+	u32 resel_tag		[ 26];
+#elif SYM_CONF_MAX_TASK*4 > 256
+	u32 resel_tag		[ 20];
+#else
+	u32 resel_tag		[ 16];
+#endif
+	u32 resel_dsa		[  2];
+	u32 resel_dsa1		[  4];
+	u32 resel_no_tag	[  6];
+	u32 data_in		[SYM_CONF_MAX_SG * 2];
+	u32 data_in2		[  4];
+	u32 data_out		[SYM_CONF_MAX_SG * 2];
+	u32 data_out2		[  4];
+	u32 pm0_data		[ 12];
+	u32 pm0_data_out	[  6];
+	u32 pm0_data_end	[  6];
+	u32 pm1_data		[ 12];
+	u32 pm1_data_out	[  6];
+	u32 pm1_data_end	[  6];
+};
+
+/*
+ *  Script fragments which stay in main memory for all chips 
+ *  except for chips that support 8K on-chip RAM.
+ */
+struct SYM_FWB_SCR {
+	u32 start64		[  2];
+	u32 no_data		[  2];
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+	u32 sel_for_abort	[ 18];
+#else
+	u32 sel_for_abort	[ 16];
+#endif
+	u32 sel_for_abort_1	[  2];
+	u32 msg_in_etc		[ 12];
+	u32 msg_received	[  4];
+	u32 msg_weird_seen	[  4];
+	u32 msg_extended	[ 20];
+	u32 msg_bad		[  6];
+	u32 msg_weird		[  4];
+	u32 msg_weird1		[  8];
+
+	u32 wdtr_resp		[  6];
+	u32 send_wdtr		[  4];
+	u32 sdtr_resp		[  6];
+	u32 send_sdtr		[  4];
+	u32 ppr_resp		[  6];
+	u32 send_ppr		[  4];
+	u32 nego_bad_phase	[  4];
+	u32 msg_out		[  4];
+	u32 msg_out_done	[  4];
+	u32 data_ovrun		[  2];
+	u32 data_ovrun1		[ 22];
+	u32 data_ovrun2		[  8];
+	u32 abort_resel		[ 16];
+	u32 resend_ident	[  4];
+	u32 ident_break		[  4];
+	u32 ident_break_atn	[  4];
+	u32 sdata_in		[  6];
+	u32 resel_bad_lun	[  4];
+	u32 bad_i_t_l		[  4];
+	u32 bad_i_t_l_q		[  4];
+	u32 bad_status		[  6];
+	u32 pm_handle		[ 20];
+	u32 pm_handle1		[  4];
+	u32 pm_save		[  4];
+	u32 pm0_save		[ 12];
+	u32 pm_save_end		[  4];
+	u32 pm1_save		[ 14];
+
+	/* WSR handling */
+	u32 pm_wsr_handle	[ 38];
+	u32 wsr_ma_helper	[  4];
+
+#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
+	/* Unknown direction handling */
+	u32 data_io		[  2];
+	u32 data_io_in		[  2];
+	u32 data_io_com		[  6];
+	u32 data_io_out		[  8];
+#endif
+	/* Data area */
+	u32 zero		[  1];
+	u32 scratch		[  1];
+	u32 pm0_data_addr	[  1];
+	u32 pm1_data_addr	[  1];
+	u32 done_pos		[  1];
+	u32 startpos		[  1];
+	u32 targtbl		[  1];
+};
+
+/*
+ *  Script fragments used at initialisations.
+ *  Only runs out of main memory.
+ */
+struct SYM_FWZ_SCR {
+	u32 snooptest		[  6];
+	u32 snoopend		[  2];
+};
+
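Unlike the first firmware, the initializers below lean on the chips' LOAD/STORE SCRIPTS instructions instead of the SCR_COPY self-modifying slots, so the _sms_* patch areas disappear. As a rough mental model of the forms used below (a sketch of the semantics, not the assembler definitions):

/*
 *	SCR_LOAD_ABS (reg, 4), addr	reg <- 4 bytes at absolute address 'addr'
 *	SCR_LOAD_REL (reg, 4), ofs	reg <- 4 bytes at (DSA + ofs)
 *	SCR_STORE_ABS(reg, 4), addr	4 bytes at 'addr' <- reg
 */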
+static struct SYM_FWA_SCR SYM_FWA_SCR = {
+/*--------------------------< START >----------------------------*/ {
+	/*
+	 *  Switch the LED on.
+	 *  Will be patched with a NO_OP if LED
+	 *  not needed or not desired.
+	 */
+	SCR_REG_REG (gpreg, SCR_AND, 0xfe),
+		0,
+	/*
+	 *      Clear SIGP.
+	 */
+	SCR_FROM_REG (ctest2),
+		0,
+	/*
+	 *  Stop here if the C code wants to perform 
+	 *  some error recovery procedure manually.
+	 *  (Indicate this by setting SEM in ISTAT)
+	 */
+	SCR_FROM_REG (istat),
+		0,
+	/*
+	 *  Report to the C code the next position in 
+	 *  the start queue the SCRIPTS will schedule.
+	 *  The C code must not change SCRATCHA.
+	 */
+	SCR_LOAD_ABS (scratcha, 4),
+		PADDR_B (startpos),
+	SCR_INT ^ IFTRUE (MASK (SEM, SEM)),
+		SIR_SCRIPT_STOPPED,
+	/*
+	 *  Start the next job.
+	 *
+	 *  @DSA     = start point for this job.
+	 *  SCRATCHA = address of this job in the start queue.
+	 *
+	 *  We will restore startpos with SCRATCHA if we fail 
+	 *  arbitration or if it is the idle job.
+	 *
+	 *  The GETJOB_BEGIN to GETJOB_END section of SCRIPTS below 
+	 *  is a critical path. If it is only partially executed, the 
+	 *  job address may not yet be in the DSA while the next 
+	 *  queue position already points to the next JOB.
+	 */
+	SCR_LOAD_ABS (dsa, 4),
+		PADDR_B (startpos),
+	SCR_LOAD_REL (temp, 4),
+		4,
+}/*-------------------------< GETJOB_BEGIN >---------------------*/,{
+	SCR_STORE_ABS (temp, 4),
+		PADDR_B (startpos),
+	SCR_LOAD_REL (dsa, 4),
+		0,
+}/*-------------------------< GETJOB_END >-----------------------*/,{
+	SCR_LOAD_REL (temp, 4),
+		0,
+	SCR_RETURN,
+		0,
+}/*-------------------------< SELECT >---------------------------*/,{
+	/*
+	 *  DSA	contains the address of a scheduled
+	 *  	data structure.
+	 *
+	 *  SCRATCHA contains the address of the start queue  
+	 *  	entry which points to the next job.
+	 *
+	 *  Set Initiator mode.
+	 *
+	 *  (Target mode is left as an exercise for the reader)
+	 */
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+	SCR_CLR (SCR_TRG),
+		0,
+#endif
+	/*
+	 *      And try to select this target.
+	 */
+	SCR_SEL_TBL_ATN ^ offsetof (struct sym_dsb, select),
+		PADDR_A (ungetjob),
+	/*
+	 *  Now there are 4 possibilities:
+	 *
+	 *  (1) The chip loses arbitration.
+	 *  This is ok, because it will try again,
+	 *  when the bus becomes idle.
+	 *  (But beware of the timeout function!)
+	 *
+	 *  (2) The chip is reselected.
+	 *  Then the script processor takes the jump
+	 *  to the RESELECT label.
+	 *
+	 *  (3) The chip wins arbitration.
+	 *  Then it will execute SCRIPTS instruction until 
+	 *  the next instruction that checks SCSI phase.
+	 *  Then will stop and wait for selection to be 
+	 *  complete or selection time-out to occur.
+	 *
+	 *  After having won arbitration, the SCRIPTS  
+	 *  processor is able to execute instructions while 
+	 *  the SCSI core is performing SCSI selection.
+	 */
+	/*
+	 *      Initialize the status registers
+	 */
+	SCR_LOAD_REL (scr0, 4),
+		offsetof (struct sym_ccb, phys.head.status),
+	/*
+	 *  We may need help from the CPU if the DMA segment 
+	 *  registers aren't up-to-date for this IO.
+	 *  Patched with a NOOP for chips that do not 
+	 *  support DAC addressing.
+	 */
+#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
+}/*-------------------------< IS_DMAP_DIRTY >--------------------*/,{
+	SCR_FROM_REG (HX_REG),
+		0,
+	SCR_INT ^ IFTRUE (MASK (HX_DMAP_DIRTY, HX_DMAP_DIRTY)),
+		SIR_DMAP_DIRTY,
+#endif
+}/*-------------------------< WF_SEL_DONE >----------------------*/,{
+	SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+		SIR_SEL_ATN_NO_MSG_OUT,
+}/*-------------------------< SEL_DONE >-------------------------*/,{
+	/*
+	 *  C1010-33 errata work-around.
+	 *  Due to a race, the SCSI core may not have 
+	 *  loaded SCNTL3 on the SEL_TBL instruction.
+	 *  We reload it once phase is stable.
+	 *  Patched with a NOOP for other chips.
+	 */
+	SCR_LOAD_REL (scntl3, 1),
+		offsetof(struct sym_dsb, select.sel_scntl3),
+}/*-------------------------< SEND_IDENT >-----------------------*/,{
+	/*
+	 *  Selection complete.
+	 *  Send the IDENTIFY and possibly the TAG message 
+	 *  and negotiation message if present.
+	 */
+	SCR_MOVE_TBL ^ SCR_MSG_OUT,
+		offsetof (struct sym_dsb, smsg),
+}/*-------------------------< SELECT2 >--------------------------*/,{
+#ifdef SYM_CONF_IARB_SUPPORT
+	/*
+	 *  Set IMMEDIATE ARBITRATION if we have been given 
+	 *  a hint to do so. (Some job to do after this one).
+	 */
+	SCR_FROM_REG (HF_REG),
+		0,
+	SCR_JUMPR ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB)),
+		8,
+	SCR_REG_REG (scntl1, SCR_OR, IARB),
+		0,
+#endif
+	/*
+	 *  Anticipate the COMMAND phase.
+	 *  This is the PHASE we expect at this point.
+	 */
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)),
+		PADDR_A (sel_no_cmd),
+}/*-------------------------< COMMAND >--------------------------*/,{
+	/*
+	 *  ... and send the command
+	 */
+	SCR_MOVE_TBL ^ SCR_COMMAND,
+		offsetof (struct sym_dsb, cmd),
+}/*-------------------------< DISPATCH >-------------------------*/,{
+	/*
+	 *  MSG_IN is the only phase that shall be 
+	 *  entered at least once for each (re)selection.
+	 *  So we test it first.
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+		PADDR_A (msg_in),
+	SCR_JUMP ^ IFTRUE (IF (SCR_DATA_OUT)),
+		PADDR_A (datao_phase),
+	SCR_JUMP ^ IFTRUE (IF (SCR_DATA_IN)),
+		PADDR_A (datai_phase),
+	SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)),
+		PADDR_A (status),
+	SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)),
+		PADDR_A (command),
+	SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
+		PADDR_B (msg_out),
+	/*
+	 *  Discard as many illegal phases as 
+	 *  required and tell the C code about it.
+	 */
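+	/*
+	 *  (Each instruction below is 8 bytes long: the +16 relative 
+	 *  jumps skip the two-instruction discard loop when the phase 
+	 *  does not match, and the -16 jumps repeat the one-byte MOVE 
+	 *  while it does.)
+	 */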
+	SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_OUT)),
+		16,
+	SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
+		HADDR_1 (scratch),
+	SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_OUT)),
+		-16,
+	SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_IN)),
+		16,
+	SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
+		HADDR_1 (scratch),
+	SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_IN)),
+		-16,
+	SCR_INT,
+		SIR_BAD_PHASE,
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< SEL_NO_CMD >-----------------------*/,{
+	/*
+	 *  The target does not switch to command 
+	 *  phase after IDENTIFY has been sent.
+	 *
+	 *  If it stays in MSG OUT phase, send it 
+	 *  the IDENTIFY again.
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+		PADDR_B (resend_ident),
+	/*
+	 *  If the target does not switch to MSG IN phase 
+	 *  and we sent a negotiation, signal the 
+	 *  failure immediately.
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+		PADDR_A (dispatch),
+	SCR_FROM_REG (HS_REG),
+		0,
+	SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
+		SIR_NEGO_FAILED,
+	/*
+	 *  Jump to dispatcher.
+	 */
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< INIT >-----------------------------*/,{
+	/*
+	 *  Wait for the SCSI RESET signal to be 
+	 *  inactive before restarting operations, 
+	 *  since the chip may hang on SEL_ATN 
+	 *  if SCSI RESET is active.
+	 */
+	SCR_FROM_REG (sstat0),
+		0,
+	SCR_JUMPR ^ IFTRUE (MASK (IRST, IRST)),
+		-16,
+	SCR_JUMP,
+		PADDR_A (start),
+}/*-------------------------< CLRACK >---------------------------*/,{
+	/*
+	 *  Terminate possible pending message phase.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DATAI_DONE >-----------------------*/,{
+	/*
+	 *  Save current pointer to LASTP.
+	 */
+	SCR_STORE_REL (temp, 4),
+		offsetof (struct sym_ccb, phys.head.lastp),
+	/*
+	 *  If the SWIDE is not full, jump to dispatcher.
+	 *  We anticipate a STATUS phase.
+	 */
+	SCR_FROM_REG (scntl2),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (WSR, WSR)),
+		PADDR_A (datai_done_wsr),
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
+		PADDR_A (status),
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DATAI_DONE_WSR >-------------------*/,{
+	/*
+	 *  The SWIDE is full.
+	 *  Clear this condition.
+	 */
+	SCR_REG_REG (scntl2, SCR_OR, WSR),
+		0,
+	/*
+	 *  We are expecting an IGNORE RESIDUE message 
+	 *  from the device, otherwise we are in a data 
+	 *  overrun condition. Check against the MSG_IN phase.
+	 */
+	SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		SIR_SWIDE_OVERRUN,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		PADDR_A (dispatch),
+	/*
+	 *  We are in MSG_IN phase.
+	 *  Read the first byte of the message.
+	 *  If it is not an IGNORE RESIDUE message,
+	 *  signal overrun and jump to message 
+	 *  processing.
+	 */
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (msgin[0]),
+	SCR_INT ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+		SIR_SWIDE_OVERRUN,
+	SCR_JUMP ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+		PADDR_A (msg_in2),
+	/*
+	 *  We got the message we expected.
+	 *  Read the 2nd byte, and jump to dispatcher.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (msgin[1]),
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DATAO_DONE >-----------------------*/,{
+	/*
+	 *  Save current pointer to LASTP.
+	 */
+	SCR_STORE_REL (temp, 4),
+		offsetof (struct sym_ccb, phys.head.lastp),
+	/*
+	 *  If the SODL is not full jump to dispatcher.
+	 *  We anticipate a STATUS phase.
+	 */
+	SCR_FROM_REG (scntl2),
+		0,
+	SCR_JUMP ^ IFTRUE (MASK (WSS, WSS)),
+		PADDR_A (datao_done_wss),
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
+		PADDR_A (status),
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DATAO_DONE_WSS >-------------------*/,{
+	/*
+	 *  The SODL is full, clear this condition.
+	 */
+	SCR_REG_REG (scntl2, SCR_OR, WSS),
+		0,
+	/*
+	 *  And signal a DATA UNDERRUN condition 
+	 *  to the C code.
+	 */
+	SCR_INT,
+		SIR_SODL_UNDERRUN,
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DATAI_PHASE >----------------------*/,{
+	/*
+	 *  Jump to current pointer.
+	 */
+	SCR_LOAD_REL (temp, 4),
+		offsetof (struct sym_ccb, phys.head.lastp),
+	SCR_RETURN,
+		0,
+}/*-------------------------< DATAO_PHASE >----------------------*/,{
+	/*
+	 *  C1010-66 errata work-around.
+	 *  Extra clocks of data hold must be inserted 
+	 *  in DATA OUT phase on 33 MHz PCI BUS.
+	 *  Patched with a NOOP for other chips.
+	 */
+	SCR_REG_REG (scntl4, SCR_OR, (XCLKH_DT|XCLKH_ST)),
+		0,
+	/*
+	 *  Jump to current pointer.
+	 */
+	SCR_LOAD_REL (temp, 4),
+		offsetof (struct sym_ccb, phys.head.lastp),
+	SCR_RETURN,
+		0,
+}/*-------------------------< MSG_IN >---------------------------*/,{
+	/*
+	 *  Get the first byte of the message.
+	 *
+	 *  The script processor doesn't negate the
+	 *  ACK signal after this transfer.
+	 */
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (msgin[0]),
+}/*-------------------------< MSG_IN2 >--------------------------*/,{
+	/*
+	 *  Check first against 1 byte messages 
+	 *  that we handle from SCRIPTS.
+	 */
+	SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)),
+		PADDR_A (complete),
+	SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)),
+		PADDR_A (disconnect),
+	SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)),
+		PADDR_A (save_dp),
+	SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)),
+		PADDR_A (restore_dp),
+	/*
+	 *  We handle all other messages from the 
+	 *  C code, so there is no need to waste on-chip 
+	 *  RAM on them.
+	 */
+	SCR_JUMP,
+		PADDR_B (msg_in_etc),
+}/*-------------------------< STATUS >---------------------------*/,{
+	/*
+	 *  get the status
+	 */
+	SCR_MOVE_ABS (1) ^ SCR_STATUS,
+		HADDR_1 (scratch),
+#ifdef SYM_CONF_IARB_SUPPORT
+	/*
+	 *  If STATUS is not GOOD, clear IMMEDIATE ARBITRATION, 
+	 *  since we may have to tamper with the start queue from 
+	 *  the C code.
+	 */
+	SCR_JUMPR ^ IFTRUE (DATA (S_GOOD)),
+		8,
+	SCR_REG_REG (scntl1, SCR_AND, ~IARB),
+		0,
+#endif
+	/*
+	 *  save status to scsi_status.
+	 *  mark as complete.
+	 */
+	SCR_TO_REG (SS_REG),
+		0,
+	SCR_LOAD_REG (HS_REG, HS_COMPLETE),
+		0,
+	/*
+	 *  Anticipate the MESSAGE PHASE for 
+	 *  the TASK COMPLETE message.
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+		PADDR_A (msg_in),
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< COMPLETE >-------------------------*/,{
+	/*
+	 *  Complete message.
+	 *
+	 *  When we terminate the cycle by clearing ACK,
+	 *  the target may disconnect immediately.
+	 *
+	 *  We don't want to be told of an "unexpected disconnect",
+	 *  so we disable this feature.
+	 */
+	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+		0,
+	/*
+	 *  Terminate cycle ...
+	 */
+	SCR_CLR (SCR_ACK|SCR_ATN),
+		0,
+	/*
+	 *  ... and wait for the disconnect.
+	 */
+	SCR_WAIT_DISC,
+		0,
+}/*-------------------------< COMPLETE2 >------------------------*/,{
+	/*
+	 *  Save host status.
+	 */
+	SCR_STORE_REL (scr0, 4),
+		offsetof (struct sym_ccb, phys.head.status),
+	/*
+	 *  Some bridges may reorder DMA writes to memory.
+	 *  We do not want the CPU to deal with completions 
+	 *  without all the posted writes having been flushed 
+	 *  to memory. This DUMMY READ should flush posted 
+	 *  buffers prior to the CPU having to deal with 
+	 *  completions.
+	 */
+	SCR_LOAD_REL (scr0, 4),	/* DUMMY READ */
+		offsetof (struct sym_ccb, phys.head.status),
+
+	/*
+	 *  If command resulted in not GOOD status,
+	 *  call the C code if needed.
+	 */
+	SCR_FROM_REG (SS_REG),
+		0,
+	SCR_CALL ^ IFFALSE (DATA (S_GOOD)),
+		PADDR_B (bad_status),
+	/*
+	 *  If we performed an auto-sense, call 
+	 *  the C code to synchronize task aborts 
+	 *  with UNIT ATTENTION conditions.
+	 */
+	SCR_FROM_REG (HF_REG),
+		0,
+	SCR_JUMP ^ IFFALSE (MASK (0 ,(HF_SENSE|HF_EXT_ERR))),
+		PADDR_A (complete_error),
+}/*-------------------------< DONE >-----------------------------*/,{
+	/*
+	 *  Copy the DSA to the DONE QUEUE and 
+	 *  signal completion to the host.
+	 *  If we are interrupted between DONE 
+	 *  and DONE_END, we must reset, otherwise 
+	 *  the completed CCB may be lost.
+	 */
+	SCR_STORE_ABS (dsa, 4),
+		PADDR_B (scratch),
+	SCR_LOAD_ABS (dsa, 4),
+		PADDR_B (done_pos),
+	SCR_LOAD_ABS (scratcha, 4),
+		PADDR_B (scratch),
+	SCR_STORE_REL (scratcha, 4),
+		0,
+	/*
+	 *  The instruction below reads the DONE QUEUE next 
+	 *  free position from memory.
+	 *  In addition it ensures that all PCI posted writes  
+	 *  are flushed and so the DSA value of the done 
+	 *  CCB is visible by the CPU before INTFLY is raised.
+	 */
+	SCR_LOAD_REL (scratcha, 4),
+		4,
+	SCR_INT_FLY,
+		0,
+	SCR_STORE_ABS (scratcha, 4),
+		PADDR_B (done_pos),
+}/*-------------------------< DONE_END >-------------------------*/,{
+	SCR_JUMP,
+		PADDR_A (start),
+}/*-------------------------< COMPLETE_ERROR >-------------------*/,{
+	SCR_LOAD_ABS (scratcha, 4),
+		PADDR_B (startpos),
+	SCR_INT,
+		SIR_COMPLETE_ERROR,
+}/*-------------------------< SAVE_DP >--------------------------*/,{
+	/*
+	 *  Clear ACK immediately.
+	 *  No need to delay it.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+	/*
+	 *  Keep track that we received a SAVE DP, so 
+	 *  we will switch to the other PM context 
+	 *  on the next PM since the DP may point 
+	 *  to the current PM context.
+	 */
+	SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
+		0,
+	/*
+	 *  SAVE_DP message:
+	 *  Copy LASTP to SAVEP.
+	 */
+	SCR_LOAD_REL (scratcha, 4),
+		offsetof (struct sym_ccb, phys.head.lastp),
+	SCR_STORE_REL (scratcha, 4),
+		offsetof (struct sym_ccb, phys.head.savep),
+	/*
+	 *  Anticipate the MESSAGE PHASE for 
+	 *  the DISCONNECT message.
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+		PADDR_A (msg_in),
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< RESTORE_DP >-----------------------*/,{
+	/*
+	 *  Clear ACK immediately.
+	 *  No need to delay it.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+	/*
+	 *  Copy SAVEP to LASTP.
+	 */
+	SCR_LOAD_REL  (scratcha, 4),
+		offsetof (struct sym_ccb, phys.head.savep),
+	SCR_STORE_REL (scratcha, 4),
+		offsetof (struct sym_ccb, phys.head.lastp),
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DISCONNECT >-----------------------*/,{
+	/*
+	 *  DISCONNECTing  ...
+	 *
+	 *  disable the "unexpected disconnect" feature,
+	 *  and remove the ACK signal.
+	 */
+	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+		0,
+	SCR_CLR (SCR_ACK|SCR_ATN),
+		0,
+	/*
+	 *  Wait for the disconnect.
+	 */
+	SCR_WAIT_DISC,
+		0,
+	/*
+	 *  Status is: DISCONNECTED.
+	 */
+	SCR_LOAD_REG (HS_REG, HS_DISCONNECT),
+		0,
+	/*
+	 *  Save host status.
+	 */
+	SCR_STORE_REL (scr0, 4),
+		offsetof (struct sym_ccb, phys.head.status),
+	SCR_JUMP,
+		PADDR_A (start),
+}/*-------------------------< IDLE >-----------------------------*/,{
+	/*
+	 *  Nothing to do?
+	 *  Switch the LED off and wait for reselect.
+	 *  Will be patched with a NO_OP if LED
+	 *  not needed or not desired.
+	 */
+	SCR_REG_REG (gpreg, SCR_OR, 0x01),
+		0,
+#ifdef SYM_CONF_IARB_SUPPORT
+	SCR_JUMPR,
+		8,
+#endif
+}/*-------------------------< UNGETJOB >-------------------------*/,{
+#ifdef SYM_CONF_IARB_SUPPORT
+	/*
+	 *  Set IMMEDIATE ARBITRATION for the next time.
+	 *  This will give us a better chance to win arbitration 
+	 *  for the job we just wanted to do.
+	 */
+	SCR_REG_REG (scntl1, SCR_OR, IARB),
+		0,
+#endif
+	/*
+	 *  We are not able to restart the SCRIPTS if we are 
+	 *  interrupted and these instructions haven't all been 
+	 *  executed. BTW, this is very unlikely to 
+	 *  happen, but we check for it from the C code.
+	 */
+	SCR_LOAD_REG (dsa, 0xff),
+		0,
+	SCR_STORE_ABS (scratcha, 4),
+		PADDR_B (startpos),
+}/*-------------------------< RESELECT >-------------------------*/,{
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+	/*
+	 *  Make sure we are in initiator mode.
+	 */
+	SCR_CLR (SCR_TRG),
+		0,
+#endif
+	/*
+	 *  Sleep waiting for a reselection.
+	 */
+	SCR_WAIT_RESEL,
+		PADDR_A(start),
+}/*-------------------------< RESELECTED >-----------------------*/,{
+	/*
+	 *  Switch the LED on.
+	 *  Will be patched with a NO_OP if LED
+	 *  not needed or not desired.
+	 */
+	SCR_REG_REG (gpreg, SCR_AND, 0xfe),
+		0,
+	/*
+	 *  load the target id into the sdid
+	 */
+	SCR_REG_SFBR (ssid, SCR_AND, 0x8F),
+		0,
+	SCR_TO_REG (sdid),
+		0,
+	/*
+	 *  Load the target control block address
+	 */
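+	/*
+	 *  (The shifts below multiply the target number left in 
+	 *  SFBR by 4 and the AND keeps the result within the 
+	 *  16-entry table of 32-bit addresses. Only the low byte 
+	 *  of DSA is patched, which relies on targtbl being 
+	 *  suitably aligned.)
+	 */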
+	SCR_LOAD_ABS (dsa, 4),
+		PADDR_B (targtbl),
+	SCR_SFBR_REG (dsa, SCR_SHL, 0),
+		0,
+	SCR_REG_REG (dsa, SCR_SHL, 0),
+		0,
+	SCR_REG_REG (dsa, SCR_AND, 0x3c),
+		0,
+	SCR_LOAD_REL (dsa, 4),
+		0,
+	/*
+	 *  We expect MESSAGE IN phase.
+	 *  If not, get help from the C code.
+	 */
+	SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		SIR_RESEL_NO_MSG_IN,
+	/*
+	 *  Load the legacy synchronous transfer registers.
+	 */
+	SCR_LOAD_REL (scntl3, 1),
+		offsetof(struct sym_tcb, head.wval),
+	SCR_LOAD_REL (sxfer, 1),
+		offsetof(struct sym_tcb, head.sval),
+}/*-------------------------< RESEL_SCNTL4 >---------------------*/,{
+	/*
+	 *  The C1010 uses a new synchronous timing scheme.
+	 *  Will be patched with a NO_OP if not a C1010.
+	 */
+	SCR_LOAD_REL (scntl4, 1),
+		offsetof(struct sym_tcb, head.uval),
+	/*
+	 *  Get the IDENTIFY message.
+	 */
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (msgin),
+	/*
+	 *  If IDENTIFY LUN #0, use a faster path 
+	 *  to find the LCB structure.
+	 */
+	SCR_JUMP ^ IFTRUE (MASK (0x80, 0xbf)),
+		PADDR_A (resel_lun0),
+	/*
+	 *  If the message isn't an IDENTIFY, 
+	 *  tell the C code about it.
+	 */
+	SCR_INT ^ IFFALSE (MASK (0x80, 0x80)),
+		SIR_RESEL_NO_IDENTIFY,
+	/*
+	 *  It is an IDENTIFY message.
+	 *  Load the LUN control block address.
+	 */
+	SCR_LOAD_REL (dsa, 4),
+		offsetof(struct sym_tcb, head.luntbl_sa),
+	SCR_SFBR_REG (dsa, SCR_SHL, 0),
+		0,
+	SCR_REG_REG (dsa, SCR_SHL, 0),
+		0,
+	SCR_REG_REG (dsa, SCR_AND, 0xfc),
+		0,
+	SCR_LOAD_REL (dsa, 4),
+		0,
+	SCR_JUMPR,
+		8,
+}/*-------------------------< RESEL_LUN0 >-----------------------*/,{
+	/*
+	 *  LUN 0 special case (but usual one :))
+	 */
+	SCR_LOAD_REL (dsa, 4),
+		offsetof(struct sym_tcb, head.lun0_sa),
+	/*
+	 *  Jump indirectly to the reselect action for this LUN.
+	 */
+	SCR_LOAD_REL (temp, 4),
+		offsetof(struct sym_lcb, head.resel_sa),
+	SCR_RETURN,
+		0,
+	/* In normal situations, we jump to RESEL_TAG or RESEL_NO_TAG */
+}/*-------------------------< RESEL_TAG >------------------------*/,{
+	/*
+	 *  ACK the IDENTIFY previously received.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+	/*
+	 *  It shall be a tagged command.
+	 *  Read SIMPLE+TAG.
+	 *  The C code will deal with errors.
+	 *  Aggressive optimization, isn't it? :)
+	 */
+	SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
+		HADDR_1 (msgin),
+	/*
+	 *  Load the pointer to the tagged task 
+	 *  table for this LUN.
+	 */
+	SCR_LOAD_REL (dsa, 4),
+		offsetof(struct sym_lcb, head.itlq_tbl_sa),
+	/*
+	 *  The SIDL still contains the TAG value.
+	 *  Aggressive optimization, isn't it? :):)
+	 */
+	SCR_REG_SFBR (sidl, SCR_SHL, 0),
+		0,
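+	/*
+	 *  (SFBR now holds the shifted TAG; the conditional code 
+	 *  below performs the extra shifts and propagates the 
+	 *  carries into DSA byte 1 when the tagged task table is 
+	 *  larger than 256 bytes.)
+	 */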
+#if SYM_CONF_MAX_TASK*4 > 512
+	SCR_JUMPR ^ IFFALSE (CARRYSET),
+		8,
+	SCR_REG_REG (dsa1, SCR_OR, 2),
+		0,
+	SCR_REG_REG (sfbr, SCR_SHL, 0),
+		0,
+	SCR_JUMPR ^ IFFALSE (CARRYSET),
+		8,
+	SCR_REG_REG (dsa1, SCR_OR, 1),
+		0,
+#elif SYM_CONF_MAX_TASK*4 > 256
+	SCR_JUMPR ^ IFFALSE (CARRYSET),
+		8,
+	SCR_REG_REG (dsa1, SCR_OR, 1),
+		0,
+#endif
+	/*
+	 *  Retrieve the DSA of this task.
+	 *  JUMP indirectly to the restart point of the CCB.
+	 */
+	SCR_SFBR_REG (dsa, SCR_AND, 0xfc),
+		0,
+	SCR_LOAD_REL (dsa, 4),
+		0,
+	SCR_LOAD_REL (temp, 4),
+		offsetof(struct sym_ccb, phys.head.go.restart),
+	SCR_RETURN,
+		0,
+	/* In normal situations we branch to RESEL_DSA */
+}/*-------------------------< RESEL_DSA >------------------------*/,{
+	/*
+	 *  ACK the IDENTIFY or TAG previously received.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+}/*-------------------------< RESEL_DSA1 >-----------------------*/,{
+	/*
+	 *      Initialize the status registers
+	 */
+	SCR_LOAD_REL (scr0, 4),
+		offsetof (struct sym_ccb, phys.head.status),
+	/*
+	 *  Jump to dispatcher.
+	 */
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< RESEL_NO_TAG >---------------------*/,{
+	/*
+	 *  Load the DSA with the unique ITL task.
+	 */
+	SCR_LOAD_REL (dsa, 4),
+		offsetof(struct sym_lcb, head.itl_task_sa),
+	/*
+	 *  JUMP indirectly to the restart point of the CCB.
+	 */
+	SCR_LOAD_REL (temp, 4),
+		offsetof(struct sym_ccb, phys.head.go.restart),
+	SCR_RETURN,
+		0,
+	/* In normal situations we branch to RESEL_DSA */
+}/*-------------------------< DATA_IN >--------------------------*/,{
+/*
+ *  Because the size depends on the
+ *  #define SYM_CONF_MAX_SG parameter,
+ *  it is filled in at runtime.
+ *
+ *  ##===========< i=0; i<SYM_CONF_MAX_SG >=========
+ *  ||	SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ *  ||		offsetof (struct sym_dsb, data[ i]),
+ *  ##==========================================
+ */
+0
+}/*-------------------------< DATA_IN2 >-------------------------*/,{
+	SCR_CALL,
+		PADDR_A (datai_done),
+	SCR_JUMP,
+		PADDR_B (data_ovrun),
+}/*-------------------------< DATA_OUT >-------------------------*/,{
+/*
+ *  Because the size depends on the
+ *  #define SYM_CONF_MAX_SG parameter,
+ *  it is filled in at runtime.
+ *
+ *  ##===========< i=0; i<SYM_CONF_MAX_SG >=========
+ *  ||	SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ *  ||		offsetof (struct sym_dsb, data[ i]),
+ *  ##==========================================
+ */
+0
+}/*-------------------------< DATA_OUT2 >------------------------*/,{
+	SCR_CALL,
+		PADDR_A (datao_done),
+	SCR_JUMP,
+		PADDR_B (data_ovrun),
+}/*-------------------------< PM0_DATA >-------------------------*/,{
+	/*
+	 *  Read our host flags to SFBR, so we will be able 
+	 *  to check against the data direction we expect.
+	 */
+	SCR_FROM_REG (HF_REG),
+		0,
+	/*
+	 *  Check against actual DATA PHASE.
+	 */
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+		PADDR_A (pm0_data_out),
+	/*
+	 *  Actual phase is DATA IN.
+	 *  Check against expected direction.
+	 */
+	SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+		PADDR_B (data_ovrun),
+	/*
+	 *  Keep track that we are moving data from the 
+	 *  PM0 DATA mini-script.
+	 */
+	SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+		0,
+	/*
+	 *  Move the data to memory.
+	 */
+	SCR_CHMOV_TBL ^ SCR_DATA_IN,
+		offsetof (struct sym_ccb, phys.pm0.sg),
+	SCR_JUMP,
+		PADDR_A (pm0_data_end),
+}/*-------------------------< PM0_DATA_OUT >---------------------*/,{
+	/*
+	 *  Actual phase is DATA OUT.
+	 *  Check against expected direction.
+	 */
+	SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+		PADDR_B (data_ovrun),
+	/*
+	 *  Keep track that we are moving data from the 
+	 *  PM0 DATA mini-script.
+	 */
+	SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+		0,
+	/*
+	 *  Move the data from memory.
+	 */
+	SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+		offsetof (struct sym_ccb, phys.pm0.sg),
+}/*-------------------------< PM0_DATA_END >---------------------*/,{
+	/*
+	 *  Clear the flag that told us we were moving 
+	 *  data from the PM0 DATA mini-script.
+	 */
+	SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0)),
+		0,
+	/*
+	 *  Return to the previous DATA script which 
+	 *  is guaranteed by design (if no bug) to be 
+	 *  the main DATA script for this transfer.
+	 */
+	SCR_LOAD_REL (temp, 4),
+		offsetof (struct sym_ccb, phys.pm0.ret),
+	SCR_RETURN,
+		0,
+}/*-------------------------< PM1_DATA >-------------------------*/,{
+	/*
+	 *  Read our host flags to SFBR, so we will be able 
+	 *  to check against the data direction we expect.
+	 */
+	SCR_FROM_REG (HF_REG),
+		0,
+	/*
+	 *  Check against actual DATA PHASE.
+	 */
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+		PADDR_A (pm1_data_out),
+	/*
+	 *  Actual phase is DATA IN.
+	 *  Check against expected direction.
+	 */
+	SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+		PADDR_B (data_ovrun),
+	/*
+	 *  Keep track that we are moving data from the 
+	 *  PM1 DATA mini-script.
+	 */
+	SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+		0,
+	/*
+	 *  Move the data to memory.
+	 */
+	SCR_CHMOV_TBL ^ SCR_DATA_IN,
+		offsetof (struct sym_ccb, phys.pm1.sg),
+	SCR_JUMP,
+		PADDR_A (pm1_data_end),
+}/*-------------------------< PM1_DATA_OUT >---------------------*/,{
+	/*
+	 *  Actual phase is DATA OUT.
+	 *  Check against expected direction.
+	 */
+	SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+		PADDR_B (data_ovrun),
+	/*
+	 *  Keep track that we are moving data from the 
+	 *  PM1 DATA mini-script.
+	 */
+	SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+		0,
+	/*
+	 *  Move the data from memory.
+	 */
+	SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+		offsetof (struct sym_ccb, phys.pm1.sg),
+}/*-------------------------< PM1_DATA_END >---------------------*/,{
+	/*
+	 *  Clear the flag that told us we were moving 
+	 *  data from the PM1 DATA mini-script.
+	 */
+	SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1)),
+		0,
+	/*
+	 *  Return to the previous DATA script which 
+	 *  is guaranteed by design (if no bug) to be 
+	 *  the main DATA script for this transfer.
+	 */
+	SCR_LOAD_REL (temp, 4),
+		offsetof (struct sym_ccb, phys.pm1.ret),
+	SCR_RETURN,
+		0,
+}/*-------------------------<>-----------------------------------*/
+};
+
+static struct SYM_FWB_SCR SYM_FWB_SCR = {
+/*--------------------------< START64 >--------------------------*/ {
+	/*
+	 *  SCRIPT entry point for the 895A, 896 and 1010.
+	 *  For now, there is no specific stuff for those 
+	 *  chips at this point, but this may come.
+	 */
+	SCR_JUMP,
+		PADDR_A (init),
+}/*-------------------------< NO_DATA >--------------------------*/,{
+	SCR_JUMP,
+		PADDR_B (data_ovrun),
+}/*-------------------------< SEL_FOR_ABORT >--------------------*/,{
+	/*
+	 *  The C code jumps here if we have some target 
+	 *  to reset or some disconnected job to abort. 
+	 *  Since error recovery is serious business, we will 
+	 *  really reset the SCSI BUS in case a SCSI interrupt 
+	 *  occurs in this path.
+	 */
+#ifdef SYM_CONF_TARGET_ROLE_SUPPORT
+	/*
+	 *  Set initiator mode.
+	 */
+	SCR_CLR (SCR_TRG),
+		0,
+#endif
+	/*
+	 *      And try to select this target.
+	 */
+	SCR_SEL_TBL_ATN ^ offsetof (struct sym_hcb, abrt_sel),
+		PADDR_A (reselect),
+	/*
+	 *  Wait for the selection to complete or 
+	 *  the selection to time out.
+	 */
+	SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+		-8,
+	/*
+	 *  Call the C code.
+	 */
+	SCR_INT,
+		SIR_TARGET_SELECTED,
+	/*
+	 *  The C code should let us continue here. 
+	 *  Send the 'kiss of death' message.
+	 *  We expect an immediate disconnect once 
+	 *  the target has eaten the message.
+	 */
+	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+		0,
+	SCR_MOVE_TBL ^ SCR_MSG_OUT,
+		offsetof (struct sym_hcb, abrt_tbl),
+	SCR_CLR (SCR_ACK|SCR_ATN),
+		0,
+	SCR_WAIT_DISC,
+		0,
+	/*
+	 *  Tell the C code that we are done.
+	 */
+	SCR_INT,
+		SIR_ABORT_SENT,
+}/*-------------------------< SEL_FOR_ABORT_1 >------------------*/,{
+	/*
+	 *  Jump at scheduler.
+	 */
+	SCR_JUMP,
+		PADDR_A (start),
+}/*-------------------------< MSG_IN_ETC >-----------------------*/,{
+	/*
+	 *  If it is an EXTENDED (variable size) message, 
+	 *  handle it.
+	 */
+	SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
+		PADDR_B (msg_extended),
+	/*
+	 *  Let the C code handle any other 
+	 *  1 byte message.
+	 */
+	SCR_JUMP ^ IFTRUE (MASK (0x00, 0xf0)),
+		PADDR_B (msg_received),
+	SCR_JUMP ^ IFTRUE (MASK (0x10, 0xf0)),
+		PADDR_B (msg_received),
+	/*
+	 *  We do not handle 2-byte messages from SCRIPTS, 
+	 *  so let the C code deal with these ones too.
+	 */
+	SCR_JUMP ^ IFFALSE (MASK (0x20, 0xf0)),
+		PADDR_B (msg_weird_seen),
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (msgin[1]),
+}/*-------------------------< MSG_RECEIVED >---------------------*/,{
+	SCR_LOAD_REL (scratcha, 4),	/* DUMMY READ */
+		0,
+	SCR_INT,
+		SIR_MSG_RECEIVED,
+}/*-------------------------< MSG_WEIRD_SEEN >-------------------*/,{
+	SCR_LOAD_REL (scratcha, 4),	/* DUMMY READ */
+		0,
+	SCR_INT,
+		SIR_MSG_WEIRD,
+}/*-------------------------< MSG_EXTENDED >---------------------*/,{
+	/*
+	 *  Clear ACK and get the next byte 
+	 *  assumed to be the message length.
+	 */
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (msgin[1]),
+	/*
+	 *  Try to catch some unlikely situations such as a 
+	 *  zero length or a length that is too large.
+	 */
+	SCR_JUMP ^ IFTRUE (DATA (0)),
+		PADDR_B (msg_weird_seen),
+	SCR_TO_REG (scratcha),
+		0,
+	SCR_REG_REG (sfbr, SCR_ADD, (256-8)),
+		0,
+	SCR_JUMP ^ IFTRUE (CARRYSET),
+		PADDR_B (msg_weird_seen),
+	/*
+	 *  We do not handle extended messages from SCRIPTS.
+	 *  Read the amount of data corresponding to the 
+	 *  message length and call the C code.
+	 */
+	SCR_STORE_REL (scratcha, 1),
+		offsetof (struct sym_dsb, smsg_ext.size),
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_MOVE_TBL ^ SCR_MSG_IN,
+		offsetof (struct sym_dsb, smsg_ext),
+	SCR_JUMP,
+		PADDR_B (msg_received),
+}/*-------------------------< MSG_BAD >--------------------------*/,{
+	/*
+	 *  unimplemented message - reject it.
+	 */
+	SCR_INT,
+		SIR_REJECT_TO_SEND,
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_JUMP,
+		PADDR_A (clrack),
+}/*-------------------------< MSG_WEIRD >------------------------*/,{
+	/*
+	 *  weird message received
+	 *  ignore all MSG IN phases and reject it.
+	 */
+	SCR_INT,
+		SIR_REJECT_TO_SEND,
+	SCR_SET (SCR_ATN),
+		0,
+}/*-------------------------< MSG_WEIRD1 >-----------------------*/,{
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+		PADDR_A (dispatch),
+	SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+		HADDR_1 (scratch),
+	SCR_JUMP,
+		PADDR_B (msg_weird1),
+}/*-------------------------< WDTR_RESP >------------------------*/,{
+	/*
+	 *  let the target fetch our answer.
+	 */
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+		PADDR_B (nego_bad_phase),
+}/*-------------------------< SEND_WDTR >------------------------*/,{
+	/*
+	 *  Send the M_X_WIDE_REQ
+	 */
+	SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
+		HADDR_1 (msgout),
+	SCR_JUMP,
+		PADDR_B (msg_out_done),
+}/*-------------------------< SDTR_RESP >------------------------*/,{
+	/*
+	 *  let the target fetch our answer.
+	 */
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+		PADDR_B (nego_bad_phase),
+}/*-------------------------< SEND_SDTR >------------------------*/,{
+	/*
+	 *  Send the M_X_SYNC_REQ
+	 */
+	SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
+		HADDR_1 (msgout),
+	SCR_JUMP,
+		PADDR_B (msg_out_done),
+}/*-------------------------< PPR_RESP >-------------------------*/,{
+	/*
+	 *  let the target fetch our answer.
+	 */
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_CLR (SCR_ACK),
+		0,
+	SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+		PADDR_B (nego_bad_phase),
+}/*-------------------------< SEND_PPR >-------------------------*/,{
+	/*
+	 *  Send the M_X_PPR_REQ
+	 */
+	SCR_MOVE_ABS (8) ^ SCR_MSG_OUT,
+		HADDR_1 (msgout),
+	SCR_JUMP,
+		PADDR_B (msg_out_done),
+}/*-------------------------< NEGO_BAD_PHASE >-------------------*/,{
+	SCR_INT,
+		SIR_NEGO_PROTO,
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< MSG_OUT >--------------------------*/,{
+	/*
+	 *  The target requests a message.
+	 *  We do not send messages that may 
+	 *  require the device to go to bus free.
+	 */
+	SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+		HADDR_1 (msgout),
+	/*
+	 *  ... wait for the next phase
+	 *  if it's a message out, send it again, ...
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+		PADDR_B (msg_out),
+}/*-------------------------< MSG_OUT_DONE >---------------------*/,{
+	/*
+	 *  Let the C code be aware of the 
+	 *  sent message and clear the message.
+	 */
+	SCR_INT,
+		SIR_MSG_OUT_DONE,
+	/*
+	 *  ... and process the next phase
+	 */
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< DATA_OVRUN >-----------------------*/,{
+	/*
+	 *  Use scratcha to count the extra bytes.
+	 */
+	SCR_LOAD_ABS (scratcha, 4),
+		PADDR_B (zero),
+}/*-------------------------< DATA_OVRUN1 >----------------------*/,{
+	/*
+	 *  The target may want to transfer too much data.
+	 *
+	 *  If phase is DATA OUT write 1 byte and count it.
+	 */
+	SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+		16,
+	SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT,
+		HADDR_1 (scratch),
+	SCR_JUMP,
+		PADDR_B (data_ovrun2),
+	/*
+	 *  If WSR is set, clear this condition, and 
+	 *  count this byte.
+	 */
+	SCR_FROM_REG (scntl2),
+		0,
+	SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
+		16,
+	SCR_REG_REG (scntl2, SCR_OR, WSR),
+		0,
+	SCR_JUMP,
+		PADDR_B (data_ovrun2),
+	/*
+	 *  Finally check against DATA IN phase.
+	 *  Signal data overrun to the C code 
+	 *  and jump to dispatcher if not so.
+	 *  Read 1 byte otherwise and count it.
+	 */
+	SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_IN)),
+		16,
+	SCR_INT,
+		SIR_DATA_OVERRUN,
+	SCR_JUMP,
+		PADDR_A (dispatch),
+	SCR_CHMOV_ABS (1) ^ SCR_DATA_IN,
+		HADDR_1 (scratch),
+}/*-------------------------< DATA_OVRUN2 >----------------------*/,{
+	/*
+	 *  Count this byte.
+	 *  This allows us to return a negative 
+	 *  residual to the user.
+	 */
+	SCR_REG_REG (scratcha,  SCR_ADD,  0x01),
+		0,
+	SCR_REG_REG (scratcha1, SCR_ADDC, 0),
+		0,
+	SCR_REG_REG (scratcha2, SCR_ADDC, 0),
+		0,
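+	/*
+	 *  (The two ADDC instructions ripple the carry through 
+	 *  scratcha1 and scratcha2, so the count behaves as a 
+	 *  24-bit counter.)
+	 */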
+	/*
+	 *  .. and repeat as required.
+	 */
+	SCR_JUMP,
+		PADDR_B (data_ovrun1),
+}/*-------------------------< ABORT_RESEL >----------------------*/,{
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_CLR (SCR_ACK),
+		0,
+	/*
+	 *  Send the abort/abort-tag/reset message.
+	 *  We expect an immediate disconnect.
+	 */
+	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+		0,
+	SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+		HADDR_1 (msgout),
+	SCR_CLR (SCR_ACK|SCR_ATN),
+		0,
+	SCR_WAIT_DISC,
+		0,
+	SCR_INT,
+		SIR_RESEL_ABORTED,
+	SCR_JUMP,
+		PADDR_A (start),
+}/*-------------------------< RESEND_IDENT >---------------------*/,{
+	/*
+	 *  The target stays in MSG OUT phase after having acked 
+	 *  Identify [+ Tag [+ Extended message ]]. Targets shall
+	 *  behave this way on parity error.
+	 *  We must send all the messages again.
+	 */
+	SCR_SET (SCR_ATN), /* Shall be asserted 2 deskew delays before the  */
+		0,         /* 1st ACK = 90 ns. Hope the chip isn't too fast */
+	SCR_JUMP,
+		PADDR_A (send_ident),
+}/*-------------------------< IDENT_BREAK >----------------------*/,{
+	SCR_CLR (SCR_ATN),
+		0,
+	SCR_JUMP,
+		PADDR_A (select2),
+}/*-------------------------< IDENT_BREAK_ATN >------------------*/,{
+	SCR_SET (SCR_ATN),
+		0,
+	SCR_JUMP,
+		PADDR_A (select2),
+}/*-------------------------< SDATA_IN >-------------------------*/,{
+	SCR_CHMOV_TBL ^ SCR_DATA_IN,
+		offsetof (struct sym_dsb, sense),
+	SCR_CALL,
+		PADDR_A (datai_done),
+	SCR_JUMP,
+		PADDR_B (data_ovrun),
+}/*-------------------------< RESEL_BAD_LUN >--------------------*/,{
+	/*
+	 *  The message is an IDENTIFY, but the LUN is unknown.
+	 *  Signal the problem to the C code for logging the event.
+	 *  Send an M_ABORT to clear all pending tasks.
+	 */
+	SCR_INT,
+		SIR_RESEL_BAD_LUN,
+	SCR_JUMP,
+		PADDR_B (abort_resel),
+}/*-------------------------< BAD_I_T_L >------------------------*/,{
+	/*
+	 *  We do not have a task for that I_T_L.
+	 *  Signal the problem to the C code for logging the event.
+	 *  Send an M_ABORT message.
+	 */
+	SCR_INT,
+		SIR_RESEL_BAD_I_T_L,
+	SCR_JUMP,
+		PADDR_B (abort_resel),
+}/*-------------------------< BAD_I_T_L_Q >----------------------*/,{
+	/*
+	 *  We do not have a task that matches the tag.
+	 *  Signal the problem to the C code for logging the event.
+	 *  Send an M_ABORTTAG message.
+	 */
+	SCR_INT,
+		SIR_RESEL_BAD_I_T_L_Q,
+	SCR_JUMP,
+		PADDR_B (abort_resel),
+}/*-------------------------< BAD_STATUS >-----------------------*/,{
+	/*
+	 *  Anything different from INTERMEDIATE 
+	 *  CONDITION MET should be a bad SCSI status, 
+	 *  given that GOOD status has already been tested.
+	 *  Call the C code.
+	 */
+	SCR_LOAD_ABS (scratcha, 4),
+		PADDR_B (startpos),
+	SCR_INT ^ IFFALSE (DATA (S_COND_MET)),
+		SIR_BAD_SCSI_STATUS,
+	SCR_RETURN,
+		0,
+}/*-------------------------< PM_HANDLE >------------------------*/,{
+	/*
+	 *  Phase mismatch handling.
+	 *
+	 *  Since we have to deal with 2 SCSI data pointers  
+	 *  (current and saved), we need at least 2 contexts.
+	 *  Each context (pm0 and pm1) has a saved area, a 
+	 *  SAVE mini-script and a DATA phase mini-script.
+	 */
+	/*
+	 *  Get the PM handling flags.
+	 */
+	SCR_FROM_REG (HF_REG),
+		0,
+	/*
+	 *  If no flags (the first PM for example), avoid 
+	 *  all the heavy flag testing below.
+	 *  This makes the normal case a bit faster.
+	 */
+	SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED))),
+		PADDR_B (pm_handle1),
+	/*
+	 *  If we received a SAVE DP, switch to the 
+	 *  other PM context since the savep may point 
+	 *  to the current PM context.
+	 */
+	SCR_JUMPR ^ IFFALSE (MASK (HF_DP_SAVED, HF_DP_SAVED)),
+		8,
+	SCR_REG_REG (sfbr, SCR_XOR, HF_ACT_PM),
+		0,
+	/*
+	 *  If we have been interrupted in a PM DATA mini-script,
+	 *  we take the return address from the corresponding 
+	 *  saved area.
+	 *  This ensures the return address always points to the 
+	 *  main DATA script for this transfer.
+	 */
+	SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1))),
+		PADDR_B (pm_handle1),
+	SCR_JUMPR ^ IFFALSE (MASK (HF_IN_PM0, HF_IN_PM0)),
+		16,
+	SCR_LOAD_REL (ia, 4),
+		offsetof(struct sym_ccb, phys.pm0.ret),
+	SCR_JUMP,
+		PADDR_B (pm_save),
+	SCR_LOAD_REL (ia, 4),
+		offsetof(struct sym_ccb, phys.pm1.ret),
+	SCR_JUMP,
+		PADDR_B (pm_save),
+}/*-------------------------< PM_HANDLE1 >-----------------------*/,{
+	/*
+	 *  Normal case.
+	 *  Update the return address so that it 
+	 *  will point after the interrupted MOVE.
+	 */
+	SCR_REG_REG (ia, SCR_ADD, 8),
+		0,
+	SCR_REG_REG (ia1, SCR_ADDC, 0),
+		0,
+}/*-------------------------< PM_SAVE >--------------------------*/,{
+	/*
+	 *  Clear all the flags that told us if we were 
+	 *  interrupted in a PM DATA mini-script and/or 
+	 *  we received a SAVE DP.
+	 */
+	SCR_SFBR_REG (HF_REG, SCR_AND, (~(HF_IN_PM0|HF_IN_PM1|HF_DP_SAVED))),
+		0,
+	/*
+	 *  Choose the current PM context.
+	 */
+	SCR_JUMP ^ IFTRUE (MASK (HF_ACT_PM, HF_ACT_PM)),
+		PADDR_B (pm1_save),
+}/*-------------------------< PM0_SAVE >-------------------------*/,{
+	SCR_STORE_REL (ia, 4),
+		offsetof(struct sym_ccb, phys.pm0.ret),
+	/*
+	 *  If the WSR bit is set, UA and/or RBC may 
+	 *  have to be changed, depending on whether the device 
+	 *  wants to ignore this residue or not.
+	 */
+	SCR_FROM_REG (scntl2),
+		0,
+	SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
+		PADDR_B (pm_wsr_handle),
+	/*
+	 *  Save the remaining byte count, the updated 
+	 *  address and the return address.
+	 */
+	SCR_STORE_REL (rbc, 4),
+		offsetof(struct sym_ccb, phys.pm0.sg.size),
+	SCR_STORE_REL (ua, 4),
+		offsetof(struct sym_ccb, phys.pm0.sg.addr),
+	/*
+	 *  Set the current pointer at the PM0 DATA mini-script.
+	 */
+	SCR_LOAD_ABS (ia, 4),
+		PADDR_B (pm0_data_addr),
+}/*-------------------------< PM_SAVE_END >----------------------*/,{
+	SCR_STORE_REL (ia, 4),
+		offsetof(struct sym_ccb, phys.head.lastp),
+	SCR_JUMP,
+		PADDR_A (dispatch),
+}/*-------------------------< PM1_SAVE >-------------------------*/,{
+	SCR_STORE_REL (ia, 4),
+		offsetof(struct sym_ccb, phys.pm1.ret),
+	/*
+	 *  If the WSR bit is set, UA and/or RBC may 
+	 *  have to be changed, depending on whether the device 
+	 *  wants to ignore this residue or not.
+	 */
+	SCR_FROM_REG (scntl2),
+		0,
+	SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
+		PADDR_B (pm_wsr_handle),
+	/*
+	 *  Save the remaining byte count, the updated 
+	 *  address and the return address.
+	 */
+	SCR_STORE_REL (rbc, 4),
+		offsetof(struct sym_ccb, phys.pm1.sg.size),
+	SCR_STORE_REL (ua, 4),
+		offsetof(struct sym_ccb, phys.pm1.sg.addr),
+	/*
+	 *  Set the current pointer at the PM1 DATA mini-script.
+	 */
+	SCR_LOAD_ABS (ia, 4),
+		PADDR_B (pm1_data_addr),
+	SCR_JUMP,
+		PADDR_B (pm_save_end),
+}/*-------------------------< PM_WSR_HANDLE >--------------------*/,{
+	/*
+	 *  Phase mismatch handling from SCRIPT with WSR set.
+	 *  Such a condition can occur if the chip wants to 
+	 *  execute a CHMOV(size > 1) when the WSR bit is 
+	 *  set and the target changes PHASE.
+	 *
+	 *  We must move the residual byte to memory.
+	 *
+	 *  UA contains bits 0..31 of the address where the 
+	 *  residual byte must be moved.
+	 *  Move it to the table indirect.
+	 */
+	SCR_STORE_REL (ua, 4),
+		offsetof (struct sym_ccb, phys.wresid.addr),
+	/*
+	 *  Increment UA (move address to next position).
+	 */
+	SCR_REG_REG (ua, SCR_ADD, 1),
+		0,
+	SCR_REG_REG (ua1, SCR_ADDC, 0),
+		0,
+	SCR_REG_REG (ua2, SCR_ADDC, 0),
+		0,
+	SCR_REG_REG (ua3, SCR_ADDC, 0),
+		0,
+	/*
+	 *  Compute SCRATCHA as:
+	 *  - size to transfer = 1 byte.
+	 *  - bits 24..31 = high address bits [32..39].
+	 */
+	SCR_LOAD_ABS (scratcha, 4),
+		PADDR_B (zero),
+	SCR_REG_REG (scratcha, SCR_OR, 1),
+		0,
+	SCR_FROM_REG (rbc3),
+		0,
+	SCR_TO_REG (scratcha3),
+		0,
+	/*
+	 *  Move this value to the table indirect.
+	 */
+	SCR_STORE_REL (scratcha, 4),
+		offsetof (struct sym_ccb, phys.wresid.size),
+	/*
+	 *  Wait for a valid phase.
+	 *  While testing with bogus QUANTUM drives, the C1010 
+	 *  sometimes raised a spurious phase mismatch with 
+	 *  WSR and the CHMOV(1) triggered another PM.
+	 *  Waiting explicitly for the PHASE seemed to avoid 
+	 *  the nested phase mismatch. Btw, this didn't happen 
+	 *  using my IBM drives.
+	 */
+	SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_IN)),
+		0,
+	/*
+	 *  Perform the move of the residual byte.
+	 */
+	SCR_CHMOV_TBL ^ SCR_DATA_IN,
+		offsetof (struct sym_ccb, phys.wresid),
+	/*
+	 *  We can now handle the phase mismatch with UA fixed.
+	 *  RBC[0..23]=0 is a special case that does not require 
+	 *  a PM context. The C code also checks against this.
+	 */
+	SCR_FROM_REG (rbc),
+		0,
+	SCR_RETURN ^ IFFALSE (DATA (0)),
+		0,
+	SCR_FROM_REG (rbc1),
+		0,
+	SCR_RETURN ^ IFFALSE (DATA (0)),
+		0,
+	SCR_FROM_REG (rbc2),
+		0,
+	SCR_RETURN ^ IFFALSE (DATA (0)),
+		0,
+	/*
+	 *  RBC[0..23]=0.
+	 *  Not only do we not need a PM context, but this would 
+	 *  lead to a bogus CHMOV(0). This condition means that 
+	 *  the residual was the last byte to move from this CHMOV.
+	 *  So, we just have to move the current data script pointer 
+	 *  (i.e. TEMP) to the SCRIPTS address following the 
+	 *  interrupted CHMOV and jump to dispatcher.
+	 *  IA contains the data pointer to save.
+	 */
+	SCR_JUMP,
+		PADDR_B (pm_save_end),
+}/*-------------------------< WSR_MA_HELPER >--------------------*/,{
+	/*
+	 *  Helper for the C code when WSR bit is set.
+	 *  Perform the move of the residual byte.
+	 */
+	SCR_CHMOV_TBL ^ SCR_DATA_IN,
+		offsetof (struct sym_ccb, phys.wresid),
+	SCR_JUMP,
+		PADDR_A (dispatch),
+
+#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
+}/*-------------------------< DATA_IO >--------------------------*/,{
+	/*
+	 *  We jump here if the data direction was unknown at the 
+	 *  time we had to queue the command to the scripts processor.
+	 *  Pointers have been set as follows in this situation:
+	 *    savep   -->   DATA_IO
+	 *    lastp   -->   start pointer when DATA_IN
+	 *    wlastp  -->   start pointer when DATA_OUT
+	 *  This script sets savep and lastp according to the 
+	 *  direction chosen by the target.
+	 */
+	SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)),
+		PADDR_B (data_io_out),
+}/*-------------------------< DATA_IO_IN >-----------------------*/,{
+	/*
+	 *  Direction is DATA IN.
+	 */
+	SCR_LOAD_REL  (scratcha, 4),
+		offsetof (struct sym_ccb, phys.head.lastp),
+}/*-------------------------< DATA_IO_COM >----------------------*/,{
+	SCR_STORE_REL (scratcha, 4),
+		offsetof (struct sym_ccb, phys.head.savep),
+
+	/*
+	 *  Jump to the SCRIPTS according to actual direction.
+	 */
+	SCR_LOAD_REL  (temp, 4),
+		offsetof (struct sym_ccb, phys.head.savep),
+	SCR_RETURN,
+		0,
+}/*-------------------------< DATA_IO_OUT >----------------------*/,{
+	/*
+	 *  Direction is DATA OUT.
+	 */
+	SCR_REG_REG (HF_REG, SCR_AND, (~HF_DATA_IN)),
+		0,
+	SCR_LOAD_REL  (scratcha, 4),
+		offsetof (struct sym_ccb, phys.head.wlastp),
+	SCR_STORE_REL (scratcha, 4),
+		offsetof (struct sym_ccb, phys.head.lastp),
+	SCR_JUMP,
+		PADDR_B(data_io_com),
+#endif /* SYM_OPT_HANDLE_DIR_UNKNOWN */
+
+}/*-------------------------< ZERO >-----------------------------*/,{
+	SCR_DATA_ZERO,
+}/*-------------------------< SCRATCH >--------------------------*/,{
+	SCR_DATA_ZERO,
+}/*-------------------------< PM0_DATA_ADDR >--------------------*/,{
+	SCR_DATA_ZERO,
+}/*-------------------------< PM1_DATA_ADDR >--------------------*/,{
+	SCR_DATA_ZERO,
+}/*-------------------------< DONE_POS >-------------------------*/,{
+	SCR_DATA_ZERO,
+}/*-------------------------< STARTPOS >-------------------------*/,{
+	SCR_DATA_ZERO,
+}/*-------------------------< TARGTBL >--------------------------*/,{
+	SCR_DATA_ZERO,
+}/*-------------------------<>-----------------------------------*/
+};
+
+static struct SYM_FWZ_SCR SYM_FWZ_SCR = {
+ /*-------------------------< SNOOPTEST >------------------------*/{
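+	/*
+	 *  (This fragment is run once at attach time: the chip 
+	 *  reads a host variable, writes a different value back 
+	 *  and reads it again, so the C code can check that the 
+	 *  chip and the CPU have a coherent view of memory.)
+	 */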
+	/*
+	 *  Read the variable from memory.
+	 */
+	SCR_LOAD_REL (scratcha, 4),
+		offsetof(struct sym_hcb, scratch),
+	/*
+	 *  Write the variable to memory.
+	 */
+	SCR_STORE_REL (temp, 4),
+		offsetof(struct sym_hcb, scratch),
+	/*
+	 *  Read back the variable from memory.
+	 */
+	SCR_LOAD_REL (temp, 4),
+		offsetof(struct sym_hcb, scratch),
+}/*-------------------------< SNOOPEND >-------------------------*/,{
+	/*
+	 *  And stop.
+	 */
+	SCR_INT,
+		99,
+}/*-------------------------<>-----------------------------------*/
+};
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
new file mode 100644
index 0000000..a1dff6d
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -0,0 +1,2196 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ * Copyright (c) 2003-2005  Matthew Wilcox <matthew@wil.cx>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+
+#include "sym_glue.h"
+#include "sym_nvram.h"
+
+#define NAME53C		"sym53c"
+#define NAME53C8XX	"sym53c8xx"
+
+/* SPARC just has to be different ... */
+#ifdef __sparc__
+#define IRQ_FMT "%s"
+#define IRQ_PRM(x) __irq_itoa(x)
+#else
+#define IRQ_FMT "%d"
+#define IRQ_PRM(x) (x)
+#endif
+
+struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP;
+unsigned int sym_debug_flags = 0;
+
+static char *excl_string;
+static char *safe_string;
+module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0);
+module_param_string(tag_ctrl, sym_driver_setup.tag_ctrl, 100, 0);
+module_param_named(burst, sym_driver_setup.burst_order, byte, 0);
+module_param_named(led, sym_driver_setup.scsi_led, byte, 0);
+module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0);
+module_param_named(irqm, sym_driver_setup.irq_mode, byte, 0);
+module_param_named(buschk, sym_driver_setup.scsi_bus_check, byte, 0);
+module_param_named(hostid, sym_driver_setup.host_id, byte, 0);
+module_param_named(verb, sym_driver_setup.verbose, byte, 0);
+module_param_named(debug, sym_debug_flags, uint, 0);
+module_param_named(settle, sym_driver_setup.settle_delay, byte, 0);
+module_param_named(nvram, sym_driver_setup.use_nvram, byte, 0);
+module_param_named(excl, excl_string, charp, 0);
+module_param_named(safe, safe_string, charp, 0);
+
+MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default");
+MODULE_PARM_DESC(tag_ctrl, "More detailed control over tags per LUN");
+MODULE_PARM_DESC(burst, "Maximum burst.  0 to disable, 255 to read from registers");
+MODULE_PARM_DESC(led, "Set to 1 to enable LED support");
+MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3");
+MODULE_PARM_DESC(irqm, "0 for open drain, 1 to leave alone, 2 for totem pole");
+MODULE_PARM_DESC(buschk, "0 to not check, 1 for detach on error, 2 for warn on error");
+MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters");
+MODULE_PARM_DESC(verb, "0 for minimal verbosity, 1 for normal, 2 for excessive");
+MODULE_PARM_DESC(debug, "Set bits to enable debugging");
+MODULE_PARM_DESC(settle, "Settle delay in seconds.  Default 3");
+MODULE_PARM_DESC(nvram, "Option currently not used");
+MODULE_PARM_DESC(excl, "List ioport addresses here to prevent controllers from being attached");
+MODULE_PARM_DESC(safe, "Set other settings to a \"safe mode\"");
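+
+/*
+ *  Example usage of the module parameters above (hypothetical
+ *  values, shown for illustration only):
+ *
+ *	modprobe sym53c8xx cmd_per_lun=16 verb=1 settle=2
+ */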
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SYM_VERSION);
+MODULE_AUTHOR("Matthew Wilcox <matthew@wil.cx>");
+MODULE_DESCRIPTION("NCR, Symbios and LSI 8xx and 1010 PCI SCSI adapters");
+
+static void sym2_setup_params(void)
+{
+	char *p = excl_string;
+	int xi = 0;
+
+	while (p && (xi < 8)) {
+		char *next_p;
+		int val = (int) simple_strtoul(p, &next_p, 0);
+		sym_driver_setup.excludes[xi++] = val;
+		p = next_p;
+	}
+
+	if (safe_string) {
+		if (*safe_string == 'y') {
+			sym_driver_setup.max_tag = 0;
+			sym_driver_setup.burst_order = 0;
+			sym_driver_setup.scsi_led = 0;
+			sym_driver_setup.scsi_diff = 1;
+			sym_driver_setup.irq_mode = 0;
+			sym_driver_setup.scsi_bus_check = 2;
+			sym_driver_setup.host_id = 7;
+			sym_driver_setup.verbose = 2;
+			sym_driver_setup.settle_delay = 10;
+			sym_driver_setup.use_nvram = 1;
+		} else if (*safe_string != 'n') {
+			printk(KERN_WARNING NAME53C8XX ": Ignoring parameter %s"
+					" passed to safe option\n", safe_string);
+		}
+	}
+}
+
+/*
+ * We used to try to deal with 64-bit BARs here, but don't any more.
+ * There are many parts of this driver which would need to be modified
+ * to handle a 64-bit base address, including scripts.  I'm uncomfortable
+ * with making those changes when I have no way of testing it, so I'm
+ * just going to disable it.
+ *
+ * Note that some machines (eg HP rx8620 and Superdome) have bus addresses
+ * below 4GB and physical addresses above 4GB.  These will continue to work.
+ */
+static int __devinit
+pci_get_base_address(struct pci_dev *pdev, int index, unsigned long *basep)
+{
+	u32 tmp;
+	unsigned long base;
+#define PCI_BAR_OFFSET(index) (PCI_BASE_ADDRESS_0 + (index<<2))
+
+	pci_read_config_dword(pdev, PCI_BAR_OFFSET(index++), &tmp);
+	base = tmp;
+	if ((tmp & 0x7) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
+		pci_read_config_dword(pdev, PCI_BAR_OFFSET(index++), &tmp);
+		if (tmp > 0)
+			dev_err(&pdev->dev,
+				"BAR %d is 64-bit, disabling\n", index - 1);
+		base = 0;
+	}
+
+	if ((base & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+		base &= PCI_BASE_ADDRESS_IO_MASK;
+	} else {
+		base &= PCI_BASE_ADDRESS_MEM_MASK;
+	}
+
+	*basep = base;
+	return index;
+#undef PCI_BAR_OFFSET
+}
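+
+/*
+ *  The function above returns the index of the next BAR, so a caller
+ *  can walk consecutive (possibly 64-bit) BARs by feeding the return
+ *  value back in, e.g. (illustrative sketch only):
+ *
+ *	i = pci_get_base_address(pdev, 1, &base);
+ *	pci_get_base_address(pdev, i, &base_2);
+ */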
+
+static struct scsi_transport_template *sym2_transport_template = NULL;
+
+/*
+ *  Used by the eh thread to wait for command completion.
+ *  It is allocated on the eh thread stack.
+ */
+struct sym_eh_wait {
+	struct completion done;
+	struct timer_list timer;
+	void (*old_done)(struct scsi_cmnd *);
+	int to_do;
+	int timed_out;
+};
+
+/*
+ *  Driver private area in the SCSI command structure.
+ */
+struct sym_ucmd {		/* Override the SCSI pointer structure */
+	dma_addr_t data_mapping;
+	u_char	data_mapped;
+	struct sym_eh_wait *eh_wait;
+};
+
+#define SYM_UCMD_PTR(cmd)  ((struct sym_ucmd *)(&(cmd)->SCp))
+#define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host)
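+
+/*
+ *  Note: struct sym_ucmd above is overlaid on cmd->SCp (see
+ *  SYM_UCMD_PTR), so it is expected to fit within struct scsi_pointer.
+ */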
+
+static void __unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
+{
+	int dma_dir = cmd->sc_data_direction;
+
+	switch(SYM_UCMD_PTR(cmd)->data_mapped) {
+	case 2:
+		pci_unmap_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+		break;
+	case 1:
+		pci_unmap_single(pdev, SYM_UCMD_PTR(cmd)->data_mapping,
+				 cmd->request_bufflen, dma_dir);
+		break;
+	}
+	SYM_UCMD_PTR(cmd)->data_mapped = 0;
+}
+
+static dma_addr_t __map_scsi_single_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
+{
+	dma_addr_t mapping;
+	int dma_dir = cmd->sc_data_direction;
+
+	mapping = pci_map_single(pdev, cmd->request_buffer,
+				 cmd->request_bufflen, dma_dir);
+	if (mapping) {
+		SYM_UCMD_PTR(cmd)->data_mapped  = 1;
+		SYM_UCMD_PTR(cmd)->data_mapping = mapping;
+	}
+
+	return mapping;
+}
+
+static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
+{
+	int use_sg;
+	int dma_dir = cmd->sc_data_direction;
+
+	use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+	if (use_sg > 0) {
+		SYM_UCMD_PTR(cmd)->data_mapped  = 2;
+		SYM_UCMD_PTR(cmd)->data_mapping = use_sg;
+	}
+
+	return use_sg;
+}
+
+#define unmap_scsi_data(np, cmd)	\
+		__unmap_scsi_data(np->s.device, cmd)
+#define map_scsi_single_data(np, cmd)	\
+		__map_scsi_single_data(np->s.device, cmd)
+#define map_scsi_sg_data(np, cmd)	\
+		__map_scsi_sg_data(np->s.device, cmd)
+/*
+ *  Complete a pending CAM CCB.
+ */
+void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd)
+{
+	unmap_scsi_data(np, cmd);
+	cmd->scsi_done(cmd);
+}
+
+static void sym_xpt_done2(struct sym_hcb *np, struct scsi_cmnd *cmd, int cam_status)
+{
+	sym_set_cam_status(cmd, cam_status);
+	sym_xpt_done(np, cmd);
+}
+
+
+/*
+ *  Tell the SCSI layer about a BUS RESET.
+ */
+void sym_xpt_async_bus_reset(struct sym_hcb *np)
+{
+	printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np));
+	np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ;
+	np->s.settle_time_valid = 1;
+	if (sym_verbose >= 2)
+		printf_info("%s: command processing suspended for %d seconds\n",
+			    sym_name(np), sym_driver_setup.settle_delay);
+}
+
+/*
+ *  Tell the SCSI layer about a BUS DEVICE RESET message sent.
+ */
+void sym_xpt_async_sent_bdr(struct sym_hcb *np, int target)
+{
+	printf_notice("%s: TARGET %d has been reset.\n", sym_name(np), target);
+}
+
+/*
+ *  Choose the most appropriate CAM status if 
+ *  the IO encountered an extended error.
+ */
+static int sym_xerr_cam_status(int cam_status, int x_status)
+{
+	if (x_status) {
+		if	(x_status & XE_PARITY_ERR)
+			cam_status = DID_PARITY;
+		else if	(x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
+			cam_status = DID_ERROR;
+		else if	(x_status & XE_BAD_PHASE)
+			cam_status = DID_ERROR;
+		else
+			cam_status = DID_ERROR;
+	}
+	return cam_status;
+}
+
+/*
+ *  Build CAM result for a failed or auto-sensed IO.
+ */
+void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
+{
+	struct scsi_cmnd *cmd = cp->cmd;
+	u_int cam_status, scsi_status, drv_status;
+
+	drv_status  = 0;
+	cam_status  = DID_OK;
+	scsi_status = cp->ssss_status;
+
+	if (cp->host_flags & HF_SENSE) {
+		scsi_status = cp->sv_scsi_status;
+		resid = cp->sv_resid;
+		if (sym_verbose && cp->sv_xerr_status)
+			sym_print_xerr(cmd, cp->sv_xerr_status);
+		if (cp->host_status == HS_COMPLETE &&
+		    cp->ssss_status == S_GOOD &&
+		    cp->xerr_status == 0) {
+			cam_status = sym_xerr_cam_status(DID_OK,
+							 cp->sv_xerr_status);
+			drv_status = DRIVER_SENSE;
+			/*
+			 *  Bounce the sense data back to the user.
+			 */
+			memset(&cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+			memcpy(cmd->sense_buffer, cp->sns_bbuf,
+			      min(sizeof(cmd->sense_buffer),
+				  (size_t)SYM_SNS_BBUF_LEN));
+#if 0
+			/*
+			 *  If the device reports a UNIT ATTENTION condition 
+			 *  due to a RESET condition, we should consider all 
+			 *  disconnected CCBs for this unit as aborted.
+			 */
+			if (1) {
+				u_char *p;
+				p  = (u_char *) cmd->sense_data;
+				if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
+					sym_clear_tasks(np, DID_ABORT,
+							cp->target,cp->lun, -1);
+			}
+#endif
+		} else {
+			/*
+			 * Error return from our internal request sense.  This
+			 * is bad: we must clear the contingent allegiance
+			 * condition otherwise the device will always return
+			 * BUSY.  Use a big stick.
+			 */
+			sym_reset_scsi_target(np, cmd->device->id);
+			cam_status = DID_ERROR;
+		}
+	} else if (cp->host_status == HS_COMPLETE) 	/* Bad SCSI status */
+		cam_status = DID_OK;
+	else if (cp->host_status == HS_SEL_TIMEOUT)	/* Selection timeout */
+		cam_status = DID_NO_CONNECT;
+	else if (cp->host_status == HS_UNEXPECTED)	/* Unexpected BUS FREE*/
+		cam_status = DID_ERROR;
+	else {						/* Extended error */
+		if (sym_verbose) {
+			sym_print_addr(cmd, "COMMAND FAILED (%x %x %x).\n",
+				cp->host_status, cp->ssss_status,
+				cp->xerr_status);
+		}
+		/*
+		 *  Set the most appropriate value for CAM status.
+		 */
+		cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
+	}
+	cmd->resid = resid;
+	cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status;
+}
+
+
+/*
+ *  Build the scatter/gather array for an I/O.
+ */
+
+static int sym_scatter_no_sglist(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
+{
+	struct sym_tblmove *data = &cp->phys.data[SYM_CONF_MAX_SG-1];
+	int segment;
+
+	cp->data_len = cmd->request_bufflen;
+
+	if (cmd->request_bufflen) {
+		dma_addr_t baddr = map_scsi_single_data(np, cmd);
+		if (baddr) {
+			sym_build_sge(np, data, baddr, cmd->request_bufflen);
+			segment = 1;
+		} else {
+			segment = -2;
+		}
+	} else {
+		segment = 0;
+	}
+
+	return segment;
+}
+
+static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
+{
+	int segment;
+	int use_sg = (int) cmd->use_sg;
+
+	cp->data_len = 0;
+
+	if (!use_sg)
+		segment = sym_scatter_no_sglist(np, cp, cmd);
+	else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) {
+		struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
+		struct sym_tblmove *data;
+
+		if (use_sg > SYM_CONF_MAX_SG) {
+			unmap_scsi_data(np, cmd);
+			return -1;
+		}
+
+		data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];
+
+		for (segment = 0; segment < use_sg; segment++) {
+			dma_addr_t baddr = sg_dma_address(&scatter[segment]);
+			unsigned int len = sg_dma_len(&scatter[segment]);
+
+			sym_build_sge(np, &data[segment], baddr, len);
+			cp->data_len += len;
+		}
+	} else {
+		segment = -2;
+	}
+
+	return segment;
+}
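+
+/*
+ *  Return convention of sym_scatter() above: a value >= 0 is the
+ *  number of mapped data segments, -1 means the scatterlist has more
+ *  than SYM_CONF_MAX_SG entries, and -2 means the DMA mapping failed.
+ *  The caller turns -2 into CAM_RESRC_UNAVAIL and -1 into
+ *  CAM_REQ_TOO_BIG.
+ */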
+
+/*
+ *  Queue a SCSI command.
+ */
+static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd)
+{
+	struct scsi_device *sdev = cmd->device;
+	struct sym_tcb *tp;
+	struct sym_lcb *lp;
+	struct sym_ccb *cp;
+	int	order;
+
+	/*
+	 *  Minimal checks, so that we do not
+	 *  go outside our tables.
+	 */
+	if (sdev->id == np->myaddr ||
+	    sdev->id >= SYM_CONF_MAX_TARGET ||
+	    sdev->lun >= SYM_CONF_MAX_LUN) {
+		sym_xpt_done2(np, cmd, CAM_DEV_NOT_THERE);
+		return 0;
+	}
+
+	/*
+	 *  Retrieve the target descriptor.
+	 */
+	tp = &np->target[sdev->id];
+
+	/*
+	 *  Complete the 1st INQUIRY command with an error
+	 *  condition if the device is flagged NOSCAN
+	 *  at BOOT in the NVRAM. This may speed up
+	 *  the boot and maintain coherency with BIOS
+	 *  device numbering. Clearing the flag allows
+	 *  the user to rescan skipped devices later.
+	 *  We also return an error for devices not flagged
+	 *  for SCAN LUNS in the NVRAM, since some mono-lun
+	 *  devices behave badly when asked for a non-zero
+	 *  LUN. Btw, this is an absolute hack. :-)
+	 */
+	if (cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 0x0) {
+		if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) ||
+		    ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) && 
+		     sdev->lun != 0)) {
+			tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
+			sym_xpt_done2(np, cmd, CAM_DEV_NOT_THERE);
+			return 0;
+		}
+	}
+
+	/*
+	 *  Select tagged/untagged.
+	 */
+	lp = sym_lp(tp, sdev->lun);
+	order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0;
+
+	/*
+	 *  Queue the SCSI IO.
+	 */
+	cp = sym_get_ccb(np, cmd, order);
+	if (!cp)
+		return 1;	/* Means resource shortage */
+	sym_queue_scsiio(np, cmd, cp);
+	return 0;
+}
+
+/*
+ *  Setup buffers and pointers that address the CDB.
+ */
+static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
+{
+	u32	cmd_ba;
+	int	cmd_len;
+
+	/*
+	 *  CDB is 16 bytes max.
+	 */
+	if (cmd->cmd_len > sizeof(cp->cdb_buf)) {
+		sym_set_cam_status(cp->cmd, CAM_REQ_INVALID);
+		return -1;
+	}
+
+	memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len);
+	cmd_ba  = CCB_BA (cp, cdb_buf[0]);
+	cmd_len = cmd->cmd_len;
+
+	cp->phys.cmd.addr	= cpu_to_scr(cmd_ba);
+	cp->phys.cmd.size	= cpu_to_scr(cmd_len);
+
+	return 0;
+}
+
+/*
+ *  Setup pointers that address the data and start the I/O.
+ */
+int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
+{
+	int dir;
+	struct sym_tcb *tp = &np->target[cp->target];
+	struct sym_lcb *lp = sym_lp(tp, cp->lun);
+
+	/*
+	 *  Build the CDB.
+	 */
+	if (sym_setup_cdb(np, cmd, cp))
+		goto out_abort;
+
+	/*
+	 *  No direction means no data.
+	 */
+	dir = cmd->sc_data_direction;
+	if (dir != DMA_NONE) {
+		cp->segments = sym_scatter(np, cp, cmd);
+		if (cp->segments < 0) {
+			if (cp->segments == -2)
+				sym_set_cam_status(cmd, CAM_RESRC_UNAVAIL);
+			else
+				sym_set_cam_status(cmd, CAM_REQ_TOO_BIG);
+			goto out_abort;
+		}
+	} else {
+		cp->data_len = 0;
+		cp->segments = 0;
+	}
+
+	/*
+	 *  Set data pointers.
+	 */
+	sym_setup_data_pointers(np, cp, dir);
+
+	/*
+	 *  When enabled (`#if 1'), the code below makes the driver
+	 *  panic on the first attempt to write to a SCSI device.
+	 *  It is the first test we want to do after a driver 
+	 *  change that does not seem obviously safe. :)
+	 */
+#if 0
+	switch (cp->cdb_buf[0]) {
+	case 0x0A: case 0x2A: case 0xAA:
+		panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
+		break;
+	default:
+		break;
+	}
+#endif
+
+	/*
+	 *  Activate this job.
+	 */
+	if (lp)
+		sym_start_next_ccbs(np, lp, 2);
+	else
+		sym_put_start_queue(np, cp);
+	return 0;
+
+out_abort:
+	sym_free_ccb(np, cp);
+	sym_xpt_done(np, cmd);
+	return 0;
+}
+
+
+/*
+ *  Timer daemon.
+ *
+ *  Misused to keep the driver running when
+ *  interrupts are not configured correctly.
+ */
+static void sym_timer(struct sym_hcb *np)
+{
+	unsigned long thistime = jiffies;
+
+	/*
+	 *  Restart the timer.
+	 */
+	np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL;
+	add_timer(&np->s.timer);
+
+	/*
+	 *  If we are resetting the ncr, wait for settle_time before 
+	 *  clearing it. Then command processing will be resumed.
+	 */
+	if (np->s.settle_time_valid) {
+		if (time_before_eq(np->s.settle_time, thistime)) {
+			if (sym_verbose >= 2)
+				printk("%s: command processing resumed\n",
+				       sym_name(np));
+			np->s.settle_time_valid = 0;
+		}
+		return;
+	}
+
+	/*
+	 *	Nothing to do for now, but that may come.
+	 */
+	if (np->s.lasttime + 4*HZ < thistime) {
+		np->s.lasttime = thistime;
+	}
+
+#ifdef SYM_CONF_PCIQ_MAY_MISS_COMPLETIONS
+	/*
+	 *  Some way-broken PCI bridges may lead to 
+	 *  completions being lost when the clearing 
+	 *  of the INTFLY flag by the CPU occurs 
+	 *  concurrently with the chip raising this flag.
+	 *  If this ever happens, lost completions will
+	 *  be reaped here.
+	 */
+	sym_wakeup_done(np);
+#endif
+}
+
+
+/*
+ *  PCI BUS error handler.
+ */
+void sym_log_bus_error(struct sym_hcb *np)
+{
+	u_short pci_sts;
+	pci_read_config_word(np->s.device, PCI_STATUS, &pci_sts);
+	if (pci_sts & 0xf900) {
+		pci_write_config_word(np->s.device, PCI_STATUS, pci_sts);
+		printf("%s: PCI STATUS = 0x%04x\n",
+			sym_name(np), pci_sts & 0xf900);
+	}
+}
+
+/*
+ * queuecommand method.  Entered with the host adapter lock held and
+ * interrupts disabled.
+ */
+static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
+					void (*done)(struct scsi_cmnd *))
+{
+	struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
+	struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd);
+	int sts = 0;
+
+	cmd->scsi_done     = done;
+	memset(ucp, 0, sizeof(*ucp));
+
+	/*
+	 *  Shorten our settle_time if needed for 
+	 *  this command not to time out.
+	 */
+	if (np->s.settle_time_valid && cmd->timeout_per_command) {
+		unsigned long tlimit = jiffies + cmd->timeout_per_command;
+		tlimit -= SYM_CONF_TIMER_INTERVAL*2;
+		if (time_after(np->s.settle_time, tlimit)) {
+			np->s.settle_time = tlimit;
+		}
+	}
+
+	if (np->s.settle_time_valid)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	sts = sym_queue_command(np, cmd);
+	if (sts)
+		return SCSI_MLQUEUE_HOST_BUSY;
+	return 0;
+}
+
+/*
+ *  Linux entry point of the interrupt handler.
+ */
+static irqreturn_t sym53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs)
+{
+	unsigned long flags;
+	struct sym_hcb *np = (struct sym_hcb *)dev_id;
+
+	if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("[");
+
+	spin_lock_irqsave(np->s.host->host_lock, flags);
+	sym_interrupt(np);
+	spin_unlock_irqrestore(np->s.host->host_lock, flags);
+
+	if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n");
+
+	return IRQ_HANDLED;
+}
+
+/*
+ *  Linux entry point of the timer handler
+ */
+static void sym53c8xx_timer(unsigned long npref)
+{
+	struct sym_hcb *np = (struct sym_hcb *)npref;
+	unsigned long flags;
+
+	spin_lock_irqsave(np->s.host->host_lock, flags);
+	sym_timer(np);
+	spin_unlock_irqrestore(np->s.host->host_lock, flags);
+}
+
+
+/*
+ *  What the eh thread wants us to perform.
+ */
+#define SYM_EH_ABORT		0
+#define SYM_EH_DEVICE_RESET	1
+#define SYM_EH_BUS_RESET	2
+#define SYM_EH_HOST_RESET	3
+
+/*
+ *  What we will do regarding the involved SCSI command.
+ */
+#define SYM_EH_DO_IGNORE	0
+#define SYM_EH_DO_COMPLETE	1
+#define SYM_EH_DO_WAIT		2
+
+/*
+ *  Our general completion handler.
+ */
+static void __sym_eh_done(struct scsi_cmnd *cmd, int timed_out)
+{
+	struct sym_eh_wait *ep = SYM_UCMD_PTR(cmd)->eh_wait;
+	if (!ep)
+		return;
+
+	/* Try to avoid a race here (not 100% safe) */
+	if (!timed_out) {
+		ep->timed_out = 0;
+		if (ep->to_do == SYM_EH_DO_WAIT && !del_timer(&ep->timer))
+			return;
+	}
+
+	/* Revert everything */
+	SYM_UCMD_PTR(cmd)->eh_wait = NULL;
+	cmd->scsi_done = ep->old_done;
+
+	/* Wake up the eh thread if it wants to sleep */
+	if (ep->to_do == SYM_EH_DO_WAIT)
+		complete(&ep->done);
+}
+
+/*
+ *  scsi_done() alias when error recovery is in progress. 
+ */
+static void sym_eh_done(struct scsi_cmnd *cmd) { __sym_eh_done(cmd, 0); }
+
+/*
+ *  Some timeout handler to avoid waiting too long.
+ */
+static void sym_eh_timeout(u_long p) { __sym_eh_done((struct scsi_cmnd *)p, 1); }
+
+/*
+ *  Generic method for our eh processing.
+ *  The 'op' argument tells what we have to do.
+ */
+static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
+{
+	struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
+	SYM_QUEHEAD *qp;
+	int to_do = SYM_EH_DO_IGNORE;
+	int sts = -1;
+	struct sym_eh_wait eh, *ep = &eh;
+
+	dev_warn(&cmd->device->sdev_gendev, "%s operation started.\n", opname);
+
+#if 0
+	/* This one should be the result of some race, thus to ignore */
+	if (cmd->serial_number != cmd->serial_number_at_timeout)
+		goto prepare;
+#endif
+
+	/* This one is still queued somewhere -> wait for its completion */
+	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+		struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+		if (cp->cmd == cmd) {
+			to_do = SYM_EH_DO_WAIT;
+			goto prepare;
+		}
+	}
+
+prepare:
+	/* Prepare stuff to either ignore, complete or wait for completion */
+	switch(to_do) {
+	default:
+	case SYM_EH_DO_IGNORE:
+		break;
+	case SYM_EH_DO_WAIT:
+		init_completion(&ep->done);
+		/* fall through */
+	case SYM_EH_DO_COMPLETE:
+		ep->old_done = cmd->scsi_done;
+		cmd->scsi_done = sym_eh_done;
+		SYM_UCMD_PTR(cmd)->eh_wait = ep;
+	}
+
+	/* Try to perform the operation we have been asked for */
+	sts = -1;
+	switch(op) {
+	case SYM_EH_ABORT:
+		sts = sym_abort_scsiio(np, cmd, 1);
+		break;
+	case SYM_EH_DEVICE_RESET:
+		sts = sym_reset_scsi_target(np, cmd->device->id);
+		break;
+	case SYM_EH_BUS_RESET:
+		sym_reset_scsi_bus(np, 1);
+		sts = 0;
+		break;
+	case SYM_EH_HOST_RESET:
+		sym_reset_scsi_bus(np, 0);
+		sym_start_up (np, 1);
+		sts = 0;
+		break;
+	default:
+		break;
+	}
+
+	/* On error, restore everything and cross fingers :) */
+	if (sts) {
+		SYM_UCMD_PTR(cmd)->eh_wait = NULL;
+		cmd->scsi_done = ep->old_done;
+		to_do = SYM_EH_DO_IGNORE;
+	}
+
+	ep->to_do = to_do;
+	/* Complete the command with locks held as required by the driver */
+	if (to_do == SYM_EH_DO_COMPLETE)
+		sym_xpt_done2(np, cmd, CAM_REQ_ABORTED);
+
+	/* Wait for completion with locks released, as required by kernel */
+	if (to_do == SYM_EH_DO_WAIT) {
+		init_timer(&ep->timer);
+		ep->timer.expires = jiffies + (5*HZ);
+		ep->timer.function = sym_eh_timeout;
+		ep->timer.data = (u_long)cmd;
+		ep->timed_out = 1;	/* Be pessimistic for once :) */
+		add_timer(&ep->timer);
+		spin_unlock_irq(np->s.host->host_lock);
+		wait_for_completion(&ep->done);
+		spin_lock_irq(np->s.host->host_lock);
+		if (ep->timed_out)
+			sts = -2;
+	}
+	dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname,
+			sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed");
+	return sts ? SCSI_FAILED : SCSI_SUCCESS;
+}
+
+
+/*
+ * Error handlers called from the eh thread (one thread per HBA).
+ */
+static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
+{
+	return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
+}
+
+static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd)
+{
+	return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd);
+}
+
+static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd)
+{
+	return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd);
+}
+
+static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+	return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd);
+}
+
+/*
+ *  Tune device queuing depth, according to various limits.
+ */
+static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags)
+{
+	struct sym_lcb *lp = sym_lp(tp, lun);
+	u_short	oldtags;
+
+	if (!lp)
+		return;
+
+	oldtags = lp->s.reqtags;
+
+	if (reqtags > lp->s.scdev_depth)
+		reqtags = lp->s.scdev_depth;
+
+	lp->started_limit = reqtags ? reqtags : 2;
+	lp->started_max   = 1;
+	lp->s.reqtags     = reqtags;
+
+	if (reqtags != oldtags) {
+		dev_info(&tp->sdev->sdev_target->dev,
+		         "tagged command queuing %s, command queue depth %d.\n",
+		          lp->s.reqtags ? "enabled" : "disabled",
+ 		          lp->started_limit);
+	}
+}
+
+/*
+ *  Linux select queue depths function
+ */
+#define DEF_DEPTH	(sym_driver_setup.max_tag)
+#define ALL_TARGETS	-2
+#define NO_TARGET	-1
+#define ALL_LUNS	-2
+#define NO_LUN		-1
+
+static int device_queue_depth(struct sym_hcb *np, int target, int lun)
+{
+	int c, h, t, u, v;
+	char *p = sym_driver_setup.tag_ctrl;
+	char *ep;
+
+	h = -1;
+	t = NO_TARGET;
+	u = NO_LUN;
+	while ((c = *p++) != 0) {
+		v = simple_strtoul(p, &ep, 0);
+		switch(c) {
+		case '/':
+			++h;
+			t = ALL_TARGETS;
+			u = ALL_LUNS;
+			break;
+		case 't':
+			if (t != target)
+				t = (target == v) ? v : NO_TARGET;
+			u = ALL_LUNS;
+			break;
+		case 'u':
+			if (u != lun)
+				u = (lun == v) ? v : NO_LUN;
+			break;
+		case 'q':
+			if (h == np->s.unit &&
+				(t == ALL_TARGETS || t == target) &&
+				(u == ALL_LUNS    || u == lun))
+				return v;
+			break;
+		case '-':
+			t = ALL_TARGETS;
+			u = ALL_LUNS;
+			break;
+		default:
+			break;
+		}
+		p = ep;
+	}
+	return DEF_DEPTH;
+}
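+
+/*
+ *  Illustrative example of the tag_ctrl syntax parsed above (the
+ *  string itself is hypothetical): with "/t2q16", the '/' selects
+ *  host unit 0, 't2' selects target 2 and 'q16' then returns a queue
+ *  depth of 16 for that target on that host; anything that does not
+ *  match falls back to DEF_DEPTH.
+ */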
+
+static int sym53c8xx_slave_alloc(struct scsi_device *device)
+{
+	struct sym_hcb *np = sym_get_hcb(device->host);
+	struct sym_tcb *tp = &np->target[device->id];
+	if (!tp->sdev)
+		tp->sdev = device;
+
+	return 0;
+}
+
+static void sym53c8xx_slave_destroy(struct scsi_device *device)
+{
+	struct sym_hcb *np = sym_get_hcb(device->host);
+	struct sym_tcb *tp = &np->target[device->id];
+	if (tp->sdev == device)
+		tp->sdev = NULL;
+}
+
+/*
+ * Linux entry point for device queue sizing.
+ */
+static int sym53c8xx_slave_configure(struct scsi_device *device)
+{
+	struct sym_hcb *np = sym_get_hcb(device->host);
+	struct sym_tcb *tp = &np->target[device->id];
+	struct sym_lcb *lp;
+	int reqtags, depth_to_use;
+
+	/*
+	 *  Allocate the LCB if not done yet.
+	 *  If it fails, we may well be in the sh*t. :)
+	 */
+	lp = sym_alloc_lcb(np, device->id, device->lun);
+	if (!lp)
+		return -ENOMEM;
+
+	/*
+	 *  Get user flags.
+	 */
+	lp->curr_flags = lp->user_flags;
+
+	/*
+	 *  Select queue depth from driver setup.
+	 *  Do not use more than configured by the user.
+	 *  Use at least 2.
+	 *  Do not use more than our maximum.
+	 */
+	reqtags = device_queue_depth(np, device->id, device->lun);
+	if (reqtags > tp->usrtags)
+		reqtags = tp->usrtags;
+	if (!device->tagged_supported)
+		reqtags = 0;
+#if 1 /* Avoid queuing commands locally for no good reason */
+	if (reqtags > SYM_CONF_MAX_TAG)
+		reqtags = SYM_CONF_MAX_TAG;
+	depth_to_use = (reqtags ? reqtags : 2);
+#else
+	depth_to_use = (reqtags ? SYM_CONF_MAX_TAG : 2);
+#endif
+	scsi_adjust_queue_depth(device,
+				(device->tagged_supported ?
+				 MSG_SIMPLE_TAG : 0),
+				depth_to_use);
+	lp->s.scdev_depth = depth_to_use;
+	sym_tune_dev_queuing(tp, device->lun, reqtags);
+
+	if (!spi_initial_dv(device->sdev_target))
+		spi_dv_device(device);
+
+	return 0;
+}
+
+/*
+ *  Linux entry point for info() function
+ */
+static const char *sym53c8xx_info (struct Scsi_Host *host)
+{
+	return SYM_DRIVER_NAME;
+}
+
+
+#ifdef SYM_LINUX_PROC_INFO_SUPPORT
+/*
+ *  Proc file system stuff
+ *
+ *  A read operation returns adapter information.
+ *  A write operation is a control command.
+ *  The string is parsed in the driver code and the command is passed 
+ *  to the sym_exec_user_command() function.
+ */
+
+#ifdef SYM_LINUX_USER_COMMAND_SUPPORT
+
+struct	sym_usrcmd {
+	u_long	target;
+	u_long	lun;
+	u_long	data;
+	u_long	cmd;
+};
+
+#define UC_SETSYNC      10
+#define UC_SETTAGS	11
+#define UC_SETDEBUG	12
+#define UC_SETWIDE	14
+#define UC_SETFLAG	15
+#define UC_SETVERBOSE	17
+#define UC_RESETDEV	18
+#define UC_CLEARDEV	19
+
+static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
+{
+	struct sym_tcb *tp;
+	int t, l;
+
+	switch (uc->cmd) {
+	case 0: return;
+
+#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
+	case UC_SETDEBUG:
+		sym_debug_flags = uc->data;
+		break;
+#endif
+	case UC_SETVERBOSE:
+		np->verbose = uc->data;
+		break;
+	default:
+		/*
+		 * We assume that other commands apply to targets.
+		 * This should always be the case and avoids repeating
+		 * the 4 lines below 6 times.
+		 */
+		for (t = 0; t < SYM_CONF_MAX_TARGET; t++) {
+			if (!((uc->target >> t) & 1))
+				continue;
+			tp = &np->target[t];
+
+			switch (uc->cmd) {
+
+			case UC_SETSYNC:
+				if (!uc->data || uc->data >= 255) {
+					tp->tgoal.iu = tp->tgoal.dt =
+						tp->tgoal.qas = 0;
+					tp->tgoal.offset = 0;
+				} else if (uc->data <= 9 && np->minsync_dt) {
+					if (uc->data < np->minsync_dt)
+						uc->data = np->minsync_dt;
+					tp->tgoal.iu = tp->tgoal.dt =
+						tp->tgoal.qas = 1;
+					tp->tgoal.width = 1;
+					tp->tgoal.period = uc->data;
+					tp->tgoal.offset = np->maxoffs_dt;
+				} else {
+					if (uc->data < np->minsync)
+						uc->data = np->minsync;
+					tp->tgoal.iu = tp->tgoal.dt =
+						tp->tgoal.qas = 0;
+					tp->tgoal.period = uc->data;
+					tp->tgoal.offset = np->maxoffs;
+				}
+				tp->tgoal.check_nego = 1;
+				break;
+			case UC_SETWIDE:
+				tp->tgoal.width = uc->data ? 1 : 0;
+				tp->tgoal.check_nego = 1;
+				break;
+			case UC_SETTAGS:
+				for (l = 0; l < SYM_CONF_MAX_LUN; l++)
+					sym_tune_dev_queuing(tp, l, uc->data);
+				break;
+			case UC_RESETDEV:
+				tp->to_reset = 1;
+				np->istat_sem = SEM;
+				OUTB(np, nc_istat, SIGP|SEM);
+				break;
+			case UC_CLEARDEV:
+				for (l = 0; l < SYM_CONF_MAX_LUN; l++) {
+					struct sym_lcb *lp = sym_lp(tp, l);
+					if (lp) lp->to_clear = 1;
+				}
+				np->istat_sem = SEM;
+				OUTB(np, nc_istat, SIGP|SEM);
+				break;
+			case UC_SETFLAG:
+				tp->usrflags = uc->data;
+				break;
+			}
+		}
+		break;
+	}
+}
+
+static int skip_spaces(char *ptr, int len)
+{
+	int cnt, c;
+
+	for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--);
+
+	return (len - cnt);
+}
+
+static int get_int_arg(char *ptr, int len, u_long *pv)
+{
+	char *end;
+
+	*pv = simple_strtoul(ptr, &end, 10);
+	return (end - ptr);
+}
+
+static int is_keyword(char *ptr, int len, char *verb)
+{
+	int verb_len = strlen(verb);
+
+	if (len >= verb_len && !memcmp(verb, ptr, verb_len))
+		return verb_len;
+	else
+		return 0;
+}
+
+#define SKIP_SPACES(ptr, len)						\
+	if ((arg_len = skip_spaces(ptr, len)) < 1)			\
+		return -EINVAL;						\
+	ptr += arg_len; len -= arg_len;
+
+#define GET_INT_ARG(ptr, len, v)					\
+	if (!(arg_len = get_int_arg(ptr, len, &(v))))			\
+		return -EINVAL;						\
+	ptr += arg_len; len -= arg_len;
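+
+/*
+ *  The helpers above return the number of characters they consumed.
+ *  The SKIP_SPACES and GET_INT_ARG macros wrap skip_spaces() and
+ *  get_int_arg(), advance 'ptr'/'len' on success, and make the
+ *  enclosing function return -EINVAL when nothing was consumed.
+ */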
+
+
+/*
+ * Parse a control command
+ */
+
+static int sym_user_command(struct sym_hcb *np, char *buffer, int length)
+{
+	char *ptr	= buffer;
+	int len		= length;
+	struct sym_usrcmd cmd, *uc = &cmd;
+	int		arg_len;
+	u_long 		target;
+
+	memset(uc, 0, sizeof(*uc));
+
+	if (len > 0 && ptr[len-1] == '\n')
+		--len;
+
+	if	((arg_len = is_keyword(ptr, len, "setsync")) != 0)
+		uc->cmd = UC_SETSYNC;
+	else if	((arg_len = is_keyword(ptr, len, "settags")) != 0)
+		uc->cmd = UC_SETTAGS;
+	else if	((arg_len = is_keyword(ptr, len, "setverbose")) != 0)
+		uc->cmd = UC_SETVERBOSE;
+	else if	((arg_len = is_keyword(ptr, len, "setwide")) != 0)
+		uc->cmd = UC_SETWIDE;
+#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
+	else if	((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
+		uc->cmd = UC_SETDEBUG;
+#endif
+	else if	((arg_len = is_keyword(ptr, len, "setflag")) != 0)
+		uc->cmd = UC_SETFLAG;
+	else if	((arg_len = is_keyword(ptr, len, "resetdev")) != 0)
+		uc->cmd = UC_RESETDEV;
+	else if	((arg_len = is_keyword(ptr, len, "cleardev")) != 0)
+		uc->cmd = UC_CLEARDEV;
+	else
+		arg_len = 0;
+
+#ifdef DEBUG_PROC_INFO
+printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd);
+#endif
+
+	if (!arg_len)
+		return -EINVAL;
+	ptr += arg_len; len -= arg_len;
+
+	switch(uc->cmd) {
+	case UC_SETSYNC:
+	case UC_SETTAGS:
+	case UC_SETWIDE:
+	case UC_SETFLAG:
+	case UC_RESETDEV:
+	case UC_CLEARDEV:
+		SKIP_SPACES(ptr, len);
+		if ((arg_len = is_keyword(ptr, len, "all")) != 0) {
+			ptr += arg_len; len -= arg_len;
+			uc->target = ~0;
+		} else {
+			GET_INT_ARG(ptr, len, target);
+			uc->target = (1<<target);
+#ifdef DEBUG_PROC_INFO
+printk("sym_user_command: target=%ld\n", target);
+#endif
+		}
+		break;
+	}
+
+	switch(uc->cmd) {
+	case UC_SETVERBOSE:
+	case UC_SETSYNC:
+	case UC_SETTAGS:
+	case UC_SETWIDE:
+		SKIP_SPACES(ptr, len);
+		GET_INT_ARG(ptr, len, uc->data);
+#ifdef DEBUG_PROC_INFO
+printk("sym_user_command: data=%ld\n", uc->data);
+#endif
+		break;
+#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
+	case UC_SETDEBUG:
+		while (len > 0) {
+			SKIP_SPACES(ptr, len);
+			if	((arg_len = is_keyword(ptr, len, "alloc")))
+				uc->data |= DEBUG_ALLOC;
+			else if	((arg_len = is_keyword(ptr, len, "phase")))
+				uc->data |= DEBUG_PHASE;
+			else if	((arg_len = is_keyword(ptr, len, "queue")))
+				uc->data |= DEBUG_QUEUE;
+			else if	((arg_len = is_keyword(ptr, len, "result")))
+				uc->data |= DEBUG_RESULT;
+			else if	((arg_len = is_keyword(ptr, len, "scatter")))
+				uc->data |= DEBUG_SCATTER;
+			else if	((arg_len = is_keyword(ptr, len, "script")))
+				uc->data |= DEBUG_SCRIPT;
+			else if	((arg_len = is_keyword(ptr, len, "tiny")))
+				uc->data |= DEBUG_TINY;
+			else if	((arg_len = is_keyword(ptr, len, "timing")))
+				uc->data |= DEBUG_TIMING;
+			else if	((arg_len = is_keyword(ptr, len, "nego")))
+				uc->data |= DEBUG_NEGO;
+			else if	((arg_len = is_keyword(ptr, len, "tags")))
+				uc->data |= DEBUG_TAGS;
+			else if	((arg_len = is_keyword(ptr, len, "pointer")))
+				uc->data |= DEBUG_POINTER;
+			else
+				return -EINVAL;
+			ptr += arg_len; len -= arg_len;
+		}
+#ifdef DEBUG_PROC_INFO
+printk("sym_user_command: data=%ld\n", uc->data);
+#endif
+		break;
+#endif /* SYM_LINUX_DEBUG_CONTROL_SUPPORT */
+	case UC_SETFLAG:
+		while (len > 0) {
+			SKIP_SPACES(ptr, len);
+			if	((arg_len = is_keyword(ptr, len, "no_disc")))
+				uc->data &= ~SYM_DISC_ENABLED;
+			else
+				return -EINVAL;
+			ptr += arg_len; len -= arg_len;
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (len)
+		return -EINVAL;
+	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(np->s.host->host_lock, flags);
+		sym_exec_user_command (np, uc);
+		spin_unlock_irqrestore(np->s.host->host_lock, flags);
+	}
+	return length;
+}
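+
+/*
+ *  Illustrative control strings accepted by the parser above (the
+ *  values are hypothetical): "settags 2 16" requests 16 tags on
+ *  target 2, "setverbose 1" sets the verbosity level, "resetdev all"
+ *  requests a reset of every target, and "setdebug tags nego" enables
+ *  the tag and negotiation debug flags.
+ */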
+
+#endif	/* SYM_LINUX_USER_COMMAND_SUPPORT */
+
+
+#ifdef SYM_LINUX_USER_INFO_SUPPORT
+/*
+ *  Information through the proc file system.
+ */
+struct info_str {
+	char *buffer;
+	int length;
+	int offset;
+	int pos;
+};
+
+static void copy_mem_info(struct info_str *info, char *data, int len)
+{
+	if (info->pos + len > info->length)
+		len = info->length - info->pos;
+
+	if (info->pos + len < info->offset) {
+		info->pos += len;
+		return;
+	}
+	if (info->pos < info->offset) {
+		data += (info->offset - info->pos);
+		len  -= (info->offset - info->pos);
+	}
+
+	if (len > 0) {
+		memcpy(info->buffer + info->pos, data, len);
+		info->pos += len;
+	}
+}
+
+static int copy_info(struct info_str *info, char *fmt, ...)
+{
+	va_list args;
+	char buf[81];
+	int len;
+
+	va_start(args, fmt);
+	len = vsprintf(buf, fmt, args);
+	va_end(args);
+
+	copy_mem_info(info, buf, len);
+	return len;
+}
+
+/*
+ *  Copy formatted information into the supplied buffer.
+ */
+static int sym_host_info(struct sym_hcb *np, char *ptr, off_t offset, int len)
+{
+	struct info_str info;
+
+	info.buffer	= ptr;
+	info.length	= len;
+	info.offset	= offset;
+	info.pos	= 0;
+
+	copy_info(&info, "Chip " NAME53C "%s, device id 0x%x, "
+			 "revision id 0x%x\n",
+			 np->s.chip_name, np->device_id, np->revision_id);
+	copy_info(&info, "At PCI address %s, IRQ " IRQ_FMT "\n",
+		pci_name(np->s.device), IRQ_PRM(np->s.irq));
+	copy_info(&info, "Min. period factor %d, %s SCSI BUS%s\n",
+			 (int) (np->minsync_dt ? np->minsync_dt : np->minsync),
+			 np->maxwide ? "Wide" : "Narrow",
+			 np->minsync_dt ? ", DT capable" : "");
+
+	copy_info(&info, "Max. started commands %d, "
+			 "max. commands per LUN %d\n",
+			 SYM_CONF_MAX_START, SYM_CONF_MAX_TAG);
+
+	return info.pos > info.offset? info.pos - info.offset : 0;
+}
+#endif /* SYM_LINUX_USER_INFO_SUPPORT */
+
+/*
+ *  Entry point of the scsi proc fs of the driver.
+ *  - func = 0 means read  (returns adapter info)
+ *  - func = 1 means write (not yet merged from sym53c8xx)
+ */
+static int sym53c8xx_proc_info(struct Scsi_Host *host, char *buffer,
+			char **start, off_t offset, int length, int func)
+{
+	struct sym_hcb *np = sym_get_hcb(host);
+	int retv;
+
+	if (func) {
+#ifdef	SYM_LINUX_USER_COMMAND_SUPPORT
+		retv = sym_user_command(np, buffer, length);
+#else
+		retv = -EINVAL;
+#endif
+	} else {
+		if (start)
+			*start = buffer;
+#ifdef SYM_LINUX_USER_INFO_SUPPORT
+		retv = sym_host_info(np, buffer, offset, length);
+#else
+		retv = -EINVAL;
+#endif
+	}
+
+	return retv;
+}
+#endif /* SYM_LINUX_PROC_INFO_SUPPORT */
+
+/*
+ *	Free controller resources.
+ */
+static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev)
+{
+	/*
+	 *  Free O/S specific resources.
+	 */
+	if (np->s.irq)
+		free_irq(np->s.irq, np);
+	if (np->s.ioaddr)
+		pci_iounmap(pdev, np->s.ioaddr);
+	if (np->s.ramaddr)
+		pci_iounmap(pdev, np->s.ramaddr);
+	/*
+	 *  Free O/S independent resources.
+	 */
+	sym_hcb_free(np);
+
+	sym_mfree_dma(np, sizeof(*np), "HCB");
+}
+
+/*
+ *  Ask/tell the system about DMA addressing.
+ */
+static int sym_setup_bus_dma_mask(struct sym_hcb *np)
+{
+#if SYM_CONF_DMA_ADDRESSING_MODE > 0
+#if   SYM_CONF_DMA_ADDRESSING_MODE == 1
+#define	DMA_DAC_MASK	0x000000ffffffffffULL /* 40-bit */
+#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
+#define	DMA_DAC_MASK	DMA_64BIT_MASK
+#endif
+	if ((np->features & FE_DAC) &&
+			!pci_set_dma_mask(np->s.device, DMA_DAC_MASK)) {
+		np->use_dac = 1;
+		return 0;
+	}
+#endif
+
+	if (!pci_set_dma_mask(np->s.device, DMA_32BIT_MASK))
+		return 0;
+
+	printf_warning("%s: No suitable DMA available\n", sym_name(np));
+	return -1;
+}
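+
+/*
+ *  Note on the masks above: addressing mode 1 uses a 40-bit DAC mask,
+ *  mode 2 uses the full 64-bit mask; in both cases the code falls back
+ *  to a 32-bit mask when the chip lacks FE_DAC or the wider mask is
+ *  rejected by the PCI layer.
+ */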
+
+/*
+ *  Host attach and initialisations.
+ *
+ *  Allocate host data and ncb structure.
+ *  Remap MMIO region.
+ *  Do chip initialization.
+ *  If all is OK, install interrupt handling and
+ *  start the timer daemon.
+ */
+static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
+		int unit, struct sym_device *dev)
+{
+	struct host_data *host_data;
+	struct sym_hcb *np = NULL;
+	struct Scsi_Host *instance = NULL;
+	struct pci_dev *pdev = dev->pdev;
+	unsigned long flags;
+	struct sym_fw *fw;
+
+	printk(KERN_INFO
+		"sym%d: <%s> rev 0x%x at pci %s irq " IRQ_FMT "\n",
+		unit, dev->chip.name, dev->chip.revision_id,
+		pci_name(pdev), IRQ_PRM(pdev->irq));
+
+	/*
+	 *  Get the firmware for this chip.
+	 */
+	fw = sym_find_firmware(&dev->chip);
+	if (!fw)
+		goto attach_failed;
+
+	/*
+	 *	Allocate host_data structure
+	 */
+	instance = scsi_host_alloc(tpnt, sizeof(*host_data));
+	if (!instance)
+		goto attach_failed;
+	host_data = (struct host_data *) instance->hostdata;
+
+	/*
+	 *  Immediately allocate the host control block,
+	 *  since we only expect to succeed. :)
+	 *  We keep track in the HCB of all the resources that 
+	 *  are to be released on error.
+	 */
+	np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB");
+	if (!np)
+		goto attach_failed;
+	np->s.device = pdev;
+	np->bus_dmat = &pdev->dev; /* Results in 1 DMA pool per HBA */
+	host_data->ncb = np;
+	np->s.host = instance;
+
+	pci_set_drvdata(pdev, np);
+
+	/*
+	 *  Copy some useful infos to the HCB.
+	 */
+	np->hcb_ba	= vtobus(np);
+	np->verbose	= sym_driver_setup.verbose;
+	np->s.device	= pdev;
+	np->s.unit	= unit;
+	np->device_id	= dev->chip.device_id;
+	np->revision_id	= dev->chip.revision_id;
+	np->features	= dev->chip.features;
+	np->clock_divn	= dev->chip.nr_divisor;
+	np->maxoffs	= dev->chip.offset_max;
+	np->maxburst	= dev->chip.burst_max;
+	np->myaddr	= dev->host_id;
+
+	/*
+	 *  Edit its name.
+	 */
+	strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name));
+	sprintf(np->s.inst_name, "sym%d", np->s.unit);
+
+	if (sym_setup_bus_dma_mask(np))
+		goto attach_failed;
+
+	/*
+	 *  Try to map the controller chip to
+	 *  virtual and physical memory.
+	 */
+	np->mmio_ba = (u32)dev->mmio_base;
+	np->s.ioaddr	= dev->s.ioaddr;
+	np->s.ramaddr	= dev->s.ramaddr;
+	np->s.io_ws = (np->features & FE_IO256) ? 256 : 128;
+
+	/*
+	 *  Map on-chip RAM if present and supported.
+	 */
+	if (!(np->features & FE_RAM))
+		dev->ram_base = 0;
+	if (dev->ram_base) {
+		np->ram_ba = (u32)dev->ram_base;
+		np->ram_ws = (np->features & FE_RAM8K) ? 8192 : 4096;
+	}
+
+	if (sym_hcb_attach(instance, fw, dev->nvram))
+		goto attach_failed;
+
+	/*
+	 *  Install the interrupt handler.
+	 *  If we synchronize the C code with SCRIPTS on interrupt,
+	 *  we do not want to share the INTR line at all.
+	 */
+	if (request_irq(pdev->irq, sym53c8xx_intr, SA_SHIRQ, NAME53C8XX, np)) {
+		printf_err("%s: request irq %d failure\n",
+			sym_name(np), pdev->irq);
+		goto attach_failed;
+	}
+	np->s.irq = pdev->irq;
+
+	/*
+	 *  After SCSI devices have been opened, we cannot
+	 *  reset the bus safely, so we do it here.
+	 */
+	spin_lock_irqsave(instance->host_lock, flags);
+	if (sym_reset_scsi_bus(np, 0))
+		goto reset_failed;
+
+	/*
+	 *  Start the SCRIPTS.
+	 */
+	sym_start_up (np, 1);
+
+	/*
+	 *  Start the timer daemon
+	 */
+	init_timer(&np->s.timer);
+	np->s.timer.data     = (unsigned long) np;
+	np->s.timer.function = sym53c8xx_timer;
+	np->s.lasttime = 0;
+	sym_timer (np);
+
+	/*
+	 *  Fill Linux host instance structure
+	 *  and return success.
+	 */
+	instance->max_channel	= 0;
+	instance->this_id	= np->myaddr;
+	instance->max_id	= np->maxwide ? 16 : 8;
+	instance->max_lun	= SYM_CONF_MAX_LUN;
+	instance->unique_id	= pci_resource_start(pdev, 0);
+	instance->cmd_per_lun	= SYM_CONF_MAX_TAG;
+	instance->can_queue	= (SYM_CONF_MAX_START-2);
+	instance->sg_tablesize	= SYM_CONF_MAX_SG;
+	instance->max_cmd_len	= 16;
+	BUG_ON(sym2_transport_template == NULL);
+	instance->transportt	= sym2_transport_template;
+
+	spin_unlock_irqrestore(instance->host_lock, flags);
+
+	return instance;
+
+ reset_failed:
+	printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, "
+		   "TERMINATION, DEVICE POWER etc.!\n", sym_name(np));
+	spin_unlock_irqrestore(instance->host_lock, flags);
+ attach_failed:
+	if (!instance)
+		return NULL;
+	printf_info("%s: giving up ...\n", sym_name(np));
+	if (np)
+		sym_free_resources(np, pdev);
+	scsi_host_put(instance);
+
+	return NULL;
+}
+
+
+/*
+ *    Detect and try to read SYMBIOS and TEKRAM NVRAM.
+ */
+#if SYM_CONF_NVRAM_SUPPORT
+static void __devinit sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
+{
+	devp->nvram = nvp;
+	devp->device_id = devp->chip.device_id;
+	nvp->type = 0;
+
+	sym_read_nvram(devp, nvp);
+}
+#else
+static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
+{
+}
+#endif	/* SYM_CONF_NVRAM_SUPPORT */
+
+static int __devinit sym_check_supported(struct sym_device *device)
+{
+	struct sym_chip *chip;
+	struct pci_dev *pdev = device->pdev;
+	u_char revision;
+	unsigned long io_port = pci_resource_start(pdev, 0);
+	int i;
+
+	/*
+	 *  If the user excluded this chip, do not initialize it.
+	 *  I hate this code so much.  Must kill it.
+	 */
+	if (io_port) {
+		for (i = 0 ; i < 8 ; i++) {
+			if (sym_driver_setup.excludes[i] == io_port)
+				return -ENODEV;
+		}
+	}
+
+	/*
+	 * Check if the chip is supported.  Then copy the chip description
+	 * to our device structure so we can make it match the actual device
+	 * and options.
+	 */
+	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+	chip = sym_lookup_chip_table(pdev->device, revision);
+	if (!chip) {
+		dev_info(&pdev->dev, "device not supported\n");
+		return -ENODEV;
+	}
+	memcpy(&device->chip, chip, sizeof(device->chip));
+	device->chip.revision_id = revision;
+
+	return 0;
+}
+
+/*
+ * Ignore Symbios chips controlled by various RAID controllers.
+ * These controllers set the value 0x52414944 ("RAID" in ASCII) at RAM end - 16.
+ */
+static int __devinit sym_check_raid(struct sym_device *device)
+{
+	unsigned int ram_size, ram_val;
+
+	if (!device->s.ramaddr)
+		return 0;
+
+	if (device->chip.features & FE_RAM8K)
+		ram_size = 8192;
+	else
+		ram_size = 4096;
+
+	ram_val = readl(device->s.ramaddr + ram_size - 16);
+	if (ram_val != 0x52414944)
+		return 0;
+
+	dev_info(&device->pdev->dev,
+			"not initializing, driven by RAID controller.\n");
+	return -ENODEV;
+}
+
+static int __devinit sym_set_workarounds(struct sym_device *device)
+{
+	struct sym_chip *chip = &device->chip;
+	struct pci_dev *pdev = device->pdev;
+	u_short status_reg;
+
+	/*
+	 *  (ITEM 12 of a DEL about the 896 that I do not have yet).
+	 *  We must ensure the chip will use WRITE AND INVALIDATE.
+	 *  The revision number limit is for now arbitrary.
+	 */
+	if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && chip->revision_id < 0x4) {
+		chip->features	|= (FE_WRIE | FE_CLSE);
+	}
+
+	/* If the chip can do Memory Write Invalidate, enable it */
+	if (chip->features & FE_WRIE) {
+		if (pci_set_mwi(pdev))
+			return -ENODEV;
+	}
+
+	/*
+	 *  Workaround for an errant bit in the 895A. The 66MHz
+	 *  capable bit is set erroneously. Clear this bit.
+	 *  (Item 1 DEL 533)
+	 *
+	 *  Make sure Config space and Features agree.
+	 *
+	 *  Recall: writes to the status register are not normal -
+	 *  write a 1 to clear and a 0 to leave unchanged.
+	 *  Can only reset bits.
+	 */
+	pci_read_config_word(pdev, PCI_STATUS, &status_reg);
+	if (chip->features & FE_66MHZ) {
+		if (!(status_reg & PCI_STATUS_66MHZ))
+			chip->features &= ~FE_66MHZ;
+	} else {
+		if (status_reg & PCI_STATUS_66MHZ) {
+			status_reg = PCI_STATUS_66MHZ;
+			pci_write_config_word(pdev, PCI_STATUS, status_reg);
+			pci_read_config_word(pdev, PCI_STATUS, &status_reg);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ *  Read and check the PCI configuration for any detected NCR 
+ *  boards and save data for attaching after all boards have 
+ *  been detected.
+ */
+static void __devinit
+sym_init_device(struct pci_dev *pdev, struct sym_device *device)
+{
+	int i;
+
+	device->host_id = SYM_SETUP_HOST_ID;
+	device->pdev = pdev;
+
+	i = pci_get_base_address(pdev, 1, &device->mmio_base);
+	pci_get_base_address(pdev, i, &device->ram_base);
+
+#ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED
+	if (device->mmio_base)
+		device->s.ioaddr = pci_iomap(pdev, 1,
+						pci_resource_len(pdev, 1));
+#endif
+	if (!device->s.ioaddr)
+		device->s.ioaddr = pci_iomap(pdev, 0,
+						pci_resource_len(pdev, 0));
+	if (device->ram_base)
+		device->s.ramaddr = pci_iomap(pdev, i,
+						pci_resource_len(pdev, i));
+}
+
+/*
+ * The NCR PQS and PDS cards are constructed as a DEC bridge
+ * behind which sits a proprietary NCR memory controller and
+ * either four or two 53c875s as separate devices.  We can tell
+ * if an 875 is part of a PQS/PDS or not since if it is, it will
+ * be on the same bus as the memory controller.  In its usual
+ * mode of operation, the 875s are slaved to the memory
+ * controller for all transfers.  To operate with the Linux
+ * driver, the memory controller is disabled and the 875s
+ * freed to function independently.  The only wrinkle is that
+ * the preset SCSI ID (which may be zero) must be read in from
+ * a special configuration space register of the 875.
+ */
+static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev)
+{
+	int slot;
+	u8 tmp;
+
+	for (slot = 0; slot < 256; slot++) {
+		struct pci_dev *memc = pci_get_slot(pdev->bus, slot);
+
+		if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) {
+			pci_dev_put(memc);
+			continue;
+		}
+
+		/* bit 1: allow individual 875 configuration */
+		pci_read_config_byte(memc, 0x44, &tmp);
+		if ((tmp & 0x2) == 0) {
+			tmp |= 0x2;
+			pci_write_config_byte(memc, 0x44, tmp);
+		}
+
+		/* bit 2: drive individual 875 interrupts to the bus */
+		pci_read_config_byte(memc, 0x45, &tmp);
+		if ((tmp & 0x4) == 0) {
+			tmp |= 0x4;
+			pci_write_config_byte(memc, 0x45, tmp);
+		}
+
+		pci_dev_put(memc);
+		break;
+	}
+
+	pci_read_config_byte(pdev, 0x84, &tmp);
+	sym_dev->host_id = tmp;
+}
+
+/*
+ *  Called before unloading the module.
+ *  Detach the host.
+ *  We have to free resources and halt the NCR chip.
+ */
+static int sym_detach(struct sym_hcb *np, struct pci_dev *pdev)
+{
+	printk("%s: detaching ...\n", sym_name(np));
+
+	del_timer_sync(&np->s.timer);
+
+	/*
+	 * Reset NCR chip.
+	 * We should use sym_soft_reset(), but we don't want to do 
+	 * so, since we may not be safe if interrupts occur.
+	 */
+	printk("%s: resetting chip\n", sym_name(np));
+	OUTB(np, nc_istat, SRST);
+	udelay(10);
+	OUTB(np, nc_istat, 0);
+
+	sym_free_resources(np, pdev);
+
+	return 1;
+}
+
+/*
+ * Driver host template.
+ */
+static struct scsi_host_template sym2_template = {
+	.module			= THIS_MODULE,
+	.name			= "sym53c8xx",
+	.info			= sym53c8xx_info, 
+	.queuecommand		= sym53c8xx_queue_command,
+	.slave_alloc		= sym53c8xx_slave_alloc,
+	.slave_configure	= sym53c8xx_slave_configure,
+	.slave_destroy		= sym53c8xx_slave_destroy,
+	.eh_abort_handler	= sym53c8xx_eh_abort_handler,
+	.eh_device_reset_handler = sym53c8xx_eh_device_reset_handler,
+	.eh_bus_reset_handler	= sym53c8xx_eh_bus_reset_handler,
+	.eh_host_reset_handler	= sym53c8xx_eh_host_reset_handler,
+	.this_id		= 7,
+	.use_clustering		= DISABLE_CLUSTERING,
+#ifdef SYM_LINUX_PROC_INFO_SUPPORT
+	.proc_info		= sym53c8xx_proc_info,
+	.proc_name		= NAME53C8XX,
+#endif
+};
+
+static int attach_count;
+
+static int __devinit sym2_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	struct sym_device sym_dev;
+	struct sym_nvram nvram;
+	struct Scsi_Host *instance;
+
+	memset(&sym_dev, 0, sizeof(sym_dev));
+	memset(&nvram, 0, sizeof(nvram));
+
+	if (pci_enable_device(pdev))
+		goto leave;
+
+	pci_set_master(pdev);
+
+	if (pci_request_regions(pdev, NAME53C8XX))
+		goto disable;
+
+	sym_init_device(pdev, &sym_dev);
+	if (sym_check_supported(&sym_dev))
+		goto free;
+
+	if (sym_check_raid(&sym_dev))
+		goto leave;	/* Don't disable the device */
+
+	if (sym_set_workarounds(&sym_dev))
+		goto free;
+
+	sym_config_pqs(pdev, &sym_dev);
+
+	sym_get_nvram(&sym_dev, &nvram);
+
+	instance = sym_attach(&sym2_template, attach_count, &sym_dev);
+	if (!instance)
+		goto free;
+
+	if (scsi_add_host(instance, &pdev->dev))
+		goto detach;
+	scsi_scan_host(instance);
+
+	attach_count++;
+
+	return 0;
+
+ detach:
+	sym_detach(pci_get_drvdata(pdev), pdev);
+ free:
+	pci_release_regions(pdev);
+ disable:
+	pci_disable_device(pdev);
+ leave:
+	return -ENODEV;
+}
+
+static void __devexit sym2_remove(struct pci_dev *pdev)
+{
+	struct sym_hcb *np = pci_get_drvdata(pdev);
+	struct Scsi_Host *host = np->s.host;
+
+	scsi_remove_host(host);
+	scsi_host_put(host);
+
+	sym_detach(np, pdev);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+	attach_count--;
+}
+
+static void sym2_get_signalling(struct Scsi_Host *shost)
+{
+	struct sym_hcb *np = sym_get_hcb(shost);
+	enum spi_signal_type type;
+
+	switch (np->scsi_mode) {
+	case SMODE_SE:
+		type = SPI_SIGNAL_SE;
+		break;
+	case SMODE_LVD:
+		type = SPI_SIGNAL_LVD;
+		break;
+	case SMODE_HVD:
+		type = SPI_SIGNAL_HVD;
+		break;
+	default:
+		type = SPI_SIGNAL_UNKNOWN;
+		break;
+	}
+	spi_signalling(shost) = type;
+}
+
+static void sym2_set_offset(struct scsi_target *starget, int offset)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct sym_hcb *np = sym_get_hcb(shost);
+	struct sym_tcb *tp = &np->target[starget->id];
+
+	tp->tgoal.offset = offset;
+	tp->tgoal.check_nego = 1;
+}
+
+static void sym2_set_period(struct scsi_target *starget, int period)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct sym_hcb *np = sym_get_hcb(shost);
+	struct sym_tcb *tp = &np->target[starget->id];
+
+	/* have to have DT for these transfers */
+	if (period <= np->minsync)
+		tp->tgoal.dt = 1;
+
+	tp->tgoal.period = period;
+	tp->tgoal.check_nego = 1;
+}
+
+static void sym2_set_width(struct scsi_target *starget, int width)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct sym_hcb *np = sym_get_hcb(shost);
+	struct sym_tcb *tp = &np->target[starget->id];
+
+	/* It is illegal to have DT set on narrow transfers.  If DT is
+	 * clear, we must also clear IU and QAS.  */
+	if (width == 0)
+		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
+
+	tp->tgoal.width = width;
+	tp->tgoal.check_nego = 1;
+}
+
+static void sym2_set_dt(struct scsi_target *starget, int dt)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct sym_hcb *np = sym_get_hcb(shost);
+	struct sym_tcb *tp = &np->target[starget->id];
+
+	/* We must clear QAS and IU if DT is clear */
+	if (dt)
+		tp->tgoal.dt = 1;
+	else
+		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
+	tp->tgoal.check_nego = 1;
+}
+
+static void sym2_set_iu(struct scsi_target *starget, int iu)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct sym_hcb *np = sym_get_hcb(shost);
+	struct sym_tcb *tp = &np->target[starget->id];
+
+	if (iu)
+		tp->tgoal.iu = tp->tgoal.dt = 1;
+	else
+		tp->tgoal.iu = 0;
+	tp->tgoal.check_nego = 1;
+}
+
+static void sym2_set_qas(struct scsi_target *starget, int qas)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct sym_hcb *np = sym_get_hcb(shost);
+	struct sym_tcb *tp = &np->target[starget->id];
+
+	if (qas)
+		tp->tgoal.dt = tp->tgoal.qas = 1;
+	else
+		tp->tgoal.qas = 0;
+	tp->tgoal.check_nego = 1;
+}
+
+
+static struct spi_function_template sym2_transport_functions = {
+	.set_offset	= sym2_set_offset,
+	.show_offset	= 1,
+	.set_period	= sym2_set_period,
+	.show_period	= 1,
+	.set_width	= sym2_set_width,
+	.show_width	= 1,
+	.set_dt		= sym2_set_dt,
+	.show_dt	= 1,
+	.set_iu		= sym2_set_iu,
+	.show_iu	= 1,
+	.set_qas	= sym2_set_qas,
+	.show_qas	= 1,
+	.get_signalling	= sym2_get_signalling,
+};
+
+static struct pci_device_id sym2_id_table[] __devinitdata = {
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C825,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C815,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C810AP,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C885,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C1510,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C895A,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C875A,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_33,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_66,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875J,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, sym2_id_table);
+
+static struct pci_driver sym2_driver = {
+	.name		= NAME53C8XX,
+	.id_table	= sym2_id_table,
+	.probe		= sym2_probe,
+	.remove		= __devexit_p(sym2_remove),
+};
+
+static int __init sym2_init(void)
+{
+	int error;
+
+	sym2_setup_params();
+	sym2_transport_template = spi_attach_transport(&sym2_transport_functions);
+	if (!sym2_transport_template)
+		return -ENODEV;
+
+	error = pci_register_driver(&sym2_driver);
+	if (error)
+		spi_release_transport(sym2_transport_template);
+	return error;
+}
+
+static void __exit sym2_exit(void)
+{
+	pci_unregister_driver(&sym2_driver);
+	spi_release_transport(sym2_transport_template);
+}
+
+module_init(sym2_init);
+module_exit(sym2_exit);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
new file mode 100644
index 0000000..e943f16
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -0,0 +1,300 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef SYM_GLUE_H
+#define SYM_GLUE_H
+
+#include <linux/config.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#ifdef __sparc__
+#  include <asm/irq.h>
+#endif
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_spi.h>
+#include <scsi/scsi_host.h>
+
+#include "sym53c8xx.h"
+#include "sym_defs.h"
+#include "sym_misc.h"
+
+/*
+ * Configuration addendum for Linux.
+ */
+#define	SYM_CONF_TIMER_INTERVAL		((HZ+1)/2)
+
+#define SYM_OPT_HANDLE_DIR_UNKNOWN
+#define SYM_OPT_HANDLE_DEVICE_QUEUEING
+#define SYM_OPT_LIMIT_COMMAND_REORDERING
+
+/*
+ *  Print a message with severity.
+ */
+#define printf_emerg(args...)	printk(KERN_EMERG args)
+#define	printf_alert(args...)	printk(KERN_ALERT args)
+#define	printf_crit(args...)	printk(KERN_CRIT args)
+#define	printf_err(args...)	printk(KERN_ERR	args)
+#define	printf_warning(args...)	printk(KERN_WARNING args)
+#define	printf_notice(args...)	printk(KERN_NOTICE args)
+#define	printf_info(args...)	printk(KERN_INFO args)
+#define	printf_debug(args...)	printk(KERN_DEBUG args)
+#define	printf(args...)		printk(args)
+
+/*
+ *  A 'read barrier' flushes any data that have been prefetched 
+ *  by the processor due to out of order execution. Such a barrier 
+ *  must notably be inserted prior to looking at data that have 
+ *  been DMAed, assuming that the program does memory READs in proper
+ *  order and that the device ensured proper ordering of WRITEs.
+ *
+ *  A 'write barrier' prevents any previous WRITEs from passing later
+ *  WRITEs. Such barriers must be inserted each time another agent 
+ *  relies on ordering of WRITEs.
+ *
+ *  Note that, due to posting of PCI memory writes, we also must 
+ *  insert dummy PCI read transactions when some ordering involving 
+ *  both directions over the PCI does matter. PCI transactions are 
+ *  fully ordered in each direction.
+ */
+
+#define MEMORY_READ_BARRIER()	rmb()
+#define MEMORY_WRITE_BARRIER()	wmb()
+
+/*
+ *  IO function definitions for big/little endian CPU support.
+ *  For now, PCI chips are only supported in little endian addressing mode.
+ */
+
+#ifdef	__BIG_ENDIAN
+
+#define	readw_l2b	readw
+#define	readl_l2b	readl
+#define	writew_b2l	writew
+#define	writel_b2l	writel
+
+#else	/* little endian */
+
+#define	readw_raw	readw
+#define	readl_raw	readl
+#define	writew_raw	writew
+#define	writel_raw	writel
+
+#endif /* endian */
+
+#ifdef	SYM_CONF_CHIP_BIG_ENDIAN
+#error	"Chips in BIG ENDIAN addressing mode are not (yet) supported"
+#endif
+
+/*
+ *  If the CPU and the chip use the same endianness,
+ *  no byte reordering is needed for script patching.
+ *  Macro cpu_to_scr() is to be used for script patching.
+ *  Macro scr_to_cpu() is to be used for getting a DWORD 
+ *  from the script.
+ */
+
+#define cpu_to_scr(dw)	cpu_to_le32(dw)
+#define scr_to_cpu(dw)	le32_to_cpu(dw)
+
+/*
+ *  Remap some status field values.
+ */
+#define CAM_REQ_CMP		DID_OK
+#define CAM_SEL_TIMEOUT		DID_NO_CONNECT
+#define CAM_CMD_TIMEOUT		DID_TIME_OUT
+#define CAM_REQ_ABORTED		DID_ABORT
+#define CAM_UNCOR_PARITY	DID_PARITY
+#define CAM_SCSI_BUS_RESET	DID_RESET	
+#define CAM_REQUEUE_REQ		DID_SOFT_ERROR
+#define	CAM_UNEXP_BUSFREE	DID_ERROR
+#define	CAM_SCSI_BUSY		DID_BUS_BUSY
+
+#define	CAM_DEV_NOT_THERE	DID_NO_CONNECT
+#define	CAM_REQ_INVALID		DID_ERROR
+#define	CAM_REQ_TOO_BIG		DID_ERROR
+
+#define	CAM_RESRC_UNAVAIL	DID_ERROR
+
+/*
+ *  Remap data direction values.
+ */
+#define CAM_DIR_NONE		DMA_NONE
+#define CAM_DIR_IN		DMA_FROM_DEVICE
+#define CAM_DIR_OUT		DMA_TO_DEVICE
+#define CAM_DIR_UNKNOWN		DMA_BIDIRECTIONAL
+
+/*
+ *  These ones are used as return code from 
+ *  error recovery handlers under Linux.
+ */
+#define SCSI_SUCCESS	SUCCESS
+#define SCSI_FAILED	FAILED
+
+/*
+ *  System specific target data structure.
+ *  None for now, under Linux.
+ */
+/* #define SYM_HAVE_STCB */
+
+/*
+ *  System specific lun data structure.
+ */
+#define SYM_HAVE_SLCB
+struct sym_slcb {
+	u_short	reqtags;	/* Number of tags requested by user */
+	u_short scdev_depth;	/* Queue depth set in select_queue_depth() */
+};
+
+/*
+ *  System specific command data structure.
+ *  Not needed under Linux.
+ */
+/* struct sym_sccb */
+
+/*
+ *  System specific host data structure.
+ */
+struct sym_shcb {
+	/*
+	 *  Chip and controller identification.
+	 */
+	int		unit;
+	char		inst_name[16];
+	char		chip_name[8];
+	struct pci_dev	*device;
+
+	struct Scsi_Host *host;
+
+	void __iomem *	ioaddr;		/* MMIO kernel io address	*/
+	void __iomem *	ramaddr;	/* RAM  kernel io address	*/
+	u_short		io_ws;		/* IO window size		*/
+	int		irq;		/* IRQ number			*/
+
+	struct timer_list timer;	/* Timer handler link header	*/
+	u_long		lasttime;
+	u_long		settle_time;	/* Resetting the SCSI BUS	*/
+	u_char		settle_time_valid;
+};
+
+/*
+ *  Return the name of the controller.
+ */
+#define sym_name(np) (np)->s.inst_name
+
+struct sym_nvram;
+
+/*
+ * The IO macros require a struct called 's' and are abused in sym_nvram.c
+ */
+struct sym_device {
+	struct pci_dev *pdev;
+	unsigned long mmio_base;
+	unsigned long ram_base;
+	struct {
+		void __iomem *ioaddr;
+		void __iomem *ramaddr;
+	} s;
+	struct sym_chip chip;
+	struct sym_nvram *nvram;
+	u_short device_id;
+	u_char host_id;
+};
+
+/*
+ *  Driver host data structure.
+ */
+struct host_data {
+	struct sym_hcb *ncb;
+};
+
+static inline struct sym_hcb * sym_get_hcb(struct Scsi_Host *host)
+{
+	return ((struct host_data *)host->hostdata)->ncb;
+}
+
+#include "sym_fw.h"
+#include "sym_hipd.h"
+
+/*
+ *  Set the status field of a CAM CCB.
+ */
+static __inline void 
+sym_set_cam_status(struct scsi_cmnd *cmd, int status)
+{
+	cmd->result &= ~(0xff  << 16);
+	cmd->result |= (status << 16);
+}
+
+/*
+ *  Get the status field of a CAM CCB.
+ */
+static __inline int 
+sym_get_cam_status(struct scsi_cmnd *cmd)
+{
+	return host_byte(cmd->result);
+}
+
+/*
+ *  Build CAM result for a successful IO and for a failed IO.
+ */
+static __inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid)
+{
+	cmd->resid = resid;
+	cmd->result = (((DID_OK) << 16) + ((cp->ssss_status) & 0x7f));
+}
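+
+/*
+ *  Layout of cmd->result as built by the helpers above and by
+ *  sym_set_cam_result_error(): the driver byte lives in bits 24-31,
+ *  the host (CAM) status in bits 16-23, and the SCSI status byte in
+ *  bits 0-7.
+ */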
+void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid);
+
+void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *ccb);
+#define sym_print_addr(cmd, arg...) dev_info(&cmd->device->sdev_gendev , ## arg)
+void sym_xpt_async_bus_reset(struct sym_hcb *np);
+void sym_xpt_async_sent_bdr(struct sym_hcb *np, int target);
+int  sym_setup_data_and_start (struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
+void sym_log_bus_error(struct sym_hcb *np);
+void sym_sniff_inquiry(struct sym_hcb *np, struct scsi_cmnd *cmd, int resid);
+
+#endif /* SYM_GLUE_H */
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
new file mode 100644
index 0000000..50a176b
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -0,0 +1,5865 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ * Copyright (c) 2003-2005  Matthew Wilcox <matthew@wil.cx>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include "sym_glue.h"
+#include "sym_nvram.h"
+
+#if 0
+#define SYM_DEBUG_GENERIC_SUPPORT
+#endif
+
+/*
+ *  Needed function prototypes.
+ */
+static void sym_int_ma (struct sym_hcb *np);
+static void sym_int_sir (struct sym_hcb *np);
+static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np);
+static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa);
+static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln);
+static void sym_complete_error (struct sym_hcb *np, struct sym_ccb *cp);
+static void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp);
+static int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp);
+
+/*
+ *  Print a buffer in hexadecimal format with a ".\n" at end.
+ */
+static void sym_printl_hex(u_char *p, int n)
+{
+	while (n-- > 0)
+		printf (" %x", *p++);
+	printf (".\n");
+}
+
+/*
+ *  Print out the content of a SCSI message.
+ */
+static int sym_show_msg (u_char * msg)
+{
+	u_char i;
+	printf ("%x",*msg);
+	if (*msg==M_EXTENDED) {
+		for (i=1;i<8;i++) {
+			if (i-1>msg[1]) break;
+			printf ("-%x",msg[i]);
+		}
+		return (i+1);
+	} else if ((*msg & 0xf0) == 0x20) {
+		printf ("-%x",msg[1]);
+		return (2);
+	}
+	return (1);
+}
+
+static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
+{
+	sym_print_addr(cp->cmd, "%s: ", label);
+
+	sym_show_msg(msg);
+	printf(".\n");
+}
+
+static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_char *msg)
+{
+	struct sym_tcb *tp = &np->target[target];
+	dev_info(&tp->sdev->sdev_target->dev, "%s: ", label);
+
+	sym_show_msg(msg);
+	printf(".\n");
+}
+
+/*
+ *  Print something that tells about extended errors.
+ */
+void sym_print_xerr(struct scsi_cmnd *cmd, int x_status)
+{
+	if (x_status & XE_PARITY_ERR) {
+		sym_print_addr(cmd, "unrecovered SCSI parity error.\n");
+	}
+	if (x_status & XE_EXTRA_DATA) {
+		sym_print_addr(cmd, "extraneous data discarded.\n");
+	}
+	if (x_status & XE_BAD_PHASE) {
+		sym_print_addr(cmd, "illegal scsi phase (4/5).\n");
+	}
+	if (x_status & XE_SODL_UNRUN) {
+		sym_print_addr(cmd, "ODD transfer in DATA OUT phase.\n");
+	}
+	if (x_status & XE_SWIDE_OVRUN) {
+		sym_print_addr(cmd, "ODD transfer in DATA IN phase.\n");
+	}
+}
+
+/*
+ *  Return a string for SCSI BUS mode.
+ */
+static char *sym_scsi_bus_mode(int mode)
+{
+	switch(mode) {
+	case SMODE_HVD:	return "HVD";
+	case SMODE_SE:	return "SE";
+	case SMODE_LVD: return "LVD";
+	}
+	return "??";
+}
+
+/*
+ *  Soft reset the chip.
+ *
+ *  Raising SRST when the chip is running may cause 
+ *  problems on dual function chips (see below).
+ *  On the other hand, LVD devices need some delay 
+ *  to settle and report actual BUS mode in STEST4.
+ */
+static void sym_chip_reset (struct sym_hcb *np)
+{
+	OUTB(np, nc_istat, SRST);
+	udelay(10);
+	OUTB(np, nc_istat, 0);
+	udelay(2000);	/* For BUS MODE to settle */
+}
+
+/*
+ *  Really soft reset the chip. :)
+ *
+ *  Some 896 and 876 chip revisions may hang up if we set 
+ *  the SRST (soft reset) bit at the wrong time when SCRIPTS 
+ *  are running.
+ *  So, we need to abort the current operation prior to 
+ *  soft resetting the chip.
+ */
+static void sym_soft_reset (struct sym_hcb *np)
+{
+	u_char istat = 0;
+	int i;
+
+	if (!(np->features & FE_ISTAT1) || !(INB(np, nc_istat1) & SCRUN))
+		goto do_chip_reset;
+
+	OUTB(np, nc_istat, CABRT);
+	for (i = 100000 ; i ; --i) {
+		istat = INB(np, nc_istat);
+		if (istat & SIP) {
+			INW(np, nc_sist);
+		}
+		else if (istat & DIP) {
+			if (INB(np, nc_dstat) & ABRT)
+				break;
+		}
+		udelay(5);
+	}
+	OUTB(np, nc_istat, 0);
+	if (!i)
+		printf("%s: unable to abort current chip operation, "
+		       "ISTAT=0x%02x.\n", sym_name(np), istat);
+do_chip_reset:
+	sym_chip_reset(np);
+}
+
+/*
+ *  Start reset process.
+ *
+ *  The interrupt handler will reinitialize the chip.
+ */
+static void sym_start_reset(struct sym_hcb *np)
+{
+	sym_reset_scsi_bus(np, 1);
+}
+ 
+int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int)
+{
+	u32 term;
+	int retv = 0;
+
+	sym_soft_reset(np);	/* Soft reset the chip */
+	if (enab_int)
+		OUTW(np, nc_sien, RST);
+	/*
+	 *  Enable Tolerant, reset IRQD if present and 
+	 *  properly set IRQ mode, prior to resetting the bus.
+	 */
+	OUTB(np, nc_stest3, TE);
+	OUTB(np, nc_dcntl, (np->rv_dcntl & IRQM));
+	OUTB(np, nc_scntl1, CRST);
+	udelay(200);
+
+	if (!SYM_SETUP_SCSI_BUS_CHECK)
+		goto out;
+	/*
+	 *  Check for no terminators or SCSI bus shorts to ground.
+	 *  Read SCSI data bus, data parity bits and control signals.
+	 *  We are expecting RESET to be TRUE and other signals to be 
+	 *  FALSE.
+	 */
+	term =	INB(np, nc_sstat0);
+	term =	((term & 2) << 7) + ((term & 1) << 17);	/* rst sdp0 */
+	term |= ((INB(np, nc_sstat2) & 0x01) << 26) |	/* sdp1     */
+		((INW(np, nc_sbdl) & 0xff)   << 9)  |	/* d7-0     */
+		((INW(np, nc_sbdl) & 0xff00) << 10) |	/* d15-8    */
+		INB(np, nc_sbcl);	/* req ack bsy sel atn msg cd io    */
+
+	if (!np->maxwide)
+		term &= 0x3ffff;
+
+	if (term != (2<<7)) {
+		printf("%s: suspicious SCSI data while resetting the BUS.\n",
+			sym_name(np));
+		printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
+			"0x%lx, expecting 0x%lx\n",
+			sym_name(np),
+			(np->features & FE_WIDE) ? "dp1,d15-8," : "",
+			(u_long)term, (u_long)(2<<7));
+		if (SYM_SETUP_SCSI_BUS_CHECK == 1)
+			retv = 1;
+	}
+out:
+	OUTB(np, nc_scntl1, 0);
+	return retv;
+}
+
+/*
+ *  Select SCSI clock frequency
+ */
+static void sym_selectclock(struct sym_hcb *np, u_char scntl3)
+{
+	/*
+	 *  If multiplier not present or not selected, leave here.
+	 */
+	if (np->multiplier <= 1) {
+		OUTB(np, nc_scntl3, scntl3);
+		return;
+	}
+
+	if (sym_verbose >= 2)
+		printf ("%s: enabling clock multiplier\n", sym_name(np));
+
+	OUTB(np, nc_stest1, DBLEN);	   /* Enable clock multiplier */
+	/*
+	 *  Wait for the LCKFRQ bit to be set if supported by the chip.
+	 *  Otherwise wait 50 micro-seconds (at least).
+	 */
+	if (np->features & FE_LCKFRQ) {
+		int i = 20;
+		while (!(INB(np, nc_stest4) & LCKFRQ) && --i > 0)
+			udelay(20);
+		if (!i)
+			printf("%s: the chip cannot lock the frequency\n",
+				sym_name(np));
+	} else
+		udelay((50+10));
+	OUTB(np, nc_stest3, HSC);		/* Halt the scsi clock	*/
+	OUTB(np, nc_scntl3, scntl3);
+	OUTB(np, nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier	*/
+	OUTB(np, nc_stest3, 0x00);		/* Restart scsi clock 	*/
+}
+
+
+/*
+ *  Determine the chip's clock frequency.
+ *
+ *  This is essential for the negotiation of the synchronous 
+ *  transfer rate.
+ *
+ *  Note: we have to return the correct value.
+ *  THERE IS NO SAFE DEFAULT VALUE.
+ *
+ *  Most NCR/SYMBIOS boards are delivered with a 40 MHz clock.
+ *  53C860 and 53C875 rev. 1 support fast20 transfers but 
+ *  do not have a clock doubler and so are provided with an 
+ *  80 MHz clock. All other fast20 boards incorporate a doubler 
+ *  and so should be delivered with a 40 MHz clock.
+ *  The recent fast40 chips (895/896/895A/1010) use a 40 MHz base 
+ *  clock and provide a clock quadrupler (160 MHz).
+ */
+
+/*
+ *  calculate SCSI clock frequency (in KHz)
+ */
+static unsigned getfreq (struct sym_hcb *np, int gen)
+{
+	unsigned int ms = 0;
+	unsigned int f;
+
+	/*
+	 * Measure GEN timer delay in order 
+	 * to calculate SCSI clock frequency
+	 *
+	 * This code will never execute too
+	 * many loop iterations (if DELAY is 
+	 * reasonably correct). It could get
+	 * too low a delay (too high a freq.)
+	 * if the CPU is slow executing the 
+	 * loop for some reason (an NMI, for
+	 * example). For this reason, if
+	 * multiple measurements are to be 
+	 * performed, we trust the higher delay 
+	 * (lower returned frequency).
+	 */
+	OUTW(np, nc_sien, 0);	/* mask all scsi interrupts */
+	INW(np, nc_sist);	/* clear pending scsi interrupt */
+	OUTB(np, nc_dien, 0);	/* mask all dma interrupts */
+	INW(np, nc_sist);	/* another one, just to be sure :) */
+	/*
+	 * The C1010-33 core does not report GEN in SIST,
+	 * if this interrupt is masked in SIEN.
+	 * I don't know yet if the C1010-66 behaves the same way.
+	 */
+	if (np->features & FE_C10) {
+		OUTW(np, nc_sien, GEN);
+		OUTB(np, nc_istat1, SIRQD);
+	}
+	OUTB(np, nc_scntl3, 4);	   /* set pre-scaler to divide by 3 */
+	OUTB(np, nc_stime1, 0);	   /* disable general purpose timer */
+	OUTB(np, nc_stime1, gen);  /* set to nominal delay of 1<<gen * 125us */
+	while (!(INW(np, nc_sist) & GEN) && ms++ < 100000)
+		udelay(1000/4);    /* count in 1/4 of ms */
+	OUTB(np, nc_stime1, 0);    /* disable general purpose timer */
+	/*
+	 * Undo C1010-33 specific settings.
+	 */
+	if (np->features & FE_C10) {
+		OUTW(np, nc_sien, 0);
+		OUTB(np, nc_istat1, 0);
+	}
+ 	/*
+ 	 * set prescaler to divide by whatever 0 means
+ 	 * 0 ought to choose divide by 2, but appears
+ 	 * to set divide by 3.5 mode in my 53c810 ...
+ 	 */
+ 	OUTB(np, nc_scntl3, 0);
+
+  	/*
+ 	 * adjust for prescaler, and convert into KHz 
+  	 */
+	f = ms ? ((1 << gen) * (4340*4)) / ms : 0;
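+	/*
+	 * Illustrative numbers: with gen = 8 and a measured delay of
+	 * about 111 quarter-milliseconds (~28 ms), the formula above
+	 * gives f = (256 * 17360) / 111 ~ 40000 kHz, i.e. a 40 MHz
+	 * SCSI clock.
+	 */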
+
+	/*
+	 * The C1010-33 result is biased by a factor 
+	 * of 2/3 compared to earlier chips.
+	 */
+	if (np->features & FE_C10)
+		f = (f * 2) / 3;
+
+	if (sym_verbose >= 2)
+		printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
+			sym_name(np), gen, ms/4, f);
+
+	return f;
+}
+
+static unsigned sym_getfreq (struct sym_hcb *np)
+{
+	u_int f1, f2;
+	int gen = 8;
+
+	getfreq (np, gen);	/* throw away first result */
+	f1 = getfreq (np, gen);
+	f2 = getfreq (np, gen);
+	if (f1 > f2) f1 = f2;		/* trust lower result	*/
+	return f1;
+}
+
+/*
+ *  Get/probe chip SCSI clock frequency
+ */
+static void sym_getclock (struct sym_hcb *np, int mult)
+{
+	unsigned char scntl3 = np->sv_scntl3;
+	unsigned char stest1 = np->sv_stest1;
+	unsigned f1;
+
+	np->multiplier = 1;
+	f1 = 40000;
+	/*
+	 *  True with 875/895/896/895A with clock multiplier selected
+	 */
+	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
+		if (sym_verbose >= 2)
+			printf ("%s: clock multiplier found\n", sym_name(np));
+		np->multiplier = mult;
+	}
+
+	/*
+	 *  If multiplier not found or scntl3 not 7,5,3,
+	 *  reset chip and get frequency from general purpose timer.
+	 *  Otherwise trust scntl3 BIOS setting.
+	 */
+	if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
+		OUTB(np, nc_stest1, 0);		/* make sure doubler is OFF */
+		f1 = sym_getfreq (np);
+
+		if (sym_verbose)
+			printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);
+
+		if	(f1 <	45000)		f1 =  40000;
+		else if (f1 <	55000)		f1 =  50000;
+		else				f1 =  80000;
+
+		if (f1 < 80000 && mult > 1) {
+			if (sym_verbose >= 2)
+				printf ("%s: clock multiplier assumed\n",
+					sym_name(np));
+			np->multiplier	= mult;
+		}
+	} else {
+		if	((scntl3 & 7) == 3)	f1 =  40000;
+		else if	((scntl3 & 7) == 5)	f1 =  80000;
+		else 				f1 = 160000;
+
+		f1 /= np->multiplier;
+	}
+
+	/*
+	 *  Compute controller synchronous parameters.
+	 */
+	f1		*= np->multiplier;
+	np->clock_khz	= f1;
+}
+
+/*
+ *  Get/probe PCI clock frequency
+ */
+static int sym_getpciclock (struct sym_hcb *np)
+{
+	int f = 0;
+
+	/*
+	 *  For now, we only need to know about the actual 
+	 *  PCI BUS clock frequency for C1010-66 chips.
+	 */
+#if 1
+	if (np->features & FE_66MHZ) {
+#else
+	if (1) {
+#endif
+		OUTB(np, nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */
+		f = sym_getfreq(np);
+		OUTB(np, nc_stest1, 0);
+	}
+	np->pciclk_khz = f;
+
+	return f;
+}
+
+/*
+ *  SYMBIOS chip clock divisor table.
+ *
+ *  Divisors are multiplied by 10,000,000 in order to make 
+ *  calculations simpler.
+ */
+#define _5M 5000000
+static u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
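+
+/*
+ *  The seven entries above correspond to effective divisors of
+ *  1, 1.5, 2, 3, 4, 6 and 8, each scaled by 10,000,000.
+ */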
+
+/*
+ *  Get clock factor and sync divisor for a given 
+ *  synchronous factor period.
+ */
+static int 
+sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fakp)
+{
+	u32	clk = np->clock_khz;	/* SCSI clock frequency in kHz	*/
+	int	div = np->clock_divn;	/* Number of divisors supported	*/
+	u32	fak;			/* Sync factor in sxfer		*/
+	u32	per;			/* Period in tenths of ns	*/
+	u32	kpc;			/* (per * clk)			*/
+	int	ret;
+
+	/*
+	 *  Compute the synchronous period in tenths of nano-seconds
+	 */
+	if (dt && sfac <= 9)	per = 125;
+	else if	(sfac <= 10)	per = 250;
+	else if	(sfac == 11)	per = 303;
+	else if	(sfac == 12)	per = 500;
+	else			per = 40 * sfac;
+	ret = per;
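+	/*
+	 * For example: sfac 10 gives per = 250 (25 ns, Fast-40),
+	 * sfac 12 gives per = 500 (50 ns, Fast-20) and sfac 25 gives
+	 * per = 40 * 25 = 1000 (100 ns, Fast-10). With dt set, sfac 9
+	 * gives per = 125 (12.5 ns, Fast-80 DT).
+	 */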
+
+	kpc = per * clk;
+	if (dt)
+		kpc <<= 1;
+
+	/*
+	 *  For earliest C10 revision 0, we cannot use extra 
+	 *  clocks for the setting of the SCSI clocking.
+	 *  Note that this limits the lowest sync data transfer 
+	 *  to 5 Mega-transfers per second and may result in
+	 *  using higher clock divisors.
+	 */
+#if 1
+	if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) {
+		/*
+		 *  Look for the lowest clock divisor that allows an 
+		 *  output speed not faster than the period.
+		 */
+		while (div > 0) {
+			--div;
+			if (kpc > (div_10M[div] << 2)) {
+				++div;
+				break;
+			}
+		}
+		fak = 0;			/* No extra clocks */
+		if (div == np->clock_divn) {	/* Are we too fast ? */
+			ret = -1;
+		}
+		*divp = div;
+		*fakp = fak;
+		return ret;
+	}
+#endif
+
+	/*
+	 *  Look for the greatest clock divisor that allows an 
+	 *  input speed faster than the period.
+	 */
+	while (div-- > 0)
+		if (kpc >= (div_10M[div] << 2)) break;
+
+	/*
+	 *  Calculate the lowest clock factor that allows an output 
+	 *  speed not faster than the period, and the max output speed.
+	 *  If fak >= 1 we will set both XCLKH_ST and XCLKH_DT.
+	 *  If fak >= 2 we will also set XCLKS_ST and XCLKS_DT.
+	 */
+	if (dt) {
+		fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2;
+		/* ret = ((2+fak)*div_10M[div])/np->clock_khz; */
+	} else {
+		fak = (kpc - 1) / div_10M[div] + 1 - 4;
+		/* ret = ((4+fak)*div_10M[div])/np->clock_khz; */
+	}
+
+	/*
+	 *  Check against our hardware limits, or bugs :).
+	 */
+	if (fak > 2) {
+		fak = 2;
+		ret = -1;
+	}
+
+	/*
+	 *  Compute and return sync parameters.
+	 */
+	*divp = div;
+	*fakp = fak;
+
+	return ret;
+}
+
+/*
+ *  SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64,
+ *  128 transfers. All chips support bursts of at least 
+ *  16 transfers. The 825A, 875 and 895 chips support bursts of up 
+ *  to 128 transfers and the 895A and 896 support bursts of up
+ *  to 64 transfers. All other chips support bursts of up to 
+ *  16 transfers.
+ *
+ *  For PCI 32 bit data transfers each transfer is a DWORD.
+ *  It is a QUADWORD (8 bytes) for PCI 64 bit data transfers.
+ *
+ *  We use log base 2 (burst length) as internal code, with 
+ *  value 0 meaning "burst disabled".
+ */
+
+/*
+ *  Burst length from burst code.
+ */
+#define burst_length(bc) (!(bc))? 0 : 1 << (bc)
+
+/*
+ *  Burst code from io register bits.
+ */
+#define burst_code(dmode, ctest4, ctest5) \
+	(ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1
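+
+/*
+ *  For example: burst code 0 means bursts disabled, code 1 means
+ *  2-transfer bursts, code 4 means 16-transfer bursts and code 7
+ *  means 128-transfer bursts (burst_length(bc) == 1 << (bc)).
+ */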
+
+/*
+ *  Set initial io register bits from burst code.
+ */
+static __inline void sym_init_burst(struct sym_hcb *np, u_char bc)
+{
+	np->rv_ctest4	&= ~0x80;
+	np->rv_dmode	&= ~(0x3 << 6);
+	np->rv_ctest5	&= ~0x4;
+
+	if (!bc) {
+		np->rv_ctest4	|= 0x80;
+	}
+	else {
+		--bc;
+		np->rv_dmode	|= ((bc & 0x3) << 6);
+		np->rv_ctest5	|= (bc & 0x4);
+	}
+}
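+
+/*
+ *  For example, burst code 5 (32-transfer bursts) is stored as
+ *  bc - 1 = 4, i.e. DMODE bits 7:6 = 0 and CTEST5 bit 2 set;
+ *  burst_code() then reads it back as 0 + 4 + 1 = 5.
+ */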
+
+
+/*
+ * Print out the list of targets that have some flag disabled by the user.
+ */
+static void sym_print_targets_flag(struct sym_hcb *np, int mask, char *msg)
+{
+	int cnt;
+	int i;
+
+	for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
+		if (i == np->myaddr)
+			continue;
+		if (np->target[i].usrflags & mask) {
+			if (!cnt++)
+				printf("%s: %s disabled for targets",
+					sym_name(np), msg);
+			printf(" %d", i);
+		}
+	}
+	if (cnt)
+		printf(".\n");
+}
+
+/*
+ *  Save initial settings of some IO registers.
+ *  Assumed to have been set by the BIOS.
+ *  We cannot reset the chip prior to reading the 
+ *  IO registers, since information would be lost.
+ *  Since the SCRIPTS processor may be running, this 
+ *  is not safe on paper, but it seems to work quite 
+ *  well. :)
+ */
+static void sym_save_initial_setting (struct sym_hcb *np)
+{
+	np->sv_scntl0	= INB(np, nc_scntl0) & 0x0a;
+	np->sv_scntl3	= INB(np, nc_scntl3) & 0x07;
+	np->sv_dmode	= INB(np, nc_dmode)  & 0xce;
+	np->sv_dcntl	= INB(np, nc_dcntl)  & 0xa8;
+	np->sv_ctest3	= INB(np, nc_ctest3) & 0x01;
+	np->sv_ctest4	= INB(np, nc_ctest4) & 0x80;
+	np->sv_gpcntl	= INB(np, nc_gpcntl);
+	np->sv_stest1	= INB(np, nc_stest1);
+	np->sv_stest2	= INB(np, nc_stest2) & 0x20;
+	np->sv_stest4	= INB(np, nc_stest4);
+	if (np->features & FE_C10) {	/* Always large DMA fifo + ultra3 */
+		np->sv_scntl4	= INB(np, nc_scntl4);
+		np->sv_ctest5	= INB(np, nc_ctest5) & 0x04;
+	}
+	else
+		np->sv_ctest5	= INB(np, nc_ctest5) & 0x24;
+}
+
+/*
+ *  Prepare io register values used by sym_start_up() 
+ *  according to selected and supported features.
+ */
+static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram)
+{
+	u_char	burst_max;
+	u32	period;
+	int i;
+
+	/*
+	 *  Wide ?
+	 */
+	np->maxwide	= (np->features & FE_WIDE)? 1 : 0;
+
+	/*
+	 *  Guess the frequency of the chip's clock.
+	 */
+	if	(np->features & (FE_ULTRA3 | FE_ULTRA2))
+		np->clock_khz = 160000;
+	else if	(np->features & FE_ULTRA)
+		np->clock_khz = 80000;
+	else
+		np->clock_khz = 40000;
+
+	/*
+	 *  Get the clock multiplier factor.
+ 	 */
+	if	(np->features & FE_QUAD)
+		np->multiplier	= 4;
+	else if	(np->features & FE_DBLR)
+		np->multiplier	= 2;
+	else
+		np->multiplier	= 1;
+
+	/*
+	 *  Measure the SCSI clock frequency for chips 
+	 *  where it may vary from the assumed one.
+	 */
+	if (np->features & FE_VARCLK)
+		sym_getclock(np, np->multiplier);
+
+	/*
+	 * Divisor to be used for async (timer pre-scaler).
+	 */
+	i = np->clock_divn - 1;
+	while (--i >= 0) {
+		if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) {
+			++i;
+			break;
+		}
+	}
+	np->rv_scntl3 = i+1;
+
+	/*
+	 * The C1010 uses hardwired divisors for async.
+	 * So we just throw away the async divisor. :-)
+	 */
+	if (np->features & FE_C10)
+		np->rv_scntl3 = 0;
+
+	/*
+	 * Minimum synchronous period factor supported by the chip.
+	 * Btw, 'period' is in tenths of nanoseconds.
+	 */
+	period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
+
+	if	(period <= 250)		np->minsync = 10;
+	else if	(period <= 303)		np->minsync = 11;
+	else if	(period <= 500)		np->minsync = 12;
+	else				np->minsync = (period + 40 - 1) / 40;
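+	/*
+	 * For example, with a 160000 kHz clock:
+	 * period = (4 * 10000000 + 160000 - 1) / 160000 = 250,
+	 * hence minsync = 10 (25 ns). A 40000 kHz clock gives
+	 * period = 1000 and minsync = 25 (100 ns).
+	 */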
+
+	/*
+	 * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
+	 */
+	if	(np->minsync < 25 &&
+		 !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
+		np->minsync = 25;
+	else if	(np->minsync < 12 &&
+		 !(np->features & (FE_ULTRA2|FE_ULTRA3)))
+		np->minsync = 12;
+
+	/*
+	 * Maximum synchronous period factor supported by the chip.
+	 */
+	period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
+	np->maxsync = period > 2540 ? 254 : period / 10;
+
+	/*
+	 * If chip is a C1010, guess the sync limits in DT mode.
+	 */
+	if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) {
+		if (np->clock_khz == 160000) {
+			np->minsync_dt = 9;
+			np->maxsync_dt = 50;
+			np->maxoffs_dt = nvram->type ? 62 : 31;
+		}
+	}
+	
+	/*
+	 *  64 bit addressing  (895A/896/1010) ?
+	 */
+	if (np->features & FE_DAC) {
+#if   SYM_CONF_DMA_ADDRESSING_MODE == 0
+		np->rv_ccntl1	|= (DDAC);
+#elif SYM_CONF_DMA_ADDRESSING_MODE == 1
+		if (!np->use_dac)
+			np->rv_ccntl1	|= (DDAC);
+		else
+			np->rv_ccntl1	|= (XTIMOD | EXTIBMV);
+#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
+		if (!np->use_dac)
+			np->rv_ccntl1	|= (DDAC);
+		else
+			np->rv_ccntl1	|= (0 | EXTIBMV);
+#endif
+	}
+
+	/*
+	 *  Phase mismatch handled by SCRIPTS (895A/896/1010) ?
+  	 */
+	if (np->features & FE_NOPM)
+		np->rv_ccntl0	|= (ENPMJ);
+
+ 	/*
+	 *  C1010-33 Errata: Part Number:609-039638 (rev. 1) is fixed.
+	 *  In dual channel mode, contention occurs if internal cycles
+	 *  are used. Disable internal cycles.
+	 */
+	if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_33 &&
+	    np->revision_id < 0x1)
+		np->rv_ccntl0	|=  DILS;
+
+	/*
+	 *  Select burst length (dwords)
+	 */
+	burst_max	= SYM_SETUP_BURST_ORDER;
+	if (burst_max == 255)
+		burst_max = burst_code(np->sv_dmode, np->sv_ctest4,
+				       np->sv_ctest5);
+	if (burst_max > 7)
+		burst_max = 7;
+	if (burst_max > np->maxburst)
+		burst_max = np->maxburst;
+
+	/*
+	 *  DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2.
+	 *  This chip and the 860 Rev 1 may wrongly use PCI cache line 
+	 *  based transactions on LOAD/STORE instructions. So we have 
+	 *  to prevent these chips from using such PCI transactions in 
+	 *  this driver. The generic ncr driver that does not use 
+	 *  LOAD/STORE instructions does not need this work-around.
+	 */
+	if ((np->device_id == PCI_DEVICE_ID_NCR_53C810 &&
+	     np->revision_id >= 0x10 && np->revision_id <= 0x11) ||
+	    (np->device_id == PCI_DEVICE_ID_NCR_53C860 &&
+	     np->revision_id <= 0x1))
+		np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);
+
+	/*
+	 *  Select all supported special features.
+	 *  If we are using on-board RAM for scripts, prefetch (PFEN) 
+	 *  does not help, but burst op fetch (BOF) does.
+	 *  Disabling PFEN makes sure BOF will be used.
+	 */
+	if (np->features & FE_ERL)
+		np->rv_dmode	|= ERL;		/* Enable Read Line */
+	if (np->features & FE_BOF)
+		np->rv_dmode	|= BOF;		/* Burst Opcode Fetch */
+	if (np->features & FE_ERMP)
+		np->rv_dmode	|= ERMP;	/* Enable Read Multiple */
+#if 1
+	if ((np->features & FE_PFEN) && !np->ram_ba)
+#else
+	if (np->features & FE_PFEN)
+#endif
+		np->rv_dcntl	|= PFEN;	/* Prefetch Enable */
+	if (np->features & FE_CLSE)
+		np->rv_dcntl	|= CLSE;	/* Cache Line Size Enable */
+	if (np->features & FE_WRIE)
+		np->rv_ctest3	|= WRIE;	/* Write and Invalidate */
+	if (np->features & FE_DFS)
+		np->rv_ctest5	|= DFS;		/* Dma Fifo Size */
+
+	/*
+	 *  Select some other settings.
+	 */
+	np->rv_ctest4	|= MPEE; /* Master parity checking */
+	np->rv_scntl0	|= 0x0a; /*  full arb., ena parity, par->ATN  */
+
+	/*
+	 *  Get parity checking, host ID and verbose mode from NVRAM
+	 */
+	np->myaddr = 255;
+	sym_nvram_setup_host(shost, np, nvram);
+
+	/*
+	 *  Get SCSI addr of host adapter (set by bios?).
+	 */
+	if (np->myaddr == 255) {
+		np->myaddr = INB(np, nc_scid) & 0x07;
+		if (!np->myaddr)
+			np->myaddr = SYM_SETUP_HOST_ID;
+	}
+
+	/*
+	 *  Prepare initial io register bits for burst length
+	 */
+	sym_init_burst(np, burst_max);
+
+	/*
+	 *  Set SCSI BUS mode.
+	 *  - LVD capable chips (895/895A/896/1010) report the 
+	 *    current BUS mode through the STEST4 IO register.
+	 *  - For previous generation chips (825/825A/875), 
+	 *    the user has to tell us how to check against HVD, 
+	 *    since a 100% safe algorithm is not possible.
+	 */
+	np->scsi_mode = SMODE_SE;
+	if (np->features & (FE_ULTRA2|FE_ULTRA3))
+		np->scsi_mode = (np->sv_stest4 & SMODE);
+	else if	(np->features & FE_DIFF) {
+		if (SYM_SETUP_SCSI_DIFF == 1) {
+			if (np->sv_scntl3) {
+				if (np->sv_stest2 & 0x20)
+					np->scsi_mode = SMODE_HVD;
+			}
+			else if (nvram->type == SYM_SYMBIOS_NVRAM) {
+				if (!(INB(np, nc_gpreg) & 0x08))
+					np->scsi_mode = SMODE_HVD;
+			}
+		}
+		else if	(SYM_SETUP_SCSI_DIFF == 2)
+			np->scsi_mode = SMODE_HVD;
+	}
+	if (np->scsi_mode == SMODE_HVD)
+		np->rv_stest2 |= 0x20;
+
+	/*
+	 *  Set LED support from SCRIPTS.
+	 *  Ignore this feature for boards known to use a 
+	 *  specific GPIO wiring and for the 895A, 896 
+	 *  and 1010 that drive the LED directly.
+	 */
+	if ((SYM_SETUP_SCSI_LED || 
+	     (nvram->type == SYM_SYMBIOS_NVRAM ||
+	      (nvram->type == SYM_TEKRAM_NVRAM &&
+	       np->device_id == PCI_DEVICE_ID_NCR_53C895))) &&
+	    !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
+		np->features |= FE_LED0;
+
+	/*
+	 *  Set irq mode.
+	 */
+	switch(SYM_SETUP_IRQ_MODE & 3) {
+	case 2:
+		np->rv_dcntl	|= IRQM;
+		break;
+	case 1:
+		np->rv_dcntl	|= (np->sv_dcntl & IRQM);
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 *  Configure targets according to driver setup.
+	 *  If NVRAM is present, get the target setup from NVRAM.
+	 */
+	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
+		struct sym_tcb *tp = &np->target[i];
+
+		tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
+		tp->usrtags = SYM_SETUP_MAX_TAG;
+
+		sym_nvram_setup_target(np, i, nvram);
+
+		if (!tp->usrtags)
+			tp->usrflags &= ~SYM_TAGS_ENABLED;
+	}
+
+	/*
+	 *  Let user know about the settings.
+	 */
+	printf("%s: %s, ID %d, Fast-%d, %s, %s\n", sym_name(np),
+		sym_nvram_type(nvram), np->myaddr,
+		(np->features & FE_ULTRA3) ? 80 : 
+		(np->features & FE_ULTRA2) ? 40 : 
+		(np->features & FE_ULTRA)  ? 20 : 10,
+		sym_scsi_bus_mode(np->scsi_mode),
+		(np->rv_scntl0 & 0xa)	? "parity checking" : "NO parity");
+	/*
+	 *  Tell the user more on demand.
+	 */
+	if (sym_verbose) {
+		printf("%s: %s IRQ line driver%s\n",
+			sym_name(np),
+			np->rv_dcntl & IRQM ? "totem pole" : "open drain",
+			np->ram_ba ? ", using on-chip SRAM" : "");
+		printf("%s: using %s firmware.\n", sym_name(np), np->fw_name);
+		if (np->features & FE_NOPM)
+			printf("%s: handling phase mismatch from SCRIPTS.\n", 
+			       sym_name(np));
+	}
+	/*
+	 *  And still more.
+	 */
+	if (sym_verbose >= 2) {
+		printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+			sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
+			np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);
+
+		printf ("%s: final   SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+			"(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+			sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
+			np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
+	}
+	/*
+	 *  Let the user know about targets that have some disable flags set.
+	 */
+	sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT");
+	if (sym_verbose)
+		sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED,
+				       "SCAN FOR LUNS");
+
+	return 0;
+}
+
+/*
+ *  Test the pci bus snoop logic :-(
+ *
+ *  Has to be called with interrupts disabled.
+ */
+#ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED
+static int sym_regtest (struct sym_hcb *np)
+{
+	register volatile u32 data;
+	/*
+	 *  Chip registers may NOT be cached.
+	 *  Write 0xffffffff to a read-only register area
+	 *  and try to read it back.
+	 */
+	data = 0xffffffff;
+	OUTL(np, nc_dstat, data);
+	data = INL(np, nc_dstat);
+#if 1
+	if (data == 0xffffffff) {
+#else
+	if ((data & 0xe2f0fffd) != 0x02000080) {
+#endif
+		printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
+			(unsigned) data);
+		return (0x10);
+	}
+	return (0);
+}
+#endif
+
+static int sym_snooptest (struct sym_hcb *np)
+{
+	u32	sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
+	int	i, err=0;
+#ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED
+	err |= sym_regtest (np);
+	if (err) return (err);
+#endif
+restart_test:
+	/*
+	 *  Enable Master Parity Checking as we intend 
+	 *  to enable it for normal operations.
+	 */
+	OUTB(np, nc_ctest4, (np->rv_ctest4 & MPEE));
+	/*
+	 *  init
+	 */
+	pc  = SCRIPTZ_BA(np, snooptest);
+	host_wr = 1;
+	sym_wr  = 2;
+	/*
+	 *  Set memory and register.
+	 */
+	np->scratch = cpu_to_scr(host_wr);
+	OUTL(np, nc_temp, sym_wr);
+	/*
+	 *  Start script (exchange values)
+	 */
+	OUTL(np, nc_dsa, np->hcb_ba);
+	OUTL_DSP(np, pc);
+	/*
+	 *  Wait 'til done (with timeout)
+	 */
+	for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
+		if (INB(np, nc_istat) & (INTF|SIP|DIP))
+			break;
+	if (i>=SYM_SNOOP_TIMEOUT) {
+		printf ("CACHE TEST FAILED: timeout.\n");
+		return (0x20);
+	}
+	/*
+	 *  Check for fatal DMA errors.
+	 */
+	dstat = INB(np, nc_dstat);
+#if 1	/* Band-aid for broken hardware that fails PCI parity */
+	if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) {
+		printf ("%s: PCI DATA PARITY ERROR DETECTED - "
+			"DISABLING MASTER DATA PARITY CHECKING.\n",
+			sym_name(np));
+		np->rv_ctest4 &= ~MPEE;
+		goto restart_test;
+	}
+#endif
+	if (dstat & (MDPE|BF|IID)) {
+		printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat);
+		return (0x80);
+	}
+	/*
+	 *  Save termination position.
+	 */
+	pc = INL(np, nc_dsp);
+	/*
+	 *  Read memory and register.
+	 */
+	host_rd = scr_to_cpu(np->scratch);
+	sym_rd  = INL(np, nc_scratcha);
+	sym_bk  = INL(np, nc_temp);
+	/*
+	 *  Check termination position.
+	 */
+	if (pc != SCRIPTZ_BA(np, snoopend)+8) {
+		printf ("CACHE TEST FAILED: script execution failed.\n");
+		printf ("start=%08lx, pc=%08lx, end=%08lx\n", 
+			(u_long) SCRIPTZ_BA(np, snooptest), (u_long) pc,
+			(u_long) SCRIPTZ_BA(np, snoopend) +8);
+		return (0x40);
+	}
+	/*
+	 *  Show results.
+	 */
+	if (host_wr != sym_rd) {
+		printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n",
+			(int) host_wr, (int) sym_rd);
+		err |= 1;
+	}
+	if (host_rd != sym_wr) {
+		printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n",
+			(int) sym_wr, (int) host_rd);
+		err |= 2;
+	}
+	if (sym_bk != sym_wr) {
+		printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n",
+			(int) sym_wr, (int) sym_bk);
+		err |= 4;
+	}
+
+	return (err);
+}
+
+/*
+ *  log message for real hard errors
+ *
+ *  sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sx/s3/s4) @ name (dsp:dbc).
+ *  	      reg: r0 r1 r2 r3 r4 r5 r6 ..... rf.
+ *
+ *  exception register:
+ *  	ds:	dstat
+ *  	si:	sist
+ *
+ *  SCSI bus lines:
+ *  	so:	control lines as driven by chip.
+ *  	si:	control lines as seen by chip.
+ *  	sd:	scsi data lines as seen by chip.
+ *
+ *  wide/fastmode:
+ *  	sx:	sxfer  (see the manual)
+ *  	s3:	scntl3 (see the manual)
+ *  	s4:	scntl4 (see the manual)
+ *
+ *  current script command:
+ *  	dsp:	script address (relative to start of script).
+ *  	dbc:	first word of script command.
+ *
+ *  First 24 registers of the chip:
+ *  	r0..rf
+ */
+static void sym_log_hard_error(struct sym_hcb *np, u_short sist, u_char dstat)
+{
+	u32	dsp;
+	int	script_ofs;
+	int	script_size;
+	char	*script_name;
+	u_char	*script_base;
+	int	i;
+
+	dsp	= INL(np, nc_dsp);
+
+	if	(dsp > np->scripta_ba &&
+		 dsp <= np->scripta_ba + np->scripta_sz) {
+		script_ofs	= dsp - np->scripta_ba;
+		script_size	= np->scripta_sz;
+		script_base	= (u_char *) np->scripta0;
+		script_name	= "scripta";
+	}
+	else if (np->scriptb_ba < dsp && 
+		 dsp <= np->scriptb_ba + np->scriptb_sz) {
+		script_ofs	= dsp - np->scriptb_ba;
+		script_size	= np->scriptb_sz;
+		script_base	= (u_char *) np->scriptb0;
+		script_name	= "scriptb";
+	} else {
+		script_ofs	= dsp;
+		script_size	= 0;
+		script_base	= NULL;
+		script_name	= "mem";
+	}
+
+	printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x/%x) @ (%s %x:%08x).\n",
+		sym_name(np), (unsigned)INB(np, nc_sdid)&0x0f, dstat, sist,
+		(unsigned)INB(np, nc_socl), (unsigned)INB(np, nc_sbcl),
+		(unsigned)INB(np, nc_sbdl), (unsigned)INB(np, nc_sxfer),
+		(unsigned)INB(np, nc_scntl3),
+		(np->features & FE_C10) ?  (unsigned)INB(np, nc_scntl4) : 0,
+		script_name, script_ofs,   (unsigned)INL(np, nc_dbc));
+
+	if (((script_ofs & 3) == 0) &&
+	    (unsigned)script_ofs < script_size) {
+		printf ("%s: script cmd = %08x\n", sym_name(np),
+			scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
+	}
+
+        printf ("%s: regdump:", sym_name(np));
+        for (i=0; i<24;i++)
+            printf (" %02x", (unsigned)INB_OFF(np, i));
+        printf (".\n");
+
+	/*
+	 *  PCI BUS error.
+	 */
+	if (dstat & (MDPE|BF))
+		sym_log_bus_error(np);
+}
+
+static struct sym_chip sym_dev_table[] = {
+ {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, 64,
+ FE_ERL}
+ ,
+#ifdef SYM_DEBUG_GENERIC_SUPPORT
+ {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4,  8, 4, 1,
+ FE_BOF}
+ ,
+#else
+ {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4,  8, 4, 1,
+ FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF}
+ ,
+#endif
+ {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4,  8, 4, 64,
+ FE_BOF|FE_ERL}
+ ,
+ {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 6,  8, 4, 64,
+ FE_WIDE|FE_BOF|FE_ERL|FE_DIFF}
+ ,
+ {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6,  8, 4, 2,
+ FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF}
+ ,
+ {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4,  8, 5, 1,
+ FE_ULTRA|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN}
+ ,
+ {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, 2,
+ FE_WIDE|FE_ULTRA|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_DIFF|FE_VARCLK}
+ ,
+ {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, 2,
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_DIFF|FE_VARCLK}
+ ,
+ {PCI_DEVICE_ID_NCR_53C875J, 0xff, "875J", 6, 16, 5, 2,
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_DIFF|FE_VARCLK}
+ ,
+ {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, 2,
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_DIFF|FE_VARCLK}
+ ,
+#ifdef SYM_DEBUG_GENERIC_SUPPORT
+ {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|
+ FE_RAM|FE_LCKFRQ}
+ ,
+#else
+ {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_LCKFRQ}
+ ,
+#endif
+ {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, 4,
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
+ ,
+ {PCI_DEVICE_ID_LSI_53C895A, 0xff, "895a", 6, 31, 7, 4,
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
+ ,
+ {PCI_DEVICE_ID_LSI_53C875A, 0xff, "875a", 6, 31, 7, 4,
+ FE_WIDE|FE_ULTRA|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
+ ,
+ {PCI_DEVICE_ID_LSI_53C1010_33, 0x00, "1010-33", 6, 31, 7, 8,
+ FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
+ FE_C10}
+ ,
+ {PCI_DEVICE_ID_LSI_53C1010_33, 0xff, "1010-33", 6, 31, 7, 8,
+ FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
+ FE_C10|FE_U3EN}
+ ,
+ {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010-66", 6, 31, 7, 8,
+ FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC|
+ FE_C10|FE_U3EN}
+ ,
+ {PCI_DEVICE_ID_LSI_53C1510, 0xff, "1510d", 6, 31, 7, 4,
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
+ FE_RAM|FE_IO256|FE_LEDC}
+};
+
+#define sym_num_devs \
+	(sizeof(sym_dev_table) / sizeof(sym_dev_table[0]))
+
+/*
+ *  Look up the chip table.
+ *
+ *  Return a pointer to the chip entry if found, 
+ *  NULL otherwise.
+ */
+struct sym_chip *
+sym_lookup_chip_table (u_short device_id, u_char revision)
+{
+	struct	sym_chip *chip;
+	int	i;
+
+	for (i = 0; i < sym_num_devs; i++) {
+		chip = &sym_dev_table[i];
+		if (device_id != chip->device_id)
+			continue;
+		if (revision > chip->revision_id)
+			continue;
+		return chip;
+	}
+
+	return NULL;
+}
+
+#if SYM_CONF_DMA_ADDRESSING_MODE == 2
+/*
+ *  Look up the 64 bit DMA segment map.
+ *  This is only used if the direct mapping 
+ *  has been unsuccessful.
+ */
+int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s)
+{
+	int i;
+
+	if (!np->use_dac)
+		goto weird;
+
+	/* Look up existing mappings */
+	for (i = SYM_DMAP_SIZE-1; i > 0; i--) {
+		if (h == np->dmap_bah[i])
+			return i;
+	}
+	/* If direct mapping is free, get it */
+	if (!np->dmap_bah[s])
+		goto new;
+	/* Collision -> lookup free mappings */
+	for (s = SYM_DMAP_SIZE-1; s > 0; s--) {
+		if (!np->dmap_bah[s])
+			goto new;
+	}
+weird:
+	panic("sym: ran out of 64 bit DMA segment registers");
+	return -1;
+new:
+	np->dmap_bah[s] = h;
+	np->dmap_dirty = 1;
+	return s;
+}
+
+/*
+ *  Update IO registers scratch C..R so they will be 
+ *  in sync with queued CCB expectations.
+ */
+static void sym_update_dmap_regs(struct sym_hcb *np)
+{
+	int o, i;
+
+	if (!np->dmap_dirty)
+		return;
+	o = offsetof(struct sym_reg, nc_scrx[0]);
+	for (i = 0; i < SYM_DMAP_SIZE; i++) {
+		OUTL_OFF(np, o, np->dmap_bah[i]);
+		o += 4;
+	}
+	np->dmap_dirty = 0;
+}
+#endif
+
+/* Enforce all the fiddly SPI rules and the chip limitations */
+static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget,
+		struct sym_trans *goal)
+{
+	if (!spi_support_wide(starget))
+		goal->width = 0;
+
+	if (!spi_support_sync(starget)) {
+		goal->iu = 0;
+		goal->dt = 0;
+		goal->qas = 0;
+		goal->period = 0;
+		goal->offset = 0;
+		return;
+	}
+
+	if (spi_support_dt(starget)) {
+		if (spi_support_dt_only(starget))
+			goal->dt = 1;
+
+		if (goal->offset == 0)
+			goal->dt = 0;
+	} else {
+		goal->dt = 0;
+	}
+
+	/* Some targets fail to properly negotiate DT in SE mode */
+	if ((np->scsi_mode != SMODE_LVD) || !(np->features & FE_U3EN))
+		goal->dt = 0;
+
+	if (goal->dt) {
+		/* all DT transfers must be wide */
+		goal->width = 1;
+		if (goal->offset > np->maxoffs_dt)
+			goal->offset = np->maxoffs_dt;
+		if (goal->period < np->minsync_dt)
+			goal->period = np->minsync_dt;
+		if (goal->period > np->maxsync_dt)
+			goal->period = np->maxsync_dt;
+	} else {
+		goal->iu = goal->qas = 0;
+		if (goal->offset > np->maxoffs)
+			goal->offset = np->maxoffs;
+		if (goal->period < np->minsync)
+			goal->period = np->minsync;
+		if (goal->period > np->maxsync)
+			goal->period = np->maxsync;
+	}
+}
+
+/*
+ *  Prepare the next negotiation message if needed.
+ *
+ *  Fill in the part of the message buffer that contains the 
+ *  negotiation and the nego_status field of the CCB.
+ *  Returns the size of the message in bytes.
+ */
+static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr)
+{
+	struct sym_tcb *tp = &np->target[cp->target];
+	struct scsi_target *starget = tp->sdev->sdev_target;
+	struct sym_trans *goal = &tp->tgoal;
+	int msglen = 0;
+	int nego;
+
+	sym_check_goals(np, starget, goal);
+
+	/*
+	 * Many devices implement PPR in a buggy way, so only use it if we
+	 * really want to.
+	 */
+	if (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)) {
+		nego = NS_PPR;
+	} else if (spi_width(starget) != goal->width) {
+		nego = NS_WIDE;
+	} else if (spi_period(starget) != goal->period ||
+		   spi_offset(starget) != goal->offset) {
+		nego = NS_SYNC;
+	} else {
+		goal->check_nego = 0;
+		nego = 0;
+	}
+
+	switch (nego) {
+	case NS_SYNC:
+		msgptr[msglen++] = M_EXTENDED;
+		msgptr[msglen++] = 3;
+		msgptr[msglen++] = M_X_SYNC_REQ;
+		msgptr[msglen++] = goal->period;
+		msgptr[msglen++] = goal->offset;
+		break;
+	case NS_WIDE:
+		msgptr[msglen++] = M_EXTENDED;
+		msgptr[msglen++] = 2;
+		msgptr[msglen++] = M_X_WIDE_REQ;
+		msgptr[msglen++] = goal->width;
+		break;
+	case NS_PPR:
+		msgptr[msglen++] = M_EXTENDED;
+		msgptr[msglen++] = 6;
+		msgptr[msglen++] = M_X_PPR_REQ;
+		msgptr[msglen++] = goal->period;
+		msgptr[msglen++] = 0;
+		msgptr[msglen++] = goal->offset;
+		msgptr[msglen++] = goal->width;
+		msgptr[msglen++] = (goal->iu ? PPR_OPT_IU : 0) |
+					(goal->dt ? PPR_OPT_DT : 0) |
+					(goal->qas ? PPR_OPT_QAS : 0);
+		break;
+	}
+
+	cp->nego_status = nego;
+
+	if (nego) {
+		tp->nego_cp = cp; /* Keep track that a nego will be performed */
+		if (DEBUG_FLAGS & DEBUG_NEGO) {
+			sym_print_nego_msg(np, cp->target, 
+					  nego == NS_SYNC ? "sync msgout" :
+					  nego == NS_WIDE ? "wide msgout" :
+					  "ppr msgout", msgptr);
+		}
+	}
+
+	return msglen;
+}
+
+/*
+ *  Insert a job into the start queue.
+ */
+void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp)
+{
+	u_short	qidx;
+
+#ifdef SYM_CONF_IARB_SUPPORT
+	/*
+	 *  If the previously queued CCB is not yet done, 
+	 *  set the IARB hint. The SCRIPTS will go with IARB 
+	 *  for this job when starting the previous one.
+	 *  We give devices a chance to win arbitration by 
+	 *  not using more than 'iarb_max' consecutive 
+	 *  immediate arbitrations.
+	 */
+	if (np->last_cp && np->iarb_count < np->iarb_max) {
+		np->last_cp->host_flags |= HF_HINT_IARB;
+		++np->iarb_count;
+	}
+	else
+		np->iarb_count = 0;
+	np->last_cp = cp;
+#endif
+
+#if   SYM_CONF_DMA_ADDRESSING_MODE == 2
+	/*
+	 *  Make SCRIPTS aware of the 64 bit DMA 
+	 *  segment registers not being up-to-date.
+	 */
+	if (np->dmap_dirty)
+		cp->host_xflags |= HX_DMAP_DIRTY;
+#endif
+
+	/*
+	 *  First insert the idle task and then our job.
+	 *  The memory barriers should ensure proper ordering.
+	 */
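+	/*
+	 * As set up in sym_start_up(), even slots of the start queue
+	 * hold job DSAs and odd slots hold the SCRIPTS link to the
+	 * next even slot, so the put index advances by 2 and wraps
+	 * at MAX_QUEUE*2.
+	 */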
+	qidx = np->squeueput + 2;
+	if (qidx >= MAX_QUEUE*2) qidx = 0;
+
+	np->squeue [qidx]	   = cpu_to_scr(np->idletask_ba);
+	MEMORY_WRITE_BARRIER();
+	np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);
+
+	np->squeueput = qidx;
+
+	if (DEBUG_FLAGS & DEBUG_QUEUE)
+		printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput);
+
+	/*
+	 *  Script processor may be waiting for reselect.
+	 *  Wake it up.
+	 */
+	MEMORY_WRITE_BARRIER();
+	OUTB(np, nc_istat, SIGP|np->istat_sem);
+}
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+/*
+ *  Start next ready-to-start CCBs.
+ */
+void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn)
+{
+	SYM_QUEHEAD *qp;
+	struct sym_ccb *cp;
+
+	/* 
+	 *  Paranoia, as usual. :-)
+	 */
+	assert(!lp->started_tags || !lp->started_no_tag);
+
+	/*
+	 *  Try to start as many commands as the caller asked for.
+	 *  Prevent both tagged and untagged commands from being 
+	 *  queued to the device at the same time.
+	 */
+	while (maxn--) {
+		qp = sym_remque_head(&lp->waiting_ccbq);
+		if (!qp)
+			break;
+		cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq);
+		if (cp->tag != NO_TAG) {
+			if (lp->started_no_tag ||
+			    lp->started_tags >= lp->started_max) {
+				sym_insque_head(qp, &lp->waiting_ccbq);
+				break;
+			}
+			lp->itlq_tbl[cp->tag] = cpu_to_scr(cp->ccb_ba);
+			lp->head.resel_sa =
+				cpu_to_scr(SCRIPTA_BA(np, resel_tag));
+			++lp->started_tags;
+		} else {
+			if (lp->started_no_tag || lp->started_tags) {
+				sym_insque_head(qp, &lp->waiting_ccbq);
+				break;
+			}
+			lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
+			lp->head.resel_sa =
+			      cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
+			++lp->started_no_tag;
+		}
+		cp->started = 1;
+		sym_insque_tail(qp, &lp->started_ccbq);
+		sym_put_start_queue(np, cp);
+	}
+}
+#endif /* SYM_OPT_HANDLE_DEVICE_QUEUEING */
+
+/*
+ *  The chip may have completed jobs. Look at the DONE QUEUE.
+ *
+ *  On paper, memory read barriers may be needed here to 
+ *  prevent out-of-order LOADs by the CPU from returning 
+ *  stale data prefetched before the DMA has occurred.
+ */
+static int sym_wakeup_done (struct sym_hcb *np)
+{
+	struct sym_ccb *cp;
+	int i, n;
+	u32 dsa;
+
+	n = 0;
+	i = np->dqueueget;
+
+	/* MEMORY_READ_BARRIER(); */
+	while (1) {
+		dsa = scr_to_cpu(np->dqueue[i]);
+		if (!dsa)
+			break;
+		np->dqueue[i] = 0;
+		if ((i = i+2) >= MAX_QUEUE*2)
+			i = 0;
+
+		cp = sym_ccb_from_dsa(np, dsa);
+		if (cp) {
+			MEMORY_READ_BARRIER();
+			sym_complete_ok (np, cp);
+			++n;
+		}
+		else
+			printf ("%s: bad DSA (%x) in done queue.\n",
+				sym_name(np), (u_int) dsa);
+	}
+	np->dqueueget = i;
+
+	return n;
+}
+
+/*
+ *  Complete all CCBs queued to the COMP queue.
+ *
+ *  These CCBs are assumed:
+ *  - Not to be referenced either by devices or 
+ *    SCRIPTS-related queues and data.
+ *  - To need completion with an error condition 
+ *    or requeueing.
+ *
+ *  The device queue freeze count is incremented 
+ *  for each CCB that does not prevent this.
+ *  This function is called when all CCBs involved 
+ *  in error handling/recovery have been reaped.
+ */
+static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status)
+{
+	SYM_QUEHEAD *qp;
+	struct sym_ccb *cp;
+
+	while ((qp = sym_remque_head(&np->comp_ccbq)) != 0) {
+		struct scsi_cmnd *cmd;
+		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+		sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
+		/* Leave quiet CCBs waiting for resources */
+		if (cp->host_status == HS_WAIT)
+			continue;
+		cmd = cp->cmd;
+		if (cam_status)
+			sym_set_cam_status(cmd, cam_status);
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+		if (sym_get_cam_status(cmd) == CAM_REQUEUE_REQ) {
+			struct sym_tcb *tp = &np->target[cp->target];
+			struct sym_lcb *lp = sym_lp(tp, cp->lun);
+			if (lp) {
+				sym_remque(&cp->link2_ccbq);
+				sym_insque_tail(&cp->link2_ccbq,
+				                &lp->waiting_ccbq);
+				if (cp->started) {
+					if (cp->tag != NO_TAG)
+						--lp->started_tags;
+					else
+						--lp->started_no_tag;
+				}
+			}
+			cp->started = 0;
+			continue;
+		}
+#endif
+		sym_free_ccb(np, cp);
+		sym_xpt_done(np, cmd);
+	}
+}
+
+/*
+ *  Complete all active CCBs with error.
+ *  Used on CHIP/SCSI RESET.
+ */
+static void sym_flush_busy_queue (struct sym_hcb *np, int cam_status)
+{
+	/*
+	 *  Move all active CCBs to the COMP queue 
+	 *  and flush this queue.
+	 */
+	sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
+	sym_que_init(&np->busy_ccbq);
+	sym_flush_comp_queue(np, cam_status);
+}
+
+/*
+ *  Start chip.
+ *
+ *  'reason' means:
+ *     0: initialisation.
+ *     1: SCSI BUS RESET delivered or received.
+ *     2: SCSI BUS MODE changed.
+ */
+void sym_start_up (struct sym_hcb *np, int reason)
+{
+ 	int	i;
+	u32	phys;
+
+ 	/*
+	 *  Reset chip if asked, otherwise just clear fifos.
+ 	 */
+	if (reason == 1)
+		sym_soft_reset(np);
+	else {
+		OUTB(np, nc_stest3, TE|CSF);
+		OUTONB(np, nc_ctest3, CLF);
+	}
+ 
+	/*
+	 *  Clear Start Queue
+	 */
+	phys = np->squeue_ba;
+	for (i = 0; i < MAX_QUEUE*2; i += 2) {
+		np->squeue[i]   = cpu_to_scr(np->idletask_ba);
+		np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
+	}
+	np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
+
+	/*
+	 *  Start at first entry.
+	 */
+	np->squeueput = 0;
+
+	/*
+	 *  Clear Done Queue
+	 */
+	phys = np->dqueue_ba;
+	for (i = 0; i < MAX_QUEUE*2; i += 2) {
+		np->dqueue[i]   = 0;
+		np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
+	}
+	np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
+
+	/*
+	 *  Start at first entry.
+	 */
+	np->dqueueget = 0;
+
+	/*
+	 *  Install patches in scripts.
+	 *  This also points the start and done queue pointers 
+	 *  used by SCRIPTS to their first positions.
+	 */
+	np->fw_patch(np);
+
+	/*
+	 *  Wakeup all pending jobs.
+	 */
+	sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET);
+
+	/*
+	 *  Init chip.
+	 */
+	OUTB(np, nc_istat,  0x00);			/*  Remove Reset, abort */
+	udelay(2000); /* The 895 needs time for the bus mode to settle */
+
+	OUTB(np, nc_scntl0, np->rv_scntl0 | 0xc0);
+					/*  full arb., ena parity, par->ATN  */
+	OUTB(np, nc_scntl1, 0x00);		/*  odd parity, and remove CRST!! */
+
+	sym_selectclock(np, np->rv_scntl3);	/* Select SCSI clock */
+
+	OUTB(np, nc_scid  , RRE|np->myaddr);	/* Adapter SCSI address */
+	OUTW(np, nc_respid, 1ul<<np->myaddr);	/* Id to respond to */
+	OUTB(np, nc_istat , SIGP	);		/*  Signal Process */
+	OUTB(np, nc_dmode , np->rv_dmode);		/* Burst length, dma mode */
+	OUTB(np, nc_ctest5, np->rv_ctest5);	/* Large fifo + large burst */
+
+	OUTB(np, nc_dcntl , NOCOM|np->rv_dcntl);	/* Protect SFBR */
+	OUTB(np, nc_ctest3, np->rv_ctest3);	/* Write and invalidate */
+	OUTB(np, nc_ctest4, np->rv_ctest4);	/* Master parity checking */
+
+	/* Extended Sreq/Sack filtering not supported on the C10 */
+	if (np->features & FE_C10)
+		OUTB(np, nc_stest2, np->rv_stest2);
+	else
+		OUTB(np, nc_stest2, EXT|np->rv_stest2);
+
+	OUTB(np, nc_stest3, TE);			/* TolerANT enable */
+	OUTB(np, nc_stime0, 0x0c);			/* HTH disabled  STO 0.25 sec */
+
+	/*
+	 *  For now, disable AIP generation on C1010-66.
+	 */
+	if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)
+		OUTB(np, nc_aipcntl1, DISAIP);
+
+	/*
+	 *  C1010-33 rev. 0 errata.
+	 *  Errant SGEs when in narrow mode. Write bits 4 & 5 of the
+	 *  STEST1 register to disable SGE. We probably should do 
+	 *  that from SCRIPTS for each selection/reselection, but 
+	 *  I just don't want to. :)
+	 */
+	if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_33 &&
+	    np->revision_id < 1)
+		OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30);
+
+	/*
+	 *  DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
+	 *  Disable overlapped arbitration for some dual function devices, 
+	 *  regardless of revision id (kind of a post-chip-design feature. ;-))
+	 */
+	if (np->device_id == PCI_DEVICE_ID_NCR_53C875)
+		OUTB(np, nc_ctest0, (1<<5));
+	else if (np->device_id == PCI_DEVICE_ID_NCR_53C896)
+		np->rv_ccntl0 |= DPR;
+
+	/*
+	 *  Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing 
+	 *  and/or hardware phase mismatch, since only such chips 
+	 *  seem to support those IO registers.
+	 */
+	if (np->features & (FE_DAC|FE_NOPM)) {
+		OUTB(np, nc_ccntl0, np->rv_ccntl0);
+		OUTB(np, nc_ccntl1, np->rv_ccntl1);
+	}
+
+#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
+	/*
+	 *  Set up scratch C and DRS IO registers to map the 32 bit 
+	 *  DMA address range our data structures are located in.
+	 */
+	if (np->use_dac) {
+		np->dmap_bah[0] = 0;	/* ??? */
+		OUTL(np, nc_scrx[0], np->dmap_bah[0]);
+		OUTL(np, nc_drs, np->dmap_bah[0]);
+	}
+#endif
+
+	/*
+	 *  If phase mismatch handled by scripts (895A/896/1010),
+	 *  set PM jump addresses.
+	 */
+	if (np->features & FE_NOPM) {
+		OUTL(np, nc_pmjad1, SCRIPTB_BA(np, pm_handle));
+		OUTL(np, nc_pmjad2, SCRIPTB_BA(np, pm_handle));
+	}
+
+	/*
+	 *    Enable GPIO0 pin for writing if LED support from SCRIPTS.
+	 *    Also set GPIO5 and clear GPIO6 if hardware LED control.
+	 */
+	if (np->features & FE_LED0)
+		OUTB(np, nc_gpcntl, INB(np, nc_gpcntl) & ~0x01);
+	else if (np->features & FE_LEDC)
+		OUTB(np, nc_gpcntl, (INB(np, nc_gpcntl) & ~0x41) | 0x20);
+
+	/*
+	 *      enable ints
+	 */
+	OUTW(np, nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
+	OUTB(np, nc_dien , MDPE|BF|SSI|SIR|IID);
+
+	/*
+	 *  For 895/6 enable SBMC interrupt and save current SCSI bus mode.
+	 *  Try to eat the spurious SBMC interrupt that may occur when 
+	 *  we reset the chip but not the SCSI BUS (at initialization).
+	 */
+	if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
+		OUTONW(np, nc_sien, SBMC);
+		if (reason == 0) {
+			mdelay(100);
+			INW(np, nc_sist);
+		}
+		np->scsi_mode = INB(np, nc_stest4) & SMODE;
+	}
+
+	/*
+	 *  Fill in target structure.
+	 *  Reinitialize usrsync.
+	 *  Reinitialize usrwide.
+	 *  Prepare sync negotiation according to actual SCSI bus mode.
+	 */
+	for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
+		struct sym_tcb *tp = &np->target[i];
+
+		tp->to_reset  = 0;
+		tp->head.sval = 0;
+		tp->head.wval = np->rv_scntl3;
+		tp->head.uval = 0;
+	}
+
+	/*
+	 *  Download SCSI SCRIPTS to on-chip RAM if present,
+	 *  and start script processor.
+	 *  We prefer to do the download from the CPU.
+	 *  For platforms that may not support PCI memory mapping,
+	 *  we use simple SCRIPTS that perform MEMORY MOVEs.
+	 */
+	phys = SCRIPTA_BA(np, init);
+	if (np->ram_ba) {
+		if (sym_verbose >= 2)
+			printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np));
+		memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz);
+		if (np->ram_ws == 8192) {
+			memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz);
+			phys = scr_to_cpu(np->scr_ram_seg);
+			OUTL(np, nc_mmws, phys);
+			OUTL(np, nc_mmrs, phys);
+			OUTL(np, nc_sfs,  phys);
+			phys = SCRIPTB_BA(np, start64);
+		}
+	}
+
+	np->istat_sem = 0;
+
+	OUTL(np, nc_dsa, np->hcb_ba);
+	OUTL_DSP(np, phys);
+
+	/*
+	 *  Notify the XPT about the RESET condition.
+	 */
+	if (reason != 0)
+		sym_xpt_async_bus_reset(np);
+}
+
+/*
+ *  Switch trans mode for current job and its target.
+ */
+static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs,
+			 u_char per, u_char wide, u_char div, u_char fak)
+{
+	SYM_QUEHEAD *qp;
+	u_char sval, wval, uval;
+	struct sym_tcb *tp = &np->target[target];
+
+	assert(target == (INB(np, nc_sdid) & 0x0f));
+
+	sval = tp->head.sval;
+	wval = tp->head.wval;
+	uval = tp->head.uval;
+
+#if 0
+	printf("XXXX sval=%x wval=%x uval=%x (%x)\n", 
+		sval, wval, uval, np->rv_scntl3);
+#endif
+	/*
+	 *  Set the offset.
+	 */
+	if (!(np->features & FE_C10))
+		sval = (sval & ~0x1f) | ofs;
+	else
+		sval = (sval & ~0x3f) | ofs;
+
+	/*
+	 *  Set the sync divisor and extra clock factor.
+	 */
+	if (ofs != 0) {
+		wval = (wval & ~0x70) | ((div+1) << 4);
+		if (!(np->features & FE_C10))
+			sval = (sval & ~0xe0) | (fak << 5);
+		else {
+			uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
+			if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT);
+			if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT);
+		}
+	}
+
+	/*
+	 *  Set the bus width.
+	 */
+	wval = wval & ~EWS;
+	if (wide != 0)
+		wval |= EWS;
+
+	/*
+	 *  Set misc. ultra enable bits.
+	 */
+	if (np->features & FE_C10) {
+		uval = uval & ~(U3EN|AIPCKEN);
+		if (opts)	{
+			assert(np->features & FE_U3EN);
+			uval |= U3EN;
+		}
+	} else {
+		wval = wval & ~ULTRA;
+		if (per <= 12)	wval |= ULTRA;
+	}
+
+	/*
+	 *   Stop there if sync parameters are unchanged.
+	 */
+	if (tp->head.sval == sval && 
+	    tp->head.wval == wval &&
+	    tp->head.uval == uval)
+		return;
+	tp->head.sval = sval;
+	tp->head.wval = wval;
+	tp->head.uval = uval;
+
+	/*
+	 *  Disable extended Sreq/Sack filtering if per < 50.
+	 *  Not supported on the C1010.
+	 */
+	if (per < 50 && !(np->features & FE_C10))
+		OUTOFFB(np, nc_stest2, EXT);
+
+	/*
+	 *  set actual value and sync_status
+	 */
+	OUTB(np, nc_sxfer,  tp->head.sval);
+	OUTB(np, nc_scntl3, tp->head.wval);
+
+	if (np->features & FE_C10) {
+		OUTB(np, nc_scntl4, tp->head.uval);
+	}
+
+	/*
+	 *  patch ALL busy ccbs of this target.
+	 */
+	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+		struct sym_ccb *cp;
+		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+		if (cp->target != target)
+			continue;
+		cp->phys.select.sel_scntl3 = tp->head.wval;
+		cp->phys.select.sel_sxfer  = tp->head.sval;
+		if (np->features & FE_C10) {
+			cp->phys.select.sel_scntl4 = tp->head.uval;
+		}
+	}
+}
+
+/*
+ *  We received a WDTR.
+ *  Let everything be aware of the changes.
+ */
+static void sym_setwide(struct sym_hcb *np, int target, u_char wide)
+{
+	struct sym_tcb *tp = &np->target[target];
+	struct scsi_target *starget = tp->sdev->sdev_target;
+
+	if (spi_width(starget) == wide)
+		return;
+
+	sym_settrans(np, target, 0, 0, 0, wide, 0, 0);
+
+	tp->tgoal.width = wide;
+	spi_offset(starget) = 0;
+	spi_period(starget) = 0;
+	spi_width(starget) = wide;
+	spi_iu(starget) = 0;
+	spi_dt(starget) = 0;
+	spi_qas(starget) = 0;
+
+	if (sym_verbose >= 3)
+		spi_display_xfer_agreement(starget);
+}
+
+/*
+ *  We received a SDTR.
+ *  Let everything be aware of the changes.
+ */
+static void
+sym_setsync(struct sym_hcb *np, int target,
+            u_char ofs, u_char per, u_char div, u_char fak)
+{
+	struct sym_tcb *tp = &np->target[target];
+	struct scsi_target *starget = tp->sdev->sdev_target;
+	u_char wide = (tp->head.wval & EWS) ? BUS_16_BIT : BUS_8_BIT;
+
+	sym_settrans(np, target, 0, ofs, per, wide, div, fak);
+
+	spi_period(starget) = per;
+	spi_offset(starget) = ofs;
+	spi_iu(starget) = spi_dt(starget) = spi_qas(starget) = 0;
+
+	if (!tp->tgoal.dt && !tp->tgoal.iu && !tp->tgoal.qas) {
+		tp->tgoal.period = per;
+		tp->tgoal.offset = ofs;
+		tp->tgoal.check_nego = 0;
+	}
+
+	spi_display_xfer_agreement(starget);
+}
+
+/*
+ *  We received a PPR.
+ *  Let everything be aware of the changes.
+ */
+static void 
+sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs,
+             u_char per, u_char wide, u_char div, u_char fak)
+{
+	struct sym_tcb *tp = &np->target[target];
+	struct scsi_target *starget = tp->sdev->sdev_target;
+
+	sym_settrans(np, target, opts, ofs, per, wide, div, fak);
+
+	spi_width(starget) = tp->tgoal.width = wide;
+	spi_period(starget) = tp->tgoal.period = per;
+	spi_offset(starget) = tp->tgoal.offset = ofs;
+	spi_iu(starget) = tp->tgoal.iu = !!(opts & PPR_OPT_IU);
+	spi_dt(starget) = tp->tgoal.dt = !!(opts & PPR_OPT_DT);
+	spi_qas(starget) = tp->tgoal.qas = !!(opts & PPR_OPT_QAS);
+	tp->tgoal.check_nego = 0;
+
+	spi_display_xfer_agreement(starget);
+}
+
+/*
+ *  generic recovery from scsi interrupt
+ *
+ *  The doc says that when the chip gets a SCSI interrupt,
+ *  it tries to stop in an orderly fashion, by completing 
+ *  an instruction fetch that had started or by flushing 
+ *  the DMA fifo for a write to memory that was executing.
+ *  That is not enough to know whether the instruction 
+ *  that was just before the current DSP value has been 
+ *  executed or not.
+ *
+ *  There are some small SCRIPTS sections that deal with 
+ *  the start queue and the done queue that may break any 
+ *  assumption from the C code if we are interrupted 
+ *  inside, so we reset if this happens. Btw, since these 
+ *  SCRIPTS sections are executed while the SCRIPTS hasn't 
+ *  started SCSI operations, it is very unlikely to happen.
+ *
+ *  All the driver data structures are supposed to be 
+ *  allocated from the same 4 GB memory window, so there 
+ *  is a 1 to 1 relationship between DSA and driver data 
+ *  structures. Since we are careful :) to invalidate the 
+ *  DSA when we complete a command or when the SCRIPTS 
+ *  pushes a DSA into a queue, we can trust it when it 
+ *  points to a CCB.
+ */
+static void sym_recover_scsi_int (struct sym_hcb *np, u_char hsts)
+{
+	u32	dsp	= INL(np, nc_dsp);
+	u32	dsa	= INL(np, nc_dsa);
+	struct sym_ccb *cp	= sym_ccb_from_dsa(np, dsa);
+
+	/*
+	 *  If we haven't been interrupted inside the SCRIPTS 
+	 *  critical paths, we can safely restart the SCRIPTS 
+	 *  and trust the DSA value if it matches a CCB.
+	 */
+	if ((!(dsp > SCRIPTA_BA(np, getjob_begin) &&
+	       dsp < SCRIPTA_BA(np, getjob_end) + 1)) &&
+	    (!(dsp > SCRIPTA_BA(np, ungetjob) &&
+	       dsp < SCRIPTA_BA(np, reselect) + 1)) &&
+	    (!(dsp > SCRIPTB_BA(np, sel_for_abort) &&
+	       dsp < SCRIPTB_BA(np, sel_for_abort_1) + 1)) &&
+	    (!(dsp > SCRIPTA_BA(np, done) &&
+	       dsp < SCRIPTA_BA(np, done_end) + 1))) {
+		OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo  */
+		OUTB(np, nc_stest3, TE|CSF);		/* clear scsi fifo */
+		/*
+		 *  If we have a CCB, let the SCRIPTS call us back for 
+		 *  the handling of the error with SCRATCHA filled with 
+		 *  STARTPOS. This way, we will be able to freeze the 
+		 *  device queue and requeue awaiting IOs.
+		 */
+		if (cp) {
+			cp->host_status = hsts;
+			OUTL_DSP(np, SCRIPTA_BA(np, complete_error));
+		}
+		/*
+		 *  Otherwise just restart the SCRIPTS.
+		 */
+		else {
+			OUTL(np, nc_dsa, 0xffffff);
+			OUTL_DSP(np, SCRIPTA_BA(np, start));
+		}
+	}
+	else
+		goto reset_all;
+
+	return;
+
+reset_all:
+	sym_start_reset(np);
+}
+
+/*
+ *  chip exception handler for selection timeout
+ */
+static void sym_int_sto (struct sym_hcb *np)
+{
+	u32 dsp	= INL(np, nc_dsp);
+
+	if (DEBUG_FLAGS & DEBUG_TINY) printf ("T");
+
+	if (dsp == SCRIPTA_BA(np, wf_sel_done) + 8)
+		sym_recover_scsi_int(np, HS_SEL_TIMEOUT);
+	else
+		sym_start_reset(np);
+}
+
+/*
+ *  chip exception handler for unexpected disconnect
+ */
+static void sym_int_udc (struct sym_hcb *np)
+{
+	printf ("%s: unexpected disconnect\n", sym_name(np));
+	sym_recover_scsi_int(np, HS_UNEXPECTED);
+}
+
+/*
+ *  chip exception handler for SCSI bus mode change
+ *
+ *  spi2-r12 11.2.3 says a transceiver mode change must 
+ *  generate a reset event and a device that detects a reset 
+ *  event shall initiate a hard reset. It also says that a
+ *  device that detects a mode change shall set the data 
+ *  transfer mode to eight bit asynchronous, etc...
+ *  So, just reinitializing everything except the chip should be enough.
+ */
+static void sym_int_sbmc (struct sym_hcb *np)
+{
+	u_char scsi_mode = INB(np, nc_stest4) & SMODE;
+
+	/*
+	 *  Notify user.
+	 */
+	printf("%s: SCSI BUS mode change from %s to %s.\n", sym_name(np),
+		sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode));
+
+	/*
+	 *  Should suspend command processing for a few seconds and 
+	 *  reinitialize all except the chip.
+	 */
+	sym_start_up (np, 2);
+}
+
+/*
+ *  chip exception handler for SCSI parity error.
+ *
+ *  When the chip detects a SCSI parity error and is 
+ *  currently executing a (CH)MOV instruction, it does 
+ *  not interrupt immediately, but tries to finish the 
+ *  transfer of the current scatter entry before 
+ *  interrupting. The following situations may occur:
+ *
+ *  - The complete scatter entry has been transferred 
+ *    without the device having changed phase.
+ *    The chip will then interrupt with the DSP pointing 
+ *    to the instruction that follows the MOV.
+ *
+ *  - A phase mismatch occurs before the MOV finished 
+ *    and phase errors are to be handled by the C code.
+ *    The chip will then interrupt with both PAR and MA 
+ *    conditions set.
+ *
+ *  - A phase mismatch occurs before the MOV finished and 
+ *    phase errors are to be handled by SCRIPTS.
+ *    The chip will load the DSP with the phase mismatch 
+ *    JUMP address and interrupt the host processor.
+ */
+static void sym_int_par (struct sym_hcb *np, u_short sist)
+{
+	u_char	hsts	= INB(np, HS_PRT);
+	u32	dsp	= INL(np, nc_dsp);
+	u32	dbc	= INL(np, nc_dbc);
+	u32	dsa	= INL(np, nc_dsa);
+	u_char	sbcl	= INB(np, nc_sbcl);
+	u_char	cmd	= dbc >> 24;
+	int phase	= cmd & 7;
+	struct sym_ccb *cp	= sym_ccb_from_dsa(np, dsa);
+
+	printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
+		sym_name(np), hsts, dbc, sbcl);
+
+	/*
+	 *  Check that the chip is connected to the SCSI BUS.
+	 */
+	if (!(INB(np, nc_scntl1) & ISCON)) {
+		sym_recover_scsi_int(np, HS_UNEXPECTED);
+		return;
+	}
+
+	/*
+	 *  If the nexus is not clearly identified, reset the bus.
+	 *  We will try to do better later.
+	 */
+	if (!cp)
+		goto reset_all;
+
+	/*
+	 *  Check that the instruction was a MOV, the direction was INPUT, and 
+	 *  ATN is asserted.
+	 */
+	if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
+		goto reset_all;
+
+	/*
+	 *  Keep track of the parity error.
+	 */
+	OUTONB(np, HF_PRT, HF_EXT_ERR);
+	cp->xerr_status |= XE_PARITY_ERR;
+
+	/*
+	 *  Prepare the message to send to the device.
+	 */
+	np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;
+
+	/*
+	 *  If the old phase was DATA IN phase, we have to deal with
+	 *  the 3 situations described above.
+	 *  For other input phases (MSG IN and STATUS), the device 
+	 *  must resend the whole thing that failed parity checking 
+	 *  or signal an error. So, jumping to the dispatcher should be OK.
+	 */
+	if (phase == 1 || phase == 5) {
+		/* Phase mismatch handled by SCRIPTS */
+		if (dsp == SCRIPTB_BA(np, pm_handle))
+			OUTL_DSP(np, dsp);
+		/* Phase mismatch handled by the C code */
+		else if (sist & MA)
+			sym_int_ma (np);
+		/* No phase mismatch occurred */
+		else {
+			sym_set_script_dp (np, cp, dsp);
+			OUTL_DSP(np, SCRIPTA_BA(np, dispatch));
+		}
+	}
+	else if (phase == 7)	/* We definitely cannot handle parity errors */
+#if 1				/* in message-in phase due to the reselection */
+		goto reset_all; /* path and various message anticipations.   */
+#else
+		OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+#endif
+	else
+		OUTL_DSP(np, SCRIPTA_BA(np, dispatch));
+	return;
+
+reset_all:
+	sym_start_reset(np);
+	return;
+}
+
+/*
+ *  chip exception handler for phase errors.
+ *
+ *  We have to construct a new transfer descriptor,
+ *  to transfer the rest of the current block.
+ */
+static void sym_int_ma (struct sym_hcb *np)
+{
+	u32	dbc;
+	u32	rest;
+	u32	dsp;
+	u32	dsa;
+	u32	nxtdsp;
+	u32	*vdsp;
+	u32	oadr, olen;
+	u32	*tblp;
+        u32	newcmd;
+	u_int	delta;
+	u_char	cmd;
+	u_char	hflags, hflags0;
+	struct	sym_pmc *pm;
+	struct sym_ccb *cp;
+
+	dsp	= INL(np, nc_dsp);
+	dbc	= INL(np, nc_dbc);
+	dsa	= INL(np, nc_dsa);
+
+	cmd	= dbc >> 24;
+	rest	= dbc & 0xffffff;
+	delta	= 0;
+
+	/*
+	 *  locate matching cp if any.
+	 */
+	cp = sym_ccb_from_dsa(np, dsa);
+
+	/*
+	 *  Do not take into account the dma fifo and various buffers in 
+	 *  INPUT phase since the chip flushes everything before 
+	 *  raising the MA interrupt for interrupted INPUT phases.
+	 *  For DATA IN phase, we will check for the SWIDE later.
+	 */
+	if ((cmd & 7) != 1 && (cmd & 7) != 5) {
+		u_char ss0, ss2;
+
+		if (np->features & FE_DFBC)
+			delta = INW(np, nc_dfbc);
+		else {
+			u32 dfifo;
+
+			/*
+			 * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
+			 */
+			dfifo = INL(np, nc_dfifo);
+
+			/*
+			 *  Calculate remaining bytes in DMA fifo.
+			 *  (CTEST5 = dfifo >> 16)
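+			 *  Illustrative example (arbitrary numbers): in 
+			 *  the DFS-clear case below, a DFIFO byte counter 
+			 *  of 0x20 with rest = 0x18 gives 
+			 *  delta = (0x20 - 0x18) & 0x7f = 8 bytes still 
+			 *  sitting in the DMA fifo.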
+			 */
+			if (dfifo & (DFS << 16))
+				delta = ((((dfifo >> 8) & 0x300) |
+				          (dfifo & 0xff)) - rest) & 0x3ff;
+			else
+				delta = ((dfifo & 0xff) - rest) & 0x7f;
+		}
+
+		/*
+		 *  The data in the dma fifo has not been transferred to
+		 *  the target -> add the amount to the rest
+		 *  and clear the data.
+		 *  Check the sstat2 register in case of wide transfer.
+		 */
+		rest += delta;
+		ss0  = INB(np, nc_sstat0);
+		if (ss0 & OLF) rest++;
+		if (!(np->features & FE_C10))
+			if (ss0 & ORF) rest++;
+		if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
+			ss2 = INB(np, nc_sstat2);
+			if (ss2 & OLF1) rest++;
+			if (!(np->features & FE_C10))
+				if (ss2 & ORF1) rest++;
+		}
+
+		/*
+		 *  Clear fifos.
+		 */
+		OUTB(np, nc_ctest3, np->rv_ctest3 | CLF);	/* dma fifo  */
+		OUTB(np, nc_stest3, TE|CSF);		/* scsi fifo */
+	}
+
+	/*
+	 *  log the information
+	 */
+	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
+		printf ("P%x%x RL=%d D=%d ", cmd&7, INB(np, nc_sbcl)&7,
+			(unsigned) rest, (unsigned) delta);
+
+	/*
+	 *  try to find the interrupted script command,
+	 *  and the address at which to continue.
+	 */
+	vdsp	= NULL;
+	nxtdsp	= 0;
+	if	(dsp >  np->scripta_ba &&
+		 dsp <= np->scripta_ba + np->scripta_sz) {
+		vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8));
+		nxtdsp = dsp;
+	}
+	else if	(dsp >  np->scriptb_ba &&
+		 dsp <= np->scriptb_ba + np->scriptb_sz) {
+		vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8));
+		nxtdsp = dsp;
+	}
+
+	/*
+	 *  log the information
+	 */
+	if (DEBUG_FLAGS & DEBUG_PHASE) {
+		printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
+			cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
+	}
+
+	if (!vdsp) {
+		printf ("%s: interrupted SCRIPT address not found.\n", 
+			sym_name (np));
+		goto reset_all;
+	}
+
+	if (!cp) {
+		printf ("%s: SCSI phase error fixup: CCB already dequeued.\n", 
+			sym_name (np));
+		goto reset_all;
+	}
+
+	/*
+	 *  Get the old start address and old length.
+	 */
+	oadr = scr_to_cpu(vdsp[1]);
+
+	if (cmd & 0x10) {	/* Table indirect */
+		tblp = (u32 *) ((char*) &cp->phys + oadr);
+		olen = scr_to_cpu(tblp[0]);
+		oadr = scr_to_cpu(tblp[1]);
+	} else {
+		tblp = (u32 *) 0;
+		olen = scr_to_cpu(vdsp[0]) & 0xffffff;
+	}
+
+	if (DEBUG_FLAGS & DEBUG_PHASE) {
+		printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
+			(unsigned) (scr_to_cpu(vdsp[0]) >> 24),
+			tblp,
+			(unsigned) olen,
+			(unsigned) oadr);
+	}
+
+	/*
+	 *  check cmd against assumed interrupted script command.
+	 *  If it is a DT data phase, the MOVE instruction does not 
+	 *  have bit 4 of the phase set.
+	 */
+	if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
+		sym_print_addr(cp->cmd,
+			"internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
+			cmd, scr_to_cpu(vdsp[0]) >> 24);
+
+		goto reset_all;
+	}
+
+	/*
+	 *  If the old phase was not a data phase, leave here.
+	 */
+	if (cmd & 2) {
+		sym_print_addr(cp->cmd,
+			"phase change %x-%x %d@%08x resid=%d.\n",
+			cmd&7, INB(np, nc_sbcl)&7, (unsigned)olen,
+			(unsigned)oadr, (unsigned)rest);
+		goto unexpected_phase;
+	}
+
+	/*
+	 *  Choose the correct PM save area.
+	 *
+	 *  Look at the PM_SAVE SCRIPT if you want to understand 
+	 *  this stuff. The equivalent code is implemented in 
+	 *  SCRIPTS for the 895A, 896 and 1010 that are able to 
+	 *  handle PM from the SCRIPTS processor.
+	 */
+	hflags0 = INB(np, HF_PRT);
+	hflags = hflags0;
+
+	if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
+		if (hflags & HF_IN_PM0)
+			nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
+		else if	(hflags & HF_IN_PM1)
+			nxtdsp = scr_to_cpu(cp->phys.pm1.ret);
+
+		if (hflags & HF_DP_SAVED)
+			hflags ^= HF_ACT_PM;
+	}
+
+	if (!(hflags & HF_ACT_PM)) {
+		pm = &cp->phys.pm0;
+		newcmd = SCRIPTA_BA(np, pm0_data);
+	}
+	else {
+		pm = &cp->phys.pm1;
+		newcmd = SCRIPTA_BA(np, pm1_data);
+	}
+
+	hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
+	if (hflags != hflags0)
+		OUTB(np, HF_PRT, hflags);
+
+	/*
+	 *  Fill in the phase mismatch context.
+	 */
+	pm->sg.addr = cpu_to_scr(oadr + olen - rest);
+	pm->sg.size = cpu_to_scr(rest);
+	pm->ret     = cpu_to_scr(nxtdsp);
+
+	/*
+	 *  If we have a SWIDE,
+	 *  - prepare the address to write the SWIDE from SCRIPTS,
+	 *  - compute the SCRIPTS address to restart from,
+	 *  - move current data pointer context by one byte.
+	 */
+	nxtdsp = SCRIPTA_BA(np, dispatch);
+	if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) &&
+	    (INB(np, nc_scntl2) & WSR)) {
+		u32 tmp;
+
+		/*
+		 *  Set up the table indirect for the MOVE
+		 *  of the residual byte and adjust the data 
+		 *  pointer context.
+		 */
+		tmp = scr_to_cpu(pm->sg.addr);
+		cp->phys.wresid.addr = cpu_to_scr(tmp);
+		pm->sg.addr = cpu_to_scr(tmp + 1);
+		tmp = scr_to_cpu(pm->sg.size);
+		cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
+		pm->sg.size = cpu_to_scr(tmp - 1);
+
+		/*
+		 *  If only the residual byte is to be moved, 
+		 *  no PM context is needed.
+		 */
+		if ((tmp&0xffffff) == 1)
+			newcmd = pm->ret;
+
+		/*
+		 *  Prepare the address of SCRIPTS that will 
+		 *  move the residual byte to memory.
+		 */
+		nxtdsp = SCRIPTB_BA(np, wsr_ma_helper);
+	}
+
+	if (DEBUG_FLAGS & DEBUG_PHASE) {
+		sym_print_addr(cp->cmd, "PM %x %x %x / %x %x %x.\n",
+			hflags0, hflags, newcmd,
+			(unsigned)scr_to_cpu(pm->sg.addr),
+			(unsigned)scr_to_cpu(pm->sg.size),
+			(unsigned)scr_to_cpu(pm->ret));
+	}
+
+	/*
+	 *  Restart the SCRIPTS processor.
+	 */
+	sym_set_script_dp (np, cp, newcmd);
+	OUTL_DSP(np, nxtdsp);
+	return;
+
+	/*
+	 *  Unexpected phase changes that occur when the current phase 
+	 *  is not a DATA IN or DATA OUT phase are due to error conditions.
+	 *  Such an event may only happen when the SCRIPTS is using a 
+	 *  multibyte SCSI MOVE.
+	 *
+	 *  Phase change                Some possible cause
+	 *
+	 *  COMMAND  --> MSG IN         SCSI parity error detected by target.
+	 *  COMMAND  --> STATUS         Bad command or refused by target.
+	 *  MSG OUT  --> MSG IN         Message rejected by target.
+	 *  MSG OUT  --> COMMAND        Bogus target that discards extended
+	 *                              negotiation messages.
+	 *
+	 *  The code below does not care about the new phase and so 
+	 *  trusts the target. Why annoy it?
+	 *  If the interrupted phase is the COMMAND phase, we restart at
+	 *  the dispatcher.
+	 *  If a target does not get all the messages after selection, 
+	 *  the code blindly assumes that the target discards extended 
+	 *  messages and clears the negotiation status.
+	 *  If the target does not want all of our response to negotiation,
+	 *  we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids 
+	 *  bloat for such a should_not_happen situation).
+	 *  In all other situations, we reset the BUS.
+	 *  Are these assumptions reasonable? (Wait and see ...)
+	 */
+unexpected_phase:
+	dsp -= 8;
+	nxtdsp = 0;
+
+	switch (cmd & 7) {
+	case 2:	/* COMMAND phase */
+		nxtdsp = SCRIPTA_BA(np, dispatch);
+		break;
+#if 0
+	case 3:	/* STATUS  phase */
+		nxtdsp = SCRIPTA_BA(np, dispatch);
+		break;
+#endif
+	case 6:	/* MSG OUT phase */
+		/*
+		 *  If the device may want to use untagged commands when 
+		 *  we want tagged ones, we prepare an IDENTIFY without 
+		 *  disconnect granted, since we will not be able to 
+		 *  handle a reselection. Otherwise, we just don't care.
+		 */
+		if	(dsp == SCRIPTA_BA(np, send_ident)) {
+			if (cp->tag != NO_TAG && olen - rest <= 3) {
+				cp->host_status = HS_BUSY;
+				np->msgout[0] = IDENTIFY(0, cp->lun);
+				nxtdsp = SCRIPTB_BA(np, ident_break_atn);
+			}
+			else
+				nxtdsp = SCRIPTB_BA(np, ident_break);
+		}
+		else if	(dsp == SCRIPTB_BA(np, send_wdtr) ||
+			 dsp == SCRIPTB_BA(np, send_sdtr) ||
+			 dsp == SCRIPTB_BA(np, send_ppr)) {
+			nxtdsp = SCRIPTB_BA(np, nego_bad_phase);
+			if (dsp == SCRIPTB_BA(np, send_ppr)) {
+				struct scsi_device *dev = cp->cmd->device;
+				dev->ppr = 0;
+			}
+		}
+		break;
+#if 0
+	case 7:	/* MSG IN  phase */
+		nxtdsp = SCRIPTA_BA(np, clrack);
+		break;
+#endif
+	}
+
+	if (nxtdsp) {
+		OUTL_DSP(np, nxtdsp);
+		return;
+	}
+
+reset_all:
+	sym_start_reset(np);
+}
+
+/*
+ *  chip interrupt handler
+ *
+ *  In normal situations, interrupt conditions occur one at 
+ *  a time. But when something bad happens on the SCSI BUS, 
+ *  the chip may raise several interrupt flags before 
+ *  stopping and interrupting the CPU. The additional 
+ *  interrupt flags are stacked in some extra registers 
+ *  after the SIP and/or DIP flag has been raised in the 
+ *  ISTAT. After the CPU has read the interrupt condition 
+ *  flag from SIST or DSTAT, the chip unstacks the other 
+ *  interrupt flags and sets the corresponding bits in 
+ *  SIST or DSTAT. Since the chip starts stacking once the 
+ *  SIP or DIP flag is set, there is a small window of time 
+ *  where the stacking does not occur.
+ *
+ *  Typically, multiple interrupt conditions may happen in 
+ *  the following situations:
+ *
+ *  - SCSI parity error + Phase mismatch  (PAR|MA)
+ *    When a parity error is detected in an input phase 
+ *    and the device switches to msg-in phase inside a 
+ *    block MOV.
+ *  - SCSI parity error + Unexpected disconnect (PAR|UDC)
+ *    When a stupid device does not want to handle the 
+ *    recovery of a SCSI parity error.
+ *  - Some combinations of STO, PAR, UDC, ...
+ *    When using non-compliant SCSI stuff, when the user is 
+ *    doing non-compliant hot tampering on the BUS, when 
+ *    something really bad happens to a device, etc ...
+ *
+ *  The heuristic suggested by SYMBIOS to handle 
+ *  multiple interrupts is to try unstacking all 
+ *  interrupt conditions and to handle them with a 
+ *  priority based on error severity.
+ *  This will work when the unstacking has been 
+ *  successful, but we cannot be 100 % sure of that, 
+ *  since the CPU may have unstacked faster than 
+ *  the chip is able to stack. Hmmm ... But it seems that 
+ *  such a situation is very unlikely to happen.
+ *
+ *  If this happens, for example an STO caught by the CPU 
+ *  and then a UDC happening before the CPU has restarted 
+ *  the SCRIPTS, the driver may wrongly complete the 
+ *  same command on the UDC, since the SCRIPTS didn't restart 
+ *  and the DSA still points to the same command.
+ *  We avoid this situation by setting the DSA to an 
+ *  invalid value when the CCB is completed and before 
+ *  restarting the SCRIPTS.
+ *
+ *  Another issue is that we need some sections of our 
+ *  recovery procedures to be somehow uninterruptible, but 
+ *  the SCRIPTS processor does not provide such a 
+ *  feature. For this reason, we preferably handle recovery 
+ *  from the C code and check against some SCRIPTS critical 
+ *  sections from the C code.
+ *
+ *  Hopefully, the interrupt handling of the driver is now 
+ *  able to resist weird BUS error conditions, but do not 
+ *  ask me for any guarantee that it will never fail. :-)
+ *  Use at your own discretion and risk.
+ */
+
+void sym_interrupt (struct sym_hcb *np)
+{
+	u_char	istat, istatc;
+	u_char	dstat;
+	u_short	sist;
+
+	/*
+	 *  interrupt on the fly ?
+	 *  (SCRIPTS may still be running)
+	 *
+	 *  A `dummy read' is needed to ensure that the 
+	 *  clear of the INTF flag reaches the device 
+	 *  and that posted writes are flushed to memory
+	 *  before the scanning of the DONE queue.
+	 *  Note that the SCRIPTS also does a (dummy) read to 
+	 *  memory prior to delivering the INTF interrupt condition.
+	 */
+	istat = INB(np, nc_istat);
+	if (istat & INTF) {
+		OUTB(np, nc_istat, (istat & SIGP) | INTF | np->istat_sem);
+		istat = INB(np, nc_istat);		/* DUMMY READ */
+		if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
+		sym_wakeup_done(np);
+	}
+
+	if (!(istat & (SIP|DIP)))
+		return;
+
+#if 0	/* We should never get this one */
+	if (istat & CABRT)
+		OUTB(np, nc_istat, CABRT);
+#endif
+
+	/*
+	 *  PAR and MA interrupts may occur at the same time,
+	 *  and we need to know of both in order to handle 
+	 *  this situation properly. We try to unstack SCSI 
+	 *  interrupts for that reason. BTW, I dislike a LOT 
+	 *  such a loop inside the interrupt routine.
+	 *  Even if DMA interrupt stacking is very unlikely to 
+	 *  happen, we also try unstacking these ones, since 
+	 *  this has no performance impact.
+	 */
+	sist	= 0;
+	dstat	= 0;
+	istatc	= istat;
+	do {
+		if (istatc & SIP)
+			sist  |= INW(np, nc_sist);
+		if (istatc & DIP)
+			dstat |= INB(np, nc_dstat);
+		istatc = INB(np, nc_istat);
+		istat |= istatc;
+	} while (istatc & (SIP|DIP));
+
+	if (DEBUG_FLAGS & DEBUG_TINY)
+		printf ("<%d|%x:%x|%x:%x>",
+			(int)INB(np, nc_scr0),
+			dstat,sist,
+			(unsigned)INL(np, nc_dsp),
+			(unsigned)INL(np, nc_dbc));
+	/*
+	 *  On paper, a memory read barrier may be needed here to 
+	 *  prevent out of order LOADs by the CPU from having 
+	 *  prefetched stale data prior to DMA having occurred.
+	 *  And since we are paranoid ... :)
+	 */
+	MEMORY_READ_BARRIER();
+
+	/*
+	 *  First, interrupts we want to service cleanly.
+	 *
+	 *  Phase mismatch (MA) is the most frequent interrupt 
+	 *  for chips earlier than the 896 and so we have to service 
+	 *  it as quickly as possible.
+	 *  A SCSI parity error (PAR) may be combined with a phase 
+	 *  mismatch condition (MA).
+	 *  Programmed interrupts (SIR) are used to call the C code 
+	 *  from SCRIPTS.
+	 *  The single step interrupt (SSI) is not used in this 
+	 *  driver.
+	 */
+	if (!(sist  & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
+	    !(dstat & (MDPE|BF|ABRT|IID))) {
+		if	(sist & PAR)	sym_int_par (np, sist);
+		else if (sist & MA)	sym_int_ma (np);
+		else if (dstat & SIR)	sym_int_sir (np);
+		else if (dstat & SSI)	OUTONB_STD();
+		else			goto unknown_int;
+		return;
+	}
+
+	/*
+	 *  Now, interrupts that do not happen in normal 
+	 *  situations and that we may need to recover from.
+	 *
+	 *  On SCSI RESET (RST), we reset everything.
+	 *  On SCSI BUS MODE CHANGE (SBMC), we complete all 
+	 *  active CCBs with RESET status, prepare all devices 
+	 *  for negotiating again and restart the SCRIPTS.
+	 *  On STO and UDC, we complete the CCB with the 
+	 *  corresponding status and restart the SCRIPTS.
+	 */
+	if (sist & RST) {
+		printf("%s: SCSI BUS reset detected.\n", sym_name(np));
+		sym_start_up (np, 1);
+		return;
+	}
+
+	OUTB(np, nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
+	OUTB(np, nc_stest3, TE|CSF);		/* clear scsi fifo */
+
+	if (!(sist  & (GEN|HTH|SGE)) &&
+	    !(dstat & (MDPE|BF|ABRT|IID))) {
+		if	(sist & SBMC)	sym_int_sbmc (np);
+		else if (sist & STO)	sym_int_sto (np);
+		else if (sist & UDC)	sym_int_udc (np);
+		else			goto unknown_int;
+		return;
+	}
+
+	/*
+	 *  Now, interrupts we are not able to recover from cleanly.
+	 *
+	 *  Log message for hard errors.
+	 *  Reset everything.
+	 */
+
+	sym_log_hard_error(np, sist, dstat);
+
+	if ((sist & (GEN|HTH|SGE)) ||
+		(dstat & (MDPE|BF|ABRT|IID))) {
+		sym_start_reset(np);
+		return;
+	}
+
+unknown_int:
+	/*
+	 *  We just missed the cause of the interrupt. :(
+	 *  Print a message. The timeout will do the real work.
+	 */
+	printf(	"%s: unknown interrupt(s) ignored, "
+		"ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
+		sym_name(np), istat, dstat, sist);
+}
+
+/*
+ *  Dequeue from the START queue all CCBs that match 
+ *  a given target/lun/task condition (-1 means all),
+ *  and move them from the BUSY queue to the COMP queue 
+ *  with CAM_REQUEUE_REQ status condition.
+ *  This function is used during error handling/recovery.
+ *  It is called with SCRIPTS not running.
+ */
+static int 
+sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task)
+{
+	int j;
+	struct sym_ccb *cp;
+
+	/*
+	 *  Make sure the starting index is within range.
+	 */
+	assert((i >= 0) && (i < 2*MAX_QUEUE));
+
+	/*
+	 *  Walk until end of START queue and dequeue every job 
+	 *  that matches the target/lun/task condition.
+	 */
+	j = i;
+	while (i != np->squeueput) {
+		cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
+		assert(cp);
+#ifdef SYM_CONF_IARB_SUPPORT
+		/* Forget hints for IARB, they may be no longer relevant */
+		cp->host_flags &= ~HF_HINT_IARB;
+#endif
+		if ((target == -1 || cp->target == target) &&
+		    (lun    == -1 || cp->lun    == lun)    &&
+		    (task   == -1 || cp->tag    == task)) {
+			sym_set_cam_status(cp->cmd, CAM_REQUEUE_REQ);
+			sym_remque(&cp->link_ccbq);
+			sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
+		}
+		else {
+			if (i != j)
+				np->squeue[j] = np->squeue[i];
+			if ((j += 2) >= MAX_QUEUE*2) j = 0;
+		}
+		if ((i += 2) >= MAX_QUEUE*2) i = 0;
+	}
+	if (i != j)		/* Copy back the idle task if needed */
+		np->squeue[j] = np->squeue[i];
+	np->squeueput = j;	/* Update our current start queue pointer */
+
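+	/*
+	 *  Each start queue entry occupies two 32-bit words, hence 
+	 *  the steps of 2 above and the division by 2 below to get 
+	 *  the number of dequeued CCBs.
+	 */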
+	return (i - j) / 2;
+}
+
+/*
+ *  chip handler for bad SCSI status condition
+ *
+ *  In case of bad SCSI status, we unqueue all the tasks 
+ *  currently queued to the controller but not yet started 
+ *  and then restart the SCRIPTS processor immediately.
+ *
+ *  QUEUE FULL and BUSY conditions are handled the same way.
+ *  Basically, all the not-yet-started tasks are requeued in 
+ *  the device queue and the queue is frozen until a completion.
+ *
+ *  For CHECK CONDITION and COMMAND TERMINATED status, we use 
+ *  the CCB of the failed command to prepare a REQUEST SENSE 
+ *  SCSI command and queue it to the controller queue.
+ *
+ *  SCRATCHA is assumed to have been loaded with STARTPOS 
+ *  before the SCRIPTS called the C code.
+ */
+static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb *cp)
+{
+	u32		startp;
+	u_char		s_status = cp->ssss_status;
+	u_char		h_flags  = cp->host_flags;
+	int		msglen;
+	int		i;
+
+	/*
+	 *  Compute the index of the next job to start from SCRIPTS.
+	 */
+	i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
+
+	/*
+	 *  The last queued CCB used for the IARB hint may 
+	 *  no longer be relevant. Forget it.
+	 */
+#ifdef SYM_CONF_IARB_SUPPORT
+	if (np->last_cp)
+		np->last_cp = 0;
+#endif
+
+	/*
+	 *  Now deal with the SCSI status.
+	 */
+	switch(s_status) {
+	case S_BUSY:
+	case S_QUEUE_FULL:
+		if (sym_verbose >= 2) {
+			sym_print_addr(cp->cmd, "%s\n",
+			        s_status == S_BUSY ? "BUSY" : "QUEUE FULL");
+		}
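+		/* fall through */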
+	default:	/* S_INT, S_INT_COND_MET, S_CONFLICT */
+		sym_complete_error (np, cp);
+		break;
+	case S_TERMINATED:
+	case S_CHECK_COND:
+		/*
+		 *  If we get a SCSI error when requesting sense, give up.
+		 */
+		if (h_flags & HF_SENSE) {
+			sym_complete_error (np, cp);
+			break;
+		}
+
+		/*
+		 *  Dequeue all queued CCBs for that device not yet started,
+		 *  and restart the SCRIPTS processor immediately.
+		 */
+		sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
+		OUTL_DSP(np, SCRIPTA_BA(np, start));
+
+ 		/*
+		 *  Save some info of the actual IO.
+		 *  Compute the data residual.
+		 */
+		cp->sv_scsi_status = cp->ssss_status;
+		cp->sv_xerr_status = cp->xerr_status;
+		cp->sv_resid = sym_compute_residual(np, cp);
+
+		/*
+		 *  Prepare all needed data structures for 
+		 *  requesting sense data.
+		 */
+
+		cp->scsi_smsg2[0] = IDENTIFY(0, cp->lun);
+		msglen = 1;
+
+		/*
+		 *  If we are currently using anything different from 
+		 *  async. 8 bit data transfers with that target,
+		 *  start a negotiation, since the device may want 
+		 *  to report a UNIT ATTENTION condition to us due to 
+		 *  a cause we currently ignore, and we do not want 
+		 *  to be stuck with WIDE and/or SYNC data transfers.
+		 *
+		 *  cp->nego_status is filled by sym_prepare_nego().
+		 */
+		cp->nego_status = 0;
+		msglen += sym_prepare_nego(np, cp, &cp->scsi_smsg2[msglen]);
+		/*
+		 *  Message table indirect structure.
+		 */
+		cp->phys.smsg.addr	= cpu_to_scr(CCB_BA(cp, scsi_smsg2));
+		cp->phys.smsg.size	= cpu_to_scr(msglen);
+
+		/*
+		 *  sense command
+		 */
+		cp->phys.cmd.addr	= cpu_to_scr(CCB_BA(cp, sensecmd));
+		cp->phys.cmd.size	= cpu_to_scr(6);
+
+		/*
+		 *  patch requested size into sense command
+		 */
+		cp->sensecmd[0]		= REQUEST_SENSE;
+		cp->sensecmd[1]		= 0;
+		if (cp->cmd->device->scsi_level <= SCSI_2 && cp->lun <= 7)
+			cp->sensecmd[1]	= cp->lun << 5;
+		cp->sensecmd[4]		= SYM_SNS_BBUF_LEN;
+		cp->data_len		= SYM_SNS_BBUF_LEN;
+
+		/*
+		 *  sense data
+		 */
+		memset(cp->sns_bbuf, 0, SYM_SNS_BBUF_LEN);
+		cp->phys.sense.addr	= cpu_to_scr(CCB_BA(cp, sns_bbuf));
+		cp->phys.sense.size	= cpu_to_scr(SYM_SNS_BBUF_LEN);
+
+		/*
+		 *  requeue the command.
+		 */
+		startp = SCRIPTB_BA(np, sdata_in);
+
+		cp->phys.head.savep	= cpu_to_scr(startp);
+		cp->phys.head.lastp	= cpu_to_scr(startp);
+		cp->startp		= cpu_to_scr(startp);
+		cp->goalp		= cpu_to_scr(startp + 16);
+
+		cp->host_xflags = 0;
+		cp->host_status	= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
+		cp->ssss_status = S_ILLEGAL;
+		cp->host_flags	= (HF_SENSE|HF_DATA_IN);
+		cp->xerr_status = 0;
+		cp->extra_bytes = 0;
+
+		cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select));
+
+		/*
+		 *  Requeue the command.
+		 */
+		sym_put_start_queue(np, cp);
+
+		/*
+		 *  Give back to upper layer everything we have dequeued.
+		 */
+		sym_flush_comp_queue(np, 0);
+		break;
+	}
+}
+
+/*
+ *  After a device has accepted some management message 
+ *  as BUS DEVICE RESET, ABORT TASK, etc ..., or when 
+ *  a device signals a UNIT ATTENTION condition, some 
+ *  tasks are thrown away by the device. We are required 
+ *  to reflect that on our tasks list since the device 
+ *  will never complete these tasks.
+ *
+ *  This function moves from the BUSY queue to the COMP 
+ *  queue all disconnected CCBs for a given target that 
+ *  match the following criteria:
+ *  - lun=-1  means any logical UNIT, otherwise a given one.
+ *  - task=-1 means any task, otherwise a given one.
+ */
+int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task)
+{
+	SYM_QUEHEAD qtmp, *qp;
+	int i = 0;
+	struct sym_ccb *cp;
+
+	/*
+	 *  Move the entire BUSY queue to our temporary queue.
+	 */
+	sym_que_init(&qtmp);
+	sym_que_splice(&np->busy_ccbq, &qtmp);
+	sym_que_init(&np->busy_ccbq);
+
+	/*
+	 *  Put all CCBs that match our criteria into 
+	 *  the COMP queue and put the other ones back into 
+	 *  the BUSY queue.
+	 */
+	while ((qp = sym_remque_head(&qtmp)) != 0) {
+		struct scsi_cmnd *cmd;
+		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+		cmd = cp->cmd;
+		if (cp->host_status != HS_DISCONNECT ||
+		    cp->target != target	     ||
+		    (lun  != -1 && cp->lun != lun)   ||
+		    (task != -1 && 
+			(cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) {
+			sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
+			continue;
+		}
+		sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
+
+		/* Preserve the software timeout condition */
+		if (sym_get_cam_status(cmd) != CAM_CMD_TIMEOUT)
+			sym_set_cam_status(cmd, cam_status);
+		++i;
+#if 0
+printf("XXXX TASK @%p CLEARED\n", cp);
+#endif
+	}
+	return i;
+}
+
+/*
+ *  chip handler for TASKS recovery
+ *
+ *  We cannot safely abort a command, while the SCRIPTS 
+ *  processor is running, since we just would be in race 
+ *  with it.
+ *
+ *  As long as we have tasks to abort, we keep the SEM 
+ *  bit set in the ISTAT. When this bit is set, the 
+ *  SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) 
+ *  each time it enters the scheduler.
+ *
+ *  If we have to reset a target, clear tasks of a unit,
+ *  or to perform the abort of a disconnected job, we 
+ *  restart the SCRIPTS for selecting the target. Once 
+ *  selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED).
+ *  If it loses arbitration, the SCRIPTS will interrupt again 
+ *  the next time it enters its scheduler, and so on ...
+ *
+ *  On SIR_TARGET_SELECTED, we scan for the most 
+ *  appropriate thing to do:
+ *
+ *  - If nothing, we just send an M_ABORT message to the 
+ *    target to get rid of the useless SCSI bus ownership.
+ *    According to the specs, no tasks shall be affected.
+ *  - If the target is to be reset, we send it a M_RESET 
+ *    message.
+ *  - If a logical UNIT is to be cleared, we send the 
+ *    IDENTIFY(lun) + M_ABORT.
+ *  - If an untagged task is to be aborted, we send the 
+ *    IDENTIFY(lun) + M_ABORT.
+ *  - If a tagged task is to be aborted, we send the 
+ *    IDENTIFY(lun) + task attributes + M_ABORT_TAG.
+ *
+ *  Once our 'kiss of death' :) message has been accepted 
+ *  by the target, the SCRIPTS interrupts again 
+ *  (SIR_ABORT_SENT). On this interrupt, we complete 
+ *  all the CCBs that should have been aborted by the 
+ *  target according to our message.
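+ *
+ *  For example, the abort of a tagged task is sent as the 4-byte 
+ *  sequence { IDENTIFY(0, lun), cp->scsi_smsg[1], cp->scsi_smsg[2], 
+ *  M_ABORT_TAG } built into np->abrt_msg[] below, while an untagged 
+ *  abort is just { IDENTIFY(0, lun), M_ABORT }.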
+ */
+static void sym_sir_task_recovery(struct sym_hcb *np, int num)
+{
+	SYM_QUEHEAD *qp;
+	struct sym_ccb *cp;
+	struct sym_tcb *tp = NULL; /* gcc isn't quite smart enough yet */
+	struct scsi_target *starget;
+	int target=-1, lun=-1, task;
+	int i, k;
+
+	switch(num) {
+	/*
+	 *  The SCRIPTS processor stopped before starting
+	 *  the next command in order to allow us to perform 
+	 *  some task recovery.
+	 */
+	case SIR_SCRIPT_STOPPED:
+		/*
+		 *  Do we have any target to reset or unit to clear ?
+		 */
+		for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
+			tp = &np->target[i];
+			if (tp->to_reset || 
+			    (tp->lun0p && tp->lun0p->to_clear)) {
+				target = i;
+				break;
+			}
+			if (!tp->lunmp)
+				continue;
+			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
+				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
+					target	= i;
+					break;
+				}
+			}
+			if (target != -1)
+				break;
+		}
+
+		/*
+		 *  If not, walk the busy queue for any 
+		 *  disconnected CCB to be aborted.
+		 */
+		if (target == -1) {
+			FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+				cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
+				if (cp->host_status != HS_DISCONNECT)
+					continue;
+				if (cp->to_abort) {
+					target = cp->target;
+					break;
+				}
+			}
+		}
+
+		/*
+		 *  If some target is to be selected, 
+		 *  prepare and start the selection.
+		 */
+		if (target != -1) {
+			tp = &np->target[target];
+			np->abrt_sel.sel_id	= target;
+			np->abrt_sel.sel_scntl3 = tp->head.wval;
+			np->abrt_sel.sel_sxfer  = tp->head.sval;
+			OUTL(np, nc_dsa, np->hcb_ba);
+			OUTL_DSP(np, SCRIPTB_BA(np, sel_for_abort));
+			return;
+		}
+
+		/*
+		 *  Now look for a CCB to abort that hasn't started yet.
+		 *  Btw, the SCRIPTS processor is still stopped, so 
+		 *  we are not racing with it.
+		 */
+		i = 0;
+		cp = NULL;
+		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+			if (cp->host_status != HS_BUSY &&
+			    cp->host_status != HS_NEGOTIATE)
+				continue;
+			if (!cp->to_abort)
+				continue;
+#ifdef SYM_CONF_IARB_SUPPORT
+			/*
+			 *    If we are using IMMEDIATE ARBITRATION, we do not 
+			 *    want to cancel the last queued CCB, since the 
+			 *    SCRIPTS may have anticipated the selection.
+			 */
+			if (cp == np->last_cp) {
+				cp->to_abort = 0;
+				continue;
+			}
+#endif
+			i = 1;	/* Means we have found some */
+			break;
+		}
+		if (!i) {
+			/*
+			 *  We are done, so we do not need 
+			 *  to synchronize with the SCRIPTS any longer.
+			 *  Remove the SEM flag from the ISTAT.
+			 */
+			np->istat_sem = 0;
+			OUTB(np, nc_istat, SIGP);
+			break;
+		}
+		/*
+		 *  Compute index of next position in the start 
+		 *  queue the SCRIPTS intends to start and dequeue 
+		 *  all CCBs for that device that haven't been started.
+		 */
+		i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
+		i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
+
+		/*
+		 *  Make sure at least our IO to abort has been dequeued.
+		 */
+#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
+		assert(i && sym_get_cam_status(cp->cmd) == CAM_REQUEUE_REQ);
+#else
+		sym_remque(&cp->link_ccbq);
+		sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
+#endif
+		/*
+		 *  Keep track of the reason for the abort in the CAM status.
+		 */
+		if (cp->to_abort == 2)
+			sym_set_cam_status(cp->cmd, CAM_CMD_TIMEOUT);
+		else
+			sym_set_cam_status(cp->cmd, CAM_REQ_ABORTED);
+
+		/*
+		 *  Complete with error everything that we have dequeued.
+	 	 */
+		sym_flush_comp_queue(np, 0);
+		break;
+	/*
+	 *  The SCRIPTS processor has selected a target 
+	 *  for which we may have some manual recovery to perform.
+	 */
+	case SIR_TARGET_SELECTED:
+		target = INB(np, nc_sdid) & 0xf;
+		tp = &np->target[target];
+
+		np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg));
+
+		/*
+		 *  If the target is to be reset, prepare a 
+		 *  M_RESET message and clear the to_reset flag 
+		 *  since we do not expect this operation to fail.
+		 */
+		if (tp->to_reset) {
+			np->abrt_msg[0] = M_RESET;
+			np->abrt_tbl.size = 1;
+			tp->to_reset = 0;
+			break;
+		}
+
+		/*
+		 *  Otherwise, look for some logical unit to be cleared.
+		 */
+		if (tp->lun0p && tp->lun0p->to_clear)
+			lun = 0;
+		else if (tp->lunmp) {
+			for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
+				if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
+					lun = k;
+					break;
+				}
+			}
+		}
+
+		/*
+		 *  If a logical unit is to be cleared, prepare 
+		 *  an IDENTIFY(lun) + ABORT MESSAGE.
+		 */
+		if (lun != -1) {
+			struct sym_lcb *lp = sym_lp(tp, lun);
+			lp->to_clear = 0; /* We don't expect to fail here */
+			np->abrt_msg[0] = IDENTIFY(0, lun);
+			np->abrt_msg[1] = M_ABORT;
+			np->abrt_tbl.size = 2;
+			break;
+		}
+
+		/*
+		 *  Otherwise, look for some disconnected job to 
+		 *  abort for this target.
+		 */
+		i = 0;
+		cp = NULL;
+		FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+			if (cp->host_status != HS_DISCONNECT)
+				continue;
+			if (cp->target != target)
+				continue;
+			if (!cp->to_abort)
+				continue;
+			i = 1;	/* Means we have some */
+			break;
+		}
+
+		/*
+		 *  If we have none, probably because the device has 
+		 *  completed the command before we won arbitration,
+		 *  send an M_ABORT message without IDENTIFY.
+		 *  According to the specs, the device must just 
+		 *  disconnect the BUS and not abort any task.
+		 */
+		if (!i) {
+			np->abrt_msg[0] = M_ABORT;
+			np->abrt_tbl.size = 1;
+			break;
+		}
+
+		/*
+		 *  We have some task to abort.
+		 *  Set the IDENTIFY(lun)
+		 */
+		np->abrt_msg[0] = IDENTIFY(0, cp->lun);
+
+		/*
+		 *  If we want to abort an untagged command, we 
+		 *  will send an IDENTIFY + M_ABORT.
+		 *  Otherwise (tagged command), we will send 
+		 *  an IDENTIFY + task attributes + ABORT TAG.
+		 */
+		if (cp->tag == NO_TAG) {
+			np->abrt_msg[1] = M_ABORT;
+			np->abrt_tbl.size = 2;
+		} else {
+			np->abrt_msg[1] = cp->scsi_smsg[1];
+			np->abrt_msg[2] = cp->scsi_smsg[2];
+			np->abrt_msg[3] = M_ABORT_TAG;
+			np->abrt_tbl.size = 4;
+		}
+		/*
+		 *  Keep track of the software timeout condition, since the 
+		 *  peripheral driver may not count retries for abort 
+		 *  conditions not due to a timeout.
+		 */
+		if (cp->to_abort == 2)
+			sym_set_cam_status(cp->cmd, CAM_CMD_TIMEOUT);
+		cp->to_abort = 0; /* We do not expect to fail here */
+		break;
+
+	/*
+	 *  The target has accepted our message and switched 
+	 *  to BUS FREE phase as we expected.
+	 */
+	case SIR_ABORT_SENT:
+		target = INB(np, nc_sdid) & 0xf;
+		tp = &np->target[target];
+		starget = tp->sdev->sdev_target;
+		
+		/*
+		 *  If we didn't abort anything, leave here.
+		 */
+		if (np->abrt_msg[0] == M_ABORT)
+			break;
+
+		/*
+		 *  If we sent a M_RESET, then a hardware reset has 
+		 *  been performed by the target.
+		 *  - Reset everything to async 8 bit
+		 *  - Tell ourselves to negotiate next time :-)
+		 *  - Prepare to clear all disconnected CCBs for 
+		 *    this target from our task list (lun=task=-1)
+		 */
+		lun = -1;
+		task = -1;
+		if (np->abrt_msg[0] == M_RESET) {
+			tp->head.sval = 0;
+			tp->head.wval = np->rv_scntl3;
+			tp->head.uval = 0;
+			spi_period(starget) = 0;
+			spi_offset(starget) = 0;
+			spi_width(starget) = 0;
+			spi_iu(starget) = 0;
+			spi_dt(starget) = 0;
+			spi_qas(starget) = 0;
+			tp->tgoal.check_nego = 1;
+		}
+
+		/*
+		 *  Otherwise, check for the LUN and TASK(s) 
+		 *  concerned by the cancellation.
+		 *  If it is not ABORT_TAG then it is CLEAR_QUEUE 
+		 *  or an ABORT message :-)
+		 */
+		else {
+			lun = np->abrt_msg[0] & 0x3f;
+			if (np->abrt_msg[1] == M_ABORT_TAG)
+				task = np->abrt_msg[2];
+		}
+
+		/*
+		 *  Complete all the CCBs the device should have 
+		 *  aborted due to our 'kiss of death' message.
+		 */
+		i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
+		sym_dequeue_from_squeue(np, i, target, lun, -1);
+		sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task);
+		sym_flush_comp_queue(np, 0);
+
+ 		/*
+		 *  If we sent a BDR, make upper layer aware of that.
+ 		 */
+		if (np->abrt_msg[0] == M_RESET)
+			sym_xpt_async_sent_bdr(np, target);
+		break;
+	}
+
+	/*
+	 *  Print to the log the message we intend to send.
+	 */
+	if (num == SIR_TARGET_SELECTED) {
+		dev_info(&tp->sdev->sdev_target->dev, "control msgout:");
+		sym_printl_hex(np->abrt_msg, np->abrt_tbl.size);
+		np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
+	}
+
+	/*
+	 *  Let the SCRIPTS processor continue.
+	 */
+	OUTONB_STD();
+}
+
+/*
+ *  Gerard's alchemy:) that deals with the data 
+ *  pointer for both MDP and the residual calculation.
+ *
+ *  I didn't want to bloat the code by more than 200 
+ *  lines for the handling of both MDP and the residual.
+ *  This has been achieved by using a data pointer 
+ *  representation consisting of an index into the data 
+ *  array (dp_sg) and a negative offset (dp_ofs) that 
+ *  have the following meaning:
+ *
+ *  - dp_sg = SYM_CONF_MAX_SG
+ *    we are at the end of the data script.
+ *  - dp_sg < SYM_CONF_MAX_SG
+ *    dp_sg points to the next entry of the scatter array 
+ *    we want to transfer.
+ *  - dp_ofs < 0
+ *    dp_ofs represents the residual bytes of the 
+ *    previous scatter entry that we will send first.
+ *  - dp_ofs = 0
+ *    no residual to send first.
+ *
+ *  The function sym_evaluate_dp() accepts an arbitrary 
+ *  offset (basically from the MDP message) and returns 
+ *  the corresponding values of dp_sg and dp_ofs.
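+ *
+ *  For illustration (hypothetical numbers): if 1024 bytes of the 
+ *  4096-byte scatter entry at index k still remain to be sent, the 
+ *  pointer is represented as dp_sg = k + 1 with dp_ofs = -1024, 
+ *  while dp_ofs = 0 and dp_sg = SYM_CONF_MAX_SG means that all 
+ *  the data has been transferred.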
+ */
+
+static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int *ofs)
+{
+	u32	dp_scr;
+	int	dp_ofs, dp_sg, dp_sgmin;
+	int	tmp;
+	struct sym_pmc *pm;
+
+	/*
+	 *  Compute the resulting data pointer in terms of a script 
+	 *  address within some DATA script and a signed byte offset.
+	 */
+	dp_scr = scr;
+	dp_ofs = *ofs;
+	if	(dp_scr == SCRIPTA_BA(np, pm0_data))
+		pm = &cp->phys.pm0;
+	else if (dp_scr == SCRIPTA_BA(np, pm1_data))
+		pm = &cp->phys.pm1;
+	else
+		pm = NULL;
+
+	if (pm) {
+		dp_scr  = scr_to_cpu(pm->ret);
+		dp_ofs -= scr_to_cpu(pm->sg.size);
+	}
+
+	/*
+	 *  If we are auto-sensing, then we are done.
+	 */
+	if (cp->host_flags & HF_SENSE) {
+		*ofs = dp_ofs;
+		return 0;
+	}
+
+	/*
+	 *  Deduce the index of the sg entry.
+	 *  Keep track of the index of the first valid entry.
+	 *  If result is dp_sg = SYM_CONF_MAX_SG, then we are at the 
+	 *  end of the data.
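+	 *  (Each scatter entry is described by two 32-bit SCRIPTS 
+	 *  words, i.e. 2*4 bytes of data script, hence the division 
+	 *  by 8 below.)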
+	 */
+	tmp = scr_to_cpu(sym_goalp(cp));
+	dp_sg = SYM_CONF_MAX_SG;
+	if (dp_scr != tmp)
+		dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
+	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
+
+	/*
+	 *  Move to the sg entry the data pointer belongs to.
+	 *
+	 *  If we are inside the data area, we expect result to be:
+	 *
+	 *  Either,
+	 *      dp_ofs = 0 and dp_sg is the index of the sg entry
+	 *      the data pointer belongs to (or the end of the data)
+	 *  Or,
+	 *      dp_ofs < 0 and dp_sg is the index of the sg entry 
+	 *      the data pointer belongs to + 1.
+	 */
+	if (dp_ofs < 0) {
+		int n;
+		while (dp_sg > dp_sgmin) {
+			--dp_sg;
+			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+			n = dp_ofs + (tmp & 0xffffff);
+			if (n > 0) {
+				++dp_sg;
+				break;
+			}
+			dp_ofs = n;
+		}
+	}
+	else if (dp_ofs > 0) {
+		while (dp_sg < SYM_CONF_MAX_SG) {
+			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+			dp_ofs -= (tmp & 0xffffff);
+			++dp_sg;
+			if (dp_ofs <= 0)
+				break;
+		}
+	}
+
+	/*
+	 *  Make sure the data pointer is inside the data area.
+	 *  If not, return some error.
+	 */
+	if	(dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
+		goto out_err;
+	else if	(dp_sg > SYM_CONF_MAX_SG ||
+		 (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0))
+		goto out_err;
+
+	/*
+	 *  Save the extreme pointer if needed.
+	 */
+	if (dp_sg > cp->ext_sg ||
+            (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
+		cp->ext_sg  = dp_sg;
+		cp->ext_ofs = dp_ofs;
+	}
+
+	/*
+	 *  Return data.
+	 */
+	*ofs = dp_ofs;
+	return dp_sg;
+
+out_err:
+	return -1;
+}
+
+/*
+ *  chip handler for MODIFY DATA POINTER MESSAGE
+ *
+ *  We also call this function on IGNORE WIDE RESIDUE 
+ *  messages that do not match a SWIDE full condition.
+ *  Btw, we assume in that situation that such a message 
+ *  is equivalent to a MODIFY DATA POINTER (offset=-1).
+ */
+
+static void sym_modify_dp(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp, int ofs)
+{
+	int dp_ofs	= ofs;
+	u32	dp_scr	= sym_get_script_dp (np, cp);
+	u32	dp_ret;
+	u32	tmp;
+	u_char	hflags;
+	int	dp_sg;
+	struct	sym_pmc *pm;
+
+	/*
+	 *  Not supported for auto-sense.
+	 */
+	if (cp->host_flags & HF_SENSE)
+		goto out_reject;
+
+	/*
+	 *  Apply our alchemy:) (see comments in sym_evaluate_dp()) 
+	 *  to the resulting data pointer.
+	 */
+	dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs);
+	if (dp_sg < 0)
+		goto out_reject;
+
+	/*
+	 *  And our alchemy:) allows us to easily calculate the data 
+	 *  script address we want to return for the next data phase.
+	 */
+	dp_ret = cpu_to_scr(sym_goalp(cp));
+	dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);
+
+	/*
+	 *  If the offset into the scatter entry is zero, we do not 
+	 *  need a context for the new current data pointer.
+	 */
+	if (dp_ofs == 0) {
+		dp_scr = dp_ret;
+		goto out_ok;
+	}
+
+	/*
+	 *  Get a context for the new current data pointer.
+	 */
+	hflags = INB(np, HF_PRT);
+
+	if (hflags & HF_DP_SAVED)
+		hflags ^= HF_ACT_PM;
+
+	if (!(hflags & HF_ACT_PM)) {
+		pm  = &cp->phys.pm0;
+		dp_scr = SCRIPTA_BA(np, pm0_data);
+	}
+	else {
+		pm = &cp->phys.pm1;
+		dp_scr = SCRIPTA_BA(np, pm1_data);
+	}
+
+	hflags &= ~(HF_DP_SAVED);
+
+	OUTB(np, HF_PRT, hflags);
+
+	/*
+	 *  Set up the new current data pointer.
+	 *  dp_ofs < 0 here, and for the next data phase, we 
+	 *  want to transfer part of the data of the sg entry 
+	 *  with index dp_sg-1 prior to returning 
+	 *  to the main data script.
+	 */
+	pm->ret = cpu_to_scr(dp_ret);
+	tmp  = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
+	tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
+	pm->sg.addr = cpu_to_scr(tmp);
+	pm->sg.size = cpu_to_scr(-dp_ofs);
+
+out_ok:
+	sym_set_script_dp (np, cp, dp_scr);
+	OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+	return;
+
+out_reject:
+	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
+}
+
+
+/*
+ *  chip calculation of the data residual.
+ *
+ *  As I used to say, the requirement of data residual 
+ *  in SCSI is broken, useless and cannot be achieved 
+ *  without huge complexity.
+ *  But most OSes and even the official CAM require it.
+ *  When stupidity happens to be so widespread inside 
+ *  a community, it gets hard to convince.
+ *
+ *  Anyway, I don't care, since I am not going to use 
+ *  any software that considers this data residual to be 
+ *  relevant information. :)
+ */
+
+int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
+{
+	int dp_sg, dp_sgmin, resid = 0;
+	int dp_ofs = 0;
+
+	/*
+	 *  Check for some data lost or just thrown away.
+	 *  We are not required to be quite accurate in this 
+	 *  situation. Btw, if we are odd for output and the 
+	 *  device claims some more data, it may well happen 
+	 *  that our residual is zero. :-)
+	 */
+	if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
+		if (cp->xerr_status & XE_EXTRA_DATA)
+			resid -= cp->extra_bytes;
+		if (cp->xerr_status & XE_SODL_UNRUN)
+			++resid;
+		if (cp->xerr_status & XE_SWIDE_OVRUN)
+			--resid;
+	}
+
+	/*
+	 *  If all data has been transferred,
+	 *  there is no residual.
+	 */
+	if (cp->phys.head.lastp == sym_goalp(cp))
+		return resid;
+
+	/*
+	 *  If no data transfer occurs, or if the data
+	 *  pointer is weird, return full residual.
+	 */
+	if (cp->startp == cp->phys.head.lastp ||
+	    sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
+			    &dp_ofs) < 0) {
+		return cp->data_len;
+	}
+
+	/*
+	 *  If we were auto-sensing, then we are done.
+	 */
+	if (cp->host_flags & HF_SENSE) {
+		return -dp_ofs;
+	}
+
+	/*
+	 *  We are now fully comfortable with the computation 
+	 *  of the data residual (2's complement).
+	 */
+	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
+	resid = -cp->ext_ofs;
+	for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
+		u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+		resid += (tmp & 0xffffff);
+	}
+
+	/*
+	 *  Hopefully, the result is not too wrong.
+	 */
+	return resid;
+}
+
+/*
+ *  Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
+ *
+ *  When we try to negotiate, we append the negotiation message
+ *  to the identify and (maybe) simple tag message.
+ *  The host status field is set to HS_NEGOTIATE to mark this
+ *  situation.
+ *
+ *  If the target doesn't answer this message immediately
+ *  (as required by the standard), the SIR_NEGO_FAILED interrupt
+ *  will be raised eventually.
+ *  The handler removes the HS_NEGOTIATE status, and sets the
+ *  negotiated value to the default (async / nowide).
+ *
+ *  If we receive a matching answer immediately, we check it
+ *  for validity, and set the values.
+ *
+ *  If we receive a Reject message immediately, we assume the
+ *  negotiation has failed, and fall back to standard values.
+ *
+ *  If we receive a negotiation message while not in HS_NEGOTIATE
+ *  state, it's a target initiated negotiation. We prepare a
+ *  (hopefully) valid answer, set our parameters, and send back 
+ *  this answer to the target.
+ *
+ *  If the target doesn't fetch the answer (no message out phase),
+ *  we assume the negotiation has failed, and fall back to default
+ *  settings (SIR_NEGO_PROTO interrupt).
+ *
+ *  When we set the values, we adjust them in all ccbs belonging 
+ *  to this target, in the controller's registers, and in the "phys"
+ *  field of the controller's struct sym_hcb.
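+ *
+ *  For reference, the negotiation messages built into np->msgout[] 
+ *  by the handlers below are the standard extended messages: 
+ *  SDTR = { M_EXTENDED, 3, M_X_SYNC_REQ, period, offset }, 
+ *  WDTR = { M_EXTENDED, 2, M_X_WIDE_REQ, width } and 
+ *  PPR  = { M_EXTENDED, 6, M_X_PPR_REQ, period, 0, offset, width, options }.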
+ */
+
+/*
+ *  chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message.
+ */
+static int  
+sym_sync_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
+{
+	int target = cp->target;
+	u_char	chg, ofs, per, fak, div;
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		sym_print_nego_msg(np, target, "sync msgin", np->msgin);
+	}
+
+	/*
+	 *  Get requested values.
+	 */
+	chg = 0;
+	per = np->msgin[3];
+	ofs = np->msgin[4];
+
+	/*
+	 *  Check values against our limits.
+	 */
+	if (ofs) {
+		if (ofs > np->maxoffs)
+			{chg = 1; ofs = np->maxoffs;}
+	}
+
+	if (ofs) {
+		if (per < np->minsync)
+			{chg = 1; per = np->minsync;}
+	}
+
+	/*
+	 *  Get the new chip synchronous parameter values.
+	 */
+	div = fak = 0;
+	if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0)
+		goto reject_it;
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		sym_print_addr(cp->cmd,
+				"sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n",
+				ofs, per, div, fak, chg);
+	}
+
+	/*
+	 *  If it was an answer we want to change, 
+	 *  then it isn't acceptable. Reject it.
+	 */
+	if (!req && chg)
+		goto reject_it;
+
+	/*
+	 *  Apply new values.
+	 */
+	sym_setsync (np, target, ofs, per, div, fak);
+
+	/*
+	 *  It was an answer. We are done.
+	 */
+	if (!req)
+		return 0;
+
+	/*
+	 *  It was a request. Prepare an answer message.
+	 */
+	np->msgout[0] = M_EXTENDED;
+	np->msgout[1] = 3;
+	np->msgout[2] = M_X_SYNC_REQ;
+	np->msgout[3] = per;
+	np->msgout[4] = ofs;
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		sym_print_nego_msg(np, target, "sync msgout", np->msgout);
+	}
+
+	np->msgin [0] = M_NOOP;
+
+	return 0;
+
+reject_it:
+	sym_setsync (np, target, 0, 0, 0, 0);
+	return -1;
+}
+
+static void sym_sync_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
+{
+	int req = 1;
+	int result;
+
+	/*
+	 *  Request or answer?
+	 */
+	if (INB(np, HS_PRT) == HS_NEGOTIATE) {
+		OUTB(np, HS_PRT, HS_BUSY);
+		if (cp->nego_status && cp->nego_status != NS_SYNC)
+			goto reject_it;
+		req = 0;
+	}
+
+	/*
+	 *  Check and apply new values.
+	 */
+	result = sym_sync_nego_check(np, req, cp);
+	if (result)	/* Not acceptable, reject it */
+		goto reject_it;
+	if (req) {	/* Was a request, send response. */
+		cp->nego_status = NS_SYNC;
+		OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
+	}
+	else		/* Was a response, we are done. */
+		OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+	return;
+
+reject_it:
+	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
+}
+
+/*
+ *  chip handler for PARALLEL PROTOCOL REQUEST (PPR) message.
+ */
+static int 
+sym_ppr_nego_check(struct sym_hcb *np, int req, int target)
+{
+	struct sym_tcb *tp = &np->target[target];
+	unsigned char fak, div;
+	int dt, chg = 0;
+
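+	/*
+	 *  PPR extended message layout as read below: msgin[3] is the 
+	 *  transfer period factor, msgin[5] the REQ/ACK offset, 
+	 *  msgin[6] the transfer width exponent and msgin[7] the 
+	 *  protocol options (msgin[4] is reserved).
+	 */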
+	unsigned char per = np->msgin[3];
+	unsigned char ofs = np->msgin[5];
+	unsigned char wide = np->msgin[6];
+	unsigned char opts = np->msgin[7] & PPR_OPT_MASK;
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		sym_print_nego_msg(np, target, "ppr msgin", np->msgin);
+	}
+
+	/*
+	 *  Check values against our limits.
+	 */
+	if (wide > np->maxwide) {
+		chg = 1;
+		wide = np->maxwide;
+	}
+	if (!wide || !(np->features & FE_U3EN))
+		opts = 0;
+
+	if (opts != (np->msgin[7] & PPR_OPT_MASK))
+		chg = 1;
+
+	dt = opts & PPR_OPT_DT;
+
+	if (ofs) {
+		unsigned char maxoffs = dt ? np->maxoffs_dt : np->maxoffs;
+		if (ofs > maxoffs) {
+			chg = 1;
+			ofs = maxoffs;
+		}
+	}
+
+	if (ofs) {
+		unsigned char minsync = dt ? np->minsync_dt : np->minsync;
+		if (per < minsync) {
+			chg = 1;
+			per = minsync;
+		}
+	}
+
+	/*
+	 *  Get the new chip synchronous parameter values.
+	 */
+	div = fak = 0;
+	if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0)
+		goto reject_it;
+
+	/*
+	 *  If it was an answer we want to change, 
+	 *  then it isn't acceptable. Reject it.
+	 */
+	if (!req && chg)
+		goto reject_it;
+
+	/*
+	 *  Apply new values.
+	 */
+	sym_setpprot(np, target, opts, ofs, per, wide, div, fak);
+
+	/*
+	 *  It was an answer. We are done.
+	 */
+	if (!req)
+		return 0;
+
+	/*
+	 *  It was a request. Prepare an answer message.
+	 */
+	np->msgout[0] = M_EXTENDED;
+	np->msgout[1] = 6;
+	np->msgout[2] = M_X_PPR_REQ;
+	np->msgout[3] = per;
+	np->msgout[4] = 0;
+	np->msgout[5] = ofs;
+	np->msgout[6] = wide;
+	np->msgout[7] = opts;
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		sym_print_nego_msg(np, target, "ppr msgout", np->msgout);
+	}
+
+	np->msgin [0] = M_NOOP;
+
+	return 0;
+
+reject_it:
+	sym_setpprot (np, target, 0, 0, 0, 0, 0, 0);
+	/*
+	 *  If it is a device response that should result in  
+	 *  ST, we may want to try a legacy negotiation later.
+	 */
+	if (!req && !opts) {
+		tp->tgoal.period = per;
+		tp->tgoal.offset = ofs;
+		tp->tgoal.width = wide;
+		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
+		tp->tgoal.check_nego = 1;
+	}
+	return -1;
+}
+
+static void sym_ppr_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
+{
+	int req = 1;
+	int result;
+
+	/*
+	 *  Request or answer?
+	 */
+	if (INB(np, HS_PRT) == HS_NEGOTIATE) {
+		OUTB(np, HS_PRT, HS_BUSY);
+		if (cp->nego_status && cp->nego_status != NS_PPR)
+			goto reject_it;
+		req = 0;
+	}
+
+	/*
+	 *  Check and apply new values.
+	 */
+	result = sym_ppr_nego_check(np, req, cp->target);
+	if (result)	/* Not acceptable, reject it */
+		goto reject_it;
+	if (req) {	/* Was a request, send response. */
+		cp->nego_status = NS_PPR;
+		OUTL_DSP(np, SCRIPTB_BA(np, ppr_resp));
+	}
+	else		/* Was a response, we are done. */
+		OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+	return;
+
+reject_it:
+	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
+}
+
+/*
+ *  chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message.
+ */
+static int  
+sym_wide_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
+{
+	int target = cp->target;
+	u_char	chg, wide;
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		sym_print_nego_msg(np, target, "wide msgin", np->msgin);
+	}
+
+	/*
+	 *  Get requested values.
+	 */
+	chg  = 0;
+	wide = np->msgin[3];
+
+	/*
+	 *  Check values against our limits.
+	 */
+	if (wide > np->maxwide) {
+		chg = 1;
+		wide = np->maxwide;
+	}
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		sym_print_addr(cp->cmd, "wdtr: wide=%d chg=%d.\n",
+				wide, chg);
+	}
+
+	/*
+	 *  If it was an answer we want to change, 
+	 *  then it isn't acceptable. Reject it.
+	 */
+	if (!req && chg)
+		goto reject_it;
+
+	/*
+	 *  Apply new values.
+	 */
+	sym_setwide (np, target, wide);
+
+	/*
+	 *  It was an answer. We are done.
+	 */
+	if (!req)
+		return 0;
+
+	/*
+	 *  It was a request. Prepare an answer message.
+	 */
+	np->msgout[0] = M_EXTENDED;
+	np->msgout[1] = 2;
+	np->msgout[2] = M_X_WIDE_REQ;
+	np->msgout[3] = wide;
+
+	np->msgin [0] = M_NOOP;
+
+	if (DEBUG_FLAGS & DEBUG_NEGO) {
+		sym_print_nego_msg(np, target, "wide msgout", np->msgout);
+	}
+
+	return 0;
+
+reject_it:
+	return -1;
+}
+
+static void sym_wide_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
+{
+	int req = 1;
+	int result;
+
+	/*
+	 *  Request or answer?
+	 */
+	if (INB(np, HS_PRT) == HS_NEGOTIATE) {
+		OUTB(np, HS_PRT, HS_BUSY);
+		if (cp->nego_status && cp->nego_status != NS_WIDE)
+			goto reject_it;
+		req = 0;
+	}
+
+	/*
+	 *  Check and apply new values.
+	 */
+	result = sym_wide_nego_check(np, req, cp);
+	if (result)	/* Not acceptable, reject it */
+		goto reject_it;
+	if (req) {	/* Was a request, send response. */
+		cp->nego_status = NS_WIDE;
+		OUTL_DSP(np, SCRIPTB_BA(np, wdtr_resp));
+	} else {		/* Was a response. */
+		/*
+		 * Negotiate for SYNC immediately after WIDE response.
+		 * This allows us to negotiate for both WIDE and SYNC on 
+		 * a single SCSI command (Suggested by Justin Gibbs).
+		 */
+		if (tp->tgoal.offset) {
+			np->msgout[0] = M_EXTENDED;
+			np->msgout[1] = 3;
+			np->msgout[2] = M_X_SYNC_REQ;
+			np->msgout[3] = tp->tgoal.period;
+			np->msgout[4] = tp->tgoal.offset;
+
+			if (DEBUG_FLAGS & DEBUG_NEGO) {
+				sym_print_nego_msg(np, cp->target,
+				                   "sync msgout", np->msgout);
+			}
+
+			cp->nego_status = NS_SYNC;
+			OUTB(np, HS_PRT, HS_NEGOTIATE);
+			OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
+			return;
+		} else
+			OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+	}
+
+	return;
+
+reject_it:
+	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
+}
+
+/*
+ *  Reset DT, SYNC or WIDE to default settings.
+ *
+ *  Called when a negotiation does not succeed either 
+ *  on rejection or on protocol error.
+ *
+ *  A target that understands a PPR message should never 
+ *  reject it, and a target mishandling it is very unlikely.
+ *  So, if a PPR causes problems, we may just want to 
+ *  try a legacy negotiation later.
+ */
+static void sym_nego_default(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
+{
+	switch (cp->nego_status) {
+	case NS_PPR:
+#if 0
+		sym_setpprot (np, cp->target, 0, 0, 0, 0, 0, 0);
+#else
+		if (tp->tgoal.period < np->minsync)
+			tp->tgoal.period = np->minsync;
+		if (tp->tgoal.offset > np->maxoffs)
+			tp->tgoal.offset = np->maxoffs;
+		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
+		tp->tgoal.check_nego = 1;
+#endif
+		break;
+	case NS_SYNC:
+		sym_setsync (np, cp->target, 0, 0, 0, 0);
+		break;
+	case NS_WIDE:
+		sym_setwide (np, cp->target, 0);
+		break;
+	}
+	np->msgin [0] = M_NOOP;
+	np->msgout[0] = M_NOOP;
+	cp->nego_status = 0;
+}
+
+/*
+ *  chip handler for MESSAGE REJECT received in response to 
+ *  PPR, WIDE or SYNCHRONOUS negotiation.
+ */
+static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
+{
+	sym_nego_default(np, tp, cp);
+	OUTB(np, HS_PRT, HS_BUSY);
+}
+
+/*
+ *  chip exception handler for programmed interrupts.
+ */
+static void sym_int_sir (struct sym_hcb *np)
+{
+	u_char	num	= INB(np, nc_dsps);
+	u32	dsa	= INL(np, nc_dsa);
+	struct sym_ccb *cp	= sym_ccb_from_dsa(np, dsa);
+	u_char	target	= INB(np, nc_sdid) & 0x0f;
+	struct sym_tcb *tp	= &np->target[target];
+	int	tmp;
+
+	if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);
+
+	switch (num) {
+#if   SYM_CONF_DMA_ADDRESSING_MODE == 2
+	/*
+	 *  SCRIPTS tell us that we may have to update 
+	 *  64 bit DMA segment registers.
+	 */
+	case SIR_DMAP_DIRTY:
+		sym_update_dmap_regs(np);
+		goto out;
+#endif
+	/*
+	 *  Command has been completed with error condition 
+	 *  or has been auto-sensed.
+	 */
+	case SIR_COMPLETE_ERROR:
+		sym_complete_error(np, cp);
+		return;
+	/*
+	 *  The C code is currently trying to recover from something.
+	 *  Typically, the user wants to abort some command.
+	 */
+	case SIR_SCRIPT_STOPPED:
+	case SIR_TARGET_SELECTED:
+	case SIR_ABORT_SENT:
+		sym_sir_task_recovery(np, num);
+		return;
+	/*
+	 *  The device didn't go to MSG OUT phase after having 
+	 *  been selected with ATN. We do not want to handle that.
+	 */
+	case SIR_SEL_ATN_NO_MSG_OUT:
+		printf ("%s:%d: No MSG OUT phase after selection with ATN.\n",
+			sym_name (np), target);
+		goto out_stuck;
+	/*
+	 *  The device didn't switch to MSG IN phase after 
+	 *  having reselected the initiator.
+	 */
+	case SIR_RESEL_NO_MSG_IN:
+		printf ("%s:%d: No MSG IN phase after reselection.\n",
+			sym_name (np), target);
+		goto out_stuck;
+	/*
+	 *  After reselection, the device sent a message that wasn't 
+	 *  an IDENTIFY.
+	 */
+	case SIR_RESEL_NO_IDENTIFY:
+		printf ("%s:%d: No IDENTIFY after reselection.\n",
+			sym_name (np), target);
+		goto out_stuck;
+	/*
+	 *  The device reselected a LUN we do not know about.
+	 */
+	case SIR_RESEL_BAD_LUN:
+		np->msgout[0] = M_RESET;
+		goto out;
+	/*
+	 *  The device reselected for an untagged nexus and we 
+	 *  do not have one.
+	 */
+	case SIR_RESEL_BAD_I_T_L:
+		np->msgout[0] = M_ABORT;
+		goto out;
+	/*
+	 *  The device reselected for a tagged nexus that we do 
+	 *  not have.
+	 */
+	case SIR_RESEL_BAD_I_T_L_Q:
+		np->msgout[0] = M_ABORT_TAG;
+		goto out;
+	/*
+	 *  The SCRIPTS let us know that the device has grabbed 
+	 *  our message and will abort the job.
+	 */
+	case SIR_RESEL_ABORTED:
+		np->lastmsg = np->msgout[0];
+		np->msgout[0] = M_NOOP;
+		printf ("%s:%d: message %x sent on bad reselection.\n",
+			sym_name (np), target, np->lastmsg);
+		goto out;
+	/*
+	 *  The SCRIPTS let us know that a message has been 
+	 *  successfully sent to the device.
+	 */
+	case SIR_MSG_OUT_DONE:
+		np->lastmsg = np->msgout[0];
+		np->msgout[0] = M_NOOP;
+		/* Should we really care about that? */
+		if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
+			if (cp) {
+				cp->xerr_status &= ~XE_PARITY_ERR;
+				if (!cp->xerr_status)
+					OUTOFFB(np, HF_PRT, HF_EXT_ERR);
+			}
+		}
+		goto out;
+	/*
+	 *  The device didn't send a GOOD SCSI status.
+	 *  We may have some work to do before allowing 
+	 *  the SCRIPTS processor to continue.
+	 */
+	case SIR_BAD_SCSI_STATUS:
+		if (!cp)
+			goto out;
+		sym_sir_bad_scsi_status(np, num, cp);
+		return;
+	/*
+	 *  We are asked by the SCRIPTS to prepare a 
+	 *  REJECT message.
+	 */
+	case SIR_REJECT_TO_SEND:
+		sym_print_msg(cp, "M_REJECT to send for ", np->msgin);
+		np->msgout[0] = M_REJECT;
+		goto out;
+	/*
+	 *  We have been ODD at the end of a DATA IN 
+	 *  transfer and the device didn't send an 
+	 *  IGNORE WIDE RESIDUE message.
+	 *  It is a data overrun condition.
+	 */
+	case SIR_SWIDE_OVERRUN:
+		if (cp) {
+			OUTONB(np, HF_PRT, HF_EXT_ERR);
+			cp->xerr_status |= XE_SWIDE_OVRUN;
+		}
+		goto out;
+	/*
+	 *  We have been ODD at the end of a DATA OUT 
+	 *  transfer.
+	 *  It is a data underrun condition.
+	 */
+	case SIR_SODL_UNDERRUN:
+		if (cp) {
+			OUTONB(np, HF_PRT, HF_EXT_ERR);
+			cp->xerr_status |= XE_SODL_UNRUN;
+		}
+		goto out;
+	/*
+	 *  The device wants us to transfer more data than 
+	 *  expected or in the wrong direction.
+	 *  The number of extra bytes is in scratcha.
+	 *  It is a data overrun condition.
+	 */
+	case SIR_DATA_OVERRUN:
+		if (cp) {
+			OUTONB(np, HF_PRT, HF_EXT_ERR);
+			cp->xerr_status |= XE_EXTRA_DATA;
+			cp->extra_bytes += INL(np, nc_scratcha);
+		}
+		goto out;
+	/*
+	 *  The device switched to an illegal phase (4/5).
+	 */
+	case SIR_BAD_PHASE:
+		if (cp) {
+			OUTONB(np, HF_PRT, HF_EXT_ERR);
+			cp->xerr_status |= XE_BAD_PHASE;
+		}
+		goto out;
+	/*
+	 *  We received a message.
+	 */
+	case SIR_MSG_RECEIVED:
+		if (!cp)
+			goto out_stuck;
+		switch (np->msgin [0]) {
+		/*
+		 *  We received an extended message.
+		 *  We handle MODIFY DATA POINTER, SDTR, WDTR and PPR, 
+		 *  and reject all other extended messages.
+		 */
+		case M_EXTENDED:
+			switch (np->msgin [2]) {
+			case M_X_MODIFY_DP:
+				if (DEBUG_FLAGS & DEBUG_POINTER)
+					sym_print_msg(cp,"modify DP",np->msgin);
+				tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + 
+				      (np->msgin[5]<<8)  + (np->msgin[6]);
+				sym_modify_dp(np, tp, cp, tmp);
+				return;
+			case M_X_SYNC_REQ:
+				sym_sync_nego(np, tp, cp);
+				return;
+			case M_X_PPR_REQ:
+				sym_ppr_nego(np, tp, cp);
+				return;
+			case M_X_WIDE_REQ:
+				sym_wide_nego(np, tp, cp);
+				return;
+			default:
+				goto out_reject;
+			}
+			break;
+		/*
+		 *  We received a 1 or 2 byte message not handled by SCRIPTS.
+		 *  We are only expecting MESSAGE REJECT and IGNORE WIDE 
+		 *  RESIDUE messages that haven't been anticipated by 
+		 *  SCRIPTS on SWIDE full condition. Unanticipated IGNORE 
+		 *  WIDE RESIDUE messages are aliased as MODIFY DP (-1).
+		 */
+		case M_IGN_RESIDUE:
+			if (DEBUG_FLAGS & DEBUG_POINTER)
+				sym_print_msg(cp,"ign wide residue", np->msgin);
+			if (cp->host_flags & HF_SENSE)
+				OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+			else
+				sym_modify_dp(np, tp, cp, -1);
+			return;
+		case M_REJECT:
+			if (INB(np, HS_PRT) == HS_NEGOTIATE)
+				sym_nego_rejected(np, tp, cp);
+			else {
+				sym_print_addr(cp->cmd,
+					"M_REJECT received (%x:%x).\n",
+					scr_to_cpu(np->lastmsg), np->msgout[0]);
+			}
+			goto out_clrack;
+			break;
+		default:
+			goto out_reject;
+		}
+		break;
+	/*
+	 *  We received an unknown message.
+	 *  Ignore all MSG IN phases and reject it.
+	 */
+	case SIR_MSG_WEIRD:
+		sym_print_msg(cp, "WEIRD message received", np->msgin);
+		OUTL_DSP(np, SCRIPTB_BA(np, msg_weird));
+		return;
+	/*
+	 *  Negotiation failed.
+	 *  The target did not send us the reply.
+	 *  Remove the HS_NEGOTIATE status.
+	 */
+	case SIR_NEGO_FAILED:
+		OUTB(np, HS_PRT, HS_BUSY);
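+		/* fall through */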
+	/*
+	 *  Negotiation failed.
+	 *  The target does not want our answer message.
+	 */
+	case SIR_NEGO_PROTO:
+		sym_nego_default(np, tp, cp);
+		goto out;
+	}
+
+out:
+	OUTONB_STD();
+	return;
+out_reject:
+	OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
+	return;
+out_clrack:
+	OUTL_DSP(np, SCRIPTA_BA(np, clrack));
+	return;
+out_stuck:
+	return;
+}
+
+/*
+ *  Acquire a control block
+ */
+struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order)
+{
+	u_char tn = cmd->device->id;
+	u_char ln = cmd->device->lun;
+	struct sym_tcb *tp = &np->target[tn];
+	struct sym_lcb *lp = sym_lp(tp, ln);
+	u_short tag = NO_TAG;
+	SYM_QUEHEAD *qp;
+	struct sym_ccb *cp = NULL;
+
+	/*
+	 *  Look for a free CCB
+	 */
+	if (sym_que_empty(&np->free_ccbq))
+		sym_alloc_ccb(np);
+	qp = sym_remque_head(&np->free_ccbq);
+	if (!qp)
+		goto out;
+	cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+
+#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
+	/*
+	 *  If the LCB is not yet available and the LUN
+	 *  has been probed ok, try to allocate the LCB.
+	 */
+	if (!lp && sym_is_bit(tp->lun_map, ln)) {
+		lp = sym_alloc_lcb(np, tn, ln);
+		if (!lp)
+			goto out_free;
+	}
+#endif
+
+	/*
+	 *  If the LCB is not available here, then the 
+	 *  logical unit has not yet been discovered. For such 
+	 *  units we only accept 1 SCSI IO per logical unit, 
+	 *  since we cannot allow disconnections.
+	 */
+	if (!lp) {
+		if (!sym_is_bit(tp->busy0_map, ln))
+			sym_set_bit(tp->busy0_map, ln);
+		else
+			goto out_free;
+	} else {
+		/*
+		 *  If we have been asked for a tagged command.
+		 */
+		if (tag_order) {
+			/*
+			 *  Debugging purpose.
+			 */
+#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
+			assert(lp->busy_itl == 0);
+#endif
+			/*
+			 *  Allocate resources for tags if not yet.
+			 */
+			if (!lp->cb_tags) {
+				sym_alloc_lcb_tags(np, tn, ln);
+				if (!lp->cb_tags)
+					goto out_free;
+			}
+			/*
+			 *  Get a tag for this SCSI IO and set up
+			 *  the CCB bus address for reselection, 
+			 *  and count it for this LUN.
+			 *  Toggle reselect path to tagged.
+			 */
+			if (lp->busy_itlq < SYM_CONF_MAX_TASK) {
+				tag = lp->cb_tags[lp->ia_tag];
+				if (++lp->ia_tag == SYM_CONF_MAX_TASK)
+					lp->ia_tag = 0;
+				++lp->busy_itlq;
+#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
+				lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba);
+				lp->head.resel_sa =
+					cpu_to_scr(SCRIPTA_BA(np, resel_tag));
+#endif
+#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
+				cp->tags_si = lp->tags_si;
+				++lp->tags_sum[cp->tags_si];
+				++lp->tags_since;
+#endif
+			}
+			else
+				goto out_free;
+		}
+		/*
+		 *  This command will not be tagged.
+		 *  If we already have either a tagged or untagged 
+		 *  one, refuse to overlap this untagged one.
+		 */
+		else {
+			/*
+			 *  Debugging purpose.
+			 */
+#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
+			assert(lp->busy_itl == 0 && lp->busy_itlq == 0);
+#endif
+			/*
+			 *  Count this nexus for this LUN.
+			 *  Set up the CCB bus address for reselection.
+			 *  Toggle reselect path to untagged.
+			 */
+			++lp->busy_itl;
+#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
+			if (lp->busy_itl == 1) {
+				lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
+				lp->head.resel_sa =
+				      cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
+			}
+			else
+				goto out_free;
+#endif
+		}
+	}
+	/*
+	 *  Put the CCB into the busy queue.
+	 */
+	sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+	if (lp) {
+		sym_remque(&cp->link2_ccbq);
+		sym_insque_tail(&cp->link2_ccbq, &lp->waiting_ccbq);
+	}
+
+#endif
+	/*
+	 *  Remember all the information needed to free this CCB.
+	 */
+	cp->to_abort = 0;
+	cp->tag	   = tag;
+	cp->order  = tag_order;
+	cp->target = tn;
+	cp->lun    = ln;
+
+	if (DEBUG_FLAGS & DEBUG_TAGS) {
+		sym_print_addr(cmd, "ccb @%p using tag %d.\n", cp, tag);
+	}
+
+out:
+	return cp;
+out_free:
+	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
+	return NULL;
+}
+
+/*
+ *  Release one control block
+ */
+void sym_free_ccb (struct sym_hcb *np, struct sym_ccb *cp)
+{
+	struct sym_tcb *tp = &np->target[cp->target];
+	struct sym_lcb *lp = sym_lp(tp, cp->lun);
+
+	if (DEBUG_FLAGS & DEBUG_TAGS) {
+		sym_print_addr(cp->cmd, "ccb @%p freeing tag %d.\n",
+				cp, cp->tag);
+	}
+
+	/*
+	 *  If LCB available,
+	 */
+	if (lp) {
+		/*
+		 *  If tagged, release the tag and set the reselect path.
+		 */
+		if (cp->tag != NO_TAG) {
+#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
+			--lp->tags_sum[cp->tags_si];
+#endif
+			/*
+			 *  Free the tag value.
+			 */
+			lp->cb_tags[lp->if_tag] = cp->tag;
+			if (++lp->if_tag == SYM_CONF_MAX_TASK)
+				lp->if_tag = 0;
+			/*
+			 *  Make the reselect path invalid, 
+			 *  and uncount this CCB.
+			 */
+			lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba);
+			--lp->busy_itlq;
+		} else {	/* Untagged */
+			/*
+			 *  Make the reselect path invalid, 
+			 *  and uncount this CCB.
+			 */
+			lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
+			--lp->busy_itl;
+		}
+		/*
+		 *  If no JOB active, make the LUN reselect path invalid.
+		 */
+		if (lp->busy_itlq == 0 && lp->busy_itl == 0)
+			lp->head.resel_sa =
+				cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
+	}
+	/*
+	 *  Otherwise, we only accept 1 IO per LUN.
+	 *  Clear the bit that keeps track of this IO.
+	 */
+	else
+		sym_clr_bit(tp->busy0_map, cp->lun);
+
+	/*
+	 *  We do not queue more than 1 ccb per target 
+	 *  with negotiation at any time. If this ccb was 
+	 *  used for negotiation, clear this info in the tcb.
+	 */
+	if (cp == tp->nego_cp)
+		tp->nego_cp = NULL;
+
+#ifdef SYM_CONF_IARB_SUPPORT
+	/*
+	 *  If we just completed the last queued CCB,
+	 *  clear this info since it is no longer relevant.
+	 */
+	if (cp == np->last_cp)
+		np->last_cp = 0;
+#endif
+
+	/*
+	 *  Make this CCB available.
+	 */
+	cp->cmd = NULL;
+	cp->host_status = HS_IDLE;
+	sym_remque(&cp->link_ccbq);
+	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+	if (lp) {
+		sym_remque(&cp->link2_ccbq);
+		sym_insque_tail(&cp->link2_ccbq, &np->dummy_ccbq);
+		if (cp->started) {
+			if (cp->tag != NO_TAG)
+				--lp->started_tags;
+			else
+				--lp->started_no_tag;
+		}
+	}
+	cp->started = 0;
+#endif
+}
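+
+/*
+ *  A small worked example of the circular tag buffer handled by
+ *  sym_get_ccb() and sym_free_ccb() above, assuming SYM_CONF_MAX_TASK
+ *  is 4 for brevity: cb_tags[] starts as {0,1,2,3} with both ia_tag
+ *  and if_tag at 0. Allocating three CCBs hands out tags 0, 1 and 2
+ *  and moves ia_tag to 3. Freeing tag 1 first writes cb_tags[0] = 1
+ *  and moves if_tag to 1, so tag 1 becomes the next value handed out
+ *  once ia_tag wraps around past index 3.
+ */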
+
+/*
+ *  Allocate a CCB from memory and initialize its fixed part.
+ */
+static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np)
+{
+	struct sym_ccb *cp = NULL;
+	int hcode;
+
+	/*
+	 *  Prevent allocating more CCBs than we can 
+	 *  queue to the controller.
+	 */
+	if (np->actccbs >= SYM_CONF_MAX_START)
+		return NULL;
+
+	/*
+	 *  Allocate memory for this CCB.
+	 */
+	cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
+	if (!cp)
+		goto out_free;
+
+	/*
+	 *  Count it.
+	 */
+	np->actccbs++;
+
+	/*
+	 *  Compute the bus address of this ccb.
+	 */
+	cp->ccb_ba = vtobus(cp);
+
+	/*
+	 *  Insert this ccb into the hashed list.
+	 */
+	hcode = CCB_HASH_CODE(cp->ccb_ba);
+	cp->link_ccbh = np->ccbh[hcode];
+	np->ccbh[hcode] = cp;
+
+	/*
+	 *  Initialize the start and restart actions.
+	 */
+	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA(np, idle));
+	cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
+
+	/*
+	 *  Initialize some other fields.
+	 */
+	cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));
+
+	/*
+	 *  Chain into free ccb queue.
+	 */
+	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
+
+	/*
+	 *  Chain into optional lists.
+	 */
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+	sym_insque_head(&cp->link2_ccbq, &np->dummy_ccbq);
+#endif
+	return cp;
+out_free:
+	if (cp)
+		sym_mfree_dma(cp, sizeof(*cp), "CCB");
+	return NULL;
+}
+
+/*
+ *  Look up a CCB from a DSA value.
+ */
+static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa)
+{
+	int hcode;
+	struct sym_ccb *cp;
+
+	hcode = CCB_HASH_CODE(dsa);
+	cp = np->ccbh[hcode];
+	while (cp) {
+		if (cp->ccb_ba == dsa)
+			break;
+		cp = cp->link_ccbh;
+	}
+
+	return cp;
+}
+
+/*
+ *  Target control block initialisation.
+ *  Nothing important to do at the moment.
+ */
+static void sym_init_tcb (struct sym_hcb *np, u_char tn)
+{
+#if 0	/*  Hmmm... this checking looks paranoid. */
+	/*
+	 *  Check some alignments required by the chip.
+	 */	
+	assert (((offsetof(struct sym_reg, nc_sxfer) ^
+		offsetof(struct sym_tcb, head.sval)) &3) == 0);
+	assert (((offsetof(struct sym_reg, nc_scntl3) ^
+		offsetof(struct sym_tcb, head.wval)) &3) == 0);
+#endif
+}
+
+/*
+ *  Lun control block allocation and initialization.
+ */
+struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
+{
+	struct sym_tcb *tp = &np->target[tn];
+	struct sym_lcb *lp = sym_lp(tp, ln);
+
+	/*
+	 *  Already done, just return.
+	 */
+	if (lp)
+		return lp;
+
+	/*
+	 *  Do not allow LUN control block 
+	 *  allocation for LUNs that have not been probed.
+	 */
+	if (!sym_is_bit(tp->lun_map, ln))
+		return NULL;
+
+	/*
+	 *  Initialize the target control block if not yet.
+	 */
+	sym_init_tcb (np, tn);
+
+	/*
+	 *  Allocate the LCB bus address array.
+	 *  Compute the bus address of this table.
+	 */
+	if (ln && !tp->luntbl) {
+		int i;
+
+		tp->luntbl = sym_calloc_dma(256, "LUNTBL");
+		if (!tp->luntbl)
+			goto fail;
+		for (i = 0 ; i < 64 ; i++)
+			tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
+		tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
+	}
+
+	/*
+	 *  Allocate the table of pointers for LUN(s) > 0, if needed.
+	 */
+	if (ln && !tp->lunmp) {
+		tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
+				GFP_KERNEL);
+		if (!tp->lunmp)
+			goto fail;
+	}
+
+	/*
+	 *  Allocate the lcb.
+	 *  Make it available to the chip.
+	 */
+	lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
+	if (!lp)
+		goto fail;
+	if (ln) {
+		tp->lunmp[ln] = lp;
+		tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
+	}
+	else {
+		tp->lun0p = lp;
+		tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
+	}
+
+	/*
+	 *  Let the itl task point to error handling.
+	 */
+	lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
+
+	/*
+	 *  Set the reselect pattern to our default. :)
+	 */
+	lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
+
+	/*
+	 *  Set user capabilities.
+	 */
+	lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+	/*
+	 *  Initialize device queueing.
+	 */
+	sym_que_init(&lp->waiting_ccbq);
+	sym_que_init(&lp->started_ccbq);
+	lp->started_max   = SYM_CONF_MAX_TASK;
+	lp->started_limit = SYM_CONF_MAX_TASK;
+#endif
+	/*
+	 *  If we are busy, count the IO.
+	 */
+	if (sym_is_bit(tp->busy0_map, ln)) {
+		lp->busy_itl = 1;
+		sym_clr_bit(tp->busy0_map, ln);
+	}
+fail:
+	return lp;
+}
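+
+/*
+ *  A rough sketch of the lookup chain set up above, using LUN 3 of
+ *  target 2 as an illustrative example: the reselection path goes
+ *  from targtbl[2] to the TCB header's luntbl_sa, then to luntbl[3]
+ *  which holds the LCB bus address stored above, while the C code
+ *  reaches the same LCB through np->target[2].lunmp[3].
+ */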
+
+/*
+ *  Allocate LCB resources for tagged command queuing.
+ */
+static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln)
+{
+	struct sym_tcb *tp = &np->target[tn];
+	struct sym_lcb *lp = sym_lp(tp, ln);
+	int i;
+
+	/*
+	 *  If LCB not available, try to allocate it.
+	 */
+	if (!lp && !(lp = sym_alloc_lcb(np, tn, ln)))
+		goto fail;
+
+	/*
+	 *  Allocate the task table and the tag allocation 
+	 *  circular buffer. We want both or none.
+	 */
+	lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
+	if (!lp->itlq_tbl)
+		goto fail;
+	lp->cb_tags = kcalloc(SYM_CONF_MAX_TASK, 1, GFP_KERNEL);
+	if (!lp->cb_tags) {
+		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
+		lp->itlq_tbl = NULL;
+		goto fail;
+	}
+
+	/*
+	 *  Initialize the task table with invalid entries.
+	 */
+	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
+		lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);
+
+	/*
+	 *  Fill up the tag buffer with tag numbers.
+	 */
+	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
+		lp->cb_tags[i] = i;
+
+	/*
+	 *  Make the task table available to SCRIPTS, 
+	 *  and accept tagged commands now.
+	 */
+	lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
+
+	return;
+fail:
+	return;
+}
+
+/*
+ *  Queue a SCSI IO to the controller.
+ */
+int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
+{
+	struct scsi_device *sdev = cmd->device;
+	struct sym_tcb *tp;
+	struct sym_lcb *lp;
+	u_char	*msgptr;
+	u_int   msglen;
+	int can_disconnect;
+
+	/*
+	 *  Keep track of the IO in our CCB.
+	 */
+	cp->cmd = cmd;
+
+	/*
+	 *  Retrieve the target descriptor.
+	 */
+	tp = &np->target[cp->target];
+
+	/*
+	 *  Retrieve the lun descriptor.
+	 */
+	lp = sym_lp(tp, sdev->lun);
+
+	can_disconnect = (cp->tag != NO_TAG) ||
+		(lp && (lp->curr_flags & SYM_DISC_ENABLED));
+
+	msgptr = cp->scsi_smsg;
+	msglen = 0;
+	msgptr[msglen++] = IDENTIFY(can_disconnect, sdev->lun);
+
+	/*
+	 *  Build the tag message if present.
+	 */
+	if (cp->tag != NO_TAG) {
+		u_char order = cp->order;
+
+		switch(order) {
+		case M_ORDERED_TAG:
+			break;
+		case M_HEAD_TAG:
+			break;
+		default:
+			order = M_SIMPLE_TAG;
+		}
+#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
+		/*
+		 *  Avoid too much reordering of SCSI commands.
+		 *  The algorithm tries to prevent the completion of 
+		 *  any tagged command from being delayed by more than 
+		 *  3 times the maximum number of queued commands.
+		 */
+		if (lp && lp->tags_since > 3*SYM_CONF_MAX_TAG) {
+			lp->tags_si = !(lp->tags_si);
+			if (lp->tags_sum[lp->tags_si]) {
+				order = M_ORDERED_TAG;
+				if ((DEBUG_FLAGS & DEBUG_TAGS)||sym_verbose>1) {
+					sym_print_addr(cmd,
+						"ordered tag forced.\n");
+				}
+			}
+			lp->tags_since = 0;
+		}
+#endif
+		msgptr[msglen++] = order;
+
+		/*
+		 *  For fewer than 128 tags, the actual tag messages are 
+		 *  numbered 1,3,5,..,2*MAXTAGS+1, since we may have to 
+		 *  deal with devices that have problems with tag #0 or 
+		 *  with too large tag numbers. For more tags (up to 256), 
+		 *  we use our tag number directly.
+		 */
+#if SYM_CONF_MAX_TASK > (512/4)
+		msgptr[msglen++] = cp->tag;
+#else
+		msgptr[msglen++] = (cp->tag << 1) + 1;
+#endif
+	}
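+
+	/*
+	 *  As an illustration of the mapping above, when
+	 *  SYM_CONF_MAX_TASK does not exceed 128 the tag message byte
+	 *  is (tag << 1) + 1, i.e. tag 0 is sent as 1, tag 1 as 3, ...
+	 *  tag 127 as 255, so the value 0 and even values never appear
+	 *  on the wire.
+	 */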
+
+	/*
+	 *  Build a negotiation message if needed.
+	 *  (nego_status is filled by sym_prepare_nego())
+	 */
+	cp->nego_status = 0;
+	if (tp->tgoal.check_nego && !tp->nego_cp && lp) {
+		msglen += sym_prepare_nego(np, cp, msgptr + msglen);
+	}
+
+	/*
+	 *  Startqueue
+	 */
+	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA(np, select));
+	cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA(np, resel_dsa));
+
+	/*
+	 *  select
+	 */
+	cp->phys.select.sel_id		= cp->target;
+	cp->phys.select.sel_scntl3	= tp->head.wval;
+	cp->phys.select.sel_sxfer	= tp->head.sval;
+	cp->phys.select.sel_scntl4	= tp->head.uval;
+
+	/*
+	 *  message
+	 */
+	cp->phys.smsg.addr	= cpu_to_scr(CCB_BA(cp, scsi_smsg));
+	cp->phys.smsg.size	= cpu_to_scr(msglen);
+
+	/*
+	 *  status
+	 */
+	cp->host_xflags		= 0;
+	cp->host_status		= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
+	cp->ssss_status		= S_ILLEGAL;
+	cp->xerr_status		= 0;
+	cp->host_flags		= 0;
+	cp->extra_bytes		= 0;
+
+	/*
+	 *  Extreme data pointer.
+	 *  It shall be positive, so -1 is lower than the lowest. :)
+	 */
+	cp->ext_sg  = -1;
+	cp->ext_ofs = 0;
+
+	/*
+	 *  Build the CDB and DATA descriptor block 
+	 *  and start the IO.
+	 */
+	return sym_setup_data_and_start(np, cmd, cp);
+}
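+
+/*
+ *  For illustration, with a disconnect-enabled LUN and a simple tag,
+ *  the message prepared by sym_queue_scsiio() above typically looks
+ *  like
+ *
+ *      scsi_smsg[] = { IDENTIFY(1, lun), M_SIMPLE_TAG, (tag << 1) + 1 }
+ *
+ *  optionally followed by the SDTR/WDTR/PPR bytes appended by
+ *  sym_prepare_nego() when a negotiation is still pending.
+ */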
+
+/*
+ *  Reset a SCSI target (all LUNs of this target).
+ */
+int sym_reset_scsi_target(struct sym_hcb *np, int target)
+{
+	struct sym_tcb *tp;
+
+	if (target == np->myaddr || (u_int)target >= SYM_CONF_MAX_TARGET)
+		return -1;
+
+	tp = &np->target[target];
+	tp->to_reset = 1;
+
+	np->istat_sem = SEM;
+	OUTB(np, nc_istat, SIGP|SEM);
+
+	return 0;
+}
+
+/*
+ *  Abort a SCSI IO.
+ */
+static int sym_abort_ccb(struct sym_hcb *np, struct sym_ccb *cp, int timed_out)
+{
+	/*
+	 *  Check that the IO is active.
+	 */
+	if (!cp || !cp->host_status || cp->host_status == HS_WAIT)
+		return -1;
+
+	/*
+	 *  If a previous abort didn't succeed in time,
+	 *  perform a BUS reset.
+	 */
+	if (cp->to_abort) {
+		sym_reset_scsi_bus(np, 1);
+		return 0;
+	}
+
+	/*
+	 *  Mark the CCB for abort and allow time for the abort to complete.
+	 */
+	cp->to_abort = timed_out ? 2 : 1;
+
+	/*
+	 *  Tell the SCRIPTS processor to stop and synchronize with us.
+	 */
+	np->istat_sem = SEM;
+	OUTB(np, nc_istat, SIGP|SEM);
+	return 0;
+}
+
+int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, int timed_out)
+{
+	struct sym_ccb *cp;
+	SYM_QUEHEAD *qp;
+
+	/*
+	 *  Look up our CCB control block.
+	 */
+	cp = NULL;
+	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+		struct sym_ccb *cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+		if (cp2->cmd == cmd) {
+			cp = cp2;
+			break;
+		}
+	}
+
+	return sym_abort_ccb(np, cp, timed_out);
+}
+
+/*
+ *  Complete execution of a SCSI command with extended 
+ *  error, SCSI status error, or having been auto-sensed.
+ *
+ *  The SCRIPTS processor is not running there, so we 
+ *  can safely access IO registers and remove JOBs from  
+ *  the START queue.
+ *  SCRATCHA is assumed to have been loaded with STARTPOS 
+ *  before the SCRIPTS called the C code.
+ */
+void sym_complete_error(struct sym_hcb *np, struct sym_ccb *cp)
+{
+	struct scsi_device *sdev;
+	struct scsi_cmnd *cmd;
+	struct sym_tcb *tp;
+	struct sym_lcb *lp;
+	int resid;
+	int i;
+
+	/*
+	 *  Paranoid check. :)
+	 */
+	if (!cp || !cp->cmd)
+		return;
+
+	cmd = cp->cmd;
+	sdev = cmd->device;
+	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
+		dev_info(&sdev->sdev_gendev, "CCB=%p STAT=%x/%x/%x\n", cp,
+			cp->host_status, cp->ssss_status, cp->host_flags);
+	}
+
+	/*
+	 *  Get target and lun pointers.
+	 */
+	tp = &np->target[cp->target];
+	lp = sym_lp(tp, sdev->lun);
+
+	/*
+	 *  Check for extended errors.
+	 */
+	if (cp->xerr_status) {
+		if (sym_verbose)
+			sym_print_xerr(cmd, cp->xerr_status);
+		if (cp->host_status == HS_COMPLETE)
+			cp->host_status = HS_COMP_ERR;
+	}
+
+	/*
+	 *  Calculate the residual.
+	 */
+	resid = sym_compute_residual(np, cp);
+
+	if (!SYM_SETUP_RESIDUAL_SUPPORT) {/* If user does not want residuals */
+		resid  = 0;		 /* throw them away. :)		    */
+		cp->sv_resid = 0;
+	}
+#ifdef DEBUG_2_0_X
+if (resid)
+	printf("XXXX RESID= %d - 0x%x\n", resid, resid);
+#endif
+
+	/*
+	 *  Dequeue all queued CCBs for that device 
+	 *  not yet started by SCRIPTS.
+	 */
+	i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
+	i = sym_dequeue_from_squeue(np, i, cp->target, sdev->lun, -1);
+
+	/*
+	 *  Restart the SCRIPTS processor.
+	 */
+	OUTL_DSP(np, SCRIPTA_BA(np, start));
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+	if (cp->host_status == HS_COMPLETE &&
+	    cp->ssss_status == S_QUEUE_FULL) {
+		if (!lp || lp->started_tags - i < 2)
+			goto weirdness;
+		/*
+		 *  Decrease queue depth as needed.
+		 */
+		lp->started_max = lp->started_tags - i - 1;
+		lp->num_sgood = 0;
+
+		if (sym_verbose >= 2) {
+			sym_print_addr(cmd, " queue depth is now %d\n",
+					lp->started_max);
+		}
+
+		/*
+		 *  Repair the CCB.
+		 */
+		cp->host_status = HS_BUSY;
+		cp->ssss_status = S_ILLEGAL;
+
+		/*
+		 *  Let's requeue it to device.
+		 */
+		sym_set_cam_status(cmd, CAM_REQUEUE_REQ);
+		goto finish;
+	}
+weirdness:
+#endif
+	/*
+	 *  Build result in CAM ccb.
+	 */
+	sym_set_cam_result_error(np, cp, resid);
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+finish:
+#endif
+	/*
+	 *  Add this one to the COMP queue.
+	 */
+	sym_remque(&cp->link_ccbq);
+	sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);
+
+	/*
+	 *  Complete all those commands with either error 
+	 *  or requeue condition.
+	 */
+	sym_flush_comp_queue(np, 0);
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+	/*
+	 *  Do not start more than 1 command after an error.
+	 */
+	if (lp)
+		sym_start_next_ccbs(np, lp, 1);
+#endif
+}
+
+/*
+ *  Complete execution of a successful SCSI command.
+ *
+ *  Only successful commands go to the DONE queue, 
+ *  since we need to have the SCRIPTS processor 
+ *  stopped on any error condition.
+ *  The SCRIPTS processor is running while we are 
+ *  completing successful commands.
+ */
+void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp)
+{
+	struct sym_tcb *tp;
+	struct sym_lcb *lp;
+	struct scsi_cmnd *cmd;
+	int resid;
+
+	/*
+	 *  Paranoid check. :)
+	 */
+	if (!cp || !cp->cmd)
+		return;
+	assert (cp->host_status == HS_COMPLETE);
+
+	/*
+	 *  Get user command.
+	 */
+	cmd = cp->cmd;
+
+	/*
+	 *  Get target and lun pointers.
+	 */
+	tp = &np->target[cp->target];
+	lp = sym_lp(tp, cp->lun);
+
+	/*
+	 *  Assume device discovered on first success.
+	 */
+	if (!lp)
+		sym_set_bit(tp->lun_map, cp->lun);
+
+	/*
+	 *  If all data have been transferred and no extended 
+	 *  error occurred, there is no residual.
+	 */
+	resid = 0;
+	if (cp->phys.head.lastp != sym_goalp(cp))
+		resid = sym_compute_residual(np, cp);
+
+	/*
+	 *  Wrong transfer residuals may be worse than just always 
+	 *  returning zero. User can disable this feature in 
+	 *  sym53c8xx.h. Residual support is enabled by default.
+	 */
+	if (!SYM_SETUP_RESIDUAL_SUPPORT)
+		resid  = 0;
+#ifdef DEBUG_2_0_X
+if (resid)
+	printf("XXXX RESID= %d - 0x%x\n", resid, resid);
+#endif
+
+	/*
+	 *  Build result in CAM ccb.
+	 */
+	sym_set_cam_result_ok(cp, cmd, resid);
+
+#ifdef	SYM_OPT_SNIFF_INQUIRY
+	/*
+	 *  On standard INQUIRY response (EVPD and CmDt 
+	 *  not set), sniff out device capabilities.
+	 */
+	if (cp->cdb_buf[0] == INQUIRY && !(cp->cdb_buf[1] & 0x3))
+		sym_sniff_inquiry(np, cmd, resid);
+#endif
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+	/*
+	 *  If the max number of started ccbs has been reduced,
+	 *  increase it again after 200 good statuses have been received.
+	 */
+	if (lp && lp->started_max < lp->started_limit) {
+		++lp->num_sgood;
+		if (lp->num_sgood >= 200) {
+			lp->num_sgood = 0;
+			++lp->started_max;
+			if (sym_verbose >= 2) {
+				sym_print_addr(cmd, " queue depth is now %d\n",
+				       lp->started_max);
+			}
+		}
+	}
+#endif
+
+	/*
+	 *  Free our CCB.
+	 */
+	sym_free_ccb (np, cp);
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+	/*
+	 *  Requeue a couple of waiting SCSI commands.
+	 */
+	if (lp && !sym_que_empty(&lp->waiting_ccbq))
+		sym_start_next_ccbs(np, lp, 2);
+#endif
+	/*
+	 *  Complete the command.
+	 */
+	sym_xpt_done(np, cmd);
+}
+
+/*
+ *  Soft-attach the controller.
+ */
+int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram)
+{
+	struct sym_hcb *np = sym_get_hcb(shost);
+	int i;
+
+	/*
+	 *  Get some info about the firmware.
+	 */
+	np->scripta_sz	 = fw->a_size;
+	np->scriptb_sz	 = fw->b_size;
+	np->scriptz_sz	 = fw->z_size;
+	np->fw_setup	 = fw->setup;
+	np->fw_patch	 = fw->patch;
+	np->fw_name	 = fw->name;
+
+	/*
+	 *  Save the settings of some IO registers, so we will 
+	 *  be able to probe specific implementations.
+	 */
+	sym_save_initial_setting (np);
+
+	/*
+	 *  Reset the chip now, since it has been reported 
+	 *  that SCSI clock calibration may not work properly 
+	 *  if the chip is currently active.
+	 */
+	sym_chip_reset(np);
+
+	/*
+	 *  Prepare controller and device settings, according 
+	 *  to chip features, user set-up and driver set-up.
+	 */
+	sym_prepare_setting(shost, np, nvram);
+
+	/*
+	 *  Check the PCI clock frequency.
+	 *  Must be performed after prepare_setting since it destroys 
+	 *  STEST1, which is used to probe for the clock doubler.
+	 */
+	i = sym_getpciclock(np);
+	if (i > 37000 && !(np->features & FE_66MHZ))
+		printf("%s: PCI BUS clock seems too high: %u KHz.\n",
+			sym_name(np), i);
+
+	/*
+	 *  Allocate the start queue.
+	 */
+	np->squeue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE");
+	if (!np->squeue)
+		goto attach_failed;
+	np->squeue_ba = vtobus(np->squeue);
+
+	/*
+	 *  Allocate the done queue.
+	 */
+	np->dqueue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE");
+	if (!np->dqueue)
+		goto attach_failed;
+	np->dqueue_ba = vtobus(np->dqueue);
+
+	/*
+	 *  Allocate the target bus address array.
+	 */
+	np->targtbl = sym_calloc_dma(256, "TARGTBL");
+	if (!np->targtbl)
+		goto attach_failed;
+	np->targtbl_ba = vtobus(np->targtbl);
+
+	/*
+	 *  Allocate SCRIPTS areas.
+	 */
+	np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0");
+	np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0");
+	np->scriptz0 = sym_calloc_dma(np->scriptz_sz, "SCRIPTZ0");
+	if (!np->scripta0 || !np->scriptb0 || !np->scriptz0)
+		goto attach_failed;
+
+	/*
+	 *  Allocate the array of lists of CCBs hashed by DSA.
+	 */
+	np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(struct sym_ccb *), GFP_KERNEL);
+	if (!np->ccbh)
+		goto attach_failed;
+
+	/*
+	 *  Initialize the CCB free and busy queues.
+	 */
+	sym_que_init(&np->free_ccbq);
+	sym_que_init(&np->busy_ccbq);
+	sym_que_init(&np->comp_ccbq);
+
+	/*
+	 *  Initialization for optional handling 
+	 *  of device queueing.
+	 */
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+	sym_que_init(&np->dummy_ccbq);
+#endif
+	/*
+	 *  Allocate some CCBs. We need at least ONE.
+	 */
+	if (!sym_alloc_ccb(np))
+		goto attach_failed;
+
+	/*
+	 *  Calculate BUS addresses where we are going 
+	 *  to load the SCRIPTS.
+	 */
+	np->scripta_ba	= vtobus(np->scripta0);
+	np->scriptb_ba	= vtobus(np->scriptb0);
+	np->scriptz_ba	= vtobus(np->scriptz0);
+
+	if (np->ram_ba) {
+		np->scripta_ba	= np->ram_ba;
+		if (np->features & FE_RAM8K) {
+			np->ram_ws = 8192;
+			np->scriptb_ba = np->scripta_ba + 4096;
+#if 0	/* May get useful for 64 BIT PCI addressing */
+			np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32);
+#endif
+		}
+		else
+			np->ram_ws = 4096;
+	}
+
+	/*
+	 *  Copy scripts to controller instance.
+	 */
+	memcpy(np->scripta0, fw->a_base, np->scripta_sz);
+	memcpy(np->scriptb0, fw->b_base, np->scriptb_sz);
+	memcpy(np->scriptz0, fw->z_base, np->scriptz_sz);
+
+	/*
+	 *  Setup variable parts in scripts and compute
+	 *  scripts bus addresses used from the C code.
+	 */
+	np->fw_setup(np, fw);
+
+	/*
+	 *  Bind SCRIPTS with physical addresses usable by the 
+	 *  SCRIPTS processor (as seen from the BUS = BUS addresses).
+	 */
+	sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz);
+	sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz);
+	sym_fw_bind_script(np, (u32 *) np->scriptz0, np->scriptz_sz);
+
+#ifdef SYM_CONF_IARB_SUPPORT
+	/*
+	 *    If user wants IARB to be set when we win arbitration 
+	 *    and have other jobs, compute the max number of consecutive 
+	 *    settings of IARB hints before we give devices a chance to 
+	 *    arbitrate for reselection.
+	 */
+#ifdef	SYM_SETUP_IARB_MAX
+	np->iarb_max = SYM_SETUP_IARB_MAX;
+#else
+	np->iarb_max = 4;
+#endif
+#endif
+
+	/*
+	 *  Prepare the idle and invalid task actions.
+	 */
+	np->idletask.start	= cpu_to_scr(SCRIPTA_BA(np, idle));
+	np->idletask.restart	= cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
+	np->idletask_ba		= vtobus(&np->idletask);
+
+	np->notask.start	= cpu_to_scr(SCRIPTA_BA(np, idle));
+	np->notask.restart	= cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
+	np->notask_ba		= vtobus(&np->notask);
+
+	np->bad_itl.start	= cpu_to_scr(SCRIPTA_BA(np, idle));
+	np->bad_itl.restart	= cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
+	np->bad_itl_ba		= vtobus(&np->bad_itl);
+
+	np->bad_itlq.start	= cpu_to_scr(SCRIPTA_BA(np, idle));
+	np->bad_itlq.restart	= cpu_to_scr(SCRIPTB_BA(np,bad_i_t_l_q));
+	np->bad_itlq_ba		= vtobus(&np->bad_itlq);
+
+	/*
+	 *  Allocate and prepare the lun JUMP table that is used 
+	 *  for a target prior to the probing of devices (bad lun table).
+	 *  A private table will be allocated for the target on the 
+	 *  first INQUIRY response received.
+	 */
+	np->badluntbl = sym_calloc_dma(256, "BADLUNTBL");
+	if (!np->badluntbl)
+		goto attach_failed;
+
+	np->badlun_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
+	for (i = 0 ; i < 64 ; i++)	/* 64 luns/target, no less */
+		np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
+
+	/*
+	 *  Prepare the bus address array that contains the bus 
+	 *  address of each target control block.
+	 *  For now, assume all logical units are invalid. :)
+	 */
+	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
+		np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i]));
+		np->target[i].head.luntbl_sa =
+				cpu_to_scr(vtobus(np->badluntbl));
+		np->target[i].head.lun0_sa =
+				cpu_to_scr(vtobus(&np->badlun_sa));
+	}
+
+	/*
+	 *  Now check the cache handling of the pci chipset.
+	 */
+	if (sym_snooptest (np)) {
+		printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np));
+		goto attach_failed;
+	}
+
+	/*
+	 *  Sigh! we are done.
+	 */
+	return 0;
+
+attach_failed:
+	return -ENXIO;
+}
+
+/*
+ *  Free everything that has been allocated for this device.
+ */
+void sym_hcb_free(struct sym_hcb *np)
+{
+	SYM_QUEHEAD *qp;
+	struct sym_ccb *cp;
+	struct sym_tcb *tp;
+	struct sym_lcb *lp;
+	int target, lun;
+
+	if (np->scriptz0)
+		sym_mfree_dma(np->scriptz0, np->scriptz_sz, "SCRIPTZ0");
+	if (np->scriptb0)
+		sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0");
+	if (np->scripta0)
+		sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0");
+	if (np->squeue)
+		sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE");
+	if (np->dqueue)
+		sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");
+
+	if (np->actccbs) {
+		while ((qp = sym_remque_head(&np->free_ccbq)) != 0) {
+			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
+			sym_mfree_dma(cp, sizeof(*cp), "CCB");
+		}
+	}
+	kfree(np->ccbh);
+
+	if (np->badluntbl)
+		sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");
+
+	for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
+		tp = &np->target[target];
+		for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) {
+			lp = sym_lp(tp, lun);
+			if (!lp)
+				continue;
+			if (lp->itlq_tbl)
+				sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4,
+				       "ITLQ_TBL");
+			kfree(lp->cb_tags);
+			sym_mfree_dma(lp, sizeof(*lp), "LCB");
+		}
+#if SYM_CONF_MAX_LUN > 1
+		kfree(tp->lunmp);
+#endif 
+	}
+	if (np->targtbl)
+		sym_mfree_dma(np->targtbl, 256, "TARGTBL");
+}
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
new file mode 100644
index 0000000..a95cbe4
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -0,0 +1,1304 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef SYM_HIPD_H
+#define SYM_HIPD_H
+
+/*
+ *  Generic driver options.
+ *
+ *  They may be defined in platform specific headers, if they 
+ *  are useful.
+ *
+ *    SYM_OPT_HANDLE_DIR_UNKNOWN
+ *        When this option is set, the SCRIPTS used by the driver 
+ *        are able to handle SCSI transfers with direction not 
+ *        supplied by user.
+ *        (set for Linux-2.0.X)
+ *
+ *    SYM_OPT_HANDLE_DEVICE_QUEUEING
+ *        When this option is set, the driver will use a queue per 
+ *        device and handle QUEUE FULL status requeuing internally.
+ *
+ *    SYM_OPT_LIMIT_COMMAND_REORDERING
+ *        When this option is set, the driver tries to limit tagged 
+ *        command reordering to some reasonable value.
+ *        (set for Linux)
+ */
+#if 0
+#define SYM_OPT_HANDLE_DIR_UNKNOWN
+#define SYM_OPT_HANDLE_DEVICE_QUEUEING
+#define SYM_OPT_LIMIT_COMMAND_REORDERING
+#endif
+
+/*
+ *  Active debugging tags and verbosity.
+ *  Both DEBUG_FLAGS and sym_verbose can be redefined 
+ *  by the platform specific code to something else.
+ */
+#define DEBUG_ALLOC	(0x0001)
+#define DEBUG_PHASE	(0x0002)
+#define DEBUG_POLL	(0x0004)
+#define DEBUG_QUEUE	(0x0008)
+#define DEBUG_RESULT	(0x0010)
+#define DEBUG_SCATTER	(0x0020)
+#define DEBUG_SCRIPT	(0x0040)
+#define DEBUG_TINY	(0x0080)
+#define DEBUG_TIMING	(0x0100)
+#define DEBUG_NEGO	(0x0200)
+#define DEBUG_TAGS	(0x0400)
+#define DEBUG_POINTER	(0x0800)
+
+#ifndef DEBUG_FLAGS
+#define DEBUG_FLAGS	(0x0000)
+#endif
+
+#ifndef sym_verbose
+#define sym_verbose	(np->verbose)
+#endif
+
+/*
+ *  These ones should have been already defined.
+ */
+#ifndef assert
+#define	assert(expression) { \
+	if (!(expression)) { \
+		(void)panic( \
+			"assertion \"%s\" failed: file \"%s\", line %d\n", \
+			#expression, \
+			__FILE__, __LINE__); \
+	} \
+}
+#endif
+
+/*
+ *  Number of tasks per device we want to handle.
+ */
+#if	SYM_CONF_MAX_TAG_ORDER > 8
+#error	"more than 256 tags per logical unit not allowed."
+#endif
+#define	SYM_CONF_MAX_TASK	(1<<SYM_CONF_MAX_TAG_ORDER)
+
+/*
+ *  Do not use more tasks than we can handle.
+ */
+#ifndef	SYM_CONF_MAX_TAG
+#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
+#endif
+#if	SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
+#undef	SYM_CONF_MAX_TAG
+#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
+#endif
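+
+/*
+ *  For example, a SYM_CONF_MAX_TAG_ORDER of 6 yields
+ *  SYM_CONF_MAX_TASK == (1<<6) == 64 tasks per logical unit, and the
+ *  clamping above guarantees SYM_CONF_MAX_TAG never exceeds that value.
+ */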
+
+/*
+ *    This one means 'NO TAG for this job'
+ */
+#define NO_TAG	(256)
+
+/*
+ *  Number of SCSI targets.
+ */
+#if	SYM_CONF_MAX_TARGET > 16
+#error	"more than 16 targets not allowed."
+#endif
+
+/*
+ *  Number of logical units per target.
+ */
+#if	SYM_CONF_MAX_LUN > 64
+#error	"more than 64 logical units per target not allowed."
+#endif
+
+/*
+ *    Asynchronous pre-scaler (ns). Shall be 40 for 
+ *    the SCSI timings to be compliant.
+ */
+#define	SYM_CONF_MIN_ASYNC (40)
+
+/*
+ *  Shortest memory chunk is (1<<SYM_MEM_SHIFT), currently 16.
+ *  Actual allocations are made in SYM_MEM_CLUSTER_SIZE sized chunks.
+ *  (1 PAGE at a time is just fine).
+ */
+#define SYM_MEM_SHIFT	4
+#define SYM_MEM_CLUSTER_SIZE	(1UL << SYM_MEM_CLUSTER_SHIFT)
+#define SYM_MEM_CLUSTER_MASK	(SYM_MEM_CLUSTER_SIZE-1)
+
+/*
+ *  Number of entries in the START and DONE queues.
+ *
+ *  We limit them to 1 PAGE so that the allocation of these 
+ *  queues succeeds. Each entry is 8 bytes long (2 DWORDS).
+ */
+#ifdef	SYM_CONF_MAX_START
+#define	SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
+#else
+#define	SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
+#define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
+#endif
+
+#if	SYM_CONF_MAX_QUEUE > SYM_MEM_CLUSTER_SIZE/8
+#undef	SYM_CONF_MAX_QUEUE
+#define	SYM_CONF_MAX_QUEUE (SYM_MEM_CLUSTER_SIZE/8)
+#undef	SYM_CONF_MAX_START
+#define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
+#endif
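+
+/*
+ *  As an illustration, with a memory cluster of one 4 KB page the
+ *  limit above is 4096/8 == 512 entries, which clamps
+ *  SYM_CONF_MAX_QUEUE to 512 and thus SYM_CONF_MAX_START to 510.
+ */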
+
+/*
+ *  For this one, we want a short name :-)
+ */
+#define MAX_QUEUE	SYM_CONF_MAX_QUEUE
+
+/*
+ *  Common definitions for both bus space based and legacy IO methods.
+ */
+
+#define INB_OFF(np, o)		ioread8(np->s.ioaddr + (o))
+#define INW_OFF(np, o)		ioread16(np->s.ioaddr + (o))
+#define INL_OFF(np, o)		ioread32(np->s.ioaddr + (o))
+
+#define OUTB_OFF(np, o, val)	iowrite8((val), np->s.ioaddr + (o))
+#define OUTW_OFF(np, o, val)	iowrite16((val), np->s.ioaddr + (o))
+#define OUTL_OFF(np, o, val)	iowrite32((val), np->s.ioaddr + (o))
+
+#define INB(np, r)		INB_OFF(np, offsetof(struct sym_reg, r))
+#define INW(np, r)		INW_OFF(np, offsetof(struct sym_reg, r))
+#define INL(np, r)		INL_OFF(np, offsetof(struct sym_reg, r))
+
+#define OUTB(np, r, v)		OUTB_OFF(np, offsetof(struct sym_reg, r), (v))
+#define OUTW(np, r, v)		OUTW_OFF(np, offsetof(struct sym_reg, r), (v))
+#define OUTL(np, r, v)		OUTL_OFF(np, offsetof(struct sym_reg, r), (v))
+
+#define OUTONB(np, r, m)	OUTB(np, r, INB(np, r) | (m))
+#define OUTOFFB(np, r, m)	OUTB(np, r, INB(np, r) & ~(m))
+#define OUTONW(np, r, m)	OUTW(np, r, INW(np, r) | (m))
+#define OUTOFFW(np, r, m)	OUTW(np, r, INW(np, r) & ~(m))
+#define OUTONL(np, r, m)	OUTL(np, r, INL(np, r) | (m))
+#define OUTOFFL(np, r, m)	OUTL(np, r, INL(np, r) & ~(m))
+
+/*
+ *  We normally want the chip to have a consistent view
+ *  of driver internal data structures when we restart it.
+ *  Thus these macros.
+ */
+#define OUTL_DSP(np, v)				\
+	do {					\
+		MEMORY_WRITE_BARRIER();		\
+		OUTL(np, nc_dsp, (v));		\
+	} while (0)
+
+#define OUTONB_STD()				\
+	do {					\
+		MEMORY_WRITE_BARRIER();		\
+		OUTONB(np, nc_dcntl, (STD|NOCOM));	\
+	} while (0)
+
+/*
+ *  Command control block states.
+ */
+#define HS_IDLE		(0)
+#define HS_BUSY		(1)
+#define HS_NEGOTIATE	(2)	/* sync/wide data transfer*/
+#define HS_DISCONNECT	(3)	/* Disconnected by target */
+#define HS_WAIT		(4)	/* waiting for resource	  */
+
+#define HS_DONEMASK	(0x80)
+#define HS_COMPLETE	(4|HS_DONEMASK)
+#define HS_SEL_TIMEOUT	(5|HS_DONEMASK)	/* Selection timeout      */
+#define HS_UNEXPECTED	(6|HS_DONEMASK)	/* Unexpected disconnect  */
+#define HS_COMP_ERR	(7|HS_DONEMASK)	/* Completed with error	  */
+
+/*
+ *  Software Interrupt Codes
+ */
+#define	SIR_BAD_SCSI_STATUS	(1)
+#define	SIR_SEL_ATN_NO_MSG_OUT	(2)
+#define	SIR_MSG_RECEIVED	(3)
+#define	SIR_MSG_WEIRD		(4)
+#define	SIR_NEGO_FAILED		(5)
+#define	SIR_NEGO_PROTO		(6)
+#define	SIR_SCRIPT_STOPPED	(7)
+#define	SIR_REJECT_TO_SEND	(8)
+#define	SIR_SWIDE_OVERRUN	(9)
+#define	SIR_SODL_UNDERRUN	(10)
+#define	SIR_RESEL_NO_MSG_IN	(11)
+#define	SIR_RESEL_NO_IDENTIFY	(12)
+#define	SIR_RESEL_BAD_LUN	(13)
+#define	SIR_TARGET_SELECTED	(14)
+#define	SIR_RESEL_BAD_I_T_L	(15)
+#define	SIR_RESEL_BAD_I_T_L_Q	(16)
+#define	SIR_ABORT_SENT		(17)
+#define	SIR_RESEL_ABORTED	(18)
+#define	SIR_MSG_OUT_DONE	(19)
+#define	SIR_COMPLETE_ERROR	(20)
+#define	SIR_DATA_OVERRUN	(21)
+#define	SIR_BAD_PHASE		(22)
+#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
+#define	SIR_DMAP_DIRTY		(23)
+#define	SIR_MAX			(23)
+#else
+#define	SIR_MAX			(22)
+#endif
+
+/*
+ *  Extended error bit codes.
+ *  xerr_status field of struct sym_ccb.
+ */
+#define	XE_EXTRA_DATA	(1)	/* unexpected data phase	 */
+#define	XE_BAD_PHASE	(1<<1)	/* illegal phase (4/5)		 */
+#define	XE_PARITY_ERR	(1<<2)	/* unrecovered SCSI parity error */
+#define	XE_SODL_UNRUN	(1<<3)	/* ODD transfer in DATA OUT phase */
+#define	XE_SWIDE_OVRUN	(1<<4)	/* ODD transfer in DATA IN phase */
+
+/*
+ *  Negotiation status.
+ *  nego_status field of struct sym_ccb.
+ */
+#define NS_SYNC		(1)
+#define NS_WIDE		(2)
+#define NS_PPR		(3)
+
+/*
+ *  A CCB hashed table is used to retrieve CCB address 
+ *  from DSA value.
+ */
+#define CCB_HASH_SHIFT		8
+#define CCB_HASH_SIZE		(1UL << CCB_HASH_SHIFT)
+#define CCB_HASH_MASK		(CCB_HASH_SIZE-1)
+#if 1
+#define CCB_HASH_CODE(dsa)	\
+	(((dsa) >> (_LGRU16_(sizeof(struct sym_ccb)))) & CCB_HASH_MASK)
+#else
+#define CCB_HASH_CODE(dsa)	(((dsa) >> 9) & CCB_HASH_MASK)
+#endif
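+
+/*
+ *  Hashing sketch, assuming _LGRU16_() yields the log2 (rounded up)
+ *  of sizeof(struct sym_ccb): CCB_HASH_CODE() shifts the DSA right by
+ *  that amount and keeps the low CCB_HASH_SHIFT (8) bits, so CCBs
+ *  allocated at nearby addresses tend to land in different buckets
+ *  and sym_ccb_from_dsa() only has to walk short link_ccbh chains.
+ */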
+
+#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
+/*
+ *  We may want to use segment registers for 64 bit DMA.
+ *  16 segments registers -> up to 64 GB addressable.
+ */
+#define SYM_DMAP_SHIFT	(4)
+#define SYM_DMAP_SIZE	(1u<<SYM_DMAP_SHIFT)
+#define SYM_DMAP_MASK	(SYM_DMAP_SIZE-1)
+#endif
+
+/*
+ *  Device flags.
+ */
+#define SYM_DISC_ENABLED	(1)
+#define SYM_TAGS_ENABLED	(1<<1)
+#define SYM_SCAN_BOOT_DISABLED	(1<<2)
+#define SYM_SCAN_LUNS_DISABLED	(1<<3)
+
+/*
+ *  Host adapter miscellaneous flags.
+ */
+#define SYM_AVOID_BUS_RESET	(1)
+
+/*
+ *  Misc.
+ */
+#define SYM_SNOOP_TIMEOUT (10000000)
+#define BUS_8_BIT	0
+#define BUS_16_BIT	1
+
+/*
+ *  Gather negotiable parameters value
+ */
+struct sym_trans {
+	u8 period;
+	u8 offset;
+	unsigned int width:1;
+	unsigned int iu:1;
+	unsigned int dt:1;
+	unsigned int qas:1;
+	unsigned int check_nego:1;
+};
+
+/*
+ *  Global TCB HEADER.
+ *
+ *  Due to lack of indirect addressing on earlier NCR chips,
+ *  this substructure is copied from the TCB to a global 
+ *  address after selection.
+ *  For SYMBIOS chips that support LOAD/STORE this copy is 
+ *  not needed and thus not performed.
+ */
+struct sym_tcbh {
+	/*
+	 *  Scripts bus addresses of LUN table accessed from scripts.
+	 *  LUN #0 is a special case, since multi-lun devices are rare, 
+	 *  and we want to speed up the general case and not waste 
+	 *  resources.
+	 */
+	u32	luntbl_sa;	/* bus address of this table	*/
+	u32	lun0_sa;	/* bus address of LCB #0	*/
+	/*
+	 *  Actual SYNC/WIDE IO registers value for this target.
+	 *  'sval', 'wval' and 'uval' are read from SCRIPTS and 
+	 *  so have alignment constraints.
+	 */
+/*0*/	u_char	uval;		/* -> SCNTL4 register		*/
+/*1*/	u_char	sval;		/* -> SXFER  io register	*/
+/*2*/	u_char	filler1;
+/*3*/	u_char	wval;		/* -> SCNTL3 io register	*/
+};
+
+/*
+ *  Target Control Block
+ */
+struct sym_tcb {
+	/*
+	 *  TCB header.
+	 *  Assumed at offset 0.
+	 */
+/*0*/	struct sym_tcbh head;
+
+	/*
+	 *  LUN table used by the SCRIPTS processor.
+	 *  An array of bus addresses is used on reselection.
+	 */
+	u32	*luntbl;	/* LCBs bus address table	*/
+
+	/*
+	 *  LUN table used by the C code.
+	 */
+	struct sym_lcb *lun0p;		/* LCB of LUN #0 (usual case)	*/
+#if SYM_CONF_MAX_LUN > 1
+	struct sym_lcb **lunmp;		/* Other LCBs [1..MAX_LUN]	*/
+#endif
+
+	/*
+	 *  Bitmap that tells which LUNs have completed at least 
+	 *  1 IO and are therefore assumed to be real devices.
+	 *  This avoids useless allocation of LCB structures.
+	 */
+	u32	lun_map[(SYM_CONF_MAX_LUN+31)/32];
+
+	/*
+	 *  Bitmap that tells which LUNs do not yet have an LCB 
+	 *  allocated (not discovered or LCB allocation failed).
+	 */
+	u32	busy0_map[(SYM_CONF_MAX_LUN+31)/32];
+
+#ifdef	SYM_HAVE_STCB
+	/*
+	 *  O/S specific data structure.
+	 */
+	struct sym_stcb s;
+#endif
+
+	/* Transfer goal */
+	struct sym_trans tgoal;
+
+	/*
+	 * Keep track of the CCB used for the negotiation in order
+	 * to ensure that only 1 negotiation is queued at a time.
+	 */
+	struct sym_ccb *  nego_cp;	/* CCB used for the nego		*/
+
+	/*
+	 *  Set when we want to reset the device.
+	 */
+	u_char	to_reset;
+
+	/*
+	 *  Other user settable limits and options.
+	 *  These limits are read from the NVRAM if present.
+	 */
+	u_char	usrflags;
+	u_short	usrtags;
+	struct scsi_device *sdev;
+};
+
+/*
+ *  Global LCB HEADER.
+ *
+ *  Due to lack of indirect addressing on earlier NCR chips,
+ *  this substructure is copied from the LCB to a global 
+ *  address after selection.
+ *  For SYMBIOS chips that support LOAD/STORE this copy is 
+ *  not needed and thus not performed.
+ */
+struct sym_lcbh {
+	/*
+	 *  SCRIPTS address jumped by SCRIPTS on reselection.
+	 *  For logical units that have not been probed, this address 
+	 *  points to SCRIPTS that deal with bad LU handling (must be at 
+	 *  offset zero of the LCB for that reason).
+	 */
+/*0*/	u32	resel_sa;
+
+	/*
+	 *  Task (bus address of a CCB) read from SCRIPTS that points 
+	 *  to the unique ITL nexus allowed to be disconnected.
+	 */
+	u32	itl_task_sa;
+
+	/*
+	 *  Task table bus address (read from SCRIPTS).
+	 */
+	u32	itlq_tbl_sa;
+};
+
+/*
+ *  Logical Unit Control Block
+ */
+struct sym_lcb {
+	/*
+	 *  TCB header.
+	 *  Assumed at offset 0.
+	 */
+/*0*/	struct sym_lcbh head;
+
+	/*
+	 *  Task table read from SCRIPTS that contains pointers to 
+	 *  ITLQ nexuses. The bus address read from SCRIPTS is 
+	 *  inside the header.
+	 */
+	u32	*itlq_tbl;	/* Kernel virtual address	*/
+
+	/*
+	 *  Busy CCBs management.
+	 */
+	u_short	busy_itlq;	/* Number of busy tagged CCBs	*/
+	u_short	busy_itl;	/* Number of busy untagged CCBs	*/
+
+	/*
+	 *  Circular tag allocation buffer.
+	 */
+	u_short	ia_tag;		/* Tag allocation index		*/
+	u_short	if_tag;		/* Tag release index		*/
+	u_char	*cb_tags;	/* Circular tags buffer		*/
+
+	/*
+	 *  O/S specific data structure.
+	 */
+#ifdef	SYM_HAVE_SLCB
+	struct sym_slcb s;
+#endif
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+	/*
+	 *  Optionnaly the driver can handle device queueing, 
+	 *  and requeues internally command to redo.
+	 */
+	SYM_QUEHEAD waiting_ccbq;
+	SYM_QUEHEAD started_ccbq;
+	int	num_sgood;
+	u_short	started_tags;
+	u_short	started_no_tag;
+	u_short	started_max;
+	u_short	started_limit;
+#endif
+
+#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
+	/*
+	 *  Optionally the driver can try to prevent SCSI 
+	 *  IOs from being reordered too much.
+	 */
+	u_char		tags_si;	/* Current index to tags sum	*/
+	u_short		tags_sum[2];	/* Tags sum counters		*/
+	u_short		tags_since;	/* # of tags since last switch	*/
+#endif
+
+	/*
+	 *  Set when we want to clear all tasks.
+	 */
+	u_char to_clear;
+
+	/*
+	 *  Capabilities.
+	 */
+	u_char	user_flags;
+	u_char	curr_flags;
+};
+
+/*
+ *  Action from SCRIPTS on a task.
+ *  Is part of the CCB, but is also used separately to plug 
+ *  error handling action to perform from SCRIPTS.
+ */
+struct sym_actscr {
+	u32	start;		/* Jumped by SCRIPTS after selection	*/
+	u32	restart;	/* Jumped by SCRIPTS on reselection	*/
+};
+
+/*
+ *  Phase mismatch context.
+ *
+ *  It is part of the CCB and is used as parameters for the 
+ *  DATA pointer. We need two contexts to handle correctly the 
+ *  SAVED DATA POINTER.
+ */
+struct sym_pmc {
+	struct	sym_tblmove sg;	/* Updated interrupted SG block	*/
+	u32	ret;		/* SCRIPT return address	*/
+};
+
+/*
+ *  LUN control block lookup.
+ *  We use a direct pointer for LUN #0, and a table of 
+ *  pointers which is only allocated for devices that support 
+ *  LUN(s) > 0.
+ */
+#if SYM_CONF_MAX_LUN <= 1
+#define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : NULL
+#else
+#define sym_lp(tp, lun) \
+	(!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : NULL
+#endif
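+
+/*
+ *  Usage sketch: sym_lp(tp, 0) simply returns tp->lun0p, while
+ *  sym_lp(tp, 3) returns tp->lunmp[3] when the per-target pointer
+ *  table exists and NULL otherwise, which is what sym_get_ccb()
+ *  checks before falling back to a single untagged IO per
+ *  not-yet-discovered LUN.
+ */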
+
+/*
+ *  The status bytes are used by the host and the script processor.
+ *
+ *  The last four bytes (status[4]) are copied to the 
+ *  scratchb register (declared as scr0..scr3) just after the 
+ *  select/reselect, and copied back just after disconnecting.
+ *  Inside the script the XX_REG are used.
+ */
+
+/*
+ *  Last four bytes (script)
+ */
+#define  HX_REG	scr0
+#define  HX_PRT	nc_scr0
+#define  HS_REG	scr1
+#define  HS_PRT	nc_scr1
+#define  SS_REG	scr2
+#define  SS_PRT	nc_scr2
+#define  HF_REG	scr3
+#define  HF_PRT	nc_scr3
+
+/*
+ *  Last four bytes (host)
+ */
+#define  host_xflags   phys.head.status[0]
+#define  host_status   phys.head.status[1]
+#define  ssss_status   phys.head.status[2]
+#define  host_flags    phys.head.status[3]
+
+/*
+ *  Host flags
+ */
+#define HF_IN_PM0	1u
+#define HF_IN_PM1	(1u<<1)
+#define HF_ACT_PM	(1u<<2)
+#define HF_DP_SAVED	(1u<<3)
+#define HF_SENSE	(1u<<4)
+#define HF_EXT_ERR	(1u<<5)
+#define HF_DATA_IN	(1u<<6)
+#ifdef SYM_CONF_IARB_SUPPORT
+#define HF_HINT_IARB	(1u<<7)
+#endif
+
+/*
+ *  More host flags
+ */
+#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
+#define	HX_DMAP_DIRTY	(1u<<7)
+#endif
+
+/*
+ *  Global CCB HEADER.
+ *
+ *  Due to lack of indirect addressing on earlier NCR chips,
+ *  this substructure is copied from the ccb to a global 
+ *  address after selection (or reselection) and copied back 
+ *  before disconnect.
+ *  For SYMBIOS chips that support LOAD/STORE this copy is 
+ *  not needed and thus not performed.
+ */
+
+struct sym_ccbh {
+	/*
+	 *  Start and restart SCRIPTS addresses (must be at 0).
+	 */
+/*0*/	struct sym_actscr go;
+
+	/*
+	 *  SCRIPTS jump addresses that deal with data pointers.
+	 *  'savep' points to the position in the script responsible 
+	 *  for the actual transfer of data.
+	 *  It's written on reception of a SAVE_DATA_POINTER message.
+	 */
+	u32	savep;		/* Jump address to saved data pointer	*/
+	u32	lastp;		/* SCRIPTS address at end of data	*/
+#ifdef	SYM_OPT_HANDLE_DIR_UNKNOWN
+	u32	wlastp;
+#endif
+
+	/*
+	 *  Status fields.
+	 */
+	u8	status[4];
+};
+
+/*
+ *  GET/SET the value of the data pointer used by SCRIPTS.
+ *
+ *  We must distinguish between the LOAD/STORE-based SCRIPTS 
+ *  that use directly the header in the CCB, and the NCR-GENERIC 
+ *  SCRIPTS that use the copy of the header in the HCB.
+ */
+#if	SYM_CONF_GENERIC_SUPPORT
+#define sym_set_script_dp(np, cp, dp)				\
+	do {							\
+		if (np->features & FE_LDSTR)			\
+			cp->phys.head.lastp = cpu_to_scr(dp);	\
+		else						\
+			np->ccb_head.lastp = cpu_to_scr(dp);	\
+	} while (0)
+#define sym_get_script_dp(np, cp) 				\
+	scr_to_cpu((np->features & FE_LDSTR) ?			\
+		cp->phys.head.lastp : np->ccb_head.lastp)
+#else
+#define sym_set_script_dp(np, cp, dp)				\
+	do {							\
+		cp->phys.head.lastp = cpu_to_scr(dp);		\
+	} while (0)
+
+#define sym_get_script_dp(np, cp) (cp->phys.head.lastp)
+#endif
+
+/*
+ *  Data Structure Block
+ *
+ *  During execution of a ccb by the script processor, the 
+ *  DSA (data structure address) register points to this 
+ *  substructure of the ccb.
+ */
+struct sym_dsb {
+	/*
+	 *  CCB header.
+	 *  Also assumed at offset 0 of the sym_ccb structure.
+	 */
+/*0*/	struct sym_ccbh head;
+
+	/*
+	 *  Phase mismatch contexts.
+	 *  We need two to handle correctly the SAVED DATA POINTER.
+	 *  MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic 
+	 *  for address calculation from SCRIPTS.
+	 */
+	struct sym_pmc pm0;
+	struct sym_pmc pm1;
+
+	/*
+	 *  Table data for Script
+	 */
+	struct sym_tblsel  select;
+	struct sym_tblmove smsg;
+	struct sym_tblmove smsg_ext;
+	struct sym_tblmove cmd;
+	struct sym_tblmove sense;
+	struct sym_tblmove wresid;
+	struct sym_tblmove data [SYM_CONF_MAX_SG];
+};
+
+/*
+ *  Our Command Control Block
+ */
+struct sym_ccb {
+	/*
+	 *  This is the data structure which the DSA register points 
+	 *  to while the CCB is executed by the script processor.
+	 *  It must be the first entry.
+	 */
+	struct sym_dsb phys;
+
+	/*
+	 *  Pointer to CAM ccb and related stuff.
+	 */
+	struct scsi_cmnd *cmd;	/* CAM scsiio ccb		*/
+	u8	cdb_buf[16];	/* Copy of CDB			*/
+#define	SYM_SNS_BBUF_LEN 32
+	u8	sns_bbuf[SYM_SNS_BBUF_LEN]; /* Bounce buffer for sense data */
+	int	data_len;	/* Total data length		*/
+	int	segments;	/* Number of SG segments	*/
+
+	u8	order;		/* Tag type (if tagged command)	*/
+
+	/*
+	 *  Miscellaneous status fields.
+	 */
+	u_char	nego_status;	/* Negotiation status		*/
+	u_char	xerr_status;	/* Extended error flags		*/
+	u32	extra_bytes;	/* Extraneous bytes transferred	*/
+
+	/*
+	 *  Message areas.
+	 *  We prepare a message to be sent after selection.
+	 *  We may use a second one if the command is rescheduled 
+	 *  due to CHECK_CONDITION or COMMAND TERMINATED.
+	 *  Contents are IDENTIFY and SIMPLE_TAG.
+	 *  While negotiating sync or wide transfer,
+	 *  a SDTR or WDTR message is appended.
+	 */
+	u_char	scsi_smsg [12];
+	u_char	scsi_smsg2[12];
+
+	/*
+	 *  Auto request sense related fields.
+	 */
+	u_char	sensecmd[6];	/* Request Sense command	*/
+	u_char	sv_scsi_status;	/* Saved SCSI status 		*/
+	u_char	sv_xerr_status;	/* Saved extended status	*/
+	int	sv_resid;	/* Saved residual		*/
+
+	/*
+	 *  Other fields.
+	 */
+	u32	ccb_ba;		/* BUS address of this CCB	*/
+	u_short	tag;		/* Tag for this transfer	*/
+				/*  NO_TAG means no tag		*/
+	u_char	target;
+	u_char	lun;
+	struct sym_ccb *link_ccbh;	/* Host adapter CCB hash chain	*/
+	SYM_QUEHEAD link_ccbq;	/* Link to free/busy CCB queue	*/
+	u32	startp;		/* Initial data pointer		*/
+	u32	goalp;		/* Expected last data pointer	*/
+#ifdef	SYM_OPT_HANDLE_DIR_UNKNOWN
+	u32	wgoalp;
+#endif
+	int	ext_sg;		/* Extreme data pointer, used	*/
+	int	ext_ofs;	/*  to calculate the residual.	*/
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+	SYM_QUEHEAD link2_ccbq;	/* Link for device queueing	*/
+	u_char	started;	/* CCB queued to the squeue	*/
+#endif
+	u_char	to_abort;	/* Want this IO to be aborted	*/
+#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
+	u_char	tags_si;	/* Lun tags sum index (0,1)	*/
+#endif
+};
+
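+/*
+ *  Bus address of a field of a CCB. For example (illustrative), 
+ *  CCB_BA(cp, phys.pm0) yields the bus address of the first phase 
+ *  mismatch context of the CCB 'cp'.
+ */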
+#define CCB_BA(cp,lbl)	(cp->ccb_ba + offsetof(struct sym_ccb, lbl))
+
+#ifdef	SYM_OPT_HANDLE_DIR_UNKNOWN
+#define	sym_goalp(cp) ((cp->host_flags & HF_DATA_IN) ? cp->goalp : cp->wgoalp)
+#else
+#define	sym_goalp(cp) (cp->goalp)
+#endif
+
+typedef struct device *m_pool_ident_t;
+
+/*
+ *  Host Control Block
+ */
+struct sym_hcb {
+	/*
+	 *  Global headers.
+	 *  Due to the poor addressing capabilities of the earlier 
+	 *  chips (810, 815, 825), part of the data structures 
+	 *  (CCB, TCB and LCB) is copied into fixed areas.
+	 */
+#if	SYM_CONF_GENERIC_SUPPORT
+	struct sym_ccbh	ccb_head;
+	struct sym_tcbh	tcb_head;
+	struct sym_lcbh	lcb_head;
+#endif
+	/*
+	 *  Idle task and invalid task actions and 
+	 *  their bus addresses.
+	 */
+	struct sym_actscr idletask, notask, bad_itl, bad_itlq;
+	u32 idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;
+
+	/*
+	 *  Dummy lun table to protect us against target 
+	 *  returning bad lun number on reselection.
+	 */
+	u32	*badluntbl;	/* Table physical address	*/
+	u32	badlun_sa;	/* SCRIPT handler BUS address	*/
+
+	/*
+	 *  Bus address of this host control block.
+	 */
+	u32	hcb_ba;
+
+	/*
+	 *  Bits 32-63 of the on-chip RAM bus address in LE format.
+	 *  The START_RAM64 script loads the MMRS and MMWS from this 
+	 *  field.
+	 */
+	u32	scr_ram_seg;
+
+	/*
+	 *  Initial value of some IO register bits.
+	 *  These values are assumed to have been set by BIOS, and may 
+	 *  be used to probe adapter implementation differences.
+	 */
+	u_char	sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
+		sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
+		sv_stest1;
+
+	/*
+	 *  Actual initial values of the IO register bits used by the 
+	 *  driver. They are loaded at initialisation according to the 
+	 *  features that are to be enabled/disabled.
+	 */
+	u_char	rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4, 
+		rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;
+
+	/*
+	 *  Target data.
+	 */
+	struct sym_tcb	target[SYM_CONF_MAX_TARGET];
+
+	/*
+	 *  Target control block bus address array used by the SCRIPT 
+	 *  on reselection.
+	 */
+	u32		*targtbl;
+	u32		targtbl_ba;
+
+	/*
+	 *  DMA pool handle for this HBA.
+	 */
+	m_pool_ident_t	bus_dmat;
+
+	/*
+	 *  O/S specific data structure
+	 */
+	struct sym_shcb s;
+
+	/*
+	 *  Physical bus addresses of the chip.
+	 */
+	u32		mmio_ba;	/* MMIO 32 bit BUS address	*/
+	int		mmio_ws;	/* MMIO Window size		*/
+
+	u32		ram_ba;		/* RAM 32 bit BUS address	*/
+	int		ram_ws;		/* RAM window size		*/
+
+	/*
+	 *  SCRIPTS virtual and physical bus addresses.
+	 *  'scripta' is loaded into the on-chip RAM if present.
+	 *  'scriptb' stays in main memory for all chips except the 
+	 *  53C895A, 53C896 and 53C1010, which provide 8K of on-chip RAM.
+	 */
+	u_char		*scripta0;	/* Copy of scripts A, B, Z	*/
+	u_char		*scriptb0;
+	u_char		*scriptz0;
+	u32		scripta_ba;	/* Actual scripts A, B, Z	*/
+	u32		scriptb_ba;	/* 32 bit bus addresses.	*/
+	u32		scriptz_ba;
+	u_short		scripta_sz;	/* Actual size of script A, B, Z*/
+	u_short		scriptb_sz;
+	u_short		scriptz_sz;
+
+	/*
+	 *  Bus addresses, setup and patch methods for 
+	 *  the selected firmware.
+	 */
+	struct sym_fwa_ba fwa_bas;	/* Useful SCRIPTA bus addresses	*/
+	struct sym_fwb_ba fwb_bas;	/* Useful SCRIPTB bus addresses	*/
+	struct sym_fwz_ba fwz_bas;	/* Useful SCRIPTZ bus addresses	*/
+	void		(*fw_setup)(struct sym_hcb *np, struct sym_fw *fw);
+	void		(*fw_patch)(struct sym_hcb *np);
+	char		*fw_name;
+
+	/*
+	 *  General controller parameters and configuration.
+	 */
+	u_short	device_id;	/* PCI device id		*/
+	u_char	revision_id;	/* PCI device revision id	*/
+	u_int	features;	/* Chip features map		*/
+	u_char	myaddr;		/* SCSI id of the adapter	*/
+	u_char	maxburst;	/* log base 2 of dwords burst	*/
+	u_char	maxwide;	/* Maximum transfer width	*/
+	u_char	minsync;	/* Min sync period factor (ST)	*/
+	u_char	maxsync;	/* Max sync period factor (ST)	*/
+	u_char	maxoffs;	/* Max scsi offset        (ST)	*/
+	u_char	minsync_dt;	/* Min sync period factor (DT)	*/
+	u_char	maxsync_dt;	/* Max sync period factor (DT)	*/
+	u_char	maxoffs_dt;	/* Max scsi offset        (DT)	*/
+	u_char	multiplier;	/* Clock multiplier (1,2,4)	*/
+	u_char	clock_divn;	/* Number of clock divisors	*/
+	u32	clock_khz;	/* SCSI clock frequency in KHz	*/
+	u32	pciclk_khz;	/* Estimated PCI clock  in KHz	*/
+	/*
+	 *  Start queue management.
+	 *  It is filled up by the host processor and accessed by the 
+	 *  SCRIPTS processor in order to start SCSI commands.
+	 */
+	volatile		/* Prevent code optimizations	*/
+	u32	*squeue;	/* Start queue virtual address	*/
+	u32	squeue_ba;	/* Start queue BUS address	*/
+	u_short	squeueput;	/* Next free slot of the queue	*/
+	u_short	actccbs;	/* Number of allocated CCBs	*/
+
+	/*
+	 *  Command completion queue.
+	 *  It is the same size as the start queue to avoid overflow.
+	 */
+	u_short	dqueueget;	/* Next position to scan	*/
+	volatile		/* Prevent code optimizations	*/
+	u32	*dqueue;	/* Completion (done) queue	*/
+	u32	dqueue_ba;	/* Done queue BUS address	*/
+
+	/*
+	 *  Miscellaneous buffers accessed by the scripts-processor.
+	 *  They shall be DWORD aligned, because they may be read or 
+	 *  written with a script command.
+	 */
+	u_char		msgout[8];	/* Buffer for MESSAGE OUT 	*/
+	u_char		msgin [8];	/* Buffer for MESSAGE IN	*/
+	u32		lastmsg;	/* Last SCSI message sent	*/
+	u32		scratch;	/* Scratch for SCSI receive	*/
+					/* Also used for cache test 	*/
+	/*
+	 *  Miscellaneous configuration and status parameters.
+	 */
+	u_char		usrflags;	/* Miscellaneous user flags	*/
+	u_char		scsi_mode;	/* Current SCSI BUS mode	*/
+	u_char		verbose;	/* Verbosity for this controller*/
+
+	/*
+	 *  CCB lists and queue.
+	 */
+	struct sym_ccb **ccbh;			/* CCBs hashed by DSA value	*/
+					/* CCB_HASH_SIZE lists of CCBs	*/
+	SYM_QUEHEAD	free_ccbq;	/* Queue of available CCBs	*/
+	SYM_QUEHEAD	busy_ccbq;	/* Queue of busy CCBs		*/
+
+	/*
+	 *  During error handling and/or recovery,
+	 *  active CCBs that are to be completed with 
+	 *  error or requeued are moved from the busy_ccbq
+	 *  to the comp_ccbq prior to completion.
+	 */
+	SYM_QUEHEAD	comp_ccbq;
+
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+	SYM_QUEHEAD	dummy_ccbq;
+#endif
+
+	/*
+	 *  IMMEDIATE ARBITRATION (IARB) control.
+	 *
+	 *  We keep track in 'last_cp' of the last CCB that has been 
+	 *  queued to the SCRIPTS processor and clear 'last_cp' when 
+	 *  this CCB completes. If last_cp is not zero at the moment 
+	 *  we queue a new CCB, we set a flag in 'last_cp' that is 
+	 *  used by the SCRIPTS as a hint for setting IARB.
+	 *  We do not set more than 'iarb_max' consecutive hints for 
+	 *  IARB in order to leave devices a chance to reselect.
+	 *  By the way, any non-zero value of 'iarb_max' is unfair. :)
+	 */
+#ifdef SYM_CONF_IARB_SUPPORT
+	u_short		iarb_max;	/* Max. # consecutive IARB hints*/
+	u_short		iarb_count;	/* Actual # of these hints	*/
+	struct sym_ccb *	last_cp;
+#endif
+
+	/*
+	 *  Command abort handling.
+	 *  We need to synchronize tightly with the SCRIPTS 
+	 *  processor in order to handle things correctly.
+	 */
+	u_char		abrt_msg[4];	/* Message to send buffer	*/
+	struct sym_tblmove abrt_tbl;	/* Table for the MOV of it 	*/
+	struct sym_tblsel  abrt_sel;	/* Sync params for selection	*/
+	u_char		istat_sem;	/* Tells the chip to stop (SEM)	*/
+
+	/*
+	 *  64 bit DMA handling.
+	 */
+#if	SYM_CONF_DMA_ADDRESSING_MODE != 0
+	u_char	use_dac;		/* Use PCI DAC cycles		*/
+#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
+	u_char	dmap_dirty;		/* Dma segments registers dirty	*/
+	u32	dmap_bah[SYM_DMAP_SIZE];/* Segment registers map	*/
+#endif
+#endif
+};
+
+#define HCB_BA(np, lbl)	(np->hcb_ba + offsetof(struct sym_hcb, lbl))
+
+
+/*
+ *  FIRMWARES (sym_fw.c)
+ */
+struct sym_fw * sym_find_firmware(struct sym_chip *chip);
+void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len);
+
+/*
+ *  Driver methods called from O/S specific code.
+ */
+char *sym_driver_name(void);
+void sym_print_xerr(struct scsi_cmnd *cmd, int x_status);
+int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int);
+struct sym_chip *sym_lookup_chip_table(u_short device_id, u_char revision);
+void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp);
+#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn);
+#endif
+void sym_start_up(struct sym_hcb *np, int reason);
+void sym_interrupt(struct sym_hcb *np);
+int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task);
+struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
+void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
+struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
+int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
+int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
+int sym_reset_scsi_target(struct sym_hcb *np, int target);
+void sym_hcb_free(struct sym_hcb *np);
+int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram);
+
+/*
+ *  Build a scatter/gather entry.
+ *
+ *  For 64 bit systems, we use the 8 upper bits of the size field 
+ *  to provide bus address bits 32-39 to the SCRIPTS processor.
+ *  This allows the 895A, 896, 1010 to address up to 1 TB of memory.
+ */
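+/*
+ *  Illustrative example for mode 1 (not part of the driver): with a 
+ *  40 bit bus address of 0x1234567890 and a 0x1000 byte segment, 
+ *  'addr' is set to 0x34567890 and 'size' to 0x12001000, i.e. address 
+ *  bits 32-39 travel in the top byte of the size field.
+ */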
+
+#if   SYM_CONF_DMA_ADDRESSING_MODE == 0
+#define sym_build_sge(np, data, badd, len)	\
+do {						\
+	(data)->addr = cpu_to_scr(badd);	\
+	(data)->size = cpu_to_scr(len);		\
+} while (0)
+#elif SYM_CONF_DMA_ADDRESSING_MODE == 1
+#define sym_build_sge(np, data, badd, len)				\
+do {									\
+	(data)->addr = cpu_to_scr(badd);				\
+	(data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len);	\
+} while (0)
+#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
+int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s);
+static __inline void 
+sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len)
+{
+	u32 h = (badd>>32);
+	int s = (h&SYM_DMAP_MASK);
+
+	if (h != np->dmap_bah[s])
+		goto bad;
+good:
+	(data)->addr = cpu_to_scr(badd);
+	(data)->size = cpu_to_scr((s<<24) + len);
+	return;
+bad:
+	s = sym_lookup_dmap(np, h, s);
+	goto good;
+}
+#else
+#error "Unsupported DMA addressing mode"
+#endif
+
+/*
+ *  Set up data pointers used by SCRIPTS.
+ *  Called from O/S specific code.
+ */
+static inline void sym_setup_data_pointers(struct sym_hcb *np,
+		struct sym_ccb *cp, int dir)
+{
+	u32 lastp, goalp;
+
+	/*
+	 *  No segments means no data.
+	 */
+	if (!cp->segments)
+		dir = CAM_DIR_NONE;
+
+	/*
+	 *  Set the data pointer.
+	 */
+	switch(dir) {
+#ifdef	SYM_OPT_HANDLE_DIR_UNKNOWN
+	case CAM_DIR_UNKNOWN:
+#endif
+	case CAM_DIR_OUT:
+		goalp = SCRIPTA_BA(np, data_out2) + 8;
+		lastp = goalp - 8 - (cp->segments * (2*4));
+#ifdef	SYM_OPT_HANDLE_DIR_UNKNOWN
+		cp->wgoalp = cpu_to_scr(goalp);
+		if (dir != CAM_DIR_UNKNOWN)
+			break;
+		cp->phys.head.wlastp = cpu_to_scr(lastp);
+		/* fall through */
+#else
+		break;
+#endif
+	case CAM_DIR_IN:
+		cp->host_flags |= HF_DATA_IN;
+		goalp = SCRIPTA_BA(np, data_in2) + 8;
+		lastp = goalp - 8 - (cp->segments * (2*4));
+		break;
+	case CAM_DIR_NONE:
+	default:
+#ifdef	SYM_OPT_HANDLE_DIR_UNKNOWN
+		cp->host_flags |= HF_DATA_IN;
+#endif
+		lastp = goalp = SCRIPTB_BA(np, no_data);
+		break;
+	}
+
+	/*
+	 *  Set all pointers values needed by SCRIPTS.
+	 */
+	cp->phys.head.lastp = cpu_to_scr(lastp);
+	cp->phys.head.savep = cpu_to_scr(lastp);
+	cp->startp	    = cp->phys.head.savep;
+	cp->goalp	    = cpu_to_scr(goalp);
+
+#ifdef	SYM_OPT_HANDLE_DIR_UNKNOWN
+	/*
+	 *  If direction is unknown, start at data_io.
+	 */
+	if (dir == CAM_DIR_UNKNOWN)
+		cp->phys.head.savep = cpu_to_scr(SCRIPTB_BA(np, data_io));
+#endif
+}
+
+/*
+ *  MEMORY ALLOCATOR.
+ */
+
+#define SYM_MEM_PAGE_ORDER 0	/* 1 PAGE  maximum */
+#define SYM_MEM_CLUSTER_SHIFT	(PAGE_SHIFT+SYM_MEM_PAGE_ORDER)
+#define SYM_MEM_FREE_UNUSED	/* Free unused pages immediately */
+
+#define SYM_MEM_WARN	1	/* Warn on failed operations */
+
+#define sym_get_mem_cluster()	\
+	(void *) __get_free_pages(GFP_ATOMIC, SYM_MEM_PAGE_ORDER)
+#define sym_free_mem_cluster(p)	\
+	free_pages((unsigned long)p, SYM_MEM_PAGE_ORDER)
+
+/*
+ *  Link between free memory chunks of a given size.
+ */
+typedef struct sym_m_link {
+	struct sym_m_link *next;
+} *m_link_p;
+
+/*
+ *  Virtual to bus physical translation for a given cluster.
+ *  Such a structure is only useful with DMA abstraction.
+ */
+typedef struct sym_m_vtob {	/* Virtual to Bus address translation */
+	struct sym_m_vtob *next;
+	void *vaddr;		/* Virtual address */
+	dma_addr_t baddr;	/* Bus physical address */
+} *m_vtob_p;
+
+/* Hash this stuff a bit to speed up translations */
+#define VTOB_HASH_SHIFT		5
+#define VTOB_HASH_SIZE		(1UL << VTOB_HASH_SHIFT)
+#define VTOB_HASH_MASK		(VTOB_HASH_SIZE-1)
+#define VTOB_HASH_CODE(m)	\
+	((((unsigned long)(m)) >> SYM_MEM_CLUSTER_SHIFT) & VTOB_HASH_MASK)
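+/*
+ *  With the usual 4 KB page size and SYM_MEM_PAGE_ORDER of 0, every 
+ *  address inside a given cluster hashes to the same one of the 
+ *  32 buckets.
+ */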
+
+/*
+ *  Memory pool of a given kind.
+ *  Ideally, we want to use:
+ *  1) One pool for memory we do not need to involve in DMA.
+ *  2) The same pool for controllers that require the same DMA 
+ *     constraints and features.
+ *     The OS specific m_pool_ident_t identifier and the 
+ *     sym_m_pool_match() method are expected to tell the driver 
+ *     which pool that is.
+ */
+typedef struct sym_m_pool {
+	m_pool_ident_t	dev_dmat;	/* Identifies the pool (see above) */
+	void * (*get_mem_cluster)(struct sym_m_pool *);
+#ifdef	SYM_MEM_FREE_UNUSED
+	void (*free_mem_cluster)(struct sym_m_pool *, void *);
+#endif
+#define M_GET_MEM_CLUSTER()		mp->get_mem_cluster(mp)
+#define M_FREE_MEM_CLUSTER(p)		mp->free_mem_cluster(mp, p)
+	int nump;
+	m_vtob_p vtob[VTOB_HASH_SIZE];
+	struct sym_m_pool *next;
+	struct sym_m_link h[SYM_MEM_CLUSTER_SHIFT - SYM_MEM_SHIFT + 1];
+} *m_pool_p;
+
+/*
+ *  Alloc, free and translate addresses to bus physical 
+ *  for DMAable memory.
+ */
+void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name);
+void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name);
+dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m);
+
+/*
+ * Verbs used by the driver code for DMAable memory handling.
+ * The _uvptv_ macro avoids a nasty warning about pointer to volatile 
+ * being discarded.
+ */
+#define _uvptv_(p) ((void *)((u_long)(p)))
+
+#define _sym_calloc_dma(np, l, n)	__sym_calloc_dma(np->bus_dmat, l, n)
+#define _sym_mfree_dma(np, p, l, n)	\
+			__sym_mfree_dma(np->bus_dmat, _uvptv_(p), l, n)
+#define sym_calloc_dma(l, n)		_sym_calloc_dma(np, l, n)
+#define sym_mfree_dma(p, l, n)		_sym_mfree_dma(np, p, l, n)
+#define vtobus(p)			__vtobus(np->bus_dmat, _uvptv_(p))
+
+/*
+ *  We have to provide the driver memory allocator with methods for 
+ *  it to maintain virtual to bus physical address translations.
+ */
+
+#define sym_m_pool_match(mp_id1, mp_id2)	(mp_id1 == mp_id2)
+
+static __inline void *sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
+{
+	void *vaddr = NULL;
+	dma_addr_t baddr = 0;
+
+	vaddr = dma_alloc_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, &baddr,
+			GFP_ATOMIC);
+	if (vaddr) {
+		vbp->vaddr = vaddr;
+		vbp->baddr = baddr;
+	}
+	return vaddr;
+}
+
+static __inline void sym_m_free_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
+{
+	dma_free_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, vbp->vaddr,
+			vbp->baddr);
+}
+
+#endif /* SYM_HIPD_H */
diff --git a/drivers/scsi/sym53c8xx_2/sym_malloc.c b/drivers/scsi/sym53c8xx_2/sym_malloc.c
new file mode 100644
index 0000000..a34d403
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_malloc.c
@@ -0,0 +1,382 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifdef __FreeBSD__
+#include <dev/sym/sym_glue.h>
+#else
+#include "sym_glue.h"
+#endif
+
+/*
+ *  Simple power of two buddy-like generic allocator.
+ *  Provides naturally aligned memory chunks.
+ *
+ *  This simple code is not intended to be fast, but to 
+ *  provide power-of-2 aligned memory allocations.
+ *  Since the SCRIPTS processor only supplies 8 bit arithmetic, 
+ *  this allocator allows simple and fast address calculations 
+ *  from the SCRIPTS code. In addition, cache line alignment 
+ *  is guaranteed for power-of-2 cache line sizes.
+ *
+ *  This allocator has been developed for the Linux sym53c8xx 
+ *  driver, since this O/S does not provide naturally aligned 
+ *  allocations.
+ *  It has the advantage of allowing the driver to use private 
+ *  pages of memory that will be useful if we ever need to deal 
+ *  with IO MMUs for PCI.
+ */
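+/*
+ *  Illustrative behaviour, assuming the usual SYM_MEM_SHIFT of 4 
+ *  (16 byte minimum chunk): a request for 100 bytes is served from 
+ *  the 128 byte free list, splitting larger chunks as needed; on 
+ *  free, a chunk is coalesced with its naturally aligned buddy 
+ *  (address ^ size) whenever that buddy is free as well.
+ */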
+static void *___sym_malloc(m_pool_p mp, int size)
+{
+	int i = 0;
+	int s = (1 << SYM_MEM_SHIFT);
+	int j;
+	void *a;
+	m_link_p h = mp->h;
+
+	if (size > SYM_MEM_CLUSTER_SIZE)
+		return NULL;
+
+	while (size > s) {
+		s <<= 1;
+		++i;
+	}
+
+	j = i;
+	while (!h[j].next) {
+		if (s == SYM_MEM_CLUSTER_SIZE) {
+			h[j].next = (m_link_p) M_GET_MEM_CLUSTER();
+			if (h[j].next)
+				h[j].next->next = NULL;
+			break;
+		}
+		++j;
+		s <<= 1;
+	}
+	a = h[j].next;
+	if (a) {
+		h[j].next = h[j].next->next;
+		while (j > i) {
+			j -= 1;
+			s >>= 1;
+			h[j].next = (m_link_p) (a+s);
+			h[j].next->next = NULL;
+		}
+	}
+#ifdef DEBUG
+	printf("___sym_malloc(%d) = %p\n", size, (void *) a);
+#endif
+	return a;
+}
+
+/*
+ *  Counter-part of the generic allocator.
+ */
+static void ___sym_mfree(m_pool_p mp, void *ptr, int size)
+{
+	int i = 0;
+	int s = (1 << SYM_MEM_SHIFT);
+	m_link_p q;
+	unsigned long a, b;
+	m_link_p h = mp->h;
+
+#ifdef DEBUG
+	printf("___sym_mfree(%p, %d)\n", ptr, size);
+#endif
+
+	if (size > SYM_MEM_CLUSTER_SIZE)
+		return;
+
+	while (size > s) {
+		s <<= 1;
+		++i;
+	}
+
+	a = (unsigned long)ptr;
+
+	while (1) {
+		if (s == SYM_MEM_CLUSTER_SIZE) {
+#ifdef SYM_MEM_FREE_UNUSED
+			M_FREE_MEM_CLUSTER((void *)a);
+#else
+			((m_link_p) a)->next = h[i].next;
+			h[i].next = (m_link_p) a;
+#endif
+			break;
+		}
+		b = a ^ s;
+		q = &h[i];
+		while (q->next && q->next != (m_link_p) b) {
+			q = q->next;
+		}
+		if (!q->next) {
+			((m_link_p) a)->next = h[i].next;
+			h[i].next = (m_link_p) a;
+			break;
+		}
+		q->next = q->next->next;
+		a = a & b;
+		s <<= 1;
+		++i;
+	}
+}
+
+/*
+ *  Verbose and zeroing allocator that wraps the generic allocator.
+ */
+static void *__sym_calloc2(m_pool_p mp, int size, char *name, int uflags)
+{
+	void *p;
+
+	p = ___sym_malloc(mp, size);
+
+	if (DEBUG_FLAGS & DEBUG_ALLOC) {
+		printf ("new %-10s[%4d] @%p.\n", name, size, p);
+	}
+
+	if (p)
+		memset(p, 0, size);
+	else if (uflags & SYM_MEM_WARN)
+		printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size);
+	return p;
+}
+#define __sym_calloc(mp, s, n)	__sym_calloc2(mp, s, n, SYM_MEM_WARN)
+
+/*
+ *  Its counter-part.
+ */
+static void __sym_mfree(m_pool_p mp, void *ptr, int size, char *name)
+{
+	if (DEBUG_FLAGS & DEBUG_ALLOC)
+		printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
+
+	___sym_mfree(mp, ptr, size);
+}
+
+/*
+ *  Default memory pool we do not need to involve in DMA.
+ *
+ *  With DMA abstraction, we use functions (methods) to 
+ *  distinguish between non-DMAable memory and DMAable memory.
+ */
+static void *___mp0_get_mem_cluster(m_pool_p mp)
+{
+	void *m = sym_get_mem_cluster();
+	if (m)
+		++mp->nump;
+	return m;
+}
+
+#ifdef	SYM_MEM_FREE_UNUSED
+static void ___mp0_free_mem_cluster(m_pool_p mp, void *m)
+{
+	sym_free_mem_cluster(m);
+	--mp->nump;
+}
+#else
+#define ___mp0_free_mem_cluster NULL
+#endif
+
+static struct sym_m_pool mp0 = {
+	NULL,
+	___mp0_get_mem_cluster,
+	___mp0_free_mem_cluster
+};
+
+/*
+ *  Methods that maintain DMAable pools according to user allocations.
+ *  New pools are created on the fly when a new pool id is provided.
+ *  They are deleted on the fly when they get emptied.
+ */
+/* Get a memory cluster that matches the DMA constraints of a given pool */
+static void * ___get_dma_mem_cluster(m_pool_p mp)
+{
+	m_vtob_p vbp;
+	void *vaddr;
+
+	vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB");
+	if (!vbp)
+		goto out_err;
+
+	vaddr = sym_m_get_dma_mem_cluster(mp, vbp);
+	if (vaddr) {
+		int hc = VTOB_HASH_CODE(vaddr);
+		vbp->next = mp->vtob[hc];
+		mp->vtob[hc] = vbp;
+		++mp->nump;
+	}
+	return vaddr;
+out_err:
+	return NULL;
+}
+
+#ifdef	SYM_MEM_FREE_UNUSED
+/* Free a memory cluster and associated resources for DMA */
+static void ___free_dma_mem_cluster(m_pool_p mp, void *m)
+{
+	m_vtob_p *vbpp, vbp;
+	int hc = VTOB_HASH_CODE(m);
+
+	vbpp = &mp->vtob[hc];
+	while (*vbpp && (*vbpp)->vaddr != m)
+		vbpp = &(*vbpp)->next;
+	if (*vbpp) {
+		vbp = *vbpp;
+		*vbpp = (*vbpp)->next;
+		sym_m_free_dma_mem_cluster(mp, vbp);
+		__sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
+		--mp->nump;
+	}
+}
+#endif
+
+/* Fetch the memory pool for a given pool id (i.e. DMA constraints) */
+static __inline m_pool_p ___get_dma_pool(m_pool_ident_t dev_dmat)
+{
+	m_pool_p mp;
+	for (mp = mp0.next;
+		mp && !sym_m_pool_match(mp->dev_dmat, dev_dmat);
+			mp = mp->next);
+	return mp;
+}
+
+/* Create a new DMAable memory pool (when the fetch failed) */
+static m_pool_p ___cre_dma_pool(m_pool_ident_t dev_dmat)
+{
+	m_pool_p mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
+	if (mp) {
+		mp->dev_dmat = dev_dmat;
+		mp->get_mem_cluster = ___get_dma_mem_cluster;
+#ifdef	SYM_MEM_FREE_UNUSED
+		mp->free_mem_cluster = ___free_dma_mem_cluster;
+#endif
+		mp->next = mp0.next;
+		mp0.next = mp;
+		return mp;
+	}
+	return NULL;
+}
+
+#ifdef	SYM_MEM_FREE_UNUSED
+/* Destroy a DMAable memory pool (when it gets emptied) */
+static void ___del_dma_pool(m_pool_p p)
+{
+	m_pool_p *pp = &mp0.next;
+
+	while (*pp && *pp != p)
+		pp = &(*pp)->next;
+	if (*pp) {
+		*pp = (*pp)->next;
+		__sym_mfree(&mp0, p, sizeof(*p), "MPOOL");
+	}
+}
+#endif
+
+/* This lock protects only the memory allocation/free.  */
+static DEFINE_SPINLOCK(sym53c8xx_lock);
+
+/*
+ *  Actual allocator for DMAable memory.
+ */
+void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name)
+{
+	unsigned long flags;
+	m_pool_p mp;
+	void *m = NULL;
+
+	spin_lock_irqsave(&sym53c8xx_lock, flags);
+	mp = ___get_dma_pool(dev_dmat);
+	if (!mp)
+		mp = ___cre_dma_pool(dev_dmat);
+	if (!mp)
+		goto out;
+	m = __sym_calloc(mp, size, name);
+#ifdef	SYM_MEM_FREE_UNUSED
+	if (!mp->nump)
+		___del_dma_pool(mp);
+#endif
+
+ out:
+	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
+	return m;
+}
+
+void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name)
+{
+	unsigned long flags;
+	m_pool_p mp;
+
+	spin_lock_irqsave(&sym53c8xx_lock, flags);
+	mp = ___get_dma_pool(dev_dmat);
+	if (!mp)
+		goto out;
+	__sym_mfree(mp, m, size, name);
+#ifdef	SYM_MEM_FREE_UNUSED
+	if (!mp->nump)
+		___del_dma_pool(mp);
+#endif
+ out:
+	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
+}
+
+/*
+ *  Actual virtual to bus physical address translator 
+ *  for 32 bit addressable DMAable memory.
+ */
+dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m)
+{
+	unsigned long flags;
+	m_pool_p mp;
+	int hc = VTOB_HASH_CODE(m);
+	m_vtob_p vp = NULL;
+	void *a = (void *)((unsigned long)m & ~SYM_MEM_CLUSTER_MASK);
+	dma_addr_t b;
+
+	spin_lock_irqsave(&sym53c8xx_lock, flags);
+	mp = ___get_dma_pool(dev_dmat);
+	if (mp) {
+		vp = mp->vtob[hc];
+		while (vp && vp->vaddr != a)
+			vp = vp->next;
+	}
+	if (!vp)
+		panic("sym: VTOBUS FAILED!\n");
+	b = vp->baddr + (m - a);
+	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
+	return b;
+}
diff --git a/drivers/scsi/sym53c8xx_2/sym_misc.h b/drivers/scsi/sym53c8xx_2/sym_misc.h
new file mode 100644
index 0000000..0433d5d
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_misc.h
@@ -0,0 +1,192 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef SYM_MISC_H
+#define SYM_MISC_H
+
+/*
+ *  A la VMS/CAM-3 queue management.
+ */
+typedef struct sym_quehead {
+	struct sym_quehead *flink;	/* Forward  pointer */
+	struct sym_quehead *blink;	/* Backward pointer */
+} SYM_QUEHEAD;
+
+#define sym_que_init(ptr) do { \
+	(ptr)->flink = (ptr); (ptr)->blink = (ptr); \
+} while (0)
+
+static __inline struct sym_quehead *sym_que_first(struct sym_quehead *head)
+{
+	return (head->flink == head) ? NULL : head->flink;
+}
+
+static __inline struct sym_quehead *sym_que_last(struct sym_quehead *head)
+{
+	return (head->blink == head) ? NULL : head->blink;
+}
+
+static __inline void __sym_que_add(struct sym_quehead * new,
+	struct sym_quehead * blink,
+	struct sym_quehead * flink)
+{
+	flink->blink	= new;
+	new->flink	= flink;
+	new->blink	= blink;
+	blink->flink	= new;
+}
+
+static __inline void __sym_que_del(struct sym_quehead * blink,
+	struct sym_quehead * flink)
+{
+	flink->blink = blink;
+	blink->flink = flink;
+}
+
+static __inline int sym_que_empty(struct sym_quehead *head)
+{
+	return head->flink == head;
+}
+
+static __inline void sym_que_splice(struct sym_quehead *list,
+	struct sym_quehead *head)
+{
+	struct sym_quehead *first = list->flink;
+
+	if (first != list) {
+		struct sym_quehead *last = list->blink;
+		struct sym_quehead *at   = head->flink;
+
+		first->blink = head;
+		head->flink  = first;
+
+		last->flink = at;
+		at->blink   = last;
+	}
+}
+
+static __inline void sym_que_move(struct sym_quehead *orig,
+	struct sym_quehead *dest)
+{
+	struct sym_quehead *first, *last;
+
+	first = orig->flink;
+	if (first != orig) {
+		first->blink = dest;
+		dest->flink  = first;
+		last = orig->blink;
+		last->flink  = dest;
+		dest->blink  = last;
+		orig->flink  = orig;
+		orig->blink  = orig;
+	} else {
+		dest->flink  = dest;
+		dest->blink  = dest;
+	}
+}
+
+#define sym_que_entry(ptr, type, member) \
+	((type *)((char *)(ptr) - offsetof(type, member)))
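+
+/*
+ *  Illustrative use: recover the structure that embeds a queue 
+ *  element, e.g. sym_que_entry(qp, struct sym_ccb, link_ccbq) 
+ *  for a CCB linked through its 'link_ccbq' member.
+ */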
+
+
+#define sym_insque(new, pos)		__sym_que_add(new, pos, (pos)->flink)
+
+#define sym_remque(el)			__sym_que_del((el)->blink, (el)->flink)
+
+#define sym_insque_head(new, head)	__sym_que_add(new, head, (head)->flink)
+
+static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
+{
+	struct sym_quehead *elem = head->flink;
+
+	if (elem != head)
+		__sym_que_del(head, elem->flink);
+	else
+		elem = NULL;
+	return elem;
+}
+
+#define sym_insque_tail(new, head)	__sym_que_add(new, (head)->blink, head)
+
+static __inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head)
+{
+	struct sym_quehead *elem = head->blink;
+
+	if (elem != head)
+		__sym_que_del(elem->blink, head);
+	else
+		elem = NULL;
+	return elem;
+}
+
+/*
+ *  Iterate over all elements currently queued on 'head'.
+ */
+#define FOR_EACH_QUEUED_ELEMENT(head, qp) \
+	for (qp = (head)->flink; qp != (head); qp = qp->flink)
+/*
+ *  FreeBSD does not offer our kind of queue in the CAM CCB.
+ *  So, we have to cast.
+ */
+#define sym_qptr(p)	((struct sym_quehead *) (p))
+
+/*
+ *  Simple bitmap operations.
+ */ 
+#define sym_set_bit(p, n)	(((u32 *)(p))[(n)>>5] |=  (1<<((n)&0x1f)))
+#define sym_clr_bit(p, n)	(((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f)))
+#define sym_is_bit(p, n)	(((u32 *)(p))[(n)>>5] &   (1<<((n)&0x1f)))
+
+/*
+ * The macros below compute a rounded-up base-2 logarithm. They are 
+ * meant to be used with a constant argument (sizeof(...) for example), 
+ * so that the compiler folds the whole expression at compile time.
+ */
+#define _U_(a,m)	(a)<=(1<<m)?m:
+
+/*
+ * Round up logarithm to base 2 of a 16 bit constant.
+ */
+#define _LGRU16_(a) \
+( \
+ _U_(a, 0)_U_(a, 1)_U_(a, 2)_U_(a, 3)_U_(a, 4)_U_(a, 5)_U_(a, 6)_U_(a, 7) \
+ _U_(a, 8)_U_(a, 9)_U_(a,10)_U_(a,11)_U_(a,12)_U_(a,13)_U_(a,14)_U_(a,15) \
+ 16)
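+
+/*
+ * For example, _LGRU16_(16) evaluates to 4 and _LGRU16_(24) to 5.
+ */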
+
+#endif /* SYM_MISC_H */
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c
new file mode 100644
index 0000000..1b721e3
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c
@@ -0,0 +1,771 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "sym_glue.h"
+#include "sym_nvram.h"
+
+#ifdef	SYM_CONF_DEBUG_NVRAM
+static u_char Tekram_boot_delay[7] = {3, 5, 10, 20, 30, 60, 120};
+#endif
+
+/*
+ *  Get host setup from NVRAM.
+ */
+void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram)
+{
+	/*
+	 *  Get parity checking, host ID, verbose mode 
+	 *  and miscellaneous host flags from NVRAM.
+	 */
+	switch (nvram->type) {
+	case SYM_SYMBIOS_NVRAM:
+		if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
+			np->rv_scntl0  &= ~0x0a;
+		np->myaddr = nvram->data.Symbios.host_id & 0x0f;
+		if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
+			np->verbose += 1;
+		if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO)
+			shost->reverse_ordering = 1;
+		if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET)
+			np->usrflags |= SYM_AVOID_BUS_RESET;
+		break;
+	case SYM_TEKRAM_NVRAM:
+		np->myaddr = nvram->data.Tekram.host_id & 0x0f;
+		break;
+#ifdef CONFIG_PARISC
+	case SYM_PARISC_PDC:
+		if (nvram->data.parisc.host_id != -1)
+			np->myaddr = nvram->data.parisc.host_id;
+		if (nvram->data.parisc.factor != -1)
+			np->minsync = nvram->data.parisc.factor;
+		if (nvram->data.parisc.width != -1)
+			np->maxwide = nvram->data.parisc.width;
+		switch (nvram->data.parisc.mode) {
+			case 0: np->scsi_mode = SMODE_SE; break;
+			case 1: np->scsi_mode = SMODE_HVD; break;
+			case 2: np->scsi_mode = SMODE_LVD; break;
+			default: break;
+		}
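+		/* fall through */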
+#endif
+	default:
+		break;
+	}
+}
+
+/*
+ *  Get target set-up from Symbios format NVRAM.
+ */
+static void
+sym_Symbios_setup_target(struct sym_hcb *np, int target, Symbios_nvram *nvram)
+{
+	struct sym_tcb *tp = &np->target[target];
+	Symbios_target *tn = &nvram->target[target];
+
+	tp->usrtags =
+		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SYM_SETUP_MAX_TAG : 0;
+
+	if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
+		tp->usrflags &= ~SYM_DISC_ENABLED;
+	if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
+		tp->usrflags |= SYM_SCAN_BOOT_DISABLED;
+	if (!(tn->flags & SYMBIOS_SCAN_LUNS))
+		tp->usrflags |= SYM_SCAN_LUNS_DISABLED;
+}
+
+/*
+ *  Get target set-up from Tekram format NVRAM.
+ */
+static void
+sym_Tekram_setup_target(struct sym_hcb *np, int target, Tekram_nvram *nvram)
+{
+	struct sym_tcb *tp = &np->target[target];
+	struct Tekram_target *tn = &nvram->target[target];
+
+	if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
+		tp->usrtags = 2 << nvram->max_tags_index;
+	}
+
+	if (tn->flags & TEKRAM_DISCONNECT_ENABLE)
+		tp->usrflags |= SYM_DISC_ENABLED;
+ 
+	/* If any device does not support parity, we will not use this option */
+	if (!(tn->flags & TEKRAM_PARITY_CHECK))
+		np->rv_scntl0  &= ~0x0a; /* SCSI parity checking disabled */
+}
+
+/*
+ *  Get target setup from NVRAM.
+ */
+void sym_nvram_setup_target(struct sym_hcb *np, int target, struct sym_nvram *nvp)
+{
+	switch (nvp->type) {
+	case SYM_SYMBIOS_NVRAM:
+		sym_Symbios_setup_target(np, target, &nvp->data.Symbios);
+		break;
+	case SYM_TEKRAM_NVRAM:
+		sym_Tekram_setup_target(np, target, &nvp->data.Tekram);
+		break;
+	default:
+		break;
+	}
+}
+
+#ifdef	SYM_CONF_DEBUG_NVRAM
+/*
+ *  Dump Symbios format NVRAM for debugging purposes.
+ */
+static void sym_display_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram)
+{
+	int i;
+
+	/* display Symbios nvram host data */
+	printf("%s: HOST ID=%d%s%s%s%s%s%s\n",
+		sym_name(np), nvram->host_id & 0x0f,
+		(nvram->flags  & SYMBIOS_SCAM_ENABLE)	? " SCAM"	:"",
+		(nvram->flags  & SYMBIOS_PARITY_ENABLE)	? " PARITY"	:"",
+		(nvram->flags  & SYMBIOS_VERBOSE_MSGS)	? " VERBOSE"	:"", 
+		(nvram->flags  & SYMBIOS_CHS_MAPPING)	? " CHS_ALT"	:"", 
+		(nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET"	:"",
+		(nvram->flags1 & SYMBIOS_SCAN_HI_LO)	? " HI_LO"	:"");
+
+	/* display Symbios nvram drive data */
+	for (i = 0 ; i < 15 ; i++) {
+		struct Symbios_target *tn = &nvram->target[i];
+		printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
+		sym_name(np), i,
+		(tn->flags & SYMBIOS_DISCONNECT_ENABLE)	? " DISC"	: "",
+		(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME)	? " SCAN_BOOT"	: "",
+		(tn->flags & SYMBIOS_SCAN_LUNS)		? " SCAN_LUNS"	: "",
+		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ"	: "",
+		tn->bus_width,
+		tn->sync_period / 4,
+		tn->timeout);
+	}
+}
+
+/*
+ *  Dump TEKRAM format NVRAM for debugging purposes.
+ */
+static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram)
+{
+	int i, tags, boot_delay;
+	char *rem;
+
+	/* display Tekram nvram host data */
+	tags = 2 << nvram->max_tags_index;
+	boot_delay = 0;
+	if (nvram->boot_delay_index < 6)
+		boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
+	switch ((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
+	default:
+	case 0:	rem = "";			break;
+	case 1: rem = " REMOVABLE=boot device";	break;
+	case 2: rem = " REMOVABLE=all";		break;
+	}
+
+	printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
+		sym_name(np), nvram->host_id & 0x0f,
+		(nvram->flags1 & SYMBIOS_SCAM_ENABLE)	? " SCAM"	:"",
+		(nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES":"",
+		(nvram->flags & TEKRAM_DRIVES_SUP_1GB)	? " >1GB"	:"",
+		(nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET"	:"",
+		(nvram->flags & TEKRAM_ACTIVE_NEGATION)	? " ACT_NEG"	:"",
+		(nvram->flags & TEKRAM_IMMEDIATE_SEEK)	? " IMM_SEEK"	:"",
+		(nvram->flags & TEKRAM_SCAN_LUNS)	? " SCAN_LUNS"	:"",
+		(nvram->flags1 & TEKRAM_F2_F6_ENABLED)	? " F2_F6"	:"",
+		rem, boot_delay, tags);
+
+	/* display Tekram nvram drive data */
+	for (i = 0; i <= 15; i++) {
+		int sync, j;
+		struct Tekram_target *tn = &nvram->target[i];
+		j = tn->sync_index & 0xf;
+		sync = Tekram_sync[j];
+		printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
+		sym_name(np), i,
+		(tn->flags & TEKRAM_PARITY_CHECK)	? " PARITY"	: "",
+		(tn->flags & TEKRAM_SYNC_NEGO)		? " SYNC"	: "",
+		(tn->flags & TEKRAM_DISCONNECT_ENABLE)	? " DISC"	: "",
+		(tn->flags & TEKRAM_START_CMD)		? " START"	: "",
+		(tn->flags & TEKRAM_TAGGED_COMMANDS)	? " TCQ"	: "",
+		(tn->flags & TEKRAM_WIDE_NEGO)		? " WIDE"	: "",
+		sync);
+	}
+}
+#else
+static void sym_display_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram) { (void)np; (void)nvram; }
+static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram) { (void)np; (void)nvram; }
+#endif	/* SYM_CONF_DEBUG_NVRAM */
+
+
+/*
+ *  24C16 EEPROM reading.
+ *
+ *  GPIO0 - data in/data out
+ *  GPIO1 - clock
+ *  Symbios NVRAM wiring now also used by Tekram.
+ */
+
+#define SET_BIT 0
+#define CLR_BIT 1
+#define SET_CLK 2
+#define CLR_CLK 3
+
+/*
+ *  Set/clear the data bit in GPIO0 or the clock bit in GPIO1
+ */
+static void S24C16_set_bit(struct sym_device *np, u_char write_bit, u_char *gpreg, 
+			  int bit_mode)
+{
+	udelay(5);
+	switch (bit_mode) {
+	case SET_BIT:
+		*gpreg |= write_bit;
+		break;
+	case CLR_BIT:
+		*gpreg &= 0xfe;
+		break;
+	case SET_CLK:
+		*gpreg |= 0x02;
+		break;
+	case CLR_CLK:
+		*gpreg &= 0xfd;
+		break;
+
+	}
+	OUTB(np, nc_gpreg, *gpreg);
+	udelay(5);
+}
+
+/*
+ *  Send START condition to NVRAM to wake it up.
+ */
+static void S24C16_start(struct sym_device *np, u_char *gpreg)
+{
+	S24C16_set_bit(np, 1, gpreg, SET_BIT);
+	S24C16_set_bit(np, 0, gpreg, SET_CLK);
+	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+}
+
+/*
+ *  Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
+ */
+static void S24C16_stop(struct sym_device *np, u_char *gpreg)
+{
+	S24C16_set_bit(np, 0, gpreg, SET_CLK);
+	S24C16_set_bit(np, 1, gpreg, SET_BIT);
+}
+
+/*
+ *  Read or write a bit to/from the NVRAM:
+ *  read if GPIO0 is an input, write if GPIO0 is an output.
+ */
+static void S24C16_do_bit(struct sym_device *np, u_char *read_bit, u_char write_bit, 
+			 u_char *gpreg)
+{
+	S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
+	S24C16_set_bit(np, 0, gpreg, SET_CLK);
+	if (read_bit)
+		*read_bit = INB(np, nc_gpreg);
+	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+}
+
+/*
+ *  Output an ACK to the NVRAM after reading,
+ *  change GPIO0 to output and when done back to an input
+ */
+static void S24C16_write_ack(struct sym_device *np, u_char write_bit, u_char *gpreg, 
+			    u_char *gpcntl)
+{
+	OUTB(np, nc_gpcntl, *gpcntl & 0xfe);
+	S24C16_do_bit(np, NULL, write_bit, gpreg);
+	OUTB(np, nc_gpcntl, *gpcntl);
+}
+
+/*
+ *  Input an ACK from NVRAM after writing,
+ *  change GPIO0 to input and when done back to an output
+ */
+static void S24C16_read_ack(struct sym_device *np, u_char *read_bit, u_char *gpreg, 
+			   u_char *gpcntl)
+{
+	OUTB(np, nc_gpcntl, *gpcntl | 0x01);
+	S24C16_do_bit(np, read_bit, 1, gpreg);
+	OUTB(np, nc_gpcntl, *gpcntl);
+}
+
+/*
+ *  WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
+ *  GPIO0 must already be set as an output
+ */
+static void S24C16_write_byte(struct sym_device *np, u_char *ack_data, u_char write_data, 
+			     u_char *gpreg, u_char *gpcntl)
+{
+	int x;
+	
+	for (x = 0; x < 8; x++)
+		S24C16_do_bit(np, NULL, (write_data >> (7 - x)) & 0x01, gpreg);
+		
+	S24C16_read_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ *  READ a byte from the NVRAM and then send an ACK to say we have got it,
+ *  GPIO0 must already be set as an input
+ */
+static void S24C16_read_byte(struct sym_device *np, u_char *read_data, u_char ack_data, 
+			    u_char *gpreg, u_char *gpcntl)
+{
+	int x;
+	u_char read_bit;
+
+	*read_data = 0;
+	for (x = 0; x < 8; x++) {
+		S24C16_do_bit(np, &read_bit, 1, gpreg);
+		*read_data |= ((read_bit & 0x01) << (7 - x));
+	}
+
+	S24C16_write_ack(np, ack_data, gpreg, gpcntl);
+}
+
+#if SYM_CONF_NVRAM_WRITE_SUPPORT
+/*
+ *  Write 'len' bytes starting at 'offset'.
+ */
+static int sym_write_S24C16_nvram(struct sym_device *np, int offset,
+		u_char *data, int len)
+{
+	u_char	gpcntl, gpreg;
+	u_char	old_gpcntl, old_gpreg;
+	u_char	ack_data;
+	int	x, y;
+
+	/* save current state of GPCNTL and GPREG */
+	old_gpreg	= INB(np, nc_gpreg);
+	old_gpcntl	= INB(np, nc_gpcntl);
+	gpcntl		= old_gpcntl & 0x1c;
+
+	/* set up GPREG & GPCNTL to set GPIO0 and GPIO1 into a known state */
+	OUTB(np, nc_gpreg,  old_gpreg);
+	OUTB(np, nc_gpcntl, gpcntl);
+
+	/* this is to set NVRAM into a known state with GPIO0/1 both low */
+	gpreg = old_gpreg;
+	S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
+	S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
+		
+	/* now set NVRAM inactive with GPIO0/1 both high */
+	S24C16_stop(np, &gpreg);
+
+	/* NVRAM has to be written in segments of 16 bytes */
+	for (x = 0; x < len ; x += 16) {
+		do {
+			S24C16_start(np, &gpreg);
+			S24C16_write_byte(np, &ack_data,
+					  0xa0 | (((offset+x) >> 7) & 0x0e),
+					  &gpreg, &gpcntl);
+		} while (ack_data & 0x01);
+
+		S24C16_write_byte(np, &ack_data, (offset+x) & 0xff, 
+				  &gpreg, &gpcntl);
+
+		for (y = 0; y < 16; y++)
+			S24C16_write_byte(np, &ack_data, data[x+y], 
+					  &gpreg, &gpcntl);
+		S24C16_stop(np, &gpreg);
+	}
+
+	/* return GPIO0/1 to original states after having accessed NVRAM */
+	OUTB(np, nc_gpcntl, old_gpcntl);
+	OUTB(np, nc_gpreg,  old_gpreg);
+
+	return 0;
+}
+#endif /* SYM_CONF_NVRAM_WRITE_SUPPORT */
+
+/*
+ *  Read 'len' bytes starting at 'offset'.
+ */
+static int sym_read_S24C16_nvram(struct sym_device *np, int offset, u_char *data, int len)
+{
+	u_char	gpcntl, gpreg;
+	u_char	old_gpcntl, old_gpreg;
+	u_char	ack_data;
+	int	retv = 1;
+	int	x;
+
+	/* save current state of GPCNTL and GPREG */
+	old_gpreg	= INB(np, nc_gpreg);
+	old_gpcntl	= INB(np, nc_gpcntl);
+	gpcntl		= old_gpcntl & 0x1c;
+
+	/* set up GPREG & GPCNTL to set GPIO0 and GPIO1 into a known state */
+	OUTB(np, nc_gpreg,  old_gpreg);
+	OUTB(np, nc_gpcntl, gpcntl);
+
+	/* this is to set NVRAM into a known state with GPIO0/1 both low */
+	gpreg = old_gpreg;
+	S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
+	S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
+		
+	/* now set NVRAM inactive with GPIO0/1 both high */
+	S24C16_stop(np, &gpreg);
+	
+	/* activate NVRAM */
+	S24C16_start(np, &gpreg);
+
+	/* write device code and random address MSB */
+	S24C16_write_byte(np, &ack_data,
+		0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+	if (ack_data & 0x01)
+		goto out;
+
+	/* write random address LSB */
+	S24C16_write_byte(np, &ack_data,
+		offset & 0xff, &gpreg, &gpcntl);
+	if (ack_data & 0x01)
+		goto out;
+
+	/* regenerate START state to set up for reading */
+	S24C16_start(np, &gpreg);
+	
+	/* rewrite device code and address MSB with read bit set (lsb = 0x01) */
+	S24C16_write_byte(np, &ack_data,
+		0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+	if (ack_data & 0x01)
+		goto out;
+
+	/* now set up GPIO0 for inputting data */
+	gpcntl |= 0x01;
+	OUTB(np, nc_gpcntl, gpcntl);
+		
+	/* input all requested data - only part of total NVRAM */
+	for (x = 0; x < len; x++) 
+		S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);
+
+	/* finally put NVRAM back in inactive mode */
+	gpcntl &= 0xfe;
+	OUTB(np, nc_gpcntl, gpcntl);
+	S24C16_stop(np, &gpreg);
+	retv = 0;
+out:
+	/* return GPIO0/1 to original states after having accessed NVRAM */
+	OUTB(np, nc_gpcntl, old_gpcntl);
+	OUTB(np, nc_gpreg,  old_gpreg);
+
+	return retv;
+}
+
+#undef SET_BIT
+#undef CLR_BIT
+#undef SET_CLK
+#undef CLR_CLK
+
+/*
+ *  Try reading Symbios NVRAM.
+ *  Return 0 if OK.
+ */
+static int sym_read_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram)
+{
+	static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
+	u_char *data = (u_char *) nvram;
+	int len  = sizeof(*nvram);
+	u_short	csum;
+	int x;
+
+	/* probe the 24c16 and read the SYMBIOS 24c16 area */
+	if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
+		return 1;
+
+	/* check valid NVRAM signature, verify byte count and checksum */
+	if (nvram->type != 0 ||
+	    memcmp(nvram->trailer, Symbios_trailer, 6) ||
+	    nvram->byte_count != len - 12)
+		return 1;
+
+	/* verify checksum */
+	for (x = 6, csum = 0; x < len - 6; x++)
+		csum += data[x];
+	if (csum != nvram->checksum)
+		return 1;
+
+	return 0;
+}
+
+/*
+ *  93C46 EEPROM reading.
+ *
+ *  GPIO0 - data in
+ *  GPIO1 - data out
+ *  GPIO2 - clock
+ *  GPIO4 - chip select
+ *
+ *  Used by Tekram.
+ */
+
+/*
+ *  Pulse the clock bit in GPIO2
+ */
+static void T93C46_Clk(struct sym_device *np, u_char *gpreg)
+{
+	OUTB(np, nc_gpreg, *gpreg | 0x04);
+	udelay(2);
+	OUTB(np, nc_gpreg, *gpreg);
+}
+
+/* 
+ *  Read bit from NVRAM
+ */
+static void T93C46_Read_Bit(struct sym_device *np, u_char *read_bit, u_char *gpreg)
+{
+	udelay(2);
+	T93C46_Clk(np, gpreg);
+	*read_bit = INB(np, nc_gpreg);
+}
+
+/*
+ *  Write a data bit to the NVRAM on GPIO1
+ */
+static void T93C46_Write_Bit(struct sym_device *np, u_char write_bit, u_char *gpreg)
+{
+	if (write_bit & 0x01)
+		*gpreg |= 0x02;
+	else
+		*gpreg &= 0xfd;
+		
+	*gpreg |= 0x10;
+		
+	OUTB(np, nc_gpreg, *gpreg);
+	udelay(2);
+
+	T93C46_Clk(np, gpreg);
+}
+
+/*
+ *  Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
+ */
+static void T93C46_Stop(struct sym_device *np, u_char *gpreg)
+{
+	*gpreg &= 0xef;
+	OUTB(np, nc_gpreg, *gpreg);
+	udelay(2);
+
+	T93C46_Clk(np, gpreg);
+}
+
+/*
+ *  Send read command and address to NVRAM
+ */
+static void T93C46_Send_Command(struct sym_device *np, u_short write_data, 
+				u_char *read_bit, u_char *gpreg)
+{
+	int x;
+
+	/* send 9 bits, start bit (1), command (2), address (6)  */
+	for (x = 0; x < 9; x++)
+		T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
+
+	*read_bit = INB(np, nc_gpreg);
+}
+
+/*
+ *  READ 2 bytes from the NVRAM
+ */
+static void T93C46_Read_Word(struct sym_device *np,
+		unsigned short *nvram_data, unsigned char *gpreg)
+{
+	int x;
+	u_char read_bit;
+
+	*nvram_data = 0;
+	for (x = 0; x < 16; x++) {
+		T93C46_Read_Bit(np, &read_bit, gpreg);
+
+		if (read_bit & 0x01)
+			*nvram_data |=  (0x01 << (15 - x));
+		else
+			*nvram_data &= ~(0x01 << (15 - x));
+	}
+}
+
+/*
+ *  Read Tekram NvRAM data.
+ */
+static int T93C46_Read_Data(struct sym_device *np, unsigned short *data,
+		int len, unsigned char *gpreg)
+{
+	int x;
+
+	for (x = 0; x < len; x++)  {
+		unsigned char read_bit;
+		/* output read command and address */
+		T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
+		if (read_bit & 0x01)
+			return 1; /* Bad */
+		T93C46_Read_Word(np, &data[x], gpreg);
+		T93C46_Stop(np, gpreg);
+	}
+
+	return 0;
+}
+
+/*
+ *  Try reading 93C46 Tekram NVRAM.
+ */
+static int sym_read_T93C46_nvram(struct sym_device *np, Tekram_nvram *nvram)
+{
+	u_char gpcntl, gpreg;
+	u_char old_gpcntl, old_gpreg;
+	int retv = 1;
+
+	/* save current state of GPCNTL and GPREG */
+	old_gpreg	= INB(np, nc_gpreg);
+	old_gpcntl	= INB(np, nc_gpcntl);
+
+	/* set up GPREG & GPCNTL to set GPIO0/1/2/4 into a known state: 0 in,
+	   1/2/4 out */
+	gpreg = old_gpreg & 0xe9;
+	OUTB(np, nc_gpreg, gpreg);
+	gpcntl = (old_gpcntl & 0xe9) | 0x09;
+	OUTB(np, nc_gpcntl, gpcntl);
+
+	/* input all of NVRAM, 64 words */
+	retv = T93C46_Read_Data(np, (u_short *) nvram,
+				sizeof(*nvram) / sizeof(short), &gpreg);
+	
+	/* return GPIO0/1/2/4 to original states after having accessed NVRAM */
+	OUTB(np, nc_gpcntl, old_gpcntl);
+	OUTB(np, nc_gpreg,  old_gpreg);
+
+	return retv;
+}
+
+/*
+ *  Try reading Tekram NVRAM.
+ *  Return 0 if OK.
+ */
+static int sym_read_Tekram_nvram (struct sym_device *np, Tekram_nvram *nvram)
+{
+	u_char *data = (u_char *) nvram;
+	int len = sizeof(*nvram);
+	u_short	csum;
+	int x;
+
+	switch (np->device_id) {
+	case PCI_DEVICE_ID_NCR_53C885:
+	case PCI_DEVICE_ID_NCR_53C895:
+	case PCI_DEVICE_ID_NCR_53C896:
+		x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+					  data, len);
+		break;
+	case PCI_DEVICE_ID_NCR_53C875:
+		x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+					  data, len);
+		if (!x)
+			break;
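+		/* fall through */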
+	default:
+		x = sym_read_T93C46_nvram(np, nvram);
+		break;
+	}
+	if (x)
+		return 1;
+
+	/* verify checksum */
+	for (x = 0, csum = 0; x < len - 1; x += 2)
+		csum += data[x] + (data[x+1] << 8);
+	if (csum != 0x1234)
+		return 1;
+
+	return 0;
+}
+
+#ifdef CONFIG_PARISC
+/*
+ * Host firmware (PDC) keeps a table for altering SCSI capabilities.
+ * Many newer machines export one channel of the 53c896 chip as SE, 50-pin HD.
+ * Also used for Multi-initiator SCSI clusters to set the SCSI Initiator ID.
+ */
+static int sym_read_parisc_pdc(struct sym_device *np, struct pdc_initiator *pdc)
+{
+	struct hardware_path hwpath;
+	get_pci_node_path(np->pdev, &hwpath);
+	if (!pdc_get_initiator(&hwpath, pdc))
+		return 0;
+
+	return SYM_PARISC_PDC;
+}
+#else
+static int sym_read_parisc_pdc(struct sym_device *np, struct pdc_initiator *x)
+{
+	return 0;
+}
+#endif
+
+/*
+ *  Try reading Symbios or Tekram NVRAM
+ */
+int sym_read_nvram(struct sym_device *np, struct sym_nvram *nvp)
+{
+	if (!sym_read_Symbios_nvram(np, &nvp->data.Symbios)) {
+		nvp->type = SYM_SYMBIOS_NVRAM;
+		sym_display_Symbios_nvram(np, &nvp->data.Symbios);
+	} else if (!sym_read_Tekram_nvram(np, &nvp->data.Tekram)) {
+		nvp->type = SYM_TEKRAM_NVRAM;
+		sym_display_Tekram_nvram(np, &nvp->data.Tekram);
+	} else {
+		nvp->type = sym_read_parisc_pdc(np, &nvp->data.parisc);
+	}
+	return nvp->type;
+}
+
+char *sym_nvram_type(struct sym_nvram *nvp)
+{
+	switch (nvp->type) {
+	case SYM_SYMBIOS_NVRAM:
+		return "Symbios NVRAM";
+	case SYM_TEKRAM_NVRAM:
+		return "Tekram NVRAM";
+	case SYM_PARISC_PDC:
+		return "PA-RISC Firmware";
+	default:
+		return "No NVRAM";
+	}
+}
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.h b/drivers/scsi/sym53c8xx_2/sym_nvram.h
new file mode 100644
index 0000000..1538bed
--- /dev/null
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.h
@@ -0,0 +1,214 @@
+/*
+ * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
+ * of PCI-SCSI IO processors.
+ *
+ * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
+ *
+ * This driver is derived from the Linux sym53c8xx driver.
+ * Copyright (C) 1998-2000  Gerard Roudier
+ *
+ * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
+ * a port of the FreeBSD ncr driver to Linux-1.2.13.
+ *
+ * The original ncr driver has been written for 386bsd and FreeBSD by
+ *         Wolfgang Stanglmeier        <wolf@cologne.de>
+ *         Stefan Esser                <se@mi.Uni-Koeln.de>
+ * Copyright (C) 1994  Wolfgang Stanglmeier
+ *
+ * Other major contributions:
+ *
+ * NVRAM detection and reading.
+ * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef SYM_NVRAM_H
+#define SYM_NVRAM_H
+
+#include "sym53c8xx.h"
+
+/*
+ *	Symbios NVRAM data format
+ */
+#define SYMBIOS_NVRAM_SIZE 368
+#define SYMBIOS_NVRAM_ADDRESS 0x100
+
+struct Symbios_nvram {
+/* Header 6 bytes */
+	u_short type;		/* 0x0000 */
+	u_short byte_count;	/* excluding header/trailer */
+	u_short checksum;
+
+/* Controller set up 20 bytes */
+	u_char	v_major;	/* 0x00 */
+	u_char	v_minor;	/* 0x30 */
+	u32	boot_crc;
+	u_short	flags;
+#define SYMBIOS_SCAM_ENABLE	(1)
+#define SYMBIOS_PARITY_ENABLE	(1<<1)
+#define SYMBIOS_VERBOSE_MSGS	(1<<2)
+#define SYMBIOS_CHS_MAPPING	(1<<3)
+#define SYMBIOS_NO_NVRAM	(1<<3)	/* ??? */
+	u_short	flags1;
+#define SYMBIOS_SCAN_HI_LO	(1)
+	u_short	term_state;
+#define SYMBIOS_TERM_CANT_PROGRAM	(0)
+#define SYMBIOS_TERM_ENABLED		(1)
+#define SYMBIOS_TERM_DISABLED		(2)
+	u_short	rmvbl_flags;
+#define SYMBIOS_RMVBL_NO_SUPPORT	(0)
+#define SYMBIOS_RMVBL_BOOT_DEVICE	(1)
+#define SYMBIOS_RMVBL_MEDIA_INSTALLED	(2)
+	u_char	host_id;
+	u_char	num_hba;	/* 0x04 */
+	u_char	num_devices;	/* 0x10 */
+	u_char	max_scam_devices;	/* 0x04 */
+	u_char	num_valid_scam_devices;	/* 0x00 */
+	u_char	flags2;
+#define SYMBIOS_AVOID_BUS_RESET		(1<<2)
+
+/* Boot order 14 bytes * 4 */
+	struct Symbios_host{
+		u_short	type;		/* 4:8xx / 0:nok */
+		u_short	device_id;	/* PCI device id */
+		u_short	vendor_id;	/* PCI vendor id */
+		u_char	bus_nr;		/* PCI bus number */
+		u_char	device_fn;	/* PCI device/function number << 3*/
+		u_short	word8;
+		u_short	flags;
+#define	SYMBIOS_INIT_SCAN_AT_BOOT	(1)
+		u_short	io_port;	/* PCI io_port address */
+	} host[4];
+
+/* Targets 8 bytes * 16 */
+	struct Symbios_target {
+		u_char	flags;
+#define SYMBIOS_DISCONNECT_ENABLE	(1)
+#define SYMBIOS_SCAN_AT_BOOT_TIME	(1<<1)
+#define SYMBIOS_SCAN_LUNS		(1<<2)
+#define SYMBIOS_QUEUE_TAGS_ENABLED	(1<<3)
+		u_char	rsvd;
+		u_char	bus_width;	/* 0x08/0x10 */
+		u_char	sync_offset;
+		u_short	sync_period;	/* 4*period factor */
+		u_short	timeout;
+	} target[16];
+/* Scam table 8 bytes * 4 */
+	struct Symbios_scam {
+		u_short	id;
+		u_short	method;
+#define SYMBIOS_SCAM_DEFAULT_METHOD	(0)
+#define SYMBIOS_SCAM_DONT_ASSIGN	(1)
+#define SYMBIOS_SCAM_SET_SPECIFIC_ID	(2)
+#define SYMBIOS_SCAM_USE_ORDER_GIVEN	(3)
+		u_short status;
+#define SYMBIOS_SCAM_UNKNOWN		(0)
+#define SYMBIOS_SCAM_DEVICE_NOT_FOUND	(1)
+#define SYMBIOS_SCAM_ID_NOT_SET		(2)
+#define SYMBIOS_SCAM_ID_VALID		(3)
+		u_char	target_id;
+		u_char	rsvd;
+	} scam[4];
+
+	u_char	spare_devices[15*8];
+	u_char	trailer[6];		/* 0xfe 0xfe 0x00 0x00 0x00 0x00 */
+};
+typedef struct Symbios_nvram	Symbios_nvram;
+typedef struct Symbios_host	Symbios_host;
+typedef struct Symbios_target	Symbios_target;
+typedef struct Symbios_scam	Symbios_scam;
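
The block comments above give the on-NVRAM byte budget: 6 (header) + 20 (controller setup) + 4*14 (boot order) + 16*8 (targets) + 4*8 (SCAM table) + 15*8 (spare) + 6 (trailer) = 368 bytes, which is exactly SYMBIOS_NVRAM_SIZE. A compile-time check along these lines could pin that down (illustrative only, not in the driver; it relies on the compiler adding no padding, which holds here because the lone u32 already lands on a 4-byte boundary):

/* Illustrative size check: fails to compile if struct Symbios_nvram ever
 * drifts from the 368-byte on-NVRAM layout described above. */
static char Symbios_nvram_size_check[
	sizeof(struct Symbios_nvram) == SYMBIOS_NVRAM_SIZE ? 1 : -1]
	__attribute__((unused));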
+
+/*
+ *	Tekram NVRAM data format.
+ */
+#define TEKRAM_NVRAM_SIZE 64
+#define TEKRAM_93C46_NVRAM_ADDRESS 0
+#define TEKRAM_24C16_NVRAM_ADDRESS 0x40
+
+struct Tekram_nvram {
+	struct Tekram_target {
+		u_char	flags;
+#define	TEKRAM_PARITY_CHECK		(1)
+#define TEKRAM_SYNC_NEGO		(1<<1)
+#define TEKRAM_DISCONNECT_ENABLE	(1<<2)
+#define	TEKRAM_START_CMD		(1<<3)
+#define TEKRAM_TAGGED_COMMANDS		(1<<4)
+#define TEKRAM_WIDE_NEGO		(1<<5)
+		u_char	sync_index;
+		u_short	word2;
+	} target[16];
+	u_char	host_id;
+	u_char	flags;
+#define TEKRAM_MORE_THAN_2_DRIVES	(1)
+#define TEKRAM_DRIVES_SUP_1GB		(1<<1)
+#define	TEKRAM_RESET_ON_POWER_ON	(1<<2)
+#define TEKRAM_ACTIVE_NEGATION		(1<<3)
+#define TEKRAM_IMMEDIATE_SEEK		(1<<4)
+#define	TEKRAM_SCAN_LUNS		(1<<5)
+#define	TEKRAM_REMOVABLE_FLAGS		(3<<6)	/* 0: disable; */
+						/* 1: boot device; 2:all */
+	u_char	boot_delay_index;
+	u_char	max_tags_index;
+	u_short	flags1;
+#define TEKRAM_F2_F6_ENABLED		(1)
+	u_short	spare[29];
+};
+typedef struct Tekram_nvram	Tekram_nvram;
+typedef struct Tekram_target	Tekram_target;
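
Unlike the single-bit flags above it, TEKRAM_REMOVABLE_FLAGS is a two-bit field (bits 6-7 of the host flags byte); per the comment it encodes 0: disabled, 1: boot device only, 2: all removable devices. A hypothetical accessor, not part of the driver, would mask and then shift:

/* Hypothetical helper: extract the removable-media policy (0, 1 or 2)
 * from the Tekram host flags byte. */
static inline int Tekram_removable_policy(const struct Tekram_nvram *nv)
{
	return (nv->flags & TEKRAM_REMOVABLE_FLAGS) >> 6;
}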
+
+#ifndef CONFIG_PARISC
+struct pdc_initiator { int dummy; };
+#endif
+
+/*
+ *  Union of supported NVRAM formats.
+ */
+struct sym_nvram {
+	int type;
+#define	SYM_SYMBIOS_NVRAM	(1)
+#define	SYM_TEKRAM_NVRAM	(2)
+#define SYM_PARISC_PDC		(3)
+#if SYM_CONF_NVRAM_SUPPORT
+	union {
+		Symbios_nvram Symbios;
+		Tekram_nvram Tekram;
+		struct pdc_initiator parisc;
+	} data;
+#endif
+};
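
The type tag tells readers of the union which member sym_read_nvram() actually filled in. As a sketch of that access pattern (hypothetical helper, only meaningful when SYM_CONF_NVRAM_SUPPORT is set so the union exists):

#if SYM_CONF_NVRAM_SUPPORT
/* Hypothetical helper: both the Symbios and Tekram layouts record the
 * controller's own SCSI ID; pick it from whichever format was detected. */
static inline int sym_nvram_host_id_example(const struct sym_nvram *nvp)
{
	switch (nvp->type) {
	case SYM_SYMBIOS_NVRAM:
		return nvp->data.Symbios.host_id;
	case SYM_TEKRAM_NVRAM:
		return nvp->data.Tekram.host_id;
	default:
		return -1;	/* no NVRAM / PDC: caller keeps its default */
	}
}
#endif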
+
+#if SYM_CONF_NVRAM_SUPPORT
+void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram);
+void sym_nvram_setup_target(struct sym_hcb *np, int target, struct sym_nvram *nvp);
+int sym_read_nvram(struct sym_device *np, struct sym_nvram *nvp);
+char *sym_nvram_type(struct sym_nvram *nvp);
+#else
+static inline void sym_nvram_setup_host(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram) { }
+static inline void sym_nvram_setup_target(struct sym_hcb *np, int target, struct sym_nvram *nvp) { }
+static inline int sym_read_nvram(struct sym_device *np, struct sym_nvram *nvp)
+{
+	nvp->type = 0;
+	return 0;
+}
+static inline char *sym_nvram_type(struct sym_nvram *nvp)
+{
+	return "No NVRAM";
+}
+#endif
+
+#endif /* SYM_NVRAM_H */